[llvm] d798c56 - [Thumb2] Convert tests to opaque pointers (NFC)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 4 03:28:58 PDT 2023


Author: Nikita Popov
Date: 2023-04-04T12:28:32+02:00
New Revision: d798c56f1a76ce1a96c44d25341d46fd27fd2987

URL: https://github.com/llvm/llvm-project/commit/d798c56f1a76ce1a96c44d25341d46fd27fd2987
DIFF: https://github.com/llvm/llvm-project/commit/d798c56f1a76ce1a96c44d25341d46fd27fd2987.diff

LOG: [Thumb2] Convert tests to opaque pointers (NFC)
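
The change is mechanical: each typed pointer such as float* or half* becomes the single opaque ptr type, bitcasts that only changed the pointee type are dropped, and overloaded intrinsic suffixes such as .p0v4f32 collapse to .p0. As an illustrative sketch only (the function names below are made up, not taken from the tests), the pattern looks like this:

; Typed-pointer form (before, as run with -opaque-pointers=0):
define <4 x float> @masked_load_typed(float* %p, <4 x i1> %m) {
  %cast = bitcast float* %p to <4 x float>*
  %v = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %cast, i32 4, <4 x i1> %m, <4 x float> undef)
  ret <4 x float> %v
}
declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32 immarg, <4 x i1>, <4 x float>)

; Opaque-pointer form (after): the bitcast disappears and the intrinsic
; is mangled only on the pointer overload.
define <4 x float> @masked_load_opaque(ptr %p, <4 x i1> %m) {
  %v = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %p, i32 4, <4 x i1> %m, <4 x float> undef)
  ret <4 x float> %v
}
declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32 immarg, <4 x i1>, <4 x float>)

The two snippets belong to different IR modes, so they are shown separately rather than as one module.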

Added: 
    

Modified: 
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/fast-fp-loops.ll
    llvm/test/CodeGen/Thumb2/mve-float16regloops.ll
    llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
    llvm/test/CodeGen/Thumb2/mve-gather-ptrs.ll
    llvm/test/CodeGen/Thumb2/mve-postinc-dct.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/fast-fp-loops.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/fast-fp-loops.ll
index b669370b0e52..51e58fde4a5d 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/fast-fp-loops.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/fast-fp-loops.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -opaque-pointers=0 -mtriple=thumbv8.1m.main -mattr=+mve.fp,+fp-armv8d16sp,+fp16,+fullfp16 -tail-predication=enabled %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp,+fp-armv8d16sp,+fp16,+fullfp16 -tail-predication=enabled %s -o - | FileCheck %s
 
-define arm_aapcs_vfpcc void @fast_float_mul(float* nocapture %a, float* nocapture readonly %b, float* nocapture readonly %c, i32 %N) {
+define arm_aapcs_vfpcc void @fast_float_mul(ptr nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) {
 ; CHECK-LABEL: fast_float_mul:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    push {r4, r5, r6, r7, lr}
@@ -92,14 +92,14 @@ entry:
   br i1 %cmp8, label %for.cond.cleanup, label %vector.memcheck
 
 vector.memcheck:                                  ; preds = %entry
-  %scevgep = getelementptr float, float* %a, i32 %N
-  %scevgep13 = getelementptr float, float* %b, i32 %N
-  %scevgep16 = getelementptr float, float* %c, i32 %N
-  %bound0 = icmp ugt float* %scevgep13, %a
-  %bound1 = icmp ugt float* %scevgep, %b
+  %scevgep = getelementptr float, ptr %a, i32 %N
+  %scevgep13 = getelementptr float, ptr %b, i32 %N
+  %scevgep16 = getelementptr float, ptr %c, i32 %N
+  %bound0 = icmp ugt ptr %scevgep13, %a
+  %bound1 = icmp ugt ptr %scevgep, %b
   %found.conflict = and i1 %bound0, %bound1
-  %bound018 = icmp ugt float* %scevgep16, %a
-  %bound119 = icmp ugt float* %scevgep, %c
+  %bound018 = icmp ugt ptr %scevgep16, %a
+  %bound119 = icmp ugt ptr %scevgep, %c
   %found.conflict20 = and i1 %bound018, %bound119
   %conflict.rdx = or i1 %found.conflict, %found.conflict20
   br i1 %conflict.rdx, label %for.body.preheader, label %vector.ph
@@ -121,17 +121,14 @@ vector.ph:                                        ; preds = %vector.memcheck
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i2 = getelementptr inbounds float, float* %b, i32 %index
+  %i2 = getelementptr inbounds float, ptr %b, i32 %index
   %i3 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
-  %i4 = bitcast float* %i2 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i4, i32 4, <4 x i1> %i3, <4 x float> undef)
-  %i5 = getelementptr inbounds float, float* %c, i32 %index
-  %i6 = bitcast float* %i5 to <4 x float>*
-  %wide.masked.load23 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i6, i32 4, <4 x i1> %i3, <4 x float> undef)
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i2, i32 4, <4 x i1> %i3, <4 x float> undef)
+  %i5 = getelementptr inbounds float, ptr %c, i32 %index
+  %wide.masked.load23 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i5, i32 4, <4 x i1> %i3, <4 x float> undef)
   %i7 = fmul fast <4 x float> %wide.masked.load23, %wide.masked.load
-  %i8 = getelementptr inbounds float, float* %a, i32 %index
-  %i9 = bitcast float* %i8 to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %i7, <4 x float>* %i9, i32 4, <4 x i1> %i3)
+  %i8 = getelementptr inbounds float, ptr %a, i32 %index
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %i7, ptr %i8, i32 4, <4 x i1> %i3)
   %index.next = add i32 %index, 4
   %i10 = icmp eq i32 %index.next, %n.vec
   br i1 %i10, label %for.cond.cleanup, label %vector.body
@@ -144,13 +141,13 @@ for.cond.cleanup.loopexit.unr-lcssa:              ; preds = %for.body, %for.body
 for.body.epil:                                    ; preds = %for.body.epil, %for.cond.cleanup.loopexit.unr-lcssa
   %i.09.epil = phi i32 [ %inc.epil, %for.body.epil ], [ %i.09.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
   %epil.iter = phi i32 [ %epil.iter.sub, %for.body.epil ], [ %xtraiter, %for.cond.cleanup.loopexit.unr-lcssa ]
-  %arrayidx.epil = getelementptr inbounds float, float* %b, i32 %i.09.epil
-  %i11 = load float, float* %arrayidx.epil, align 4
-  %arrayidx1.epil = getelementptr inbounds float, float* %c, i32 %i.09.epil
-  %i12 = load float, float* %arrayidx1.epil, align 4
+  %arrayidx.epil = getelementptr inbounds float, ptr %b, i32 %i.09.epil
+  %i11 = load float, ptr %arrayidx.epil, align 4
+  %arrayidx1.epil = getelementptr inbounds float, ptr %c, i32 %i.09.epil
+  %i12 = load float, ptr %arrayidx1.epil, align 4
   %mul.epil = fmul fast float %i12, %i11
-  %arrayidx2.epil = getelementptr inbounds float, float* %a, i32 %i.09.epil
-  store float %mul.epil, float* %arrayidx2.epil, align 4
+  %arrayidx2.epil = getelementptr inbounds float, ptr %a, i32 %i.09.epil
+  store float %mul.epil, ptr %arrayidx2.epil, align 4
   %inc.epil = add nuw i32 %i.09.epil, 1
   %epil.iter.sub = add i32 %epil.iter, -1
   %epil.iter.cmp = icmp eq i32 %epil.iter.sub, 0
@@ -162,44 +159,44 @@ for.cond.cleanup:                                 ; preds = %for.body.epil, %for
 for.body:                                         ; preds = %for.body, %for.body.preheader.new
   %i.09 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
   %niter = phi i32 [ %unroll_iter, %for.body.preheader.new ], [ %niter.nsub.3, %for.body ]
-  %arrayidx = getelementptr inbounds float, float* %b, i32 %i.09
-  %i13 = load float, float* %arrayidx, align 4
-  %arrayidx1 = getelementptr inbounds float, float* %c, i32 %i.09
-  %i14 = load float, float* %arrayidx1, align 4
+  %arrayidx = getelementptr inbounds float, ptr %b, i32 %i.09
+  %i13 = load float, ptr %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds float, ptr %c, i32 %i.09
+  %i14 = load float, ptr %arrayidx1, align 4
   %mul = fmul fast float %i14, %i13
-  %arrayidx2 = getelementptr inbounds float, float* %a, i32 %i.09
-  store float %mul, float* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds float, ptr %a, i32 %i.09
+  store float %mul, ptr %arrayidx2, align 4
   %inc = or i32 %i.09, 1
-  %arrayidx.1 = getelementptr inbounds float, float* %b, i32 %inc
-  %i15 = load float, float* %arrayidx.1, align 4
-  %arrayidx1.1 = getelementptr inbounds float, float* %c, i32 %inc
-  %i16 = load float, float* %arrayidx1.1, align 4
+  %arrayidx.1 = getelementptr inbounds float, ptr %b, i32 %inc
+  %i15 = load float, ptr %arrayidx.1, align 4
+  %arrayidx1.1 = getelementptr inbounds float, ptr %c, i32 %inc
+  %i16 = load float, ptr %arrayidx1.1, align 4
   %mul.1 = fmul fast float %i16, %i15
-  %arrayidx2.1 = getelementptr inbounds float, float* %a, i32 %inc
-  store float %mul.1, float* %arrayidx2.1, align 4
+  %arrayidx2.1 = getelementptr inbounds float, ptr %a, i32 %inc
+  store float %mul.1, ptr %arrayidx2.1, align 4
   %inc.1 = or i32 %i.09, 2
-  %arrayidx.2 = getelementptr inbounds float, float* %b, i32 %inc.1
-  %i17 = load float, float* %arrayidx.2, align 4
-  %arrayidx1.2 = getelementptr inbounds float, float* %c, i32 %inc.1
-  %i18 = load float, float* %arrayidx1.2, align 4
+  %arrayidx.2 = getelementptr inbounds float, ptr %b, i32 %inc.1
+  %i17 = load float, ptr %arrayidx.2, align 4
+  %arrayidx1.2 = getelementptr inbounds float, ptr %c, i32 %inc.1
+  %i18 = load float, ptr %arrayidx1.2, align 4
   %mul.2 = fmul fast float %i18, %i17
-  %arrayidx2.2 = getelementptr inbounds float, float* %a, i32 %inc.1
-  store float %mul.2, float* %arrayidx2.2, align 4
+  %arrayidx2.2 = getelementptr inbounds float, ptr %a, i32 %inc.1
+  store float %mul.2, ptr %arrayidx2.2, align 4
   %inc.2 = or i32 %i.09, 3
-  %arrayidx.3 = getelementptr inbounds float, float* %b, i32 %inc.2
-  %i19 = load float, float* %arrayidx.3, align 4
-  %arrayidx1.3 = getelementptr inbounds float, float* %c, i32 %inc.2
-  %i20 = load float, float* %arrayidx1.3, align 4
+  %arrayidx.3 = getelementptr inbounds float, ptr %b, i32 %inc.2
+  %i19 = load float, ptr %arrayidx.3, align 4
+  %arrayidx1.3 = getelementptr inbounds float, ptr %c, i32 %inc.2
+  %i20 = load float, ptr %arrayidx1.3, align 4
   %mul.3 = fmul fast float %i20, %i19
-  %arrayidx2.3 = getelementptr inbounds float, float* %a, i32 %inc.2
-  store float %mul.3, float* %arrayidx2.3, align 4
+  %arrayidx2.3 = getelementptr inbounds float, ptr %a, i32 %inc.2
+  store float %mul.3, ptr %arrayidx2.3, align 4
   %inc.3 = add nuw i32 %i.09, 4
   %niter.nsub.3 = add i32 %niter, -4
   %niter.ncmp.3 = icmp eq i32 %niter.nsub.3, 0
   br i1 %niter.ncmp.3, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body
 }
 
-define arm_aapcs_vfpcc float @fast_float_mac(float* nocapture readonly %b, float* nocapture readonly %c, i32 %N) {
+define arm_aapcs_vfpcc float @fast_float_mac(ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) {
 ; CHECK-LABEL: fast_float_mac:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    push {r7, lr}
@@ -249,13 +246,11 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %vec.phi = phi <4 x float> [ zeroinitializer, %vector.ph ], [ %i6, %vector.body ]
-  %i = getelementptr inbounds float, float* %b, i32 %index
+  %i = getelementptr inbounds float, ptr %b, i32 %index
   %i1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
-  %i2 = bitcast float* %i to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i2, i32 4, <4 x i1> %i1, <4 x float> undef)
-  %i3 = getelementptr inbounds float, float* %c, i32 %index
-  %i4 = bitcast float* %i3 to <4 x float>*
-  %wide.masked.load13 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i4, i32 4, <4 x i1> %i1, <4 x float> undef)
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i, i32 4, <4 x i1> %i1, <4 x float> undef)
+  %i3 = getelementptr inbounds float, ptr %c, i32 %index
+  %wide.masked.load13 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i3, i32 4, <4 x i1> %i1, <4 x float> undef)
   %i5 = fmul fast <4 x float> %wide.masked.load13, %wide.masked.load
   %i6 = fadd fast <4 x float> %i5, %vec.phi
   %index.next = add i32 %index, 4
@@ -276,7 +271,7 @@ for.cond.cleanup:                                 ; preds = %middle.block, %entr
   ret float %a.0.lcssa
 }
 
-define arm_aapcs_vfpcc float @fast_float_half_mac(half* nocapture readonly %b, half* nocapture readonly %c, i32 %N) {
+define arm_aapcs_vfpcc float @fast_float_half_mac(ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) {
 ; CHECK-LABEL: fast_float_half_mac:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    push {r4, r5, r7, lr}
@@ -299,7 +294,7 @@ define arm_aapcs_vfpcc float @fast_float_half_mac(half* nocapture readonly %b, h
 ; CHECK-NEXT:    vdup.32 q1, r12
 ; CHECK-NEXT:    vdup.32 q2, r12
 ; CHECK-NEXT:    b .LBB2_3
-; CHECK-NEXT:  .LBB2_2: @ %else26
+; CHECK-NEXT:  .LBB2_2: @ %else24
 ; CHECK-NEXT:    @ in Loop: Header=BB2_3 Depth=1
 ; CHECK-NEXT:    vmul.f16 q5, q6, q5
 ; CHECK-NEXT:    adds r0, #8
@@ -339,19 +334,19 @@ define arm_aapcs_vfpcc float @fast_float_half_mac(half* nocapture readonly %b, h
 ; CHECK-NEXT:    @ in Loop: Header=BB2_3 Depth=1
 ; CHECK-NEXT:    lsls r4, r2, #30
 ; CHECK-NEXT:    bmi .LBB2_13
-; CHECK-NEXT:  .LBB2_5: @ %else7
+; CHECK-NEXT:  .LBB2_5: @ %else5
 ; CHECK-NEXT:    @ in Loop: Header=BB2_3 Depth=1
 ; CHECK-NEXT:    lsls r4, r2, #29
 ; CHECK-NEXT:    bmi .LBB2_14
-; CHECK-NEXT:  .LBB2_6: @ %else10
+; CHECK-NEXT:  .LBB2_6: @ %else8
 ; CHECK-NEXT:    @ in Loop: Header=BB2_3 Depth=1
 ; CHECK-NEXT:    lsls r2, r2, #28
 ; CHECK-NEXT:    bpl .LBB2_8
-; CHECK-NEXT:  .LBB2_7: @ %cond.load12
+; CHECK-NEXT:  .LBB2_7: @ %cond.load10
 ; CHECK-NEXT:    @ in Loop: Header=BB2_3 Depth=1
 ; CHECK-NEXT:    vldr.16 s22, [r0, #6]
 ; CHECK-NEXT:    vins.f16 s21, s22
-; CHECK-NEXT:  .LBB2_8: @ %else13
+; CHECK-NEXT:  .LBB2_8: @ %else11
 ; CHECK-NEXT:    @ in Loop: Header=BB2_3 Depth=1
 ; CHECK-NEXT:    vcmp.u32 cs, q2, q4
 ; CHECK-NEXT:    @ implicit-def: $q6
@@ -371,15 +366,15 @@ define arm_aapcs_vfpcc float @fast_float_half_mac(half* nocapture readonly %b, h
 ; CHECK-NEXT:    bfi r2, r4, #3, #1
 ; CHECK-NEXT:    lsls r4, r2, #31
 ; CHECK-NEXT:    bne .LBB2_15
-; CHECK-NEXT:  @ %bb.9: @ %else17
+; CHECK-NEXT:  @ %bb.9: @ %else15
 ; CHECK-NEXT:    @ in Loop: Header=BB2_3 Depth=1
 ; CHECK-NEXT:    lsls r4, r2, #30
 ; CHECK-NEXT:    bmi .LBB2_16
-; CHECK-NEXT:  .LBB2_10: @ %else20
+; CHECK-NEXT:  .LBB2_10: @ %else18
 ; CHECK-NEXT:    @ in Loop: Header=BB2_3 Depth=1
 ; CHECK-NEXT:    lsls r4, r2, #29
 ; CHECK-NEXT:    bmi .LBB2_17
-; CHECK-NEXT:  .LBB2_11: @ %else23
+; CHECK-NEXT:  .LBB2_11: @ %else21
 ; CHECK-NEXT:    @ in Loop: Header=BB2_3 Depth=1
 ; CHECK-NEXT:    lsls r2, r2, #28
 ; CHECK-NEXT:    bpl .LBB2_2
@@ -389,13 +384,13 @@ define arm_aapcs_vfpcc float @fast_float_half_mac(half* nocapture readonly %b, h
 ; CHECK-NEXT:    vldr.16 s20, [r0]
 ; CHECK-NEXT:    lsls r4, r2, #30
 ; CHECK-NEXT:    bpl .LBB2_5
-; CHECK-NEXT:  .LBB2_13: @ %cond.load6
+; CHECK-NEXT:  .LBB2_13: @ %cond.load4
 ; CHECK-NEXT:    @ in Loop: Header=BB2_3 Depth=1
 ; CHECK-NEXT:    vldr.16 s22, [r0, #2]
 ; CHECK-NEXT:    vins.f16 s20, s22
 ; CHECK-NEXT:    lsls r4, r2, #29
 ; CHECK-NEXT:    bpl .LBB2_6
-; CHECK-NEXT:  .LBB2_14: @ %cond.load9
+; CHECK-NEXT:  .LBB2_14: @ %cond.load7
 ; CHECK-NEXT:    @ in Loop: Header=BB2_3 Depth=1
 ; CHECK-NEXT:    vldr.16 s21, [r0, #4]
 ; CHECK-NEXT:    vmovx.f16 s22, s0
@@ -403,25 +398,25 @@ define arm_aapcs_vfpcc float @fast_float_half_mac(half* nocapture readonly %b, h
 ; CHECK-NEXT:    lsls r2, r2, #28
 ; CHECK-NEXT:    bmi .LBB2_7
 ; CHECK-NEXT:    b .LBB2_8
-; CHECK-NEXT:  .LBB2_15: @ %cond.load16
+; CHECK-NEXT:  .LBB2_15: @ %cond.load14
 ; CHECK-NEXT:    @ in Loop: Header=BB2_3 Depth=1
 ; CHECK-NEXT:    vldr.16 s24, [r1]
 ; CHECK-NEXT:    lsls r4, r2, #30
 ; CHECK-NEXT:    bpl .LBB2_10
-; CHECK-NEXT:  .LBB2_16: @ %cond.load19
+; CHECK-NEXT:  .LBB2_16: @ %cond.load17
 ; CHECK-NEXT:    @ in Loop: Header=BB2_3 Depth=1
 ; CHECK-NEXT:    vldr.16 s26, [r1, #2]
 ; CHECK-NEXT:    vins.f16 s24, s26
 ; CHECK-NEXT:    lsls r4, r2, #29
 ; CHECK-NEXT:    bpl .LBB2_11
-; CHECK-NEXT:  .LBB2_17: @ %cond.load22
+; CHECK-NEXT:  .LBB2_17: @ %cond.load20
 ; CHECK-NEXT:    @ in Loop: Header=BB2_3 Depth=1
 ; CHECK-NEXT:    vldr.16 s25, [r1, #4]
 ; CHECK-NEXT:    vmovx.f16 s26, s0
 ; CHECK-NEXT:    vins.f16 s25, s26
 ; CHECK-NEXT:    lsls r2, r2, #28
 ; CHECK-NEXT:    bpl.w .LBB2_2
-; CHECK-NEXT:  .LBB2_18: @ %cond.load25
+; CHECK-NEXT:  .LBB2_18: @ %cond.load23
 ; CHECK-NEXT:    @ in Loop: Header=BB2_3 Depth=1
 ; CHECK-NEXT:    vldr.16 s26, [r1, #6]
 ; CHECK-NEXT:    vins.f16 s25, s26
@@ -469,13 +464,11 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
-  %i = getelementptr inbounds half, half* %b, i32 %index
+  %i = getelementptr inbounds half, ptr %b, i32 %index
   %i1 = icmp ule <4 x i32> %induction, %broadcast.splat12
-  %i2 = bitcast half* %i to <4 x half>*
-  %wide.masked.load = call <4 x half> @llvm.masked.load.v4f16.p0v4f16(<4 x half>* %i2, i32 2, <4 x i1> %i1, <4 x half> undef)
-  %i3 = getelementptr inbounds half, half* %c, i32 %index
-  %i4 = bitcast half* %i3 to <4 x half>*
-  %wide.masked.load13 = call <4 x half> @llvm.masked.load.v4f16.p0v4f16(<4 x half>* %i4, i32 2, <4 x i1> %i1, <4 x half> undef)
+  %wide.masked.load = call <4 x half> @llvm.masked.load.v4f16.p0(ptr %i, i32 2, <4 x i1> %i1, <4 x half> undef)
+  %i3 = getelementptr inbounds half, ptr %c, i32 %index
+  %wide.masked.load13 = call <4 x half> @llvm.masked.load.v4f16.p0(ptr %i3, i32 2, <4 x i1> %i1, <4 x half> undef)
   %i5 = fmul fast <4 x half> %wide.masked.load13, %wide.masked.load
   %i6 = fpext <4 x half> %i5 to <4 x float>
   %i7 = fadd fast <4 x float> %vec.phi, %i6
@@ -498,12 +491,12 @@ for.cond.cleanup:                                 ; preds = %middle.block, %entr
 }
 
 ; Function Attrs: argmemonly nounwind readonly willreturn
-declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32 immarg, <4 x i1>, <4 x float>)
+declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32 immarg, <4 x i1>, <4 x float>)
 
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32 immarg, <4 x i1>)
+declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr, i32 immarg, <4 x i1>)
 
 ; Function Attrs: argmemonly nounwind readonly willreturn
-declare <4 x half> @llvm.masked.load.v4f16.p0v4f16(<4 x half>*, i32 immarg, <4 x i1>, <4 x half>)
+declare <4 x half> @llvm.masked.load.v4f16.p0(ptr, i32 immarg, <4 x i1>, <4 x half>)
 
 declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)

diff --git a/llvm/test/CodeGen/Thumb2/mve-float16regloops.ll b/llvm/test/CodeGen/Thumb2/mve-float16regloops.ll
index 61e595517f5e..bf0b49d92f50 100644
--- a/llvm/test/CodeGen/Thumb2/mve-float16regloops.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-float16regloops.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -opaque-pointers=0 -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s
 
-define arm_aapcs_vfpcc void @test_fadd(half* noalias nocapture readonly %A, half %B, half* noalias nocapture %C, i32 %n) {
+define arm_aapcs_vfpcc void @test_fadd(ptr noalias nocapture readonly %A, half %B, ptr noalias nocapture %C, i32 %n) {
 ; CHECK-LABEL: test_fadd:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r2, #1
@@ -32,13 +32,11 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds half, half* %A, i32 %index
-  %i2 = bitcast half* %i1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
+  %i1 = getelementptr inbounds half, ptr %A, i32 %index
+  %wide.load = load <8 x half>, ptr %i1, align 4
   %i3 = fadd fast <8 x half> %wide.load, %broadcast.splat11
-  %i4 = getelementptr inbounds half, half* %C, i32 %index
-  %i5 = bitcast half* %i4 to <8 x half>*
-  store <8 x half> %i3, <8 x half>* %i5, align 4
+  %i4 = getelementptr inbounds half, ptr %C, i32 %index
+  store <8 x half> %i3, ptr %i4, align 4
   %index.next = add i32 %index, 8
   %i6 = icmp eq i32 %index.next, %n
   br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -47,7 +45,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_fadd_r(half* noalias nocapture readonly %A, half %B, half* noalias nocapture %C, i32 %n) {
+define arm_aapcs_vfpcc void @test_fadd_r(ptr noalias nocapture readonly %A, half %B, ptr noalias nocapture %C, i32 %n) {
 ; CHECK-LABEL: test_fadd_r:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r2, #1
@@ -78,13 +76,11 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds half, half* %A, i32 %index
-  %i2 = bitcast half* %i1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
+  %i1 = getelementptr inbounds half, ptr %A, i32 %index
+  %wide.load = load <8 x half>, ptr %i1, align 4
   %i3 = fadd fast <8 x half> %broadcast.splat11, %wide.load
-  %i4 = getelementptr inbounds half, half* %C, i32 %index
-  %i5 = bitcast half* %i4 to <8 x half>*
-  store <8 x half> %i3, <8 x half>* %i5, align 4
+  %i4 = getelementptr inbounds half, ptr %C, i32 %index
+  store <8 x half> %i3, ptr %i4, align 4
   %index.next = add i32 %index, 8
   %i6 = icmp eq i32 %index.next, %n
   br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -93,7 +89,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_fmul(half* noalias nocapture readonly %A, half %B, half* noalias nocapture %C, i32 %n) {
+define arm_aapcs_vfpcc void @test_fmul(ptr noalias nocapture readonly %A, half %B, ptr noalias nocapture %C, i32 %n) {
 ; CHECK-LABEL: test_fmul:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r2, #1
@@ -124,13 +120,11 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds half, half* %A, i32 %index
-  %i2 = bitcast half* %i1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
+  %i1 = getelementptr inbounds half, ptr %A, i32 %index
+  %wide.load = load <8 x half>, ptr %i1, align 4
   %i3 = fmul fast <8 x half> %wide.load, %broadcast.splat11
-  %i4 = getelementptr inbounds half, half* %C, i32 %index
-  %i5 = bitcast half* %i4 to <8 x half>*
-  store <8 x half> %i3, <8 x half>* %i5, align 4
+  %i4 = getelementptr inbounds half, ptr %C, i32 %index
+  store <8 x half> %i3, ptr %i4, align 4
   %index.next = add i32 %index, 8
   %i6 = icmp eq i32 %index.next, %n
   br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -139,7 +133,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_fmul_r(half* noalias nocapture readonly %A, half %B, half* noalias nocapture %C, i32 %n) {
+define arm_aapcs_vfpcc void @test_fmul_r(ptr noalias nocapture readonly %A, half %B, ptr noalias nocapture %C, i32 %n) {
 ; CHECK-LABEL: test_fmul_r:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r2, #1
@@ -170,13 +164,11 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds half, half* %A, i32 %index
-  %i2 = bitcast half* %i1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
+  %i1 = getelementptr inbounds half, ptr %A, i32 %index
+  %wide.load = load <8 x half>, ptr %i1, align 4
   %i3 = fmul fast <8 x half> %broadcast.splat11, %wide.load
-  %i4 = getelementptr inbounds half, half* %C, i32 %index
-  %i5 = bitcast half* %i4 to <8 x half>*
-  store <8 x half> %i3, <8 x half>* %i5, align 4
+  %i4 = getelementptr inbounds half, ptr %C, i32 %index
+  store <8 x half> %i3, ptr %i4, align 4
   %index.next = add i32 %index, 8
   %i6 = icmp eq i32 %index.next, %n
   br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -185,7 +177,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_fsub(half* noalias nocapture readonly %A, half %B, half* noalias nocapture %C, i32 %n) {
+define arm_aapcs_vfpcc void @test_fsub(ptr noalias nocapture readonly %A, half %B, ptr noalias nocapture %C, i32 %n) {
 ; CHECK-LABEL: test_fsub:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r2, #1
@@ -216,13 +208,11 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds half, half* %A, i32 %index
-  %i2 = bitcast half* %i1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
+  %i1 = getelementptr inbounds half, ptr %A, i32 %index
+  %wide.load = load <8 x half>, ptr %i1, align 4
   %i3 = fsub fast <8 x half> %wide.load, %broadcast.splat11
-  %i4 = getelementptr inbounds half, half* %C, i32 %index
-  %i5 = bitcast half* %i4 to <8 x half>*
-  store <8 x half> %i3, <8 x half>* %i5, align 4
+  %i4 = getelementptr inbounds half, ptr %C, i32 %index
+  store <8 x half> %i3, ptr %i4, align 4
   %index.next = add i32 %index, 8
   %i6 = icmp eq i32 %index.next, %n
   br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -231,7 +221,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_fsub_r(half* noalias nocapture readonly %A, half %B, half* noalias nocapture %C, i32 %n) {
+define arm_aapcs_vfpcc void @test_fsub_r(ptr noalias nocapture readonly %A, half %B, ptr noalias nocapture %C, i32 %n) {
 ; CHECK-LABEL: test_fsub_r:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r2, #1
@@ -263,13 +253,11 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds half, half* %A, i32 %index
-  %i2 = bitcast half* %i1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
+  %i1 = getelementptr inbounds half, ptr %A, i32 %index
+  %wide.load = load <8 x half>, ptr %i1, align 4
   %i3 = fsub fast <8 x half> %broadcast.splat11, %wide.load
-  %i4 = getelementptr inbounds half, half* %C, i32 %index
-  %i5 = bitcast half* %i4 to <8 x half>*
-  store <8 x half> %i3, <8 x half>* %i5, align 4
+  %i4 = getelementptr inbounds half, ptr %C, i32 %index
+  store <8 x half> %i3, ptr %i4, align 4
   %index.next = add i32 %index, 8
   %i6 = icmp eq i32 %index.next, %n
   br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -279,7 +267,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 }
 
 
-define arm_aapcs_vfpcc void @test_fmas(half* noalias nocapture readonly %A, half* noalias nocapture readonly %B, half %C, half* noalias nocapture %D, i32 %n) {
+define arm_aapcs_vfpcc void @test_fmas(ptr noalias nocapture readonly %A, ptr noalias nocapture readonly %B, half %C, ptr noalias nocapture %D, i32 %n) {
 ; CHECK-LABEL: test_fmas:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r3, #1
@@ -311,17 +299,14 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds half, half* %A, i32 %index
-  %i2 = bitcast half* %i1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
-  %i3 = getelementptr inbounds half, half* %B, i32 %index
-  %i4 = bitcast half* %i3 to <8 x half>*
-  %wide.load12 = load <8 x half>, <8 x half>* %i4, align 4
+  %i1 = getelementptr inbounds half, ptr %A, i32 %index
+  %wide.load = load <8 x half>, ptr %i1, align 4
+  %i3 = getelementptr inbounds half, ptr %B, i32 %index
+  %wide.load12 = load <8 x half>, ptr %i3, align 4
   %i5 = fmul fast <8 x half> %wide.load12, %wide.load
   %i6 = fadd fast <8 x half> %i5, %broadcast.splat14
-  %i7 = getelementptr inbounds half, half* %D, i32 %index
-  %i8 = bitcast half* %i7 to <8 x half>*
-  store <8 x half> %i6, <8 x half>* %i8, align 4
+  %i7 = getelementptr inbounds half, ptr %D, i32 %index
+  store <8 x half> %i6, ptr %i7, align 4
   %index.next = add i32 %index, 8
   %i9 = icmp eq i32 %index.next, %n
   br i1 %i9, label %for.cond.cleanup, label %vector.body
@@ -330,7 +315,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_fmas_r(half* noalias nocapture readonly %A, half* noalias nocapture readonly %B, half %C, half* noalias nocapture %D, i32 %n) {
+define arm_aapcs_vfpcc void @test_fmas_r(ptr noalias nocapture readonly %A, ptr noalias nocapture readonly %B, half %C, ptr noalias nocapture %D, i32 %n) {
 ; CHECK-LABEL: test_fmas_r:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r3, #1
@@ -362,17 +347,14 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds half, half* %A, i32 %index
-  %i2 = bitcast half* %i1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
-  %i3 = getelementptr inbounds half, half* %B, i32 %index
-  %i4 = bitcast half* %i3 to <8 x half>*
-  %wide.load12 = load <8 x half>, <8 x half>* %i4, align 4
+  %i1 = getelementptr inbounds half, ptr %A, i32 %index
+  %wide.load = load <8 x half>, ptr %i1, align 4
+  %i3 = getelementptr inbounds half, ptr %B, i32 %index
+  %wide.load12 = load <8 x half>, ptr %i3, align 4
   %i5 = fmul fast <8 x half> %wide.load12, %wide.load
   %i6 = fadd fast <8 x half> %broadcast.splat14, %i5
-  %i7 = getelementptr inbounds half, half* %D, i32 %index
-  %i8 = bitcast half* %i7 to <8 x half>*
-  store <8 x half> %i6, <8 x half>* %i8, align 4
+  %i7 = getelementptr inbounds half, ptr %D, i32 %index
+  store <8 x half> %i6, ptr %i7, align 4
   %index.next = add i32 %index, 8
   %i9 = icmp eq i32 %index.next, %n
   br i1 %i9, label %for.cond.cleanup, label %vector.body
@@ -381,7 +363,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_fma(half* noalias nocapture readonly %A, half* noalias nocapture readonly %B, half %C, half* noalias nocapture %D, i32 %n) {
+define arm_aapcs_vfpcc void @test_fma(ptr noalias nocapture readonly %A, ptr noalias nocapture readonly %B, half %C, ptr noalias nocapture %D, i32 %n) {
 ; CHECK-LABEL: test_fma:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r3, #1
@@ -413,17 +395,14 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds half, half* %A, i32 %index
-  %i2 = bitcast half* %i1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
+  %i1 = getelementptr inbounds half, ptr %A, i32 %index
+  %wide.load = load <8 x half>, ptr %i1, align 4
   %i3 = fmul fast <8 x half> %wide.load, %broadcast.splat13
-  %i4 = getelementptr inbounds half, half* %B, i32 %index
-  %i5 = bitcast half* %i4 to <8 x half>*
-  %wide.load14 = load <8 x half>, <8 x half>* %i5, align 4
+  %i4 = getelementptr inbounds half, ptr %B, i32 %index
+  %wide.load14 = load <8 x half>, ptr %i4, align 4
   %i6 = fadd fast <8 x half> %i3, %wide.load14
-  %i7 = getelementptr inbounds half, half* %D, i32 %index
-  %i8 = bitcast half* %i7 to <8 x half>*
-  store <8 x half> %i6, <8 x half>* %i8, align 4
+  %i7 = getelementptr inbounds half, ptr %D, i32 %index
+  store <8 x half> %i6, ptr %i7, align 4
   %index.next = add i32 %index, 8
   %i9 = icmp eq i32 %index.next, %n
   br i1 %i9, label %for.cond.cleanup, label %vector.body
@@ -432,7 +411,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_fma_r(half* noalias nocapture readonly %A, half* noalias nocapture readonly %B, half %C, half* noalias nocapture %D, i32 %n) {
+define arm_aapcs_vfpcc void @test_fma_r(ptr noalias nocapture readonly %A, ptr noalias nocapture readonly %B, half %C, ptr noalias nocapture %D, i32 %n) {
 ; CHECK-LABEL: test_fma_r:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r3, #1
@@ -464,17 +443,14 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds half, half* %A, i32 %index
-  %i2 = bitcast half* %i1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
+  %i1 = getelementptr inbounds half, ptr %A, i32 %index
+  %wide.load = load <8 x half>, ptr %i1, align 4
   %i3 = fmul fast <8 x half> %broadcast.splat13, %wide.load
-  %i4 = getelementptr inbounds half, half* %B, i32 %index
-  %i5 = bitcast half* %i4 to <8 x half>*
-  %wide.load14 = load <8 x half>, <8 x half>* %i5, align 4
+  %i4 = getelementptr inbounds half, ptr %B, i32 %index
+  %wide.load14 = load <8 x half>, ptr %i4, align 4
   %i6 = fadd fast <8 x half> %i3, %wide.load14
-  %i7 = getelementptr inbounds half, half* %D, i32 %index
-  %i8 = bitcast half* %i7 to <8 x half>*
-  store <8 x half> %i6, <8 x half>* %i8, align 4
+  %i7 = getelementptr inbounds half, ptr %D, i32 %index
+  store <8 x half> %i6, ptr %i7, align 4
   %index.next = add i32 %index, 8
   %i9 = icmp eq i32 %index.next, %n
   br i1 %i9, label %for.cond.cleanup, label %vector.body
@@ -484,7 +460,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 }
 
 
-define arm_aapcs_vfpcc void @test_fmss(half* noalias nocapture readonly %A, half* noalias nocapture readonly %B, half %C, half* noalias nocapture %D, i32 %n) {
+define arm_aapcs_vfpcc void @test_fmss(ptr noalias nocapture readonly %A, ptr noalias nocapture readonly %B, half %C, ptr noalias nocapture %D, i32 %n) {
 ; CHECK-LABEL: test_fmss:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r3, #1
@@ -519,17 +495,14 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds half, half* %A, i32 %index
-  %i2 = bitcast half* %i1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
-  %i3 = getelementptr inbounds half, half* %B, i32 %index
-  %i4 = bitcast half* %i3 to <8 x half>*
-  %wide.load12 = load <8 x half>, <8 x half>* %i4, align 4
+  %i1 = getelementptr inbounds half, ptr %A, i32 %index
+  %wide.load = load <8 x half>, ptr %i1, align 4
+  %i3 = getelementptr inbounds half, ptr %B, i32 %index
+  %wide.load12 = load <8 x half>, ptr %i3, align 4
   %i5 = fmul fast <8 x half> %wide.load12, %wide.load
   %i6 = fsub fast <8 x half> %i5, %broadcast.splat14
-  %i7 = getelementptr inbounds half, half* %D, i32 %index
-  %i8 = bitcast half* %i7 to <8 x half>*
-  store <8 x half> %i6, <8 x half>* %i8, align 4
+  %i7 = getelementptr inbounds half, ptr %D, i32 %index
+  store <8 x half> %i6, ptr %i7, align 4
   %index.next = add i32 %index, 8
   %i9 = icmp eq i32 %index.next, %n
   br i1 %i9, label %for.cond.cleanup, label %vector.body
@@ -538,7 +511,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_fmss_r(half* noalias nocapture readonly %A, half* noalias nocapture readonly %B, half %C, half* noalias nocapture %D, i32 %n) {
+define arm_aapcs_vfpcc void @test_fmss_r(ptr noalias nocapture readonly %A, ptr noalias nocapture readonly %B, half %C, ptr noalias nocapture %D, i32 %n) {
 ; CHECK-LABEL: test_fmss_r:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r3, #1
@@ -572,17 +545,14 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds half, half* %A, i32 %index
-  %i2 = bitcast half* %i1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
-  %i3 = getelementptr inbounds half, half* %B, i32 %index
-  %i4 = bitcast half* %i3 to <8 x half>*
-  %wide.load12 = load <8 x half>, <8 x half>* %i4, align 4
+  %i1 = getelementptr inbounds half, ptr %A, i32 %index
+  %wide.load = load <8 x half>, ptr %i1, align 4
+  %i3 = getelementptr inbounds half, ptr %B, i32 %index
+  %wide.load12 = load <8 x half>, ptr %i3, align 4
   %i5 = fmul fast <8 x half> %wide.load12, %wide.load
   %i6 = fsub fast <8 x half> %broadcast.splat14, %i5
-  %i7 = getelementptr inbounds half, half* %D, i32 %index
-  %i8 = bitcast half* %i7 to <8 x half>*
-  store <8 x half> %i6, <8 x half>* %i8, align 4
+  %i7 = getelementptr inbounds half, ptr %D, i32 %index
+  store <8 x half> %i6, ptr %i7, align 4
   %index.next = add i32 %index, 8
   %i9 = icmp eq i32 %index.next, %n
   br i1 %i9, label %for.cond.cleanup, label %vector.body
@@ -591,7 +561,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_fms(half* noalias nocapture readonly %A, half* noalias nocapture readonly %B, half %C, half* noalias nocapture %D, i32 %n) {
+define arm_aapcs_vfpcc void @test_fms(ptr noalias nocapture readonly %A, ptr noalias nocapture readonly %B, half %C, ptr noalias nocapture %D, i32 %n) {
 ; CHECK-LABEL: test_fms:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r3, #1
@@ -624,17 +594,14 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds half, half* %A, i32 %index
-  %i2 = bitcast half* %i1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
+  %i1 = getelementptr inbounds half, ptr %A, i32 %index
+  %wide.load = load <8 x half>, ptr %i1, align 4
   %i3 = fmul fast <8 x half> %wide.load, %broadcast.splat13
-  %i4 = getelementptr inbounds half, half* %B, i32 %index
-  %i5 = bitcast half* %i4 to <8 x half>*
-  %wide.load14 = load <8 x half>, <8 x half>* %i5, align 4
+  %i4 = getelementptr inbounds half, ptr %B, i32 %index
+  %wide.load14 = load <8 x half>, ptr %i4, align 4
   %i6 = fsub fast <8 x half> %i3, %wide.load14
-  %i7 = getelementptr inbounds half, half* %D, i32 %index
-  %i8 = bitcast half* %i7 to <8 x half>*
-  store <8 x half> %i6, <8 x half>* %i8, align 4
+  %i7 = getelementptr inbounds half, ptr %D, i32 %index
+  store <8 x half> %i6, ptr %i7, align 4
   %index.next = add i32 %index, 8
   %i9 = icmp eq i32 %index.next, %n
   br i1 %i9, label %for.cond.cleanup, label %vector.body
@@ -643,7 +610,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_fms_r(half* noalias nocapture readonly %A, half* noalias nocapture readonly %B, half %C, half* noalias nocapture %D, i32 %n) {
+define arm_aapcs_vfpcc void @test_fms_r(ptr noalias nocapture readonly %A, ptr noalias nocapture readonly %B, half %C, ptr noalias nocapture %D, i32 %n) {
 ; CHECK-LABEL: test_fms_r:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r3, #1
@@ -676,17 +643,14 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds half, half* %A, i32 %index
-  %i2 = bitcast half* %i1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
+  %i1 = getelementptr inbounds half, ptr %A, i32 %index
+  %wide.load = load <8 x half>, ptr %i1, align 4
   %i3 = fmul fast <8 x half> %broadcast.splat13, %wide.load
-  %i4 = getelementptr inbounds half, half* %B, i32 %index
-  %i5 = bitcast half* %i4 to <8 x half>*
-  %wide.load14 = load <8 x half>, <8 x half>* %i5, align 4
+  %i4 = getelementptr inbounds half, ptr %B, i32 %index
+  %wide.load14 = load <8 x half>, ptr %i4, align 4
   %i6 = fsub fast <8 x half> %i3, %wide.load14
-  %i7 = getelementptr inbounds half, half* %D, i32 %index
-  %i8 = bitcast half* %i7 to <8 x half>*
-  store <8 x half> %i6, <8 x half>* %i8, align 4
+  %i7 = getelementptr inbounds half, ptr %D, i32 %index
+  store <8 x half> %i6, ptr %i7, align 4
   %index.next = add i32 %index, 8
   %i9 = icmp eq i32 %index.next, %n
   br i1 %i9, label %for.cond.cleanup, label %vector.body
@@ -696,38 +660,37 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 }
 
 
-define dso_local void @test_nested(half* noalias nocapture %pInT1, half* noalias nocapture readonly %pOutT1, half* noalias nocapture readonly %pPRT_in, half* noalias nocapture readnone %pPRT_pDst, i32 %numRows, i32 %numCols, i32 %l) local_unnamed_addr {
+define dso_local void @test_nested(ptr noalias nocapture %pInT1, ptr noalias nocapture readonly %pOutT1, ptr noalias nocapture readonly %pPRT_in, ptr noalias nocapture readnone %pPRT_pDst, i32 %numRows, i32 %numCols, i32 %l) local_unnamed_addr {
 ; CHECK-LABEL: test_nested:
 ; CHECK:       @ %bb.0: @ %for.body.us.preheader
-; CHECK-NEXT:    .save {r4, r5, r6, lr}
-; CHECK-NEXT:    push {r4, r5, r6, lr}
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
 ; CHECK-NEXT:    ldrd lr, r12, [sp, #16]
-; CHECK-NEXT:    lsl.w r3, r12, #1
 ; CHECK-NEXT:  .LBB14_1: @ %for.body.us
 ; CHECK-NEXT:    @ =>This Loop Header: Depth=1
 ; CHECK-NEXT:    @ Child Loop BB14_2 Depth 2
-; CHECK-NEXT:    ldrh r4, [r1]
-; CHECK-NEXT:    mov r5, r2
-; CHECK-NEXT:    mov r6, r12
-; CHECK-NEXT:    vdup.16 q0, r4
+; CHECK-NEXT:    ldrh r3, [r1]
 ; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r5, r12
+; CHECK-NEXT:    vdup.16 q0, r3
+; CHECK-NEXT:    add.w r3, r2, r12, lsl #1
 ; CHECK-NEXT:  .LBB14_2: @ %vector.body
 ; CHECK-NEXT:    @ Parent Loop BB14_1 Depth=1
 ; CHECK-NEXT:    @ => This Inner Loop Header: Depth=2
-; CHECK-NEXT:    vldrw.u32 q1, [r5], #16
+; CHECK-NEXT:    vldrw.u32 q1, [r2], #16
 ; CHECK-NEXT:    vldrw.u32 q2, [r4]
-; CHECK-NEXT:    subs r6, #8
+; CHECK-NEXT:    subs r5, #8
 ; CHECK-NEXT:    vfms.f16 q2, q1, q0
 ; CHECK-NEXT:    vstrb.8 q2, [r4], #16
 ; CHECK-NEXT:    bne .LBB14_2
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond6.for.end_crit_edge.us
 ; CHECK-NEXT:    @ in Loop: Header=BB14_1 Depth=1
-; CHECK-NEXT:    add r0, r3
-; CHECK-NEXT:    add r2, r3
+; CHECK-NEXT:    add.w r0, r0, r12, lsl #1
 ; CHECK-NEXT:    adds r1, #2
+; CHECK-NEXT:    mov r2, r3
 ; CHECK-NEXT:    le lr, .LBB14_1
 ; CHECK-NEXT:  @ %bb.4: @ %for.end14
-; CHECK-NEXT:    pop {r4, r5, r6, pc}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
 for.body.us.preheader:
   %cmp = icmp sgt i32 %numRows, 0
   tail call void @llvm.assume(i1 %cmp)
@@ -741,34 +704,32 @@ for.body.us.preheader:
   br label %for.body.us
 
 for.body.us:                                      ; preds = %for.cond6.for.end_crit_edge.us, %for.body.us.preheader
-  %pInT1.addr.038.us = phi half* [ %scevgep40, %for.cond6.for.end_crit_edge.us ], [ %pInT1, %for.body.us.preheader ]
+  %pInT1.addr.038.us = phi ptr [ %scevgep40, %for.cond6.for.end_crit_edge.us ], [ %pInT1, %for.body.us.preheader ]
   %i.037.us = phi i32 [ %inc13.us, %for.cond6.for.end_crit_edge.us ], [ 0, %for.body.us.preheader ]
-  %pOutT1.addr.036.us = phi half* [ %incdec.ptr.us, %for.cond6.for.end_crit_edge.us ], [ %pOutT1, %for.body.us.preheader ]
-  %pPRT_in.addr.035.us = phi half* [ %scevgep, %for.cond6.for.end_crit_edge.us ], [ %pPRT_in, %for.body.us.preheader ]
-  %scevgep = getelementptr half, half* %pPRT_in.addr.035.us, i32 %numCols
-  %i = load half, half* %pOutT1.addr.036.us, align 4
+  %pOutT1.addr.036.us = phi ptr [ %incdec.ptr.us, %for.cond6.for.end_crit_edge.us ], [ %pOutT1, %for.body.us.preheader ]
+  %pPRT_in.addr.035.us = phi ptr [ %scevgep, %for.cond6.for.end_crit_edge.us ], [ %pPRT_in, %for.body.us.preheader ]
+  %scevgep = getelementptr half, ptr %pPRT_in.addr.035.us, i32 %numCols
+  %i = load half, ptr %pOutT1.addr.036.us, align 4
   %broadcast.splatinsert47 = insertelement <8 x half> undef, half %i, i32 0
   %broadcast.splat48 = shufflevector <8 x half> %broadcast.splatinsert47, <8 x half> undef, <8 x i32> zeroinitializer
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %for.body.us
   %index = phi i32 [ 0, %for.body.us ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr half, half* %pInT1.addr.038.us, i32 %index
-  %next.gep45 = getelementptr half, half* %pPRT_in.addr.035.us, i32 %index
-  %i1 = bitcast half* %next.gep to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %i1, align 4
-  %i2 = bitcast half* %next.gep45 to <8 x half>*
-  %wide.load46 = load <8 x half>, <8 x half>* %i2, align 4
+  %next.gep = getelementptr half, ptr %pInT1.addr.038.us, i32 %index
+  %next.gep45 = getelementptr half, ptr %pPRT_in.addr.035.us, i32 %index
+  %wide.load = load <8 x half>, ptr %next.gep, align 4
+  %wide.load46 = load <8 x half>, ptr %next.gep45, align 4
   %i3 = fmul fast <8 x half> %wide.load46, %broadcast.splat48
   %i4 = fsub fast <8 x half> %wide.load, %i3
-  store <8 x half> %i4, <8 x half>* %i1, align 4
+  store <8 x half> %i4, ptr %next.gep, align 4
   %index.next = add i32 %index, 8
   %i5 = icmp eq i32 %index.next, %numCols
   br i1 %i5, label %for.cond6.for.end_crit_edge.us, label %vector.body
 
 for.cond6.for.end_crit_edge.us:                   ; preds = %vector.body
-  %incdec.ptr.us = getelementptr inbounds half, half* %pOutT1.addr.036.us, i32 1
-  %scevgep40 = getelementptr half, half* %pInT1.addr.038.us, i32 %numCols
+  %incdec.ptr.us = getelementptr inbounds half, ptr %pOutT1.addr.036.us, i32 1
+  %scevgep40 = getelementptr half, ptr %pInT1.addr.038.us, i32 %numCols
   %inc13.us = add nuw nsw i32 %i.037.us, 1
   %exitcond41 = icmp eq i32 %inc13.us, %numRows
   br i1 %exitcond41, label %for.end14, label %for.body.us
@@ -777,8 +738,8 @@ for.end14:                                        ; preds = %for.cond6.for.end_c
   ret void
 }
 
-%struct.arm_fir_instance_f32 = type { i16, half*, half* }
-define void @arm_fir_f32_1_4_mve(%struct.arm_fir_instance_f32* nocapture readonly %S, half* nocapture readonly %pSrc, half* %pDst, i32 %blockSize) {
+%struct.arm_fir_instance_f32 = type { i16, ptr, ptr }
+define void @arm_fir_f32_1_4_mve(ptr nocapture readonly %S, ptr nocapture readonly %pSrc, ptr %pDst, i32 %blockSize) {
 ; CHECK-LABEL: arm_fir_f32_1_4_mve:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
@@ -879,26 +840,26 @@ define void @arm_fir_f32_1_4_mve(%struct.arm_fir_instance_f32* nocapture readonl
 ; CHECK-NEXT:    add sp, #16
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
-  %pState1 = getelementptr inbounds %struct.arm_fir_instance_f32, %struct.arm_fir_instance_f32* %S, i32 0, i32 1
-  %i = load half*, half** %pState1, align 4
-  %pCoeffs2 = getelementptr inbounds %struct.arm_fir_instance_f32, %struct.arm_fir_instance_f32* %S, i32 0, i32 2
-  %i1 = load half*, half** %pCoeffs2, align 4
-  %numTaps3 = getelementptr inbounds %struct.arm_fir_instance_f32, %struct.arm_fir_instance_f32* %S, i32 0, i32 0
-  %i2 = load i16, i16* %numTaps3, align 4
+  %pState1 = getelementptr inbounds %struct.arm_fir_instance_f32, ptr %S, i32 0, i32 1
+  %i = load ptr, ptr %pState1, align 4
+  %pCoeffs2 = getelementptr inbounds %struct.arm_fir_instance_f32, ptr %S, i32 0, i32 2
+  %i1 = load ptr, ptr %pCoeffs2, align 4
+  %numTaps3 = getelementptr inbounds %struct.arm_fir_instance_f32, ptr %S, i32 0, i32 0
+  %i2 = load i16, ptr %numTaps3, align 4
   %conv = zext i16 %i2 to i32
   %sub = add nsw i32 %conv, -1
   %cmp = icmp ult i32 %sub, 4
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  %arrayidx = getelementptr inbounds half, half* %i, i32 %sub
-  %incdec.ptr = getelementptr inbounds half, half* %i1, i32 1
-  %i3 = load half, half* %i1, align 4
-  %incdec.ptr6 = getelementptr inbounds half, half* %i1, i32 2
-  %i4 = load half, half* %incdec.ptr, align 4
-  %incdec.ptr7 = getelementptr inbounds half, half* %i1, i32 3
-  %i5 = load half, half* %incdec.ptr6, align 4
-  %i6 = load half, half* %incdec.ptr7, align 4
+  %arrayidx = getelementptr inbounds half, ptr %i, i32 %sub
+  %incdec.ptr = getelementptr inbounds half, ptr %i1, i32 1
+  %i3 = load half, ptr %i1, align 4
+  %incdec.ptr6 = getelementptr inbounds half, ptr %i1, i32 2
+  %i4 = load half, ptr %incdec.ptr, align 4
+  %incdec.ptr7 = getelementptr inbounds half, ptr %i1, i32 3
+  %i5 = load half, ptr %incdec.ptr6, align 4
+  %i6 = load half, ptr %incdec.ptr7, align 4
   %shr = lshr i32 %blockSize, 2
   %cmp9146 = icmp eq i32 %shr, 0
   %.pre161 = insertelement <8 x half> undef, half %i3, i32 0
@@ -913,89 +874,75 @@ if.then:                                          ; preds = %entry
 
 while.body.lr.ph:                                 ; preds = %if.then
   %i7 = and i32 %blockSize, -4
-  %scevgep158 = getelementptr half, half* %pDst, i32 %i7
+  %scevgep158 = getelementptr half, ptr %pDst, i32 %i7
   br label %while.body
 
 while.body:                                       ; preds = %while.body, %while.body.lr.ph
-  %pStateCur.0151 = phi half* [ %arrayidx, %while.body.lr.ph ], [ %add.ptr, %while.body ]
-  %pSamples.0150 = phi half* [ %i, %while.body.lr.ph ], [ %add.ptr24, %while.body ]
-  %pOutput.0149 = phi half* [ %pDst, %while.body.lr.ph ], [ %add.ptr23, %while.body ]
-  %pTempSrc.0148 = phi half* [ %pSrc, %while.body.lr.ph ], [ %add.ptr11, %while.body ]
+  %pStateCur.0151 = phi ptr [ %arrayidx, %while.body.lr.ph ], [ %add.ptr, %while.body ]
+  %pSamples.0150 = phi ptr [ %i, %while.body.lr.ph ], [ %add.ptr24, %while.body ]
+  %pOutput.0149 = phi ptr [ %pDst, %while.body.lr.ph ], [ %add.ptr23, %while.body ]
+  %pTempSrc.0148 = phi ptr [ %pSrc, %while.body.lr.ph ], [ %add.ptr11, %while.body ]
   %blkCnt.0147 = phi i32 [ %shr, %while.body.lr.ph ], [ %dec, %while.body ]
-  %i8 = bitcast half* %pTempSrc.0148 to <8 x half>*
-  %i9 = load <8 x half>, <8 x half>* %i8, align 4
-  %i10 = bitcast half* %pStateCur.0151 to <8 x half>*
-  store <8 x half> %i9, <8 x half>* %i10, align 4
-  %add.ptr = getelementptr inbounds half, half* %pStateCur.0151, i32 4
-  %add.ptr11 = getelementptr inbounds half, half* %pTempSrc.0148, i32 4
-  %i11 = bitcast half* %pSamples.0150 to <8 x half>*
-  %i12 = load <8 x half>, <8 x half>* %i11, align 4
+  %i9 = load <8 x half>, ptr %pTempSrc.0148, align 4
+  store <8 x half> %i9, ptr %pStateCur.0151, align 4
+  %add.ptr = getelementptr inbounds half, ptr %pStateCur.0151, i32 4
+  %add.ptr11 = getelementptr inbounds half, ptr %pTempSrc.0148, i32 4
+  %i12 = load <8 x half>, ptr %pSamples.0150, align 4
   %i13 = fmul fast <8 x half> %i12, %.pre162
-  %arrayidx12 = getelementptr inbounds half, half* %pSamples.0150, i32 1
-  %i14 = bitcast half* %arrayidx12 to <8 x half>*
-  %i15 = load <8 x half>, <8 x half>* %i14, align 4
+  %arrayidx12 = getelementptr inbounds half, ptr %pSamples.0150, i32 1
+  %i15 = load <8 x half>, ptr %arrayidx12, align 4
   %mul = fmul fast <8 x half> %i15, %.pre164
   %add = fadd fast <8 x half> %mul, %i13
-  %arrayidx13 = getelementptr inbounds half, half* %pSamples.0150, i32 2
-  %i16 = bitcast half* %arrayidx13 to <8 x half>*
-  %i17 = load <8 x half>, <8 x half>* %i16, align 4
+  %arrayidx13 = getelementptr inbounds half, ptr %pSamples.0150, i32 2
+  %i17 = load <8 x half>, ptr %arrayidx13, align 4
   %mul16 = fmul fast <8 x half> %i17, %.pre166
   %add17 = fadd fast <8 x half> %add, %mul16
-  %arrayidx18 = getelementptr inbounds half, half* %pSamples.0150, i32 3
-  %i18 = bitcast half* %arrayidx18 to <8 x half>*
-  %i19 = load <8 x half>, <8 x half>* %i18, align 4
+  %arrayidx18 = getelementptr inbounds half, ptr %pSamples.0150, i32 3
+  %i19 = load <8 x half>, ptr %arrayidx18, align 4
   %mul21 = fmul fast <8 x half> %i19, %.pre168
   %add22 = fadd fast <8 x half> %add17, %mul21
-  %i20 = bitcast half* %pOutput.0149 to <8 x half>*
-  store <8 x half> %add22, <8 x half>* %i20, align 4
-  %add.ptr23 = getelementptr inbounds half, half* %pOutput.0149, i32 4
-  %add.ptr24 = getelementptr inbounds half, half* %pSamples.0150, i32 4
+  store <8 x half> %add22, ptr %pOutput.0149, align 4
+  %add.ptr23 = getelementptr inbounds half, ptr %pOutput.0149, i32 4
+  %add.ptr24 = getelementptr inbounds half, ptr %pSamples.0150, i32 4
   %dec = add nsw i32 %blkCnt.0147, -1
   %cmp9 = icmp eq i32 %dec, 0
   br i1 %cmp9, label %while.end.loopexit, label %while.body
 
 while.end.loopexit:                               ; preds = %while.body
-  %scevgep157 = getelementptr half, half* %pSrc, i32 %i7
-  %scevgep159 = getelementptr half, half* %i, i32 %i7
+  %scevgep157 = getelementptr half, ptr %pSrc, i32 %i7
+  %scevgep159 = getelementptr half, ptr %i, i32 %i7
   br label %while.end
 
 while.end:                                        ; preds = %while.end.loopexit, %if.then
-  %pTempSrc.0.lcssa = phi half* [ %scevgep157, %while.end.loopexit ], [ %pSrc, %if.then ]
-  %pOutput.0.lcssa = phi half* [ %scevgep158, %while.end.loopexit ], [ %pDst, %if.then ]
-  %pSamples.0.lcssa = phi half* [ %scevgep159, %while.end.loopexit ], [ %i, %if.then ]
-  %pStateCur.0.lcssa = phi half* [ %add.ptr, %while.end.loopexit ], [ %arrayidx, %if.then ]
+  %pTempSrc.0.lcssa = phi ptr [ %scevgep157, %while.end.loopexit ], [ %pSrc, %if.then ]
+  %pOutput.0.lcssa = phi ptr [ %scevgep158, %while.end.loopexit ], [ %pDst, %if.then ]
+  %pSamples.0.lcssa = phi ptr [ %scevgep159, %while.end.loopexit ], [ %i, %if.then ]
+  %pStateCur.0.lcssa = phi ptr [ %add.ptr, %while.end.loopexit ], [ %arrayidx, %if.then ]
   %and = and i32 %blockSize, 3
   %i21 = tail call <8 x i1> @llvm.arm.mve.vctp16(i32 %and)
-  %i22 = bitcast half* %pTempSrc.0.lcssa to <8 x half>*
-  %i23 = load <8 x half>, <8 x half>* %i22, align 4
-  %i24 = bitcast half* %pStateCur.0.lcssa to <8 x half>*
-  tail call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %i23, <8 x half>* %i24, i32 4, <8 x i1> %i21)
-  %i25 = bitcast half* %pSamples.0.lcssa to <8 x half>*
-  %i26 = load <8 x half>, <8 x half>* %i25, align 4
+  %i23 = load <8 x half>, ptr %pTempSrc.0.lcssa, align 4
+  tail call void @llvm.masked.store.v8f16.p0(<8 x half> %i23, ptr %pStateCur.0.lcssa, i32 4, <8 x i1> %i21)
+  %i26 = load <8 x half>, ptr %pSamples.0.lcssa, align 4
   %i27 = fmul fast <8 x half> %i26, %.pre162
-  %arrayidx29 = getelementptr inbounds half, half* %pSamples.0.lcssa, i32 1
-  %i28 = bitcast half* %arrayidx29 to <8 x half>*
-  %i29 = load <8 x half>, <8 x half>* %i28, align 4
+  %arrayidx29 = getelementptr inbounds half, ptr %pSamples.0.lcssa, i32 1
+  %i29 = load <8 x half>, ptr %arrayidx29, align 4
   %mul32 = fmul fast <8 x half> %i29, %.pre164
   %add33 = fadd fast <8 x half> %mul32, %i27
-  %arrayidx34 = getelementptr inbounds half, half* %pSamples.0.lcssa, i32 2
-  %i30 = bitcast half* %arrayidx34 to <8 x half>*
-  %i31 = load <8 x half>, <8 x half>* %i30, align 4
+  %arrayidx34 = getelementptr inbounds half, ptr %pSamples.0.lcssa, i32 2
+  %i31 = load <8 x half>, ptr %arrayidx34, align 4
   %mul37 = fmul fast <8 x half> %i31, %.pre166
   %add38 = fadd fast <8 x half> %add33, %mul37
-  %arrayidx39 = getelementptr inbounds half, half* %pSamples.0.lcssa, i32 3
-  %i32 = bitcast half* %arrayidx39 to <8 x half>*
-  %i33 = load <8 x half>, <8 x half>* %i32, align 4
+  %arrayidx39 = getelementptr inbounds half, ptr %pSamples.0.lcssa, i32 3
+  %i33 = load <8 x half>, ptr %arrayidx39, align 4
   %mul42 = fmul fast <8 x half> %i33, %.pre168
   %add43 = fadd fast <8 x half> %add38, %mul42
-  %i34 = bitcast half* %pOutput.0.lcssa to <8 x half>*
-  tail call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %add43, <8 x half>* %i34, i32 4, <8 x i1> %i21)
-  %.pre = load half*, half** %pState1, align 4
+  tail call void @llvm.masked.store.v8f16.p0(<8 x half> %add43, ptr %pOutput.0.lcssa, i32 4, <8 x i1> %i21)
+  %.pre = load ptr, ptr %pState1, align 4
   br label %if.end
 
 if.end:                                           ; preds = %while.end, %entry
-  %i35 = phi half* [ %.pre, %while.end ], [ %i, %entry ]
-  %arrayidx45 = getelementptr inbounds half, half* %i35, i32 %blockSize
+  %i35 = phi ptr [ %.pre, %while.end ], [ %i, %entry ]
+  %arrayidx45 = getelementptr inbounds half, ptr %i35, i32 %blockSize
   %shr47 = lshr i32 %conv, 2
   %cmp49141 = icmp eq i32 %shr47, 0
   br i1 %cmp49141, label %while.end55, label %while.body51.preheader
@@ -1003,40 +950,36 @@ if.end:                                           ; preds = %while.end, %entry
 while.body51.preheader:                           ; preds = %if.end
   %i36 = and i32 %conv, 65532
   %i37 = add i32 %i36, %blockSize
-  %scevgep = getelementptr half, half* %i35, i32 %i37
+  %scevgep = getelementptr half, ptr %i35, i32 %i37
   br label %while.body51
 
 while.body51:                                     ; preds = %while.body51, %while.body51.preheader
-  %pTempSrc.1144 = phi half* [ %add.ptr52, %while.body51 ], [ %arrayidx45, %while.body51.preheader ]
-  %pTempDest.0143 = phi half* [ %add.ptr53, %while.body51 ], [ %i35, %while.body51.preheader ]
+  %pTempSrc.1144 = phi ptr [ %add.ptr52, %while.body51 ], [ %arrayidx45, %while.body51.preheader ]
+  %pTempDest.0143 = phi ptr [ %add.ptr53, %while.body51 ], [ %i35, %while.body51.preheader ]
   %blkCnt.1142 = phi i32 [ %dec54, %while.body51 ], [ %shr47, %while.body51.preheader ]
-  %i38 = bitcast half* %pTempSrc.1144 to <8 x half>*
-  %i39 = load <8 x half>, <8 x half>* %i38, align 4
-  %i40 = bitcast half* %pTempDest.0143 to <8 x half>*
-  store <8 x half> %i39, <8 x half>* %i40, align 4
-  %add.ptr52 = getelementptr inbounds half, half* %pTempSrc.1144, i32 4
-  %add.ptr53 = getelementptr inbounds half, half* %pTempDest.0143, i32 4
+  %i39 = load <8 x half>, ptr %pTempSrc.1144, align 4
+  store <8 x half> %i39, ptr %pTempDest.0143, align 4
+  %add.ptr52 = getelementptr inbounds half, ptr %pTempSrc.1144, i32 4
+  %add.ptr53 = getelementptr inbounds half, ptr %pTempDest.0143, i32 4
   %dec54 = add nsw i32 %blkCnt.1142, -1
   %cmp49 = icmp eq i32 %dec54, 0
   br i1 %cmp49, label %while.end55.loopexit, label %while.body51
 
 while.end55.loopexit:                             ; preds = %while.body51
-  %scevgep156 = getelementptr half, half* %i35, i32 %i36
+  %scevgep156 = getelementptr half, ptr %i35, i32 %i36
   br label %while.end55
 
 while.end55:                                      ; preds = %while.end55.loopexit, %if.end
-  %pTempDest.0.lcssa = phi half* [ %i35, %if.end ], [ %scevgep156, %while.end55.loopexit ]
-  %pTempSrc.1.lcssa = phi half* [ %arrayidx45, %if.end ], [ %scevgep, %while.end55.loopexit ]
+  %pTempDest.0.lcssa = phi ptr [ %i35, %if.end ], [ %scevgep156, %while.end55.loopexit ]
+  %pTempSrc.1.lcssa = phi ptr [ %arrayidx45, %if.end ], [ %scevgep, %while.end55.loopexit ]
   %and56 = and i32 %conv, 3
   %cmp57 = icmp eq i32 %and56, 0
   br i1 %cmp57, label %if.end61, label %if.then59
 
 if.then59:                                        ; preds = %while.end55
   %i41 = tail call <8 x i1> @llvm.arm.mve.vctp16(i32 %and56)
-  %i42 = bitcast half* %pTempSrc.1.lcssa to <8 x half>*
-  %i43 = load <8 x half>, <8 x half>* %i42, align 4
-  %i44 = bitcast half* %pTempDest.0.lcssa to <8 x half>*
-  tail call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %i43, <8 x half>* %i44, i32 4, <8 x i1> %i41)
+  %i43 = load <8 x half>, ptr %pTempSrc.1.lcssa, align 4
+  tail call void @llvm.masked.store.v8f16.p0(<8 x half> %i43, ptr %pTempDest.0.lcssa, i32 4, <8 x i1> %i41)
   br label %if.end61
 
 if.end61:                                         ; preds = %if.then59, %while.end55
@@ -1044,7 +987,7 @@ if.end61:                                         ; preds = %if.then59, %while.e
 }
 
 
-define void @fir(%struct.arm_fir_instance_f32* nocapture readonly %S, half* nocapture readonly %pSrc, half* nocapture %pDst, i32 %blockSize) {
+define void @fir(ptr nocapture readonly %S, ptr nocapture readonly %pSrc, ptr nocapture %pDst, i32 %blockSize) {
 ; CHECK-LABEL: fir:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
@@ -1192,12 +1135,12 @@ define void @fir(%struct.arm_fir_instance_f32* nocapture readonly %S, half* noca
 ; CHECK-NEXT:    add sp, #24
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
-  %pState1 = getelementptr inbounds %struct.arm_fir_instance_f32, %struct.arm_fir_instance_f32* %S, i32 0, i32 1
-  %i = load half*, half** %pState1, align 4
-  %pCoeffs2 = getelementptr inbounds %struct.arm_fir_instance_f32, %struct.arm_fir_instance_f32* %S, i32 0, i32 2
-  %i1 = load half*, half** %pCoeffs2, align 4
-  %numTaps3 = getelementptr inbounds %struct.arm_fir_instance_f32, %struct.arm_fir_instance_f32* %S, i32 0, i32 0
-  %i2 = load i16, i16* %numTaps3, align 4
+  %pState1 = getelementptr inbounds %struct.arm_fir_instance_f32, ptr %S, i32 0, i32 1
+  %i = load ptr, ptr %pState1, align 4
+  %pCoeffs2 = getelementptr inbounds %struct.arm_fir_instance_f32, ptr %S, i32 0, i32 2
+  %i1 = load ptr, ptr %pCoeffs2, align 4
+  %numTaps3 = getelementptr inbounds %struct.arm_fir_instance_f32, ptr %S, i32 0, i32 0
+  %i2 = load i16, ptr %numTaps3, align 4
   %conv = zext i16 %i2 to i32
   %cmp = icmp ugt i32 %blockSize, 7
   br i1 %cmp, label %if.then, label %if.end
@@ -1209,17 +1152,17 @@ if.then:                                          ; preds = %entry
 
 while.body.lr.ph:                                 ; preds = %if.then
   %sub = add nsw i32 %conv, -1
-  %arrayidx = getelementptr inbounds half, half* %i, i32 %sub
-  %incdec.ptr = getelementptr inbounds half, half* %i1, i32 1
-  %incdec.ptr7 = getelementptr inbounds half, half* %i1, i32 2
-  %incdec.ptr8 = getelementptr inbounds half, half* %i1, i32 3
-  %incdec.ptr9 = getelementptr inbounds half, half* %i1, i32 4
-  %incdec.ptr10 = getelementptr inbounds half, half* %i1, i32 5
-  %incdec.ptr11 = getelementptr inbounds half, half* %i1, i32 6
-  %incdec.ptr12 = getelementptr inbounds half, half* %i1, i32 7
+  %arrayidx = getelementptr inbounds half, ptr %i, i32 %sub
+  %incdec.ptr = getelementptr inbounds half, ptr %i1, i32 1
+  %incdec.ptr7 = getelementptr inbounds half, ptr %i1, i32 2
+  %incdec.ptr8 = getelementptr inbounds half, ptr %i1, i32 3
+  %incdec.ptr9 = getelementptr inbounds half, ptr %i1, i32 4
+  %incdec.ptr10 = getelementptr inbounds half, ptr %i1, i32 5
+  %incdec.ptr11 = getelementptr inbounds half, ptr %i1, i32 6
+  %incdec.ptr12 = getelementptr inbounds half, ptr %i1, i32 7
   %sub37 = add nsw i32 %conv, -8
   %div = sdiv i32 %sub37, 8
-  %pCoeffsCur.0199 = getelementptr inbounds half, half* %i1, i32 8
+  %pCoeffsCur.0199 = getelementptr inbounds half, ptr %i1, i32 8
   %cmp38201 = icmp ugt i16 %i2, 15
   %and = and i32 %sub37, 7
   %cmp74210 = icmp eq i32 %and, 0
@@ -1230,184 +1173,164 @@ while.body.lr.ph:                                 ; preds = %if.then
 
 while.body:                                       ; preds = %while.end, %while.body.lr.ph
   %blkCnt.0222 = phi i32 [ %shr, %while.body.lr.ph ], [ %dec84, %while.end ]
-  %pStateCur.0221 = phi half* [ %arrayidx, %while.body.lr.ph ], [ %add.ptr, %while.end ]
-  %pSamples.0220 = phi half* [ %i, %while.body.lr.ph ], [ %add.ptr83, %while.end ]
-  %pTempSrc.0219 = phi half* [ %pSrc, %while.body.lr.ph ], [ %add.ptr14, %while.end ]
-  %pOutput.0218 = phi half* [ %pDst, %while.body.lr.ph ], [ %add.ptr81, %while.end ]
-  %i4 = load half, half* %i1, align 4
-  %i5 = load half, half* %incdec.ptr, align 4
-  %i6 = load half, half* %incdec.ptr7, align 4
-  %i7 = load half, half* %incdec.ptr8, align 4
-  %i8 = load half, half* %incdec.ptr9, align 4
-  %i9 = load half, half* %incdec.ptr10, align 4
-  %i10 = load half, half* %incdec.ptr11, align 4
-  %i11 = load half, half* %incdec.ptr12, align 4
-  %i12 = bitcast half* %pTempSrc.0219 to <8 x half>*
-  %i13 = load <8 x half>, <8 x half>* %i12, align 4
-  %i14 = bitcast half* %pStateCur.0221 to <8 x half>*
-  store <8 x half> %i13, <8 x half>* %i14, align 4
-  %add.ptr = getelementptr inbounds half, half* %pStateCur.0221, i32 4
-  %add.ptr14 = getelementptr inbounds half, half* %pTempSrc.0219, i32 4
-  %i15 = bitcast half* %pSamples.0220 to <8 x half>*
-  %i16 = load <8 x half>, <8 x half>* %i15, align 4
+  %pStateCur.0221 = phi ptr [ %arrayidx, %while.body.lr.ph ], [ %add.ptr, %while.end ]
+  %pSamples.0220 = phi ptr [ %i, %while.body.lr.ph ], [ %add.ptr83, %while.end ]
+  %pTempSrc.0219 = phi ptr [ %pSrc, %while.body.lr.ph ], [ %add.ptr14, %while.end ]
+  %pOutput.0218 = phi ptr [ %pDst, %while.body.lr.ph ], [ %add.ptr81, %while.end ]
+  %i4 = load half, ptr %i1, align 4
+  %i5 = load half, ptr %incdec.ptr, align 4
+  %i6 = load half, ptr %incdec.ptr7, align 4
+  %i7 = load half, ptr %incdec.ptr8, align 4
+  %i8 = load half, ptr %incdec.ptr9, align 4
+  %i9 = load half, ptr %incdec.ptr10, align 4
+  %i10 = load half, ptr %incdec.ptr11, align 4
+  %i11 = load half, ptr %incdec.ptr12, align 4
+  %i13 = load <8 x half>, ptr %pTempSrc.0219, align 4
+  store <8 x half> %i13, ptr %pStateCur.0221, align 4
+  %add.ptr = getelementptr inbounds half, ptr %pStateCur.0221, i32 4
+  %add.ptr14 = getelementptr inbounds half, ptr %pTempSrc.0219, i32 4
+  %i16 = load <8 x half>, ptr %pSamples.0220, align 4
   %.splatinsert = insertelement <8 x half> undef, half %i4, i32 0
   %.splat = shufflevector <8 x half> %.splatinsert, <8 x half> undef, <8 x i32> zeroinitializer
   %i17 = fmul fast <8 x half> %i16, %.splat
-  %arrayidx15 = getelementptr inbounds half, half* %pSamples.0220, i32 1
-  %i18 = bitcast half* %arrayidx15 to <8 x half>*
-  %i19 = load <8 x half>, <8 x half>* %i18, align 4
+  %arrayidx15 = getelementptr inbounds half, ptr %pSamples.0220, i32 1
+  %i19 = load <8 x half>, ptr %arrayidx15, align 4
   %.splatinsert16 = insertelement <8 x half> undef, half %i5, i32 0
   %.splat17 = shufflevector <8 x half> %.splatinsert16, <8 x half> undef, <8 x i32> zeroinitializer
   %i20 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i19, <8 x half> %.splat17, <8 x half> %i17)
-  %arrayidx18 = getelementptr inbounds half, half* %pSamples.0220, i32 2
-  %i21 = bitcast half* %arrayidx18 to <8 x half>*
-  %i22 = load <8 x half>, <8 x half>* %i21, align 4
+  %arrayidx18 = getelementptr inbounds half, ptr %pSamples.0220, i32 2
+  %i22 = load <8 x half>, ptr %arrayidx18, align 4
   %.splatinsert19 = insertelement <8 x half> undef, half %i6, i32 0
   %.splat20 = shufflevector <8 x half> %.splatinsert19, <8 x half> undef, <8 x i32> zeroinitializer
   %i23 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i22, <8 x half> %.splat20, <8 x half> %i20)
-  %arrayidx21 = getelementptr inbounds half, half* %pSamples.0220, i32 3
-  %i24 = bitcast half* %arrayidx21 to <8 x half>*
-  %i25 = load <8 x half>, <8 x half>* %i24, align 4
+  %arrayidx21 = getelementptr inbounds half, ptr %pSamples.0220, i32 3
+  %i25 = load <8 x half>, ptr %arrayidx21, align 4
   %.splatinsert22 = insertelement <8 x half> undef, half %i7, i32 0
   %.splat23 = shufflevector <8 x half> %.splatinsert22, <8 x half> undef, <8 x i32> zeroinitializer
   %i26 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i25, <8 x half> %.splat23, <8 x half> %i23)
-  %arrayidx24 = getelementptr inbounds half, half* %pSamples.0220, i32 4
-  %i27 = bitcast half* %arrayidx24 to <8 x half>*
-  %i28 = load <8 x half>, <8 x half>* %i27, align 4
+  %arrayidx24 = getelementptr inbounds half, ptr %pSamples.0220, i32 4
+  %i28 = load <8 x half>, ptr %arrayidx24, align 4
   %.splatinsert25 = insertelement <8 x half> undef, half %i8, i32 0
   %.splat26 = shufflevector <8 x half> %.splatinsert25, <8 x half> undef, <8 x i32> zeroinitializer
   %i29 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i28, <8 x half> %.splat26, <8 x half> %i26)
-  %arrayidx27 = getelementptr inbounds half, half* %pSamples.0220, i32 5
-  %i30 = bitcast half* %arrayidx27 to <8 x half>*
-  %i31 = load <8 x half>, <8 x half>* %i30, align 4
+  %arrayidx27 = getelementptr inbounds half, ptr %pSamples.0220, i32 5
+  %i31 = load <8 x half>, ptr %arrayidx27, align 4
   %.splatinsert28 = insertelement <8 x half> undef, half %i9, i32 0
   %.splat29 = shufflevector <8 x half> %.splatinsert28, <8 x half> undef, <8 x i32> zeroinitializer
   %i32 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i31, <8 x half> %.splat29, <8 x half> %i29)
-  %arrayidx30 = getelementptr inbounds half, half* %pSamples.0220, i32 6
-  %i33 = bitcast half* %arrayidx30 to <8 x half>*
-  %i34 = load <8 x half>, <8 x half>* %i33, align 4
+  %arrayidx30 = getelementptr inbounds half, ptr %pSamples.0220, i32 6
+  %i34 = load <8 x half>, ptr %arrayidx30, align 4
   %.splatinsert31 = insertelement <8 x half> undef, half %i10, i32 0
   %.splat32 = shufflevector <8 x half> %.splatinsert31, <8 x half> undef, <8 x i32> zeroinitializer
   %i35 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i34, <8 x half> %.splat32, <8 x half> %i32)
-  %arrayidx33 = getelementptr inbounds half, half* %pSamples.0220, i32 7
-  %i36 = bitcast half* %arrayidx33 to <8 x half>*
-  %i37 = load <8 x half>, <8 x half>* %i36, align 4
+  %arrayidx33 = getelementptr inbounds half, ptr %pSamples.0220, i32 7
+  %i37 = load <8 x half>, ptr %arrayidx33, align 4
   %.splatinsert34 = insertelement <8 x half> undef, half %i11, i32 0
   %.splat35 = shufflevector <8 x half> %.splatinsert34, <8 x half> undef, <8 x i32> zeroinitializer
   %i38 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i37, <8 x half> %.splat35, <8 x half> %i35)
-  %pSamples.1200 = getelementptr inbounds half, half* %pSamples.0220, i32 8
+  %pSamples.1200 = getelementptr inbounds half, ptr %pSamples.0220, i32 8
   br i1 %cmp38201, label %for.body, label %for.end
 
 for.body:                                         ; preds = %for.body, %while.body
-  %pSamples.1207 = phi half* [ %pSamples.1, %for.body ], [ %pSamples.1200, %while.body ]
-  %pCoeffsCur.0206 = phi half* [ %pCoeffsCur.0, %for.body ], [ %pCoeffsCur.0199, %while.body ]
-  %.pn205 = phi half* [ %pCoeffsCur.0206, %for.body ], [ %i1, %while.body ]
+  %pSamples.1207 = phi ptr [ %pSamples.1, %for.body ], [ %pSamples.1200, %while.body ]
+  %pCoeffsCur.0206 = phi ptr [ %pCoeffsCur.0, %for.body ], [ %pCoeffsCur.0199, %while.body ]
+  %.pn205 = phi ptr [ %pCoeffsCur.0206, %for.body ], [ %i1, %while.body ]
   %i.0204 = phi i32 [ %inc, %for.body ], [ 0, %while.body ]
   %vecAcc0.0203 = phi <8 x half> [ %i70, %for.body ], [ %i38, %while.body ]
-  %pSamples.0.pn202 = phi half* [ %pSamples.1207, %for.body ], [ %pSamples.0220, %while.body ]
-  %incdec.ptr40 = getelementptr inbounds half, half* %.pn205, i32 9
-  %i39 = load half, half* %pCoeffsCur.0206, align 4
-  %incdec.ptr41 = getelementptr inbounds half, half* %.pn205, i32 10
-  %i40 = load half, half* %incdec.ptr40, align 4
-  %incdec.ptr42 = getelementptr inbounds half, half* %.pn205, i32 11
-  %i41 = load half, half* %incdec.ptr41, align 4
-  %incdec.ptr43 = getelementptr inbounds half, half* %.pn205, i32 12
-  %i42 = load half, half* %incdec.ptr42, align 4
-  %incdec.ptr44 = getelementptr inbounds half, half* %.pn205, i32 13
-  %i43 = load half, half* %incdec.ptr43, align 4
-  %incdec.ptr45 = getelementptr inbounds half, half* %.pn205, i32 14
-  %i44 = load half, half* %incdec.ptr44, align 4
-  %incdec.ptr46 = getelementptr inbounds half, half* %.pn205, i32 15
-  %i45 = load half, half* %incdec.ptr45, align 4
-  %i46 = load half, half* %incdec.ptr46, align 4
-  %i47 = bitcast half* %pSamples.1207 to <8 x half>*
-  %i48 = load <8 x half>, <8 x half>* %i47, align 4
+  %pSamples.0.pn202 = phi ptr [ %pSamples.1207, %for.body ], [ %pSamples.0220, %while.body ]
+  %incdec.ptr40 = getelementptr inbounds half, ptr %.pn205, i32 9
+  %i39 = load half, ptr %pCoeffsCur.0206, align 4
+  %incdec.ptr41 = getelementptr inbounds half, ptr %.pn205, i32 10
+  %i40 = load half, ptr %incdec.ptr40, align 4
+  %incdec.ptr42 = getelementptr inbounds half, ptr %.pn205, i32 11
+  %i41 = load half, ptr %incdec.ptr41, align 4
+  %incdec.ptr43 = getelementptr inbounds half, ptr %.pn205, i32 12
+  %i42 = load half, ptr %incdec.ptr42, align 4
+  %incdec.ptr44 = getelementptr inbounds half, ptr %.pn205, i32 13
+  %i43 = load half, ptr %incdec.ptr43, align 4
+  %incdec.ptr45 = getelementptr inbounds half, ptr %.pn205, i32 14
+  %i44 = load half, ptr %incdec.ptr44, align 4
+  %incdec.ptr46 = getelementptr inbounds half, ptr %.pn205, i32 15
+  %i45 = load half, ptr %incdec.ptr45, align 4
+  %i46 = load half, ptr %incdec.ptr46, align 4
+  %i48 = load <8 x half>, ptr %pSamples.1207, align 4
   %.splatinsert48 = insertelement <8 x half> undef, half %i39, i32 0
   %.splat49 = shufflevector <8 x half> %.splatinsert48, <8 x half> undef, <8 x i32> zeroinitializer
   %i49 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i48, <8 x half> %.splat49, <8 x half> %vecAcc0.0203)
-  %arrayidx50 = getelementptr inbounds half, half* %pSamples.0.pn202, i32 9
-  %i50 = bitcast half* %arrayidx50 to <8 x half>*
-  %i51 = load <8 x half>, <8 x half>* %i50, align 4
+  %arrayidx50 = getelementptr inbounds half, ptr %pSamples.0.pn202, i32 9
+  %i51 = load <8 x half>, ptr %arrayidx50, align 4
   %.splatinsert51 = insertelement <8 x half> undef, half %i40, i32 0
   %.splat52 = shufflevector <8 x half> %.splatinsert51, <8 x half> undef, <8 x i32> zeroinitializer
   %i52 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i51, <8 x half> %.splat52, <8 x half> %i49)
-  %arrayidx53 = getelementptr inbounds half, half* %pSamples.0.pn202, i32 10
-  %i53 = bitcast half* %arrayidx53 to <8 x half>*
-  %i54 = load <8 x half>, <8 x half>* %i53, align 4
+  %arrayidx53 = getelementptr inbounds half, ptr %pSamples.0.pn202, i32 10
+  %i54 = load <8 x half>, ptr %arrayidx53, align 4
   %.splatinsert54 = insertelement <8 x half> undef, half %i41, i32 0
   %.splat55 = shufflevector <8 x half> %.splatinsert54, <8 x half> undef, <8 x i32> zeroinitializer
   %i55 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i54, <8 x half> %.splat55, <8 x half> %i52)
-  %arrayidx56 = getelementptr inbounds half, half* %pSamples.0.pn202, i32 11
-  %i56 = bitcast half* %arrayidx56 to <8 x half>*
-  %i57 = load <8 x half>, <8 x half>* %i56, align 4
+  %arrayidx56 = getelementptr inbounds half, ptr %pSamples.0.pn202, i32 11
+  %i57 = load <8 x half>, ptr %arrayidx56, align 4
   %.splatinsert57 = insertelement <8 x half> undef, half %i42, i32 0
   %.splat58 = shufflevector <8 x half> %.splatinsert57, <8 x half> undef, <8 x i32> zeroinitializer
   %i58 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i57, <8 x half> %.splat58, <8 x half> %i55)
-  %arrayidx59 = getelementptr inbounds half, half* %pSamples.0.pn202, i32 12
-  %i59 = bitcast half* %arrayidx59 to <8 x half>*
-  %i60 = load <8 x half>, <8 x half>* %i59, align 4
+  %arrayidx59 = getelementptr inbounds half, ptr %pSamples.0.pn202, i32 12
+  %i60 = load <8 x half>, ptr %arrayidx59, align 4
   %.splatinsert60 = insertelement <8 x half> undef, half %i43, i32 0
   %.splat61 = shufflevector <8 x half> %.splatinsert60, <8 x half> undef, <8 x i32> zeroinitializer
   %i61 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i60, <8 x half> %.splat61, <8 x half> %i58)
-  %arrayidx62 = getelementptr inbounds half, half* %pSamples.0.pn202, i32 13
-  %i62 = bitcast half* %arrayidx62 to <8 x half>*
-  %i63 = load <8 x half>, <8 x half>* %i62, align 4
+  %arrayidx62 = getelementptr inbounds half, ptr %pSamples.0.pn202, i32 13
+  %i63 = load <8 x half>, ptr %arrayidx62, align 4
   %.splatinsert63 = insertelement <8 x half> undef, half %i44, i32 0
   %.splat64 = shufflevector <8 x half> %.splatinsert63, <8 x half> undef, <8 x i32> zeroinitializer
   %i64 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i63, <8 x half> %.splat64, <8 x half> %i61)
-  %arrayidx65 = getelementptr inbounds half, half* %pSamples.0.pn202, i32 14
-  %i65 = bitcast half* %arrayidx65 to <8 x half>*
-  %i66 = load <8 x half>, <8 x half>* %i65, align 4
+  %arrayidx65 = getelementptr inbounds half, ptr %pSamples.0.pn202, i32 14
+  %i66 = load <8 x half>, ptr %arrayidx65, align 4
   %.splatinsert66 = insertelement <8 x half> undef, half %i45, i32 0
   %.splat67 = shufflevector <8 x half> %.splatinsert66, <8 x half> undef, <8 x i32> zeroinitializer
   %i67 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i66, <8 x half> %.splat67, <8 x half> %i64)
-  %arrayidx68 = getelementptr inbounds half, half* %pSamples.0.pn202, i32 15
-  %i68 = bitcast half* %arrayidx68 to <8 x half>*
-  %i69 = load <8 x half>, <8 x half>* %i68, align 4
+  %arrayidx68 = getelementptr inbounds half, ptr %pSamples.0.pn202, i32 15
+  %i69 = load <8 x half>, ptr %arrayidx68, align 4
   %.splatinsert69 = insertelement <8 x half> undef, half %i46, i32 0
   %.splat70 = shufflevector <8 x half> %.splatinsert69, <8 x half> undef, <8 x i32> zeroinitializer
   %i70 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i69, <8 x half> %.splat70, <8 x half> %i67)
   %inc = add nuw nsw i32 %i.0204, 1
-  %pCoeffsCur.0 = getelementptr inbounds half, half* %pCoeffsCur.0206, i32 8
-  %pSamples.1 = getelementptr inbounds half, half* %pSamples.1207, i32 8
+  %pCoeffsCur.0 = getelementptr inbounds half, ptr %pCoeffsCur.0206, i32 8
+  %pSamples.1 = getelementptr inbounds half, ptr %pSamples.1207, i32 8
   %exitcond = icmp eq i32 %inc, %smax
   br i1 %exitcond, label %for.end, label %for.body
 
 for.end:                                          ; preds = %for.body, %while.body
   %vecAcc0.0.lcssa = phi <8 x half> [ %i38, %while.body ], [ %i70, %for.body ]
-  %pCoeffsCur.0.lcssa = phi half* [ %pCoeffsCur.0199, %while.body ], [ %pCoeffsCur.0, %for.body ]
-  %pSamples.1.lcssa = phi half* [ %pSamples.1200, %while.body ], [ %pSamples.1, %for.body ]
+  %pCoeffsCur.0.lcssa = phi ptr [ %pCoeffsCur.0199, %while.body ], [ %pCoeffsCur.0, %for.body ]
+  %pSamples.1.lcssa = phi ptr [ %pSamples.1200, %while.body ], [ %pSamples.1, %for.body ]
   br i1 %cmp74210, label %while.end, label %while.body76
 
 while.body76:                                     ; preds = %while.body76, %for.end
-  %pCoeffsCur.1214 = phi half* [ %incdec.ptr77, %while.body76 ], [ %pCoeffsCur.0.lcssa, %for.end ]
+  %pCoeffsCur.1214 = phi ptr [ %incdec.ptr77, %while.body76 ], [ %pCoeffsCur.0.lcssa, %for.end ]
   %vecAcc0.1213 = phi <8 x half> [ %i74, %while.body76 ], [ %vecAcc0.0.lcssa, %for.end ]
   %numCnt.0212 = phi i32 [ %dec, %while.body76 ], [ %and, %for.end ]
-  %pSamples.2211 = phi half* [ %incdec.ptr80, %while.body76 ], [ %pSamples.1.lcssa, %for.end ]
-  %incdec.ptr77 = getelementptr inbounds half, half* %pCoeffsCur.1214, i32 1
-  %i71 = load half, half* %pCoeffsCur.1214, align 4
-  %i72 = bitcast half* %pSamples.2211 to <8 x half>*
-  %i73 = load <8 x half>, <8 x half>* %i72, align 4
+  %pSamples.2211 = phi ptr [ %incdec.ptr80, %while.body76 ], [ %pSamples.1.lcssa, %for.end ]
+  %incdec.ptr77 = getelementptr inbounds half, ptr %pCoeffsCur.1214, i32 1
+  %i71 = load half, ptr %pCoeffsCur.1214, align 4
+  %i73 = load <8 x half>, ptr %pSamples.2211, align 4
   %.splatinsert78 = insertelement <8 x half> undef, half %i71, i32 0
   %.splat79 = shufflevector <8 x half> %.splatinsert78, <8 x half> undef, <8 x i32> zeroinitializer
   %i74 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i73, <8 x half> %.splat79, <8 x half> %vecAcc0.1213)
-  %incdec.ptr80 = getelementptr inbounds half, half* %pSamples.2211, i32 1
+  %incdec.ptr80 = getelementptr inbounds half, ptr %pSamples.2211, i32 1
   %dec = add nsw i32 %numCnt.0212, -1
   %cmp74 = icmp sgt i32 %numCnt.0212, 1
   br i1 %cmp74, label %while.body76, label %while.end.loopexit
 
 while.end.loopexit:                               ; preds = %while.body76
-  %scevgep = getelementptr half, half* %pSamples.1.lcssa, i32 %and
+  %scevgep = getelementptr half, ptr %pSamples.1.lcssa, i32 %and
   br label %while.end
 
 while.end:                                        ; preds = %while.end.loopexit, %for.end
-  %pSamples.2.lcssa = phi half* [ %pSamples.1.lcssa, %for.end ], [ %scevgep, %while.end.loopexit ]
+  %pSamples.2.lcssa = phi ptr [ %pSamples.1.lcssa, %for.end ], [ %scevgep, %while.end.loopexit ]
   %vecAcc0.1.lcssa = phi <8 x half> [ %vecAcc0.0.lcssa, %for.end ], [ %i74, %while.end.loopexit ]
-  %i75 = bitcast half* %pOutput.0218 to <8 x half>*
-  store <8 x half> %vecAcc0.1.lcssa, <8 x half>* %i75, align 4
-  %add.ptr81 = getelementptr inbounds half, half* %pOutput.0218, i32 4
-  %add.ptr82 = getelementptr inbounds half, half* %pSamples.2.lcssa, i32 4
-  %add.ptr83 = getelementptr inbounds half, half* %add.ptr82, i32 %idx.neg
+  store <8 x half> %vecAcc0.1.lcssa, ptr %pOutput.0218, align 4
+  %add.ptr81 = getelementptr inbounds half, ptr %pOutput.0218, i32 4
+  %add.ptr82 = getelementptr inbounds half, ptr %pSamples.2.lcssa, i32 4
+  %add.ptr83 = getelementptr inbounds half, ptr %add.ptr82, i32 %idx.neg
   %dec84 = add nsw i32 %blkCnt.0222, -1
   %cmp5 = icmp eq i32 %dec84, 0
   br i1 %cmp5, label %if.end, label %while.body
@@ -1416,8 +1339,8 @@ if.end:                                           ; preds = %while.end, %if.then
   ret void
 }
 
-%struct.arm_biquad_cascade_df2T_instance_f16 = type { i8, half*, half* }
-define void @arm_biquad_cascade_df2T_f16(%struct.arm_biquad_cascade_df2T_instance_f16* nocapture readonly %S, half* nocapture readonly %pSrc, half* nocapture %pDst, i32 %blockSize) {
+%struct.arm_biquad_cascade_df2T_instance_f16 = type { i8, ptr, ptr }
+define void @arm_biquad_cascade_df2T_f16(ptr nocapture readonly %S, ptr nocapture readonly %pSrc, ptr nocapture %pDst, i32 %blockSize) {
 ; CHECK-LABEL: arm_biquad_cascade_df2T_f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, lr}
@@ -1502,13 +1425,13 @@ define void @arm_biquad_cascade_df2T_f16(%struct.arm_biquad_cascade_df2T_instanc
 ; CHECK-NEXT:  .LCPI17_0:
 ; CHECK-NEXT:    .short 0x0000 @ half 0
 entry:
-  %pState1 = getelementptr inbounds %struct.arm_biquad_cascade_df2T_instance_f16, %struct.arm_biquad_cascade_df2T_instance_f16* %S, i32 0, i32 1
-  %i = load half*, half** %pState1, align 4
-  %numStages = getelementptr inbounds %struct.arm_biquad_cascade_df2T_instance_f16, %struct.arm_biquad_cascade_df2T_instance_f16* %S, i32 0, i32 0
-  %i1 = load i8, i8* %numStages, align 4
+  %pState1 = getelementptr inbounds %struct.arm_biquad_cascade_df2T_instance_f16, ptr %S, i32 0, i32 1
+  %i = load ptr, ptr %pState1, align 4
+  %numStages = getelementptr inbounds %struct.arm_biquad_cascade_df2T_instance_f16, ptr %S, i32 0, i32 0
+  %i1 = load i8, ptr %numStages, align 4
   %conv = zext i8 %i1 to i32
-  %pCoeffs = getelementptr inbounds %struct.arm_biquad_cascade_df2T_instance_f16, %struct.arm_biquad_cascade_df2T_instance_f16* %S, i32 0, i32 2
-  %i2 = load half*, half** %pCoeffs, align 4
+  %pCoeffs = getelementptr inbounds %struct.arm_biquad_cascade_df2T_instance_f16, ptr %S, i32 0, i32 2
+  %i2 = load ptr, ptr %pCoeffs, align 4
   %div = lshr i32 %blockSize, 1
   %cmp.not90 = icmp eq i32 %div, 0
   %and = and i32 %blockSize, 1
@@ -1517,17 +1440,14 @@ entry:
 
 do.body:                                          ; preds = %if.end, %entry
   %stage.0 = phi i32 [ %conv, %entry ], [ %dec23, %if.end ]
-  %pCurCoeffs.0 = phi half* [ %i2, %entry ], [ %add.ptr2, %if.end ]
-  %pState.0 = phi half* [ %i, %entry ], [ %pState.1, %if.end ]
-  %pIn.0 = phi half* [ %pSrc, %entry ], [ %pDst, %if.end ]
-  %i3 = bitcast half* %pCurCoeffs.0 to <8 x half>*
-  %i4 = load <8 x half>, <8 x half>* %i3, align 2
-  %add.ptr = getelementptr inbounds half, half* %pCurCoeffs.0, i32 2
-  %i5 = bitcast half* %add.ptr to <8 x half>*
-  %i6 = load <8 x half>, <8 x half>* %i5, align 2
-  %add.ptr2 = getelementptr inbounds half, half* %pCurCoeffs.0, i32 5
-  %i7 = bitcast half* %pState.0 to <8 x half>*
-  %i8 = load <8 x half>, <8 x half>* %i7, align 2
+  %pCurCoeffs.0 = phi ptr [ %i2, %entry ], [ %add.ptr2, %if.end ]
+  %pState.0 = phi ptr [ %i, %entry ], [ %pState.1, %if.end ]
+  %pIn.0 = phi ptr [ %pSrc, %entry ], [ %pDst, %if.end ]
+  %i4 = load <8 x half>, ptr %pCurCoeffs.0, align 2
+  %add.ptr = getelementptr inbounds half, ptr %pCurCoeffs.0, i32 2
+  %i6 = load <8 x half>, ptr %add.ptr, align 2
+  %add.ptr2 = getelementptr inbounds half, ptr %pCurCoeffs.0, i32 5
+  %i8 = load <8 x half>, ptr %pState.0, align 2
   %i9 = shufflevector <8 x half> %i8, <8 x half> <half poison, half poison, half 0xH0000, half 0xH0000, half poison, half poison, half poison, half poison>, <8 x i32> <i32 0, i32 1, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7>
   %i10 = bitcast <8 x half> %i4 to <8 x i16>
   %i11 = tail call { i32, <8 x i16> } @llvm.arm.mve.vshlc.v8i16(<8 x i16> %i10, i32 0, i32 16)
@@ -1541,14 +1461,14 @@ do.body:                                          ; preds = %if.end, %entry
   br i1 %cmp.not90, label %while.end, label %while.body
 
 while.body:                                       ; preds = %while.body, %do.body
-  %pIn.194 = phi half* [ %incdec.ptr4, %while.body ], [ %pIn.0, %do.body ]
+  %pIn.194 = phi ptr [ %incdec.ptr4, %while.body ], [ %pIn.0, %do.body ]
   %state.093 = phi <8 x half> [ %i30, %while.body ], [ %i9, %do.body ]
-  %pOut.192 = phi half* [ %incdec.ptr12, %while.body ], [ %pDst, %do.body ]
+  %pOut.192 = phi ptr [ %incdec.ptr12, %while.body ], [ %pDst, %do.body ]
   %sample.091 = phi i32 [ %dec, %while.body ], [ %div, %do.body ]
-  %incdec.ptr = getelementptr inbounds half, half* %pIn.194, i32 1
-  %i19 = load half, half* %pIn.194, align 2
-  %incdec.ptr4 = getelementptr inbounds half, half* %pIn.194, i32 2
-  %i20 = load half, half* %incdec.ptr, align 2
+  %incdec.ptr = getelementptr inbounds half, ptr %pIn.194, i32 1
+  %i19 = load half, ptr %pIn.194, align 2
+  %incdec.ptr4 = getelementptr inbounds half, ptr %pIn.194, i32 2
+  %i20 = load half, ptr %incdec.ptr, align 2
   %.splatinsert = insertelement <8 x half> poison, half %i19, i32 0
   %.splat = shufflevector <8 x half> %.splatinsert, <8 x half> poison, <8 x i32> zeroinitializer
   %i21 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i4, <8 x half> %.splat, <8 x half> %state.093)
@@ -1565,45 +1485,45 @@ while.body:                                       ; preds = %while.body, %do.bod
   %i28 = shufflevector <8 x half> %i27, <8 x half> undef, <8 x i32> <i32 2, i32 undef, i32 undef, i32 3, i32 4, i32 5, i32 6, i32 7>
   %i29 = insertelement <8 x half> %i28, half 0xH0000, i32 2
   %i30 = shufflevector <8 x half> %i29, <8 x half> %i27, <8 x i32> <i32 0, i32 11, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-  %incdec.ptr11 = getelementptr inbounds half, half* %pOut.192, i32 1
-  store half %i22, half* %pOut.192, align 2
-  %incdec.ptr12 = getelementptr inbounds half, half* %pOut.192, i32 2
-  store half %i26, half* %incdec.ptr11, align 2
+  %incdec.ptr11 = getelementptr inbounds half, ptr %pOut.192, i32 1
+  store half %i22, ptr %pOut.192, align 2
+  %incdec.ptr12 = getelementptr inbounds half, ptr %pOut.192, i32 2
+  store half %i26, ptr %incdec.ptr11, align 2
   %dec = add nsw i32 %sample.091, -1
   %cmp.not = icmp eq i32 %dec, 0
   br i1 %cmp.not, label %while.end, label %while.body
 
 while.end:                                        ; preds = %while.body, %do.body
-  %pOut.1.lcssa = phi half* [ %pDst, %do.body ], [ %incdec.ptr12, %while.body ]
+  %pOut.1.lcssa = phi ptr [ %pDst, %do.body ], [ %incdec.ptr12, %while.body ]
   %state.0.lcssa = phi <8 x half> [ %i9, %do.body ], [ %i30, %while.body ]
-  %pIn.1.lcssa = phi half* [ %pIn.0, %do.body ], [ %incdec.ptr4, %while.body ]
+  %pIn.1.lcssa = phi ptr [ %pIn.0, %do.body ], [ %incdec.ptr4, %while.body ]
   br i1 %tobool.not, label %if.else, label %if.then
 
 if.then:                                          ; preds = %while.end
-  %i31 = load half, half* %pIn.1.lcssa, align 2
+  %i31 = load half, ptr %pIn.1.lcssa, align 2
   %.splatinsert14 = insertelement <8 x half> poison, half %i31, i32 0
   %.splat15 = shufflevector <8 x half> %.splatinsert14, <8 x half> poison, <8 x i32> zeroinitializer
   %i32 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i4, <8 x half> %.splat15, <8 x half> %state.0.lcssa)
   %i33 = extractelement <8 x half> %i32, i32 0
   %.splat17 = shufflevector <8 x half> %i32, <8 x half> poison, <8 x i32> zeroinitializer
   %i34 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i6, <8 x half> %.splat17, <8 x half> %i32)
-  store half %i33, half* %pOut.1.lcssa, align 2
+  store half %i33, ptr %pOut.1.lcssa, align 2
   %i35 = extractelement <8 x half> %i34, i32 1
-  store half %i35, half* %pState.0, align 2
+  store half %i35, ptr %pState.0, align 2
   %i36 = extractelement <8 x half> %i34, i32 2
   br label %if.end
 
 if.else:                                          ; preds = %while.end
   %i37 = extractelement <8 x half> %state.0.lcssa, i32 0
-  store half %i37, half* %pState.0, align 2
+  store half %i37, ptr %pState.0, align 2
   %i38 = extractelement <8 x half> %state.0.lcssa, i32 1
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then
   %.sink = phi half [ %i38, %if.else ], [ %i36, %if.then ]
-  %i39 = getelementptr inbounds half, half* %pState.0, i32 1
-  store half %.sink, half* %i39, align 2
-  %pState.1 = getelementptr inbounds half, half* %pState.0, i32 2
+  %i39 = getelementptr inbounds half, ptr %pState.0, i32 1
+  store half %.sink, ptr %i39, align 2
+  %pState.1 = getelementptr inbounds half, ptr %pState.0, i32 2
   %dec23 = add i32 %stage.0, -1
   %cmp24.not = icmp eq i32 %dec23, 0
   br i1 %cmp24.not, label %do.end, label %do.body
@@ -1638,4 +1558,4 @@ declare { i32, <8 x i16> } @llvm.arm.mve.vshlc.v8i16(<8 x i16>, i32, i32)
 declare void @llvm.assume(i1)
 declare <8 x i1> @llvm.arm.mve.vctp16(i32)
 declare <8 x half> @llvm.fma.v8f16(<8 x half>, <8 x half>, <8 x half>)
-declare void @llvm.masked.store.v8f16.p0v8f16(<8 x half>, <8 x half>*, i32 immarg, <8 x i1>)
+declare void @llvm.masked.store.v8f16.p0(<8 x half>, ptr, i32 immarg, <8 x i1>)

diff  --git a/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll b/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
index e5f831cd4963..6228d616b584 100644
--- a/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -opaque-pointers=0 -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s
 
-define arm_aapcs_vfpcc void @test_fadd(float* noalias nocapture readonly %A, float %B, float* noalias nocapture %C, i32 %n) {
+define arm_aapcs_vfpcc void @test_fadd(ptr noalias nocapture readonly %A, float %B, ptr noalias nocapture %C, i32 %n) {
 ; CHECK-LABEL: test_fadd:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r2, #1
@@ -32,13 +32,11 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds float, float* %A, i32 %index
-  %i2 = bitcast float* %i1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
+  %i1 = getelementptr inbounds float, ptr %A, i32 %index
+  %wide.load = load <4 x float>, ptr %i1, align 4
   %i3 = fadd fast <4 x float> %wide.load, %broadcast.splat11
-  %i4 = getelementptr inbounds float, float* %C, i32 %index
-  %i5 = bitcast float* %i4 to <4 x float>*
-  store <4 x float> %i3, <4 x float>* %i5, align 4
+  %i4 = getelementptr inbounds float, ptr %C, i32 %index
+  store <4 x float> %i3, ptr %i4, align 4
   %index.next = add i32 %index, 4
   %i6 = icmp eq i32 %index.next, %n
   br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -47,7 +45,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_fadd_r(float* noalias nocapture readonly %A, float %B, float* noalias nocapture %C, i32 %n) {
+define arm_aapcs_vfpcc void @test_fadd_r(ptr noalias nocapture readonly %A, float %B, ptr noalias nocapture %C, i32 %n) {
 ; CHECK-LABEL: test_fadd_r:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r2, #1
@@ -78,13 +76,11 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds float, float* %A, i32 %index
-  %i2 = bitcast float* %i1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
+  %i1 = getelementptr inbounds float, ptr %A, i32 %index
+  %wide.load = load <4 x float>, ptr %i1, align 4
   %i3 = fadd fast <4 x float> %broadcast.splat11, %wide.load
-  %i4 = getelementptr inbounds float, float* %C, i32 %index
-  %i5 = bitcast float* %i4 to <4 x float>*
-  store <4 x float> %i3, <4 x float>* %i5, align 4
+  %i4 = getelementptr inbounds float, ptr %C, i32 %index
+  store <4 x float> %i3, ptr %i4, align 4
   %index.next = add i32 %index, 4
   %i6 = icmp eq i32 %index.next, %n
   br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -93,7 +89,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_fmul(float* noalias nocapture readonly %A, float %B, float* noalias nocapture %C, i32 %n) {
+define arm_aapcs_vfpcc void @test_fmul(ptr noalias nocapture readonly %A, float %B, ptr noalias nocapture %C, i32 %n) {
 ; CHECK-LABEL: test_fmul:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r2, #1
@@ -124,13 +120,11 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds float, float* %A, i32 %index
-  %i2 = bitcast float* %i1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
+  %i1 = getelementptr inbounds float, ptr %A, i32 %index
+  %wide.load = load <4 x float>, ptr %i1, align 4
   %i3 = fmul fast <4 x float> %wide.load, %broadcast.splat11
-  %i4 = getelementptr inbounds float, float* %C, i32 %index
-  %i5 = bitcast float* %i4 to <4 x float>*
-  store <4 x float> %i3, <4 x float>* %i5, align 4
+  %i4 = getelementptr inbounds float, ptr %C, i32 %index
+  store <4 x float> %i3, ptr %i4, align 4
   %index.next = add i32 %index, 4
   %i6 = icmp eq i32 %index.next, %n
   br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -139,7 +133,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_fmul_r(float* noalias nocapture readonly %A, float %B, float* noalias nocapture %C, i32 %n) {
+define arm_aapcs_vfpcc void @test_fmul_r(ptr noalias nocapture readonly %A, float %B, ptr noalias nocapture %C, i32 %n) {
 ; CHECK-LABEL: test_fmul_r:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r2, #1
@@ -170,13 +164,11 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds float, float* %A, i32 %index
-  %i2 = bitcast float* %i1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
+  %i1 = getelementptr inbounds float, ptr %A, i32 %index
+  %wide.load = load <4 x float>, ptr %i1, align 4
   %i3 = fmul fast <4 x float> %broadcast.splat11, %wide.load
-  %i4 = getelementptr inbounds float, float* %C, i32 %index
-  %i5 = bitcast float* %i4 to <4 x float>*
-  store <4 x float> %i3, <4 x float>* %i5, align 4
+  %i4 = getelementptr inbounds float, ptr %C, i32 %index
+  store <4 x float> %i3, ptr %i4, align 4
   %index.next = add i32 %index, 4
   %i6 = icmp eq i32 %index.next, %n
   br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -185,7 +177,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_fsub(float* noalias nocapture readonly %A, float %B, float* noalias nocapture %C, i32 %n) {
+define arm_aapcs_vfpcc void @test_fsub(ptr noalias nocapture readonly %A, float %B, ptr noalias nocapture %C, i32 %n) {
 ; CHECK-LABEL: test_fsub:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r2, #1
@@ -216,13 +208,11 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds float, float* %A, i32 %index
-  %i2 = bitcast float* %i1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
+  %i1 = getelementptr inbounds float, ptr %A, i32 %index
+  %wide.load = load <4 x float>, ptr %i1, align 4
   %i3 = fsub fast <4 x float> %wide.load, %broadcast.splat11
-  %i4 = getelementptr inbounds float, float* %C, i32 %index
-  %i5 = bitcast float* %i4 to <4 x float>*
-  store <4 x float> %i3, <4 x float>* %i5, align 4
+  %i4 = getelementptr inbounds float, ptr %C, i32 %index
+  store <4 x float> %i3, ptr %i4, align 4
   %index.next = add i32 %index, 4
   %i6 = icmp eq i32 %index.next, %n
   br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -231,7 +221,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_fsub_r(float* noalias nocapture readonly %A, float %B, float* noalias nocapture %C, i32 %n) {
+define arm_aapcs_vfpcc void @test_fsub_r(ptr noalias nocapture readonly %A, float %B, ptr noalias nocapture %C, i32 %n) {
 ; CHECK-LABEL: test_fsub_r:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r2, #1
@@ -263,13 +253,11 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds float, float* %A, i32 %index
-  %i2 = bitcast float* %i1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
+  %i1 = getelementptr inbounds float, ptr %A, i32 %index
+  %wide.load = load <4 x float>, ptr %i1, align 4
   %i3 = fsub fast <4 x float> %broadcast.splat11, %wide.load
-  %i4 = getelementptr inbounds float, float* %C, i32 %index
-  %i5 = bitcast float* %i4 to <4 x float>*
-  store <4 x float> %i3, <4 x float>* %i5, align 4
+  %i4 = getelementptr inbounds float, ptr %C, i32 %index
+  store <4 x float> %i3, ptr %i4, align 4
   %index.next = add i32 %index, 4
   %i6 = icmp eq i32 %index.next, %n
   br i1 %i6, label %for.cond.cleanup, label %vector.body
@@ -279,7 +267,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 }
 
 
-define arm_aapcs_vfpcc void @test_fmas(float* noalias nocapture readonly %A, float* noalias nocapture readonly %B, float %C, float* noalias nocapture %D, i32 %n) {
+define arm_aapcs_vfpcc void @test_fmas(ptr noalias nocapture readonly %A, ptr noalias nocapture readonly %B, float %C, ptr noalias nocapture %D, i32 %n) {
 ; CHECK-LABEL: test_fmas:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r3, #1
@@ -311,17 +299,14 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds float, float* %A, i32 %index
-  %i2 = bitcast float* %i1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
-  %i3 = getelementptr inbounds float, float* %B, i32 %index
-  %i4 = bitcast float* %i3 to <4 x float>*
-  %wide.load12 = load <4 x float>, <4 x float>* %i4, align 4
+  %i1 = getelementptr inbounds float, ptr %A, i32 %index
+  %wide.load = load <4 x float>, ptr %i1, align 4
+  %i3 = getelementptr inbounds float, ptr %B, i32 %index
+  %wide.load12 = load <4 x float>, ptr %i3, align 4
   %i5 = fmul fast <4 x float> %wide.load12, %wide.load
   %i6 = fadd fast <4 x float> %i5, %broadcast.splat14
-  %i7 = getelementptr inbounds float, float* %D, i32 %index
-  %i8 = bitcast float* %i7 to <4 x float>*
-  store <4 x float> %i6, <4 x float>* %i8, align 4
+  %i7 = getelementptr inbounds float, ptr %D, i32 %index
+  store <4 x float> %i6, ptr %i7, align 4
   %index.next = add i32 %index, 4
   %i9 = icmp eq i32 %index.next, %n
   br i1 %i9, label %for.cond.cleanup, label %vector.body
@@ -330,7 +315,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_fmas_r(float* noalias nocapture readonly %A, float* noalias nocapture readonly %B, float %C, float* noalias nocapture %D, i32 %n) {
+define arm_aapcs_vfpcc void @test_fmas_r(ptr noalias nocapture readonly %A, ptr noalias nocapture readonly %B, float %C, ptr noalias nocapture %D, i32 %n) {
 ; CHECK-LABEL: test_fmas_r:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r3, #1
@@ -362,17 +347,14 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds float, float* %A, i32 %index
-  %i2 = bitcast float* %i1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
-  %i3 = getelementptr inbounds float, float* %B, i32 %index
-  %i4 = bitcast float* %i3 to <4 x float>*
-  %wide.load12 = load <4 x float>, <4 x float>* %i4, align 4
+  %i1 = getelementptr inbounds float, ptr %A, i32 %index
+  %wide.load = load <4 x float>, ptr %i1, align 4
+  %i3 = getelementptr inbounds float, ptr %B, i32 %index
+  %wide.load12 = load <4 x float>, ptr %i3, align 4
   %i5 = fmul fast <4 x float> %wide.load12, %wide.load
   %i6 = fadd fast <4 x float> %broadcast.splat14, %i5
-  %i7 = getelementptr inbounds float, float* %D, i32 %index
-  %i8 = bitcast float* %i7 to <4 x float>*
-  store <4 x float> %i6, <4 x float>* %i8, align 4
+  %i7 = getelementptr inbounds float, ptr %D, i32 %index
+  store <4 x float> %i6, ptr %i7, align 4
   %index.next = add i32 %index, 4
   %i9 = icmp eq i32 %index.next, %n
   br i1 %i9, label %for.cond.cleanup, label %vector.body
@@ -381,7 +363,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_fma(float* noalias nocapture readonly %A, float* noalias nocapture readonly %B, float %C, float* noalias nocapture %D, i32 %n) {
+define arm_aapcs_vfpcc void @test_fma(ptr noalias nocapture readonly %A, ptr noalias nocapture readonly %B, float %C, ptr noalias nocapture %D, i32 %n) {
 ; CHECK-LABEL: test_fma:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r3, #1
@@ -413,17 +395,14 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds float, float* %A, i32 %index
-  %i2 = bitcast float* %i1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
+  %i1 = getelementptr inbounds float, ptr %A, i32 %index
+  %wide.load = load <4 x float>, ptr %i1, align 4
   %i3 = fmul fast <4 x float> %wide.load, %broadcast.splat13
-  %i4 = getelementptr inbounds float, float* %B, i32 %index
-  %i5 = bitcast float* %i4 to <4 x float>*
-  %wide.load14 = load <4 x float>, <4 x float>* %i5, align 4
+  %i4 = getelementptr inbounds float, ptr %B, i32 %index
+  %wide.load14 = load <4 x float>, ptr %i4, align 4
   %i6 = fadd fast <4 x float> %i3, %wide.load14
-  %i7 = getelementptr inbounds float, float* %D, i32 %index
-  %i8 = bitcast float* %i7 to <4 x float>*
-  store <4 x float> %i6, <4 x float>* %i8, align 4
+  %i7 = getelementptr inbounds float, ptr %D, i32 %index
+  store <4 x float> %i6, ptr %i7, align 4
   %index.next = add i32 %index, 4
   %i9 = icmp eq i32 %index.next, %n
   br i1 %i9, label %for.cond.cleanup, label %vector.body
@@ -432,7 +411,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_fma_r(float* noalias nocapture readonly %A, float* noalias nocapture readonly %B, float %C, float* noalias nocapture %D, i32 %n) {
+define arm_aapcs_vfpcc void @test_fma_r(ptr noalias nocapture readonly %A, ptr noalias nocapture readonly %B, float %C, ptr noalias nocapture %D, i32 %n) {
 ; CHECK-LABEL: test_fma_r:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r3, #1
@@ -464,17 +443,14 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds float, float* %A, i32 %index
-  %i2 = bitcast float* %i1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
+  %i1 = getelementptr inbounds float, ptr %A, i32 %index
+  %wide.load = load <4 x float>, ptr %i1, align 4
   %i3 = fmul fast <4 x float> %broadcast.splat13, %wide.load
-  %i4 = getelementptr inbounds float, float* %B, i32 %index
-  %i5 = bitcast float* %i4 to <4 x float>*
-  %wide.load14 = load <4 x float>, <4 x float>* %i5, align 4
+  %i4 = getelementptr inbounds float, ptr %B, i32 %index
+  %wide.load14 = load <4 x float>, ptr %i4, align 4
   %i6 = fadd fast <4 x float> %i3, %wide.load14
-  %i7 = getelementptr inbounds float, float* %D, i32 %index
-  %i8 = bitcast float* %i7 to <4 x float>*
-  store <4 x float> %i6, <4 x float>* %i8, align 4
+  %i7 = getelementptr inbounds float, ptr %D, i32 %index
+  store <4 x float> %i6, ptr %i7, align 4
   %index.next = add i32 %index, 4
   %i9 = icmp eq i32 %index.next, %n
   br i1 %i9, label %for.cond.cleanup, label %vector.body
@@ -484,7 +460,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 }
 
 
-define arm_aapcs_vfpcc void @test_fmss(float* noalias nocapture readonly %A, float* noalias nocapture readonly %B, float %C, float* noalias nocapture %D, i32 %n) {
+define arm_aapcs_vfpcc void @test_fmss(ptr noalias nocapture readonly %A, ptr noalias nocapture readonly %B, float %C, ptr noalias nocapture %D, i32 %n) {
 ; CHECK-LABEL: test_fmss:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r3, #1
@@ -519,17 +495,14 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds float, float* %A, i32 %index
-  %i2 = bitcast float* %i1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
-  %i3 = getelementptr inbounds float, float* %B, i32 %index
-  %i4 = bitcast float* %i3 to <4 x float>*
-  %wide.load12 = load <4 x float>, <4 x float>* %i4, align 4
+  %i1 = getelementptr inbounds float, ptr %A, i32 %index
+  %wide.load = load <4 x float>, ptr %i1, align 4
+  %i3 = getelementptr inbounds float, ptr %B, i32 %index
+  %wide.load12 = load <4 x float>, ptr %i3, align 4
   %i5 = fmul fast <4 x float> %wide.load12, %wide.load
   %i6 = fsub fast <4 x float> %i5, %broadcast.splat14
-  %i7 = getelementptr inbounds float, float* %D, i32 %index
-  %i8 = bitcast float* %i7 to <4 x float>*
-  store <4 x float> %i6, <4 x float>* %i8, align 4
+  %i7 = getelementptr inbounds float, ptr %D, i32 %index
+  store <4 x float> %i6, ptr %i7, align 4
   %index.next = add i32 %index, 4
   %i9 = icmp eq i32 %index.next, %n
   br i1 %i9, label %for.cond.cleanup, label %vector.body
@@ -538,7 +511,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_fmss_r(float* noalias nocapture readonly %A, float* noalias nocapture readonly %B, float %C, float* noalias nocapture %D, i32 %n) {
+define arm_aapcs_vfpcc void @test_fmss_r(ptr noalias nocapture readonly %A, ptr noalias nocapture readonly %B, float %C, ptr noalias nocapture %D, i32 %n) {
 ; CHECK-LABEL: test_fmss_r:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r3, #1
@@ -572,17 +545,14 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds float, float* %A, i32 %index
-  %i2 = bitcast float* %i1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
-  %i3 = getelementptr inbounds float, float* %B, i32 %index
-  %i4 = bitcast float* %i3 to <4 x float>*
-  %wide.load12 = load <4 x float>, <4 x float>* %i4, align 4
+  %i1 = getelementptr inbounds float, ptr %A, i32 %index
+  %wide.load = load <4 x float>, ptr %i1, align 4
+  %i3 = getelementptr inbounds float, ptr %B, i32 %index
+  %wide.load12 = load <4 x float>, ptr %i3, align 4
   %i5 = fmul fast <4 x float> %wide.load12, %wide.load
   %i6 = fsub fast <4 x float> %broadcast.splat14, %i5
-  %i7 = getelementptr inbounds float, float* %D, i32 %index
-  %i8 = bitcast float* %i7 to <4 x float>*
-  store <4 x float> %i6, <4 x float>* %i8, align 4
+  %i7 = getelementptr inbounds float, ptr %D, i32 %index
+  store <4 x float> %i6, ptr %i7, align 4
   %index.next = add i32 %index, 4
   %i9 = icmp eq i32 %index.next, %n
   br i1 %i9, label %for.cond.cleanup, label %vector.body
@@ -591,7 +561,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_fms(float* noalias nocapture readonly %A, float* noalias nocapture readonly %B, float %C, float* noalias nocapture %D, i32 %n) {
+define arm_aapcs_vfpcc void @test_fms(ptr noalias nocapture readonly %A, ptr noalias nocapture readonly %B, float %C, ptr noalias nocapture %D, i32 %n) {
 ; CHECK-LABEL: test_fms:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r3, #1
@@ -624,17 +594,14 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds float, float* %A, i32 %index
-  %i2 = bitcast float* %i1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
+  %i1 = getelementptr inbounds float, ptr %A, i32 %index
+  %wide.load = load <4 x float>, ptr %i1, align 4
   %i3 = fmul fast <4 x float> %wide.load, %broadcast.splat13
-  %i4 = getelementptr inbounds float, float* %B, i32 %index
-  %i5 = bitcast float* %i4 to <4 x float>*
-  %wide.load14 = load <4 x float>, <4 x float>* %i5, align 4
+  %i4 = getelementptr inbounds float, ptr %B, i32 %index
+  %wide.load14 = load <4 x float>, ptr %i4, align 4
   %i6 = fsub fast <4 x float> %i3, %wide.load14
-  %i7 = getelementptr inbounds float, float* %D, i32 %index
-  %i8 = bitcast float* %i7 to <4 x float>*
-  store <4 x float> %i6, <4 x float>* %i8, align 4
+  %i7 = getelementptr inbounds float, ptr %D, i32 %index
+  store <4 x float> %i6, ptr %i7, align 4
   %index.next = add i32 %index, 4
   %i9 = icmp eq i32 %index.next, %n
   br i1 %i9, label %for.cond.cleanup, label %vector.body
@@ -643,7 +610,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_fms_r(float* noalias nocapture readonly %A, float* noalias nocapture readonly %B, float %C, float* noalias nocapture %D, i32 %n) {
+define arm_aapcs_vfpcc void @test_fms_r(ptr noalias nocapture readonly %A, ptr noalias nocapture readonly %B, float %C, ptr noalias nocapture %D, i32 %n) {
 ; CHECK-LABEL: test_fms_r:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r3, #1
@@ -676,17 +643,14 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %i1 = getelementptr inbounds float, float* %A, i32 %index
-  %i2 = bitcast float* %i1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
+  %i1 = getelementptr inbounds float, ptr %A, i32 %index
+  %wide.load = load <4 x float>, ptr %i1, align 4
   %i3 = fmul fast <4 x float> %broadcast.splat13, %wide.load
-  %i4 = getelementptr inbounds float, float* %B, i32 %index
-  %i5 = bitcast float* %i4 to <4 x float>*
-  %wide.load14 = load <4 x float>, <4 x float>* %i5, align 4
+  %i4 = getelementptr inbounds float, ptr %B, i32 %index
+  %wide.load14 = load <4 x float>, ptr %i4, align 4
   %i6 = fsub fast <4 x float> %i3, %wide.load14
-  %i7 = getelementptr inbounds float, float* %D, i32 %index
-  %i8 = bitcast float* %i7 to <4 x float>*
-  store <4 x float> %i6, <4 x float>* %i8, align 4
+  %i7 = getelementptr inbounds float, ptr %D, i32 %index
+  store <4 x float> %i6, ptr %i7, align 4
   %index.next = add i32 %index, 4
   %i9 = icmp eq i32 %index.next, %n
   br i1 %i9, label %for.cond.cleanup, label %vector.body
@@ -696,38 +660,37 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 }
 
 
-define dso_local void @test_nested(float* noalias nocapture %pInT1, float* noalias nocapture readonly %pOutT1, float* noalias nocapture readonly %pPRT_in, float* noalias nocapture readnone %pPRT_pDst, i32 %numRows, i32 %numCols, i32 %l) local_unnamed_addr {
+define dso_local void @test_nested(ptr noalias nocapture %pInT1, ptr noalias nocapture readonly %pOutT1, ptr noalias nocapture readonly %pPRT_in, ptr noalias nocapture readnone %pPRT_pDst, i32 %numRows, i32 %numCols, i32 %l) local_unnamed_addr {
 ; CHECK-LABEL: test_nested:
 ; CHECK:       @ %bb.0: @ %for.body.us.preheader
-; CHECK-NEXT:    .save {r4, r5, r6, lr}
-; CHECK-NEXT:    push {r4, r5, r6, lr}
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
 ; CHECK-NEXT:    ldrd lr, r12, [sp, #16]
-; CHECK-NEXT:    lsl.w r3, r12, #2
 ; CHECK-NEXT:  .LBB14_1: @ %for.body.us
 ; CHECK-NEXT:    @ =>This Loop Header: Depth=1
 ; CHECK-NEXT:    @ Child Loop BB14_2 Depth 2
-; CHECK-NEXT:    ldr r4, [r1]
-; CHECK-NEXT:    mov r5, r2
-; CHECK-NEXT:    mov r6, r12
-; CHECK-NEXT:    vdup.32 q0, r4
+; CHECK-NEXT:    ldr r3, [r1]
 ; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r5, r12
+; CHECK-NEXT:    vdup.32 q0, r3
+; CHECK-NEXT:    add.w r3, r2, r12, lsl #2
 ; CHECK-NEXT:  .LBB14_2: @ %vector.body
 ; CHECK-NEXT:    @ Parent Loop BB14_1 Depth=1
 ; CHECK-NEXT:    @ => This Inner Loop Header: Depth=2
-; CHECK-NEXT:    vldrw.u32 q1, [r5], #16
+; CHECK-NEXT:    vldrw.u32 q1, [r2], #16
 ; CHECK-NEXT:    vldrw.u32 q2, [r4]
-; CHECK-NEXT:    subs r6, #4
+; CHECK-NEXT:    subs r5, #4
 ; CHECK-NEXT:    vfms.f32 q2, q1, q0
 ; CHECK-NEXT:    vstrb.8 q2, [r4], #16
 ; CHECK-NEXT:    bne .LBB14_2
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond6.for.end_crit_edge.us
 ; CHECK-NEXT:    @ in Loop: Header=BB14_1 Depth=1
-; CHECK-NEXT:    add r0, r3
-; CHECK-NEXT:    add r2, r3
+; CHECK-NEXT:    add.w r0, r0, r12, lsl #2
 ; CHECK-NEXT:    adds r1, #4
+; CHECK-NEXT:    mov r2, r3
 ; CHECK-NEXT:    le lr, .LBB14_1
 ; CHECK-NEXT:  @ %bb.4: @ %for.end14
-; CHECK-NEXT:    pop {r4, r5, r6, pc}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
 for.body.us.preheader:
   %cmp = icmp sgt i32 %numRows, 0
   tail call void @llvm.assume(i1 %cmp)
@@ -741,34 +704,32 @@ for.body.us.preheader:
   br label %for.body.us
 
 for.body.us:                                      ; preds = %for.cond6.for.end_crit_edge.us, %for.body.us.preheader
-  %pInT1.addr.038.us = phi float* [ %scevgep40, %for.cond6.for.end_crit_edge.us ], [ %pInT1, %for.body.us.preheader ]
+  %pInT1.addr.038.us = phi ptr [ %scevgep40, %for.cond6.for.end_crit_edge.us ], [ %pInT1, %for.body.us.preheader ]
   %i.037.us = phi i32 [ %inc13.us, %for.cond6.for.end_crit_edge.us ], [ 0, %for.body.us.preheader ]
-  %pOutT1.addr.036.us = phi float* [ %incdec.ptr.us, %for.cond6.for.end_crit_edge.us ], [ %pOutT1, %for.body.us.preheader ]
-  %pPRT_in.addr.035.us = phi float* [ %scevgep, %for.cond6.for.end_crit_edge.us ], [ %pPRT_in, %for.body.us.preheader ]
-  %scevgep = getelementptr float, float* %pPRT_in.addr.035.us, i32 %numCols
-  %i = load float, float* %pOutT1.addr.036.us, align 4
+  %pOutT1.addr.036.us = phi ptr [ %incdec.ptr.us, %for.cond6.for.end_crit_edge.us ], [ %pOutT1, %for.body.us.preheader ]
+  %pPRT_in.addr.035.us = phi ptr [ %scevgep, %for.cond6.for.end_crit_edge.us ], [ %pPRT_in, %for.body.us.preheader ]
+  %scevgep = getelementptr float, ptr %pPRT_in.addr.035.us, i32 %numCols
+  %i = load float, ptr %pOutT1.addr.036.us, align 4
   %broadcast.splatinsert47 = insertelement <4 x float> undef, float %i, i32 0
   %broadcast.splat48 = shufflevector <4 x float> %broadcast.splatinsert47, <4 x float> undef, <4 x i32> zeroinitializer
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %for.body.us
   %index = phi i32 [ 0, %for.body.us ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr float, float* %pInT1.addr.038.us, i32 %index
-  %next.gep45 = getelementptr float, float* %pPRT_in.addr.035.us, i32 %index
-  %i1 = bitcast float* %next.gep to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %i1, align 4
-  %i2 = bitcast float* %next.gep45 to <4 x float>*
-  %wide.load46 = load <4 x float>, <4 x float>* %i2, align 4
+  %next.gep = getelementptr float, ptr %pInT1.addr.038.us, i32 %index
+  %next.gep45 = getelementptr float, ptr %pPRT_in.addr.035.us, i32 %index
+  %wide.load = load <4 x float>, ptr %next.gep, align 4
+  %wide.load46 = load <4 x float>, ptr %next.gep45, align 4
   %i3 = fmul fast <4 x float> %wide.load46, %broadcast.splat48
   %i4 = fsub fast <4 x float> %wide.load, %i3
-  store <4 x float> %i4, <4 x float>* %i1, align 4
+  store <4 x float> %i4, ptr %next.gep, align 4
   %index.next = add i32 %index, 4
   %i5 = icmp eq i32 %index.next, %numCols
   br i1 %i5, label %for.cond6.for.end_crit_edge.us, label %vector.body
 
 for.cond6.for.end_crit_edge.us:                   ; preds = %vector.body
-  %incdec.ptr.us = getelementptr inbounds float, float* %pOutT1.addr.036.us, i32 1
-  %scevgep40 = getelementptr float, float* %pInT1.addr.038.us, i32 %numCols
+  %incdec.ptr.us = getelementptr inbounds float, ptr %pOutT1.addr.036.us, i32 1
+  %scevgep40 = getelementptr float, ptr %pInT1.addr.038.us, i32 %numCols
   %inc13.us = add nuw nsw i32 %i.037.us, 1
   %exitcond41 = icmp eq i32 %inc13.us, %numRows
   br i1 %exitcond41, label %for.end14, label %for.body.us
@@ -777,8 +738,8 @@ for.end14:                                        ; preds = %for.cond6.for.end_c
   ret void
 }
 
-%struct.arm_fir_instance_f32 = type { i16, float*, float* }
-define void @arm_fir_f32_1_4_mve(%struct.arm_fir_instance_f32* nocapture readonly %S, float* nocapture readonly %pSrc, float* %pDst, i32 %blockSize) {
+%struct.arm_fir_instance_f32 = type { i16, ptr, ptr }
+define void @arm_fir_f32_1_4_mve(ptr nocapture readonly %S, ptr nocapture readonly %pSrc, ptr %pDst, i32 %blockSize) {
 ; CHECK-LABEL: arm_fir_f32_1_4_mve:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
@@ -870,26 +831,26 @@ define void @arm_fir_f32_1_4_mve(%struct.arm_fir_instance_f32* nocapture readonl
 ; CHECK-NEXT:    add sp, #8
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
-  %pState1 = getelementptr inbounds %struct.arm_fir_instance_f32, %struct.arm_fir_instance_f32* %S, i32 0, i32 1
-  %i = load float*, float** %pState1, align 4
-  %pCoeffs2 = getelementptr inbounds %struct.arm_fir_instance_f32, %struct.arm_fir_instance_f32* %S, i32 0, i32 2
-  %i1 = load float*, float** %pCoeffs2, align 4
-  %numTaps3 = getelementptr inbounds %struct.arm_fir_instance_f32, %struct.arm_fir_instance_f32* %S, i32 0, i32 0
-  %i2 = load i16, i16* %numTaps3, align 4
+  %pState1 = getelementptr inbounds %struct.arm_fir_instance_f32, ptr %S, i32 0, i32 1
+  %i = load ptr, ptr %pState1, align 4
+  %pCoeffs2 = getelementptr inbounds %struct.arm_fir_instance_f32, ptr %S, i32 0, i32 2
+  %i1 = load ptr, ptr %pCoeffs2, align 4
+  %numTaps3 = getelementptr inbounds %struct.arm_fir_instance_f32, ptr %S, i32 0, i32 0
+  %i2 = load i16, ptr %numTaps3, align 4
   %conv = zext i16 %i2 to i32
   %sub = add nsw i32 %conv, -1
   %cmp = icmp ult i32 %sub, 4
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  %arrayidx = getelementptr inbounds float, float* %i, i32 %sub
-  %incdec.ptr = getelementptr inbounds float, float* %i1, i32 1
-  %i3 = load float, float* %i1, align 4
-  %incdec.ptr6 = getelementptr inbounds float, float* %i1, i32 2
-  %i4 = load float, float* %incdec.ptr, align 4
-  %incdec.ptr7 = getelementptr inbounds float, float* %i1, i32 3
-  %i5 = load float, float* %incdec.ptr6, align 4
-  %i6 = load float, float* %incdec.ptr7, align 4
+  %arrayidx = getelementptr inbounds float, ptr %i, i32 %sub
+  %incdec.ptr = getelementptr inbounds float, ptr %i1, i32 1
+  %i3 = load float, ptr %i1, align 4
+  %incdec.ptr6 = getelementptr inbounds float, ptr %i1, i32 2
+  %i4 = load float, ptr %incdec.ptr, align 4
+  %incdec.ptr7 = getelementptr inbounds float, ptr %i1, i32 3
+  %i5 = load float, ptr %incdec.ptr6, align 4
+  %i6 = load float, ptr %incdec.ptr7, align 4
   %shr = lshr i32 %blockSize, 2
   %cmp9146 = icmp eq i32 %shr, 0
   %.pre161 = insertelement <4 x float> undef, float %i3, i32 0
@@ -904,89 +865,75 @@ if.then:                                          ; preds = %entry
 
 while.body.lr.ph:                                 ; preds = %if.then
   %i7 = and i32 %blockSize, -4
-  %scevgep158 = getelementptr float, float* %pDst, i32 %i7
+  %scevgep158 = getelementptr float, ptr %pDst, i32 %i7
   br label %while.body
 
 while.body:                                       ; preds = %while.body, %while.body.lr.ph
-  %pStateCur.0151 = phi float* [ %arrayidx, %while.body.lr.ph ], [ %add.ptr, %while.body ]
-  %pSamples.0150 = phi float* [ %i, %while.body.lr.ph ], [ %add.ptr24, %while.body ]
-  %pOutput.0149 = phi float* [ %pDst, %while.body.lr.ph ], [ %add.ptr23, %while.body ]
-  %pTempSrc.0148 = phi float* [ %pSrc, %while.body.lr.ph ], [ %add.ptr11, %while.body ]
+  %pStateCur.0151 = phi ptr [ %arrayidx, %while.body.lr.ph ], [ %add.ptr, %while.body ]
+  %pSamples.0150 = phi ptr [ %i, %while.body.lr.ph ], [ %add.ptr24, %while.body ]
+  %pOutput.0149 = phi ptr [ %pDst, %while.body.lr.ph ], [ %add.ptr23, %while.body ]
+  %pTempSrc.0148 = phi ptr [ %pSrc, %while.body.lr.ph ], [ %add.ptr11, %while.body ]
   %blkCnt.0147 = phi i32 [ %shr, %while.body.lr.ph ], [ %dec, %while.body ]
-  %i8 = bitcast float* %pTempSrc.0148 to <4 x float>*
-  %i9 = load <4 x float>, <4 x float>* %i8, align 4
-  %i10 = bitcast float* %pStateCur.0151 to <4 x float>*
-  store <4 x float> %i9, <4 x float>* %i10, align 4
-  %add.ptr = getelementptr inbounds float, float* %pStateCur.0151, i32 4
-  %add.ptr11 = getelementptr inbounds float, float* %pTempSrc.0148, i32 4
-  %i11 = bitcast float* %pSamples.0150 to <4 x float>*
-  %i12 = load <4 x float>, <4 x float>* %i11, align 4
+  %i9 = load <4 x float>, ptr %pTempSrc.0148, align 4
+  store <4 x float> %i9, ptr %pStateCur.0151, align 4
+  %add.ptr = getelementptr inbounds float, ptr %pStateCur.0151, i32 4
+  %add.ptr11 = getelementptr inbounds float, ptr %pTempSrc.0148, i32 4
+  %i12 = load <4 x float>, ptr %pSamples.0150, align 4
   %i13 = fmul fast <4 x float> %i12, %.pre162
-  %arrayidx12 = getelementptr inbounds float, float* %pSamples.0150, i32 1
-  %i14 = bitcast float* %arrayidx12 to <4 x float>*
-  %i15 = load <4 x float>, <4 x float>* %i14, align 4
+  %arrayidx12 = getelementptr inbounds float, ptr %pSamples.0150, i32 1
+  %i15 = load <4 x float>, ptr %arrayidx12, align 4
   %mul = fmul fast <4 x float> %i15, %.pre164
   %add = fadd fast <4 x float> %mul, %i13
-  %arrayidx13 = getelementptr inbounds float, float* %pSamples.0150, i32 2
-  %i16 = bitcast float* %arrayidx13 to <4 x float>*
-  %i17 = load <4 x float>, <4 x float>* %i16, align 4
+  %arrayidx13 = getelementptr inbounds float, ptr %pSamples.0150, i32 2
+  %i17 = load <4 x float>, ptr %arrayidx13, align 4
   %mul16 = fmul fast <4 x float> %i17, %.pre166
   %add17 = fadd fast <4 x float> %add, %mul16
-  %arrayidx18 = getelementptr inbounds float, float* %pSamples.0150, i32 3
-  %i18 = bitcast float* %arrayidx18 to <4 x float>*
-  %i19 = load <4 x float>, <4 x float>* %i18, align 4
+  %arrayidx18 = getelementptr inbounds float, ptr %pSamples.0150, i32 3
+  %i19 = load <4 x float>, ptr %arrayidx18, align 4
   %mul21 = fmul fast <4 x float> %i19, %.pre168
   %add22 = fadd fast <4 x float> %add17, %mul21
-  %i20 = bitcast float* %pOutput.0149 to <4 x float>*
-  store <4 x float> %add22, <4 x float>* %i20, align 4
-  %add.ptr23 = getelementptr inbounds float, float* %pOutput.0149, i32 4
-  %add.ptr24 = getelementptr inbounds float, float* %pSamples.0150, i32 4
+  store <4 x float> %add22, ptr %pOutput.0149, align 4
+  %add.ptr23 = getelementptr inbounds float, ptr %pOutput.0149, i32 4
+  %add.ptr24 = getelementptr inbounds float, ptr %pSamples.0150, i32 4
   %dec = add nsw i32 %blkCnt.0147, -1
   %cmp9 = icmp eq i32 %dec, 0
   br i1 %cmp9, label %while.end.loopexit, label %while.body
 
 while.end.loopexit:                               ; preds = %while.body
-  %scevgep157 = getelementptr float, float* %pSrc, i32 %i7
-  %scevgep159 = getelementptr float, float* %i, i32 %i7
+  %scevgep157 = getelementptr float, ptr %pSrc, i32 %i7
+  %scevgep159 = getelementptr float, ptr %i, i32 %i7
   br label %while.end
 
 while.end:                                        ; preds = %while.end.loopexit, %if.then
-  %pTempSrc.0.lcssa = phi float* [ %scevgep157, %while.end.loopexit ], [ %pSrc, %if.then ]
-  %pOutput.0.lcssa = phi float* [ %scevgep158, %while.end.loopexit ], [ %pDst, %if.then ]
-  %pSamples.0.lcssa = phi float* [ %scevgep159, %while.end.loopexit ], [ %i, %if.then ]
-  %pStateCur.0.lcssa = phi float* [ %add.ptr, %while.end.loopexit ], [ %arrayidx, %if.then ]
+  %pTempSrc.0.lcssa = phi ptr [ %scevgep157, %while.end.loopexit ], [ %pSrc, %if.then ]
+  %pOutput.0.lcssa = phi ptr [ %scevgep158, %while.end.loopexit ], [ %pDst, %if.then ]
+  %pSamples.0.lcssa = phi ptr [ %scevgep159, %while.end.loopexit ], [ %i, %if.then ]
+  %pStateCur.0.lcssa = phi ptr [ %add.ptr, %while.end.loopexit ], [ %arrayidx, %if.then ]
   %and = and i32 %blockSize, 3
   %i21 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %and)
-  %i22 = bitcast float* %pTempSrc.0.lcssa to <4 x float>*
-  %i23 = load <4 x float>, <4 x float>* %i22, align 4
-  %i24 = bitcast float* %pStateCur.0.lcssa to <4 x float>*
-  tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %i23, <4 x float>* %i24, i32 4, <4 x i1> %i21)
-  %i25 = bitcast float* %pSamples.0.lcssa to <4 x float>*
-  %i26 = load <4 x float>, <4 x float>* %i25, align 4
+  %i23 = load <4 x float>, ptr %pTempSrc.0.lcssa, align 4
+  tail call void @llvm.masked.store.v4f32.p0(<4 x float> %i23, ptr %pStateCur.0.lcssa, i32 4, <4 x i1> %i21)
+  %i26 = load <4 x float>, ptr %pSamples.0.lcssa, align 4
   %i27 = fmul fast <4 x float> %i26, %.pre162
-  %arrayidx29 = getelementptr inbounds float, float* %pSamples.0.lcssa, i32 1
-  %i28 = bitcast float* %arrayidx29 to <4 x float>*
-  %i29 = load <4 x float>, <4 x float>* %i28, align 4
+  %arrayidx29 = getelementptr inbounds float, ptr %pSamples.0.lcssa, i32 1
+  %i29 = load <4 x float>, ptr %arrayidx29, align 4
   %mul32 = fmul fast <4 x float> %i29, %.pre164
   %add33 = fadd fast <4 x float> %mul32, %i27
-  %arrayidx34 = getelementptr inbounds float, float* %pSamples.0.lcssa, i32 2
-  %i30 = bitcast float* %arrayidx34 to <4 x float>*
-  %i31 = load <4 x float>, <4 x float>* %i30, align 4
+  %arrayidx34 = getelementptr inbounds float, ptr %pSamples.0.lcssa, i32 2
+  %i31 = load <4 x float>, ptr %arrayidx34, align 4
   %mul37 = fmul fast <4 x float> %i31, %.pre166
   %add38 = fadd fast <4 x float> %add33, %mul37
-  %arrayidx39 = getelementptr inbounds float, float* %pSamples.0.lcssa, i32 3
-  %i32 = bitcast float* %arrayidx39 to <4 x float>*
-  %i33 = load <4 x float>, <4 x float>* %i32, align 4
+  %arrayidx39 = getelementptr inbounds float, ptr %pSamples.0.lcssa, i32 3
+  %i33 = load <4 x float>, ptr %arrayidx39, align 4
   %mul42 = fmul fast <4 x float> %i33, %.pre168
   %add43 = fadd fast <4 x float> %add38, %mul42
-  %i34 = bitcast float* %pOutput.0.lcssa to <4 x float>*
-  tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %add43, <4 x float>* %i34, i32 4, <4 x i1> %i21)
-  %.pre = load float*, float** %pState1, align 4
+  tail call void @llvm.masked.store.v4f32.p0(<4 x float> %add43, ptr %pOutput.0.lcssa, i32 4, <4 x i1> %i21)
+  %.pre = load ptr, ptr %pState1, align 4
   br label %if.end
 
 if.end:                                           ; preds = %while.end, %entry
-  %i35 = phi float* [ %.pre, %while.end ], [ %i, %entry ]
-  %arrayidx45 = getelementptr inbounds float, float* %i35, i32 %blockSize
+  %i35 = phi ptr [ %.pre, %while.end ], [ %i, %entry ]
+  %arrayidx45 = getelementptr inbounds float, ptr %i35, i32 %blockSize
   %shr47 = lshr i32 %conv, 2
   %cmp49141 = icmp eq i32 %shr47, 0
   br i1 %cmp49141, label %while.end55, label %while.body51.preheader
@@ -994,40 +941,36 @@ if.end:                                           ; preds = %while.end, %entry
 while.body51.preheader:                           ; preds = %if.end
   %i36 = and i32 %conv, 65532
   %i37 = add i32 %i36, %blockSize
-  %scevgep = getelementptr float, float* %i35, i32 %i37
+  %scevgep = getelementptr float, ptr %i35, i32 %i37
   br label %while.body51
 
 while.body51:                                     ; preds = %while.body51, %while.body51.preheader
-  %pTempSrc.1144 = phi float* [ %add.ptr52, %while.body51 ], [ %arrayidx45, %while.body51.preheader ]
-  %pTempDest.0143 = phi float* [ %add.ptr53, %while.body51 ], [ %i35, %while.body51.preheader ]
+  %pTempSrc.1144 = phi ptr [ %add.ptr52, %while.body51 ], [ %arrayidx45, %while.body51.preheader ]
+  %pTempDest.0143 = phi ptr [ %add.ptr53, %while.body51 ], [ %i35, %while.body51.preheader ]
   %blkCnt.1142 = phi i32 [ %dec54, %while.body51 ], [ %shr47, %while.body51.preheader ]
-  %i38 = bitcast float* %pTempSrc.1144 to <4 x float>*
-  %i39 = load <4 x float>, <4 x float>* %i38, align 4
-  %i40 = bitcast float* %pTempDest.0143 to <4 x float>*
-  store <4 x float> %i39, <4 x float>* %i40, align 4
-  %add.ptr52 = getelementptr inbounds float, float* %pTempSrc.1144, i32 4
-  %add.ptr53 = getelementptr inbounds float, float* %pTempDest.0143, i32 4
+  %i39 = load <4 x float>, ptr %pTempSrc.1144, align 4
+  store <4 x float> %i39, ptr %pTempDest.0143, align 4
+  %add.ptr52 = getelementptr inbounds float, ptr %pTempSrc.1144, i32 4
+  %add.ptr53 = getelementptr inbounds float, ptr %pTempDest.0143, i32 4
   %dec54 = add nsw i32 %blkCnt.1142, -1
   %cmp49 = icmp eq i32 %dec54, 0
   br i1 %cmp49, label %while.end55.loopexit, label %while.body51
 
 while.end55.loopexit:                             ; preds = %while.body51
-  %scevgep156 = getelementptr float, float* %i35, i32 %i36
+  %scevgep156 = getelementptr float, ptr %i35, i32 %i36
   br label %while.end55
 
 while.end55:                                      ; preds = %while.end55.loopexit, %if.end
-  %pTempDest.0.lcssa = phi float* [ %i35, %if.end ], [ %scevgep156, %while.end55.loopexit ]
-  %pTempSrc.1.lcssa = phi float* [ %arrayidx45, %if.end ], [ %scevgep, %while.end55.loopexit ]
+  %pTempDest.0.lcssa = phi ptr [ %i35, %if.end ], [ %scevgep156, %while.end55.loopexit ]
+  %pTempSrc.1.lcssa = phi ptr [ %arrayidx45, %if.end ], [ %scevgep, %while.end55.loopexit ]
   %and56 = and i32 %conv, 3
   %cmp57 = icmp eq i32 %and56, 0
   br i1 %cmp57, label %if.end61, label %if.then59
 
 if.then59:                                        ; preds = %while.end55
   %i41 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %and56)
-  %i42 = bitcast float* %pTempSrc.1.lcssa to <4 x float>*
-  %i43 = load <4 x float>, <4 x float>* %i42, align 4
-  %i44 = bitcast float* %pTempDest.0.lcssa to <4 x float>*
-  tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %i43, <4 x float>* %i44, i32 4, <4 x i1> %i41)
+  %i43 = load <4 x float>, ptr %pTempSrc.1.lcssa, align 4
+  tail call void @llvm.masked.store.v4f32.p0(<4 x float> %i43, ptr %pTempDest.0.lcssa, i32 4, <4 x i1> %i41)
   br label %if.end61
 
 if.end61:                                         ; preds = %if.then59, %while.end55
@@ -1035,7 +978,7 @@ if.end61:                                         ; preds = %if.then59, %while.e
 }
 
 
-define void @fir(%struct.arm_fir_instance_f32* nocapture readonly %S, float* nocapture readonly %pSrc, float* nocapture %pDst, i32 %blockSize) {
+define void @fir(ptr nocapture readonly %S, ptr nocapture readonly %pSrc, ptr nocapture %pDst, i32 %blockSize) {
 ; CHECK-LABEL: fir:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
@@ -1169,12 +1112,12 @@ define void @fir(%struct.arm_fir_instance_f32* nocapture readonly %S, float* noc
 ; CHECK-NEXT:    add sp, #4
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
-  %pState1 = getelementptr inbounds %struct.arm_fir_instance_f32, %struct.arm_fir_instance_f32* %S, i32 0, i32 1
-  %i = load float*, float** %pState1, align 4
-  %pCoeffs2 = getelementptr inbounds %struct.arm_fir_instance_f32, %struct.arm_fir_instance_f32* %S, i32 0, i32 2
-  %i1 = load float*, float** %pCoeffs2, align 4
-  %numTaps3 = getelementptr inbounds %struct.arm_fir_instance_f32, %struct.arm_fir_instance_f32* %S, i32 0, i32 0
-  %i2 = load i16, i16* %numTaps3, align 4
+  %pState1 = getelementptr inbounds %struct.arm_fir_instance_f32, ptr %S, i32 0, i32 1
+  %i = load ptr, ptr %pState1, align 4
+  %pCoeffs2 = getelementptr inbounds %struct.arm_fir_instance_f32, ptr %S, i32 0, i32 2
+  %i1 = load ptr, ptr %pCoeffs2, align 4
+  %numTaps3 = getelementptr inbounds %struct.arm_fir_instance_f32, ptr %S, i32 0, i32 0
+  %i2 = load i16, ptr %numTaps3, align 4
   %conv = zext i16 %i2 to i32
   %cmp = icmp ugt i32 %blockSize, 7
   br i1 %cmp, label %if.then, label %if.end
@@ -1186,17 +1129,17 @@ if.then:                                          ; preds = %entry
 
 while.body.lr.ph:                                 ; preds = %if.then
   %sub = add nsw i32 %conv, -1
-  %arrayidx = getelementptr inbounds float, float* %i, i32 %sub
-  %incdec.ptr = getelementptr inbounds float, float* %i1, i32 1
-  %incdec.ptr7 = getelementptr inbounds float, float* %i1, i32 2
-  %incdec.ptr8 = getelementptr inbounds float, float* %i1, i32 3
-  %incdec.ptr9 = getelementptr inbounds float, float* %i1, i32 4
-  %incdec.ptr10 = getelementptr inbounds float, float* %i1, i32 5
-  %incdec.ptr11 = getelementptr inbounds float, float* %i1, i32 6
-  %incdec.ptr12 = getelementptr inbounds float, float* %i1, i32 7
+  %arrayidx = getelementptr inbounds float, ptr %i, i32 %sub
+  %incdec.ptr = getelementptr inbounds float, ptr %i1, i32 1
+  %incdec.ptr7 = getelementptr inbounds float, ptr %i1, i32 2
+  %incdec.ptr8 = getelementptr inbounds float, ptr %i1, i32 3
+  %incdec.ptr9 = getelementptr inbounds float, ptr %i1, i32 4
+  %incdec.ptr10 = getelementptr inbounds float, ptr %i1, i32 5
+  %incdec.ptr11 = getelementptr inbounds float, ptr %i1, i32 6
+  %incdec.ptr12 = getelementptr inbounds float, ptr %i1, i32 7
   %sub37 = add nsw i32 %conv, -8
   %div = sdiv i32 %sub37, 8
-  %pCoeffsCur.0199 = getelementptr inbounds float, float* %i1, i32 8
+  %pCoeffsCur.0199 = getelementptr inbounds float, ptr %i1, i32 8
   %cmp38201 = icmp ugt i16 %i2, 15
   %and = and i32 %sub37, 7
   %cmp74210 = icmp eq i32 %and, 0
@@ -1207,184 +1150,164 @@ while.body.lr.ph:                                 ; preds = %if.then
 
 while.body:                                       ; preds = %while.end, %while.body.lr.ph
   %blkCnt.0222 = phi i32 [ %shr, %while.body.lr.ph ], [ %dec84, %while.end ]
-  %pStateCur.0221 = phi float* [ %arrayidx, %while.body.lr.ph ], [ %add.ptr, %while.end ]
-  %pSamples.0220 = phi float* [ %i, %while.body.lr.ph ], [ %add.ptr83, %while.end ]
-  %pTempSrc.0219 = phi float* [ %pSrc, %while.body.lr.ph ], [ %add.ptr14, %while.end ]
-  %pOutput.0218 = phi float* [ %pDst, %while.body.lr.ph ], [ %add.ptr81, %while.end ]
-  %i4 = load float, float* %i1, align 4
-  %i5 = load float, float* %incdec.ptr, align 4
-  %i6 = load float, float* %incdec.ptr7, align 4
-  %i7 = load float, float* %incdec.ptr8, align 4
-  %i8 = load float, float* %incdec.ptr9, align 4
-  %i9 = load float, float* %incdec.ptr10, align 4
-  %i10 = load float, float* %incdec.ptr11, align 4
-  %i11 = load float, float* %incdec.ptr12, align 4
-  %i12 = bitcast float* %pTempSrc.0219 to <4 x float>*
-  %i13 = load <4 x float>, <4 x float>* %i12, align 4
-  %i14 = bitcast float* %pStateCur.0221 to <4 x float>*
-  store <4 x float> %i13, <4 x float>* %i14, align 4
-  %add.ptr = getelementptr inbounds float, float* %pStateCur.0221, i32 4
-  %add.ptr14 = getelementptr inbounds float, float* %pTempSrc.0219, i32 4
-  %i15 = bitcast float* %pSamples.0220 to <4 x float>*
-  %i16 = load <4 x float>, <4 x float>* %i15, align 4
+  %pStateCur.0221 = phi ptr [ %arrayidx, %while.body.lr.ph ], [ %add.ptr, %while.end ]
+  %pSamples.0220 = phi ptr [ %i, %while.body.lr.ph ], [ %add.ptr83, %while.end ]
+  %pTempSrc.0219 = phi ptr [ %pSrc, %while.body.lr.ph ], [ %add.ptr14, %while.end ]
+  %pOutput.0218 = phi ptr [ %pDst, %while.body.lr.ph ], [ %add.ptr81, %while.end ]
+  %i4 = load float, ptr %i1, align 4
+  %i5 = load float, ptr %incdec.ptr, align 4
+  %i6 = load float, ptr %incdec.ptr7, align 4
+  %i7 = load float, ptr %incdec.ptr8, align 4
+  %i8 = load float, ptr %incdec.ptr9, align 4
+  %i9 = load float, ptr %incdec.ptr10, align 4
+  %i10 = load float, ptr %incdec.ptr11, align 4
+  %i11 = load float, ptr %incdec.ptr12, align 4
+  %i13 = load <4 x float>, ptr %pTempSrc.0219, align 4
+  store <4 x float> %i13, ptr %pStateCur.0221, align 4
+  %add.ptr = getelementptr inbounds float, ptr %pStateCur.0221, i32 4
+  %add.ptr14 = getelementptr inbounds float, ptr %pTempSrc.0219, i32 4
+  %i16 = load <4 x float>, ptr %pSamples.0220, align 4
   %.splatinsert = insertelement <4 x float> undef, float %i4, i32 0
   %.splat = shufflevector <4 x float> %.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
   %i17 = fmul fast <4 x float> %i16, %.splat
-  %arrayidx15 = getelementptr inbounds float, float* %pSamples.0220, i32 1
-  %i18 = bitcast float* %arrayidx15 to <4 x float>*
-  %i19 = load <4 x float>, <4 x float>* %i18, align 4
+  %arrayidx15 = getelementptr inbounds float, ptr %pSamples.0220, i32 1
+  %i19 = load <4 x float>, ptr %arrayidx15, align 4
   %.splatinsert16 = insertelement <4 x float> undef, float %i5, i32 0
   %.splat17 = shufflevector <4 x float> %.splatinsert16, <4 x float> undef, <4 x i32> zeroinitializer
   %i20 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i19, <4 x float> %.splat17, <4 x float> %i17)
-  %arrayidx18 = getelementptr inbounds float, float* %pSamples.0220, i32 2
-  %i21 = bitcast float* %arrayidx18 to <4 x float>*
-  %i22 = load <4 x float>, <4 x float>* %i21, align 4
+  %arrayidx18 = getelementptr inbounds float, ptr %pSamples.0220, i32 2
+  %i22 = load <4 x float>, ptr %arrayidx18, align 4
   %.splatinsert19 = insertelement <4 x float> undef, float %i6, i32 0
   %.splat20 = shufflevector <4 x float> %.splatinsert19, <4 x float> undef, <4 x i32> zeroinitializer
   %i23 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i22, <4 x float> %.splat20, <4 x float> %i20)
-  %arrayidx21 = getelementptr inbounds float, float* %pSamples.0220, i32 3
-  %i24 = bitcast float* %arrayidx21 to <4 x float>*
-  %i25 = load <4 x float>, <4 x float>* %i24, align 4
+  %arrayidx21 = getelementptr inbounds float, ptr %pSamples.0220, i32 3
+  %i25 = load <4 x float>, ptr %arrayidx21, align 4
   %.splatinsert22 = insertelement <4 x float> undef, float %i7, i32 0
   %.splat23 = shufflevector <4 x float> %.splatinsert22, <4 x float> undef, <4 x i32> zeroinitializer
   %i26 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i25, <4 x float> %.splat23, <4 x float> %i23)
-  %arrayidx24 = getelementptr inbounds float, float* %pSamples.0220, i32 4
-  %i27 = bitcast float* %arrayidx24 to <4 x float>*
-  %i28 = load <4 x float>, <4 x float>* %i27, align 4
+  %arrayidx24 = getelementptr inbounds float, ptr %pSamples.0220, i32 4
+  %i28 = load <4 x float>, ptr %arrayidx24, align 4
   %.splatinsert25 = insertelement <4 x float> undef, float %i8, i32 0
   %.splat26 = shufflevector <4 x float> %.splatinsert25, <4 x float> undef, <4 x i32> zeroinitializer
   %i29 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i28, <4 x float> %.splat26, <4 x float> %i26)
-  %arrayidx27 = getelementptr inbounds float, float* %pSamples.0220, i32 5
-  %i30 = bitcast float* %arrayidx27 to <4 x float>*
-  %i31 = load <4 x float>, <4 x float>* %i30, align 4
+  %arrayidx27 = getelementptr inbounds float, ptr %pSamples.0220, i32 5
+  %i31 = load <4 x float>, ptr %arrayidx27, align 4
   %.splatinsert28 = insertelement <4 x float> undef, float %i9, i32 0
   %.splat29 = shufflevector <4 x float> %.splatinsert28, <4 x float> undef, <4 x i32> zeroinitializer
   %i32 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i31, <4 x float> %.splat29, <4 x float> %i29)
-  %arrayidx30 = getelementptr inbounds float, float* %pSamples.0220, i32 6
-  %i33 = bitcast float* %arrayidx30 to <4 x float>*
-  %i34 = load <4 x float>, <4 x float>* %i33, align 4
+  %arrayidx30 = getelementptr inbounds float, ptr %pSamples.0220, i32 6
+  %i34 = load <4 x float>, ptr %arrayidx30, align 4
   %.splatinsert31 = insertelement <4 x float> undef, float %i10, i32 0
   %.splat32 = shufflevector <4 x float> %.splatinsert31, <4 x float> undef, <4 x i32> zeroinitializer
   %i35 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i34, <4 x float> %.splat32, <4 x float> %i32)
-  %arrayidx33 = getelementptr inbounds float, float* %pSamples.0220, i32 7
-  %i36 = bitcast float* %arrayidx33 to <4 x float>*
-  %i37 = load <4 x float>, <4 x float>* %i36, align 4
+  %arrayidx33 = getelementptr inbounds float, ptr %pSamples.0220, i32 7
+  %i37 = load <4 x float>, ptr %arrayidx33, align 4
   %.splatinsert34 = insertelement <4 x float> undef, float %i11, i32 0
   %.splat35 = shufflevector <4 x float> %.splatinsert34, <4 x float> undef, <4 x i32> zeroinitializer
   %i38 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i37, <4 x float> %.splat35, <4 x float> %i35)
-  %pSamples.1200 = getelementptr inbounds float, float* %pSamples.0220, i32 8
+  %pSamples.1200 = getelementptr inbounds float, ptr %pSamples.0220, i32 8
   br i1 %cmp38201, label %for.body, label %for.end
 
 for.body:                                         ; preds = %for.body, %while.body
-  %pSamples.1207 = phi float* [ %pSamples.1, %for.body ], [ %pSamples.1200, %while.body ]
-  %pCoeffsCur.0206 = phi float* [ %pCoeffsCur.0, %for.body ], [ %pCoeffsCur.0199, %while.body ]
-  %.pn205 = phi float* [ %pCoeffsCur.0206, %for.body ], [ %i1, %while.body ]
+  %pSamples.1207 = phi ptr [ %pSamples.1, %for.body ], [ %pSamples.1200, %while.body ]
+  %pCoeffsCur.0206 = phi ptr [ %pCoeffsCur.0, %for.body ], [ %pCoeffsCur.0199, %while.body ]
+  %.pn205 = phi ptr [ %pCoeffsCur.0206, %for.body ], [ %i1, %while.body ]
   %i.0204 = phi i32 [ %inc, %for.body ], [ 0, %while.body ]
   %vecAcc0.0203 = phi <4 x float> [ %i70, %for.body ], [ %i38, %while.body ]
-  %pSamples.0.pn202 = phi float* [ %pSamples.1207, %for.body ], [ %pSamples.0220, %while.body ]
-  %incdec.ptr40 = getelementptr inbounds float, float* %.pn205, i32 9
-  %i39 = load float, float* %pCoeffsCur.0206, align 4
-  %incdec.ptr41 = getelementptr inbounds float, float* %.pn205, i32 10
-  %i40 = load float, float* %incdec.ptr40, align 4
-  %incdec.ptr42 = getelementptr inbounds float, float* %.pn205, i32 11
-  %i41 = load float, float* %incdec.ptr41, align 4
-  %incdec.ptr43 = getelementptr inbounds float, float* %.pn205, i32 12
-  %i42 = load float, float* %incdec.ptr42, align 4
-  %incdec.ptr44 = getelementptr inbounds float, float* %.pn205, i32 13
-  %i43 = load float, float* %incdec.ptr43, align 4
-  %incdec.ptr45 = getelementptr inbounds float, float* %.pn205, i32 14
-  %i44 = load float, float* %incdec.ptr44, align 4
-  %incdec.ptr46 = getelementptr inbounds float, float* %.pn205, i32 15
-  %i45 = load float, float* %incdec.ptr45, align 4
-  %i46 = load float, float* %incdec.ptr46, align 4
-  %i47 = bitcast float* %pSamples.1207 to <4 x float>*
-  %i48 = load <4 x float>, <4 x float>* %i47, align 4
+  %pSamples.0.pn202 = phi ptr [ %pSamples.1207, %for.body ], [ %pSamples.0220, %while.body ]
+  %incdec.ptr40 = getelementptr inbounds float, ptr %.pn205, i32 9
+  %i39 = load float, ptr %pCoeffsCur.0206, align 4
+  %incdec.ptr41 = getelementptr inbounds float, ptr %.pn205, i32 10
+  %i40 = load float, ptr %incdec.ptr40, align 4
+  %incdec.ptr42 = getelementptr inbounds float, ptr %.pn205, i32 11
+  %i41 = load float, ptr %incdec.ptr41, align 4
+  %incdec.ptr43 = getelementptr inbounds float, ptr %.pn205, i32 12
+  %i42 = load float, ptr %incdec.ptr42, align 4
+  %incdec.ptr44 = getelementptr inbounds float, ptr %.pn205, i32 13
+  %i43 = load float, ptr %incdec.ptr43, align 4
+  %incdec.ptr45 = getelementptr inbounds float, ptr %.pn205, i32 14
+  %i44 = load float, ptr %incdec.ptr44, align 4
+  %incdec.ptr46 = getelementptr inbounds float, ptr %.pn205, i32 15
+  %i45 = load float, ptr %incdec.ptr45, align 4
+  %i46 = load float, ptr %incdec.ptr46, align 4
+  %i48 = load <4 x float>, ptr %pSamples.1207, align 4
   %.splatinsert48 = insertelement <4 x float> undef, float %i39, i32 0
   %.splat49 = shufflevector <4 x float> %.splatinsert48, <4 x float> undef, <4 x i32> zeroinitializer
   %i49 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i48, <4 x float> %.splat49, <4 x float> %vecAcc0.0203)
-  %arrayidx50 = getelementptr inbounds float, float* %pSamples.0.pn202, i32 9
-  %i50 = bitcast float* %arrayidx50 to <4 x float>*
-  %i51 = load <4 x float>, <4 x float>* %i50, align 4
+  %arrayidx50 = getelementptr inbounds float, ptr %pSamples.0.pn202, i32 9
+  %i51 = load <4 x float>, ptr %arrayidx50, align 4
   %.splatinsert51 = insertelement <4 x float> undef, float %i40, i32 0
   %.splat52 = shufflevector <4 x float> %.splatinsert51, <4 x float> undef, <4 x i32> zeroinitializer
   %i52 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i51, <4 x float> %.splat52, <4 x float> %i49)
-  %arrayidx53 = getelementptr inbounds float, float* %pSamples.0.pn202, i32 10
-  %i53 = bitcast float* %arrayidx53 to <4 x float>*
-  %i54 = load <4 x float>, <4 x float>* %i53, align 4
+  %arrayidx53 = getelementptr inbounds float, ptr %pSamples.0.pn202, i32 10
+  %i54 = load <4 x float>, ptr %arrayidx53, align 4
   %.splatinsert54 = insertelement <4 x float> undef, float %i41, i32 0
   %.splat55 = shufflevector <4 x float> %.splatinsert54, <4 x float> undef, <4 x i32> zeroinitializer
   %i55 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i54, <4 x float> %.splat55, <4 x float> %i52)
-  %arrayidx56 = getelementptr inbounds float, float* %pSamples.0.pn202, i32 11
-  %i56 = bitcast float* %arrayidx56 to <4 x float>*
-  %i57 = load <4 x float>, <4 x float>* %i56, align 4
+  %arrayidx56 = getelementptr inbounds float, ptr %pSamples.0.pn202, i32 11
+  %i57 = load <4 x float>, ptr %arrayidx56, align 4
   %.splatinsert57 = insertelement <4 x float> undef, float %i42, i32 0
   %.splat58 = shufflevector <4 x float> %.splatinsert57, <4 x float> undef, <4 x i32> zeroinitializer
   %i58 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i57, <4 x float> %.splat58, <4 x float> %i55)
-  %arrayidx59 = getelementptr inbounds float, float* %pSamples.0.pn202, i32 12
-  %i59 = bitcast float* %arrayidx59 to <4 x float>*
-  %i60 = load <4 x float>, <4 x float>* %i59, align 4
+  %arrayidx59 = getelementptr inbounds float, ptr %pSamples.0.pn202, i32 12
+  %i60 = load <4 x float>, ptr %arrayidx59, align 4
   %.splatinsert60 = insertelement <4 x float> undef, float %i43, i32 0
   %.splat61 = shufflevector <4 x float> %.splatinsert60, <4 x float> undef, <4 x i32> zeroinitializer
   %i61 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i60, <4 x float> %.splat61, <4 x float> %i58)
-  %arrayidx62 = getelementptr inbounds float, float* %pSamples.0.pn202, i32 13
-  %i62 = bitcast float* %arrayidx62 to <4 x float>*
-  %i63 = load <4 x float>, <4 x float>* %i62, align 4
+  %arrayidx62 = getelementptr inbounds float, ptr %pSamples.0.pn202, i32 13
+  %i63 = load <4 x float>, ptr %arrayidx62, align 4
   %.splatinsert63 = insertelement <4 x float> undef, float %i44, i32 0
   %.splat64 = shufflevector <4 x float> %.splatinsert63, <4 x float> undef, <4 x i32> zeroinitializer
   %i64 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i63, <4 x float> %.splat64, <4 x float> %i61)
-  %arrayidx65 = getelementptr inbounds float, float* %pSamples.0.pn202, i32 14
-  %i65 = bitcast float* %arrayidx65 to <4 x float>*
-  %i66 = load <4 x float>, <4 x float>* %i65, align 4
+  %arrayidx65 = getelementptr inbounds float, ptr %pSamples.0.pn202, i32 14
+  %i66 = load <4 x float>, ptr %arrayidx65, align 4
   %.splatinsert66 = insertelement <4 x float> undef, float %i45, i32 0
   %.splat67 = shufflevector <4 x float> %.splatinsert66, <4 x float> undef, <4 x i32> zeroinitializer
   %i67 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i66, <4 x float> %.splat67, <4 x float> %i64)
-  %arrayidx68 = getelementptr inbounds float, float* %pSamples.0.pn202, i32 15
-  %i68 = bitcast float* %arrayidx68 to <4 x float>*
-  %i69 = load <4 x float>, <4 x float>* %i68, align 4
+  %arrayidx68 = getelementptr inbounds float, ptr %pSamples.0.pn202, i32 15
+  %i69 = load <4 x float>, ptr %arrayidx68, align 4
   %.splatinsert69 = insertelement <4 x float> undef, float %i46, i32 0
   %.splat70 = shufflevector <4 x float> %.splatinsert69, <4 x float> undef, <4 x i32> zeroinitializer
   %i70 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i69, <4 x float> %.splat70, <4 x float> %i67)
   %inc = add nuw nsw i32 %i.0204, 1
-  %pCoeffsCur.0 = getelementptr inbounds float, float* %pCoeffsCur.0206, i32 8
-  %pSamples.1 = getelementptr inbounds float, float* %pSamples.1207, i32 8
+  %pCoeffsCur.0 = getelementptr inbounds float, ptr %pCoeffsCur.0206, i32 8
+  %pSamples.1 = getelementptr inbounds float, ptr %pSamples.1207, i32 8
   %exitcond = icmp eq i32 %inc, %smax
   br i1 %exitcond, label %for.end, label %for.body
 
 for.end:                                          ; preds = %for.body, %while.body
   %vecAcc0.0.lcssa = phi <4 x float> [ %i38, %while.body ], [ %i70, %for.body ]
-  %pCoeffsCur.0.lcssa = phi float* [ %pCoeffsCur.0199, %while.body ], [ %pCoeffsCur.0, %for.body ]
-  %pSamples.1.lcssa = phi float* [ %pSamples.1200, %while.body ], [ %pSamples.1, %for.body ]
+  %pCoeffsCur.0.lcssa = phi ptr [ %pCoeffsCur.0199, %while.body ], [ %pCoeffsCur.0, %for.body ]
+  %pSamples.1.lcssa = phi ptr [ %pSamples.1200, %while.body ], [ %pSamples.1, %for.body ]
   br i1 %cmp74210, label %while.end, label %while.body76
 
 while.body76:                                     ; preds = %while.body76, %for.end
-  %pCoeffsCur.1214 = phi float* [ %incdec.ptr77, %while.body76 ], [ %pCoeffsCur.0.lcssa, %for.end ]
+  %pCoeffsCur.1214 = phi ptr [ %incdec.ptr77, %while.body76 ], [ %pCoeffsCur.0.lcssa, %for.end ]
   %vecAcc0.1213 = phi <4 x float> [ %i74, %while.body76 ], [ %vecAcc0.0.lcssa, %for.end ]
   %numCnt.0212 = phi i32 [ %dec, %while.body76 ], [ %and, %for.end ]
-  %pSamples.2211 = phi float* [ %incdec.ptr80, %while.body76 ], [ %pSamples.1.lcssa, %for.end ]
-  %incdec.ptr77 = getelementptr inbounds float, float* %pCoeffsCur.1214, i32 1
-  %i71 = load float, float* %pCoeffsCur.1214, align 4
-  %i72 = bitcast float* %pSamples.2211 to <4 x float>*
-  %i73 = load <4 x float>, <4 x float>* %i72, align 4
+  %pSamples.2211 = phi ptr [ %incdec.ptr80, %while.body76 ], [ %pSamples.1.lcssa, %for.end ]
+  %incdec.ptr77 = getelementptr inbounds float, ptr %pCoeffsCur.1214, i32 1
+  %i71 = load float, ptr %pCoeffsCur.1214, align 4
+  %i73 = load <4 x float>, ptr %pSamples.2211, align 4
   %.splatinsert78 = insertelement <4 x float> undef, float %i71, i32 0
   %.splat79 = shufflevector <4 x float> %.splatinsert78, <4 x float> undef, <4 x i32> zeroinitializer
   %i74 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i73, <4 x float> %.splat79, <4 x float> %vecAcc0.1213)
-  %incdec.ptr80 = getelementptr inbounds float, float* %pSamples.2211, i32 1
+  %incdec.ptr80 = getelementptr inbounds float, ptr %pSamples.2211, i32 1
   %dec = add nsw i32 %numCnt.0212, -1
   %cmp74 = icmp sgt i32 %numCnt.0212, 1
   br i1 %cmp74, label %while.body76, label %while.end.loopexit
 
 while.end.loopexit:                               ; preds = %while.body76
-  %scevgep = getelementptr float, float* %pSamples.1.lcssa, i32 %and
+  %scevgep = getelementptr float, ptr %pSamples.1.lcssa, i32 %and
   br label %while.end
 
 while.end:                                        ; preds = %while.end.loopexit, %for.end
-  %pSamples.2.lcssa = phi float* [ %pSamples.1.lcssa, %for.end ], [ %scevgep, %while.end.loopexit ]
+  %pSamples.2.lcssa = phi ptr [ %pSamples.1.lcssa, %for.end ], [ %scevgep, %while.end.loopexit ]
   %vecAcc0.1.lcssa = phi <4 x float> [ %vecAcc0.0.lcssa, %for.end ], [ %i74, %while.end.loopexit ]
-  %i75 = bitcast float* %pOutput.0218 to <4 x float>*
-  store <4 x float> %vecAcc0.1.lcssa, <4 x float>* %i75, align 4
-  %add.ptr81 = getelementptr inbounds float, float* %pOutput.0218, i32 4
-  %add.ptr82 = getelementptr inbounds float, float* %pSamples.2.lcssa, i32 4
-  %add.ptr83 = getelementptr inbounds float, float* %add.ptr82, i32 %idx.neg
+  store <4 x float> %vecAcc0.1.lcssa, ptr %pOutput.0218, align 4
+  %add.ptr81 = getelementptr inbounds float, ptr %pOutput.0218, i32 4
+  %add.ptr82 = getelementptr inbounds float, ptr %pSamples.2.lcssa, i32 4
+  %add.ptr83 = getelementptr inbounds float, ptr %add.ptr82, i32 %idx.neg
   %dec84 = add nsw i32 %blkCnt.0222, -1
   %cmp5 = icmp eq i32 %dec84, 0
   br i1 %cmp5, label %if.end, label %while.body
@@ -1393,8 +1316,8 @@ if.end:                                           ; preds = %while.end, %if.then
   ret void
 }
 
-%struct.arm_biquad_cascade_stereo_df2T_instance_f32 = type { i8, float*, float* }
-define arm_aapcs_vfpcc void @arm_biquad_cascade_stereo_df2T_f32(%struct.arm_biquad_cascade_stereo_df2T_instance_f32* nocapture readonly %arg, float* %arg1, float* %arg2, i32 %arg3) {
+%struct.arm_biquad_cascade_stereo_df2T_instance_f32 = type { i8, ptr, ptr }
+define arm_aapcs_vfpcc void @arm_biquad_cascade_stereo_df2T_f32(ptr nocapture readonly %arg, ptr %arg1, ptr %arg2, i32 %arg3) {
 ; CHECK-LABEL: arm_biquad_cascade_stereo_df2T_f32:
 ; CHECK:       @ %bb.0: @ %bb
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
@@ -1466,53 +1389,48 @@ define arm_aapcs_vfpcc void @arm_biquad_cascade_stereo_df2T_f32(%struct.arm_biqu
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
 bb:
   %i = alloca [6 x float], align 4
-  %i4 = getelementptr inbounds %struct.arm_biquad_cascade_stereo_df2T_instance_f32, %struct.arm_biquad_cascade_stereo_df2T_instance_f32* %arg, i32 0, i32 1
-  %i5 = load float*, float** %i4, align 4
-  %i6 = getelementptr inbounds %struct.arm_biquad_cascade_stereo_df2T_instance_f32, %struct.arm_biquad_cascade_stereo_df2T_instance_f32* %arg, i32 0, i32 2
-  %i7 = load float*, float** %i6, align 4
-  %i8 = getelementptr inbounds %struct.arm_biquad_cascade_stereo_df2T_instance_f32, %struct.arm_biquad_cascade_stereo_df2T_instance_f32* %arg, i32 0, i32 0
-  %i9 = load i8, i8* %i8, align 4
+  %i4 = getelementptr inbounds %struct.arm_biquad_cascade_stereo_df2T_instance_f32, ptr %arg, i32 0, i32 1
+  %i5 = load ptr, ptr %i4, align 4
+  %i6 = getelementptr inbounds %struct.arm_biquad_cascade_stereo_df2T_instance_f32, ptr %arg, i32 0, i32 2
+  %i7 = load ptr, ptr %i6, align 4
+  %i8 = getelementptr inbounds %struct.arm_biquad_cascade_stereo_df2T_instance_f32, ptr %arg, i32 0, i32 0
+  %i9 = load i8, ptr %i8, align 4
   %i10 = zext i8 %i9 to i32
-  %i11 = bitcast [6 x float]* %i to i8*
-  call void @llvm.lifetime.start.p0i8(i64 24, i8* nonnull %i11)
+  call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %i)
   %i12 = tail call { <4 x i32>, i32 } @llvm.arm.mve.viwdup.v4i32(i32 0, i32 2, i32 1)
   %i13 = extractvalue { <4 x i32>, i32 } %i12, 0
-  %i14 = getelementptr inbounds [6 x float], [6 x float]* %i, i32 0, i32 4
-  store float 0.000000e+00, float* %i14, align 4
-  %i15 = getelementptr inbounds [6 x float], [6 x float]* %i, i32 0, i32 5
-  store float 0.000000e+00, float* %i15, align 4
-  %i16 = bitcast [6 x float]* %i to <4 x float>*
+  %i14 = getelementptr inbounds [6 x float], ptr %i, i32 0, i32 4
+  store float 0.000000e+00, ptr %i14, align 4
+  %i15 = getelementptr inbounds [6 x float], ptr %i, i32 0, i32 5
+  store float 0.000000e+00, ptr %i15, align 4
   %i17 = icmp eq i32 %arg3, 0
-  %i18 = bitcast [6 x float]* %i to i32*
-  %i19 = getelementptr inbounds [6 x float], [6 x float]* %i, i32 0, i32 2
-  %i20 = bitcast float* %i19 to <4 x float>*
+  %i19 = getelementptr inbounds [6 x float], ptr %i, i32 0, i32 2
   br i1 %i17, label %bb21, label %bb29
 
 bb21:                                             ; preds = %bb21, %bb
   %i22 = phi i32 [ %i27, %bb21 ], [ %i10, %bb ]
-  %i23 = phi float* [ %i26, %bb21 ], [ %i5, %bb ]
-  %i24 = bitcast float* %i23 to <4 x float>*
-  %i25 = load <4 x float>, <4 x float>* %i24, align 8
-  store <4 x float> %i25, <4 x float>* %i16, align 4
-  %i26 = getelementptr inbounds float, float* %i23, i32 4
+  %i23 = phi ptr [ %i26, %bb21 ], [ %i5, %bb ]
+  %i25 = load <4 x float>, ptr %i23, align 8
+  store <4 x float> %i25, ptr %i, align 4
+  %i26 = getelementptr inbounds float, ptr %i23, i32 4
   %i27 = add i32 %i22, -1
   %i28 = icmp eq i32 %i27, 0
   br i1 %i28, label %bb80, label %bb21
 
 bb29:                                             ; preds = %bb75, %bb
   %i30 = phi i32 [ %i78, %bb75 ], [ %i10, %bb ]
-  %i31 = phi float* [ %i76, %bb75 ], [ %i7, %bb ]
-  %i32 = phi float* [ %i77, %bb75 ], [ %i5, %bb ]
-  %i33 = phi float* [ %arg2, %bb75 ], [ %arg1, %bb ]
-  %i34 = getelementptr inbounds float, float* %i31, i32 1
-  %i35 = load float, float* %i31, align 4
-  %i36 = getelementptr inbounds float, float* %i31, i32 2
-  %i37 = load float, float* %i34, align 4
-  %i38 = getelementptr inbounds float, float* %i31, i32 3
-  %i39 = load float, float* %i36, align 4
-  %i40 = getelementptr inbounds float, float* %i31, i32 4
-  %i41 = load float, float* %i38, align 4
-  %i42 = load float, float* %i40, align 4
+  %i31 = phi ptr [ %i76, %bb75 ], [ %i7, %bb ]
+  %i32 = phi ptr [ %i77, %bb75 ], [ %i5, %bb ]
+  %i33 = phi ptr [ %arg2, %bb75 ], [ %arg1, %bb ]
+  %i34 = getelementptr inbounds float, ptr %i31, i32 1
+  %i35 = load float, ptr %i31, align 4
+  %i36 = getelementptr inbounds float, ptr %i31, i32 2
+  %i37 = load float, ptr %i34, align 4
+  %i38 = getelementptr inbounds float, ptr %i31, i32 3
+  %i39 = load float, ptr %i36, align 4
+  %i40 = getelementptr inbounds float, ptr %i31, i32 4
+  %i41 = load float, ptr %i38, align 4
+  %i42 = load float, ptr %i40, align 4
   %i43 = insertelement <4 x float> undef, float %i41, i32 0
   %i44 = shufflevector <4 x float> %i43, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 undef, i32 undef>
   %i45 = insertelement <4 x float> %i44, float %i42, i32 2
@@ -1521,52 +1439,50 @@ bb29:                                             ; preds = %bb75, %bb
   %i48 = shufflevector <4 x float> %i47, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 undef, i32 undef>
   %i49 = insertelement <4 x float> %i48, float %i39, i32 2
   %i50 = insertelement <4 x float> %i49, float %i39, i32 3
-  %i51 = bitcast float* %i32 to <4 x float>*
-  %i52 = load <4 x float>, <4 x float>* %i51, align 8
-  store <4 x float> %i52, <4 x float>* %i16, align 4
+  %i52 = load <4 x float>, ptr %i32, align 8
+  store <4 x float> %i52, ptr %i, align 4
   %i53 = insertelement <4 x float> undef, float %i35, i32 0
   %i54 = shufflevector <4 x float> %i53, <4 x float> undef, <4 x i32> zeroinitializer
   br label %bb55
 
 bb55:                                             ; preds = %bb55, %bb29
-  %i56 = phi float* [ %i33, %bb29 ], [ %i72, %bb55 ]
-  %i57 = phi float* [ %arg2, %bb29 ], [ %i68, %bb55 ]
+  %i56 = phi ptr [ %i33, %bb29 ], [ %i72, %bb55 ]
+  %i57 = phi ptr [ %arg2, %bb29 ], [ %i68, %bb55 ]
   %i58 = phi i32 [ %arg3, %bb29 ], [ %i73, %bb55 ]
-  %i59 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32* nonnull %i18, <4 x i32> %i13, i32 32, i32 2, i32 1)
+  %i59 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0.v4i32(ptr nonnull %i, <4 x i32> %i13, i32 32, i32 2, i32 1)
   %i60 = bitcast <4 x i32> %i59 to <4 x float>
-  %i61 = bitcast float* %i56 to i32*
-  %i62 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32* %i61, <4 x i32> %i13, i32 32, i32 2, i32 1)
+  %i62 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0.v4i32(ptr %i56, <4 x i32> %i13, i32 32, i32 2, i32 1)
   %i63 = bitcast <4 x i32> %i62 to <4 x float>
   %i64 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i63, <4 x float> %i54, <4 x float> %i60)
   %i65 = extractelement <4 x float> %i64, i32 0
-  %i66 = getelementptr inbounds float, float* %i57, i32 1
-  store float %i65, float* %i57, align 4
+  %i66 = getelementptr inbounds float, ptr %i57, i32 1
+  store float %i65, ptr %i57, align 4
   %i67 = extractelement <4 x float> %i64, i32 1
-  %i68 = getelementptr inbounds float, float* %i57, i32 2
-  store float %i67, float* %i66, align 4
-  %i69 = load <4 x float>, <4 x float>* %i20, align 4
+  %i68 = getelementptr inbounds float, ptr %i57, i32 2
+  store float %i67, ptr %i66, align 4
+  %i69 = load <4 x float>, ptr %i19, align 4
   %i70 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i64, <4 x float> %i46, <4 x float> %i69)
   %i71 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i63, <4 x float> %i50, <4 x float> %i70)
-  store <4 x float> %i71, <4 x float>* %i16, align 4
-  %i72 = getelementptr inbounds float, float* %i56, i32 2
+  store <4 x float> %i71, ptr %i, align 4
+  %i72 = getelementptr inbounds float, ptr %i56, i32 2
   %i73 = add i32 %i58, -1
   %i74 = icmp eq i32 %i73, 0
   br i1 %i74, label %bb75, label %bb55
 
 bb75:                                             ; preds = %bb55
-  %i76 = getelementptr inbounds float, float* %i31, i32 5
-  store <4 x float> %i71, <4 x float>* %i51, align 4
-  %i77 = getelementptr inbounds float, float* %i32, i32 4
+  %i76 = getelementptr inbounds float, ptr %i31, i32 5
+  store <4 x float> %i71, ptr %i32, align 4
+  %i77 = getelementptr inbounds float, ptr %i32, i32 4
   %i78 = add i32 %i30, -1
   %i79 = icmp eq i32 %i78, 0
   br i1 %i79, label %bb80, label %bb29
 
 bb80:                                             ; preds = %bb75, %bb21
-  call void @llvm.lifetime.end.p0i8(i64 24, i8* nonnull %i11)
+  call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %i)
   ret void
 }
 
-define arm_aapcs_vfpcc void @fms(float* nocapture readonly %pSrc1, float* nocapture readonly %pSrc2, float* nocapture readonly %pSrc3, float* nocapture %pDst, i32 %N, i32 %M) {
+define arm_aapcs_vfpcc void @fms(ptr nocapture readonly %pSrc1, ptr nocapture readonly %pSrc2, ptr nocapture readonly %pSrc3, ptr nocapture %pDst, i32 %N, i32 %M) {
 ; CHECK-LABEL: fms:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -1603,38 +1519,35 @@ entry:
   br i1 %cmp15, label %do.end, label %do.body
 
 do.body:                                          ; preds = %while.end, %entry
-  %pDst.addr.0 = phi float* [ %add.ptr2, %while.end ], [ %pDst, %entry ]
+  %pDst.addr.0 = phi ptr [ %add.ptr2, %while.end ], [ %pDst, %entry ]
   %M.addr.0 = phi i32 [ %dec3, %while.end ], [ %M, %entry ]
-  %pSrc3.addr.0 = phi float* [ %incdec.ptr, %while.end ], [ %pSrc3, %entry ]
-  %pSrc2.addr.0 = phi float* [ %add.ptr1, %while.end ], [ %pSrc2, %entry ]
-  %pSrc1.addr.0 = phi float* [ %add.ptr, %while.end ], [ %pSrc1, %entry ]
-  %i = load float, float* %pSrc3.addr.0, align 4
+  %pSrc3.addr.0 = phi ptr [ %incdec.ptr, %while.end ], [ %pSrc3, %entry ]
+  %pSrc2.addr.0 = phi ptr [ %add.ptr1, %while.end ], [ %pSrc2, %entry ]
+  %pSrc1.addr.0 = phi ptr [ %add.ptr, %while.end ], [ %pSrc1, %entry ]
+  %i = load float, ptr %pSrc3.addr.0, align 4
   %.splatinsert = insertelement <4 x float> undef, float %i, i32 0
   %.splat = shufflevector <4 x float> %.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
   br label %while.body
 
 while.body:                                       ; preds = %while.body, %do.body
-  %pSrc1.addr.119 = phi float* [ %pSrc1.addr.0, %do.body ], [ %add.ptr, %while.body ]
-  %pSrc2.addr.118 = phi float* [ %pSrc2.addr.0, %do.body ], [ %add.ptr1, %while.body ]
+  %pSrc1.addr.119 = phi ptr [ %pSrc1.addr.0, %do.body ], [ %add.ptr, %while.body ]
+  %pSrc2.addr.118 = phi ptr [ %pSrc2.addr.0, %do.body ], [ %add.ptr1, %while.body ]
   %blkCnt.017 = phi i32 [ %shr, %do.body ], [ %dec, %while.body ]
-  %pDst.addr.116 = phi float* [ %pDst.addr.0, %do.body ], [ %add.ptr2, %while.body ]
-  %i1 = bitcast float* %pSrc1.addr.119 to <4 x float>*
-  %i2 = load <4 x float>, <4 x float>* %i1, align 4
-  %i3 = bitcast float* %pSrc2.addr.118 to <4 x float>*
-  %i4 = load <4 x float>, <4 x float>* %i3, align 4
+  %pDst.addr.116 = phi ptr [ %pDst.addr.0, %do.body ], [ %add.ptr2, %while.body ]
+  %i2 = load <4 x float>, ptr %pSrc1.addr.119, align 4
+  %i4 = load <4 x float>, ptr %pSrc2.addr.118, align 4
   %i5 = fneg fast <4 x float> %i4
   %i6 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %.splat, <4 x float> %i5, <4 x float> %i2)
-  %i7 = bitcast float* %pDst.addr.116 to <4 x float>*
-  store <4 x float> %i6, <4 x float>* %i7, align 4
-  %add.ptr = getelementptr inbounds float, float* %pSrc1.addr.119, i32 4
-  %add.ptr1 = getelementptr inbounds float, float* %pSrc2.addr.118, i32 4
-  %add.ptr2 = getelementptr inbounds float, float* %pDst.addr.116, i32 4
+  store <4 x float> %i6, ptr %pDst.addr.116, align 4
+  %add.ptr = getelementptr inbounds float, ptr %pSrc1.addr.119, i32 4
+  %add.ptr1 = getelementptr inbounds float, ptr %pSrc2.addr.118, i32 4
+  %add.ptr2 = getelementptr inbounds float, ptr %pDst.addr.116, i32 4
   %dec = add nsw i32 %blkCnt.017, -1
   %cmp = icmp eq i32 %dec, 0
   br i1 %cmp, label %while.end, label %while.body
 
 while.end:                                        ; preds = %while.body
-  %incdec.ptr = getelementptr inbounds float, float* %pSrc3.addr.0, i32 1
+  %incdec.ptr = getelementptr inbounds float, ptr %pSrc3.addr.0, i32 1
   %dec3 = add i32 %M.addr.0, -1
   %cmp4 = icmp eq i32 %dec3, 0
   br i1 %cmp4, label %do.end, label %do.body
@@ -1644,8 +1557,8 @@ do.end:                                           ; preds = %while.end, %entry
 }
 
 
-%struct.arm_biquad_casd_df1_inst_f32 = type { i32, float*, float* }
-define arm_aapcs_vfpcc void @arm_biquad_cascade_df1_f32(%struct.arm_biquad_casd_df1_inst_f32* nocapture readonly %S, float* nocapture readonly %pSrc, float* nocapture %pDst, i32 %blockSize) {
+%struct.arm_biquad_casd_df1_inst_f32 = type { i32, ptr, ptr }
+define arm_aapcs_vfpcc void @arm_biquad_cascade_df1_f32(ptr nocapture readonly %S, ptr nocapture readonly %pSrc, ptr nocapture %pDst, i32 %blockSize) {
 ; CHECK-LABEL: arm_biquad_cascade_df1_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
@@ -1801,12 +1714,12 @@ define arm_aapcs_vfpcc void @arm_biquad_cascade_df1_f32(%struct.arm_biquad_casd_
 ; CHECK-NEXT:    add sp, #4
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
-  %pState1 = getelementptr inbounds %struct.arm_biquad_casd_df1_inst_f32, %struct.arm_biquad_casd_df1_inst_f32* %S, i32 0, i32 1
-  %i = load float*, float** %pState1, align 4
-  %pCoeffs2 = getelementptr inbounds %struct.arm_biquad_casd_df1_inst_f32, %struct.arm_biquad_casd_df1_inst_f32* %S, i32 0, i32 2
-  %i1 = load float*, float** %pCoeffs2, align 4
-  %numStages = getelementptr inbounds %struct.arm_biquad_casd_df1_inst_f32, %struct.arm_biquad_casd_df1_inst_f32* %S, i32 0, i32 0
-  %i2 = load i32, i32* %numStages, align 4
+  %pState1 = getelementptr inbounds %struct.arm_biquad_casd_df1_inst_f32, ptr %S, i32 0, i32 1
+  %i = load ptr, ptr %pState1, align 4
+  %pCoeffs2 = getelementptr inbounds %struct.arm_biquad_casd_df1_inst_f32, ptr %S, i32 0, i32 2
+  %i1 = load ptr, ptr %pCoeffs2, align 4
+  %numStages = getelementptr inbounds %struct.arm_biquad_casd_df1_inst_f32, ptr %S, i32 0, i32 0
+  %i2 = load i32, ptr %numStages, align 4
   %shr = lshr i32 %blockSize, 2
   %cmp201 = icmp eq i32 %shr, 0
   %and = and i32 %blockSize, 3
@@ -1816,89 +1729,80 @@ entry:
   br label %do.body
 
 do.body:                                          ; preds = %if.end69, %entry
-  %pState.0 = phi float* [ %i, %entry ], [ %incdec.ptr73, %if.end69 ]
-  %pCoeffs.0 = phi float* [ %i1, %entry ], [ %add.ptr74, %if.end69 ]
-  %pIn.0 = phi float* [ %pSrc, %entry ], [ %pDst, %if.end69 ]
+  %pState.0 = phi ptr [ %i, %entry ], [ %incdec.ptr73, %if.end69 ]
+  %pCoeffs.0 = phi ptr [ %i1, %entry ], [ %add.ptr74, %if.end69 ]
+  %pIn.0 = phi ptr [ %pSrc, %entry ], [ %pDst, %if.end69 ]
   %X3.0 = phi float [ undef, %entry ], [ %X3.2, %if.end69 ]
   %stage.0 = phi i32 [ %i2, %entry ], [ %dec75, %if.end69 ]
-  %i3 = load float, float* %pState.0, align 4
-  %arrayidx3 = getelementptr inbounds float, float* %pState.0, i32 1
-  %i4 = load float, float* %arrayidx3, align 4
-  %arrayidx4 = getelementptr inbounds float, float* %pState.0, i32 2
-  %i5 = load float, float* %arrayidx4, align 4
-  %arrayidx5 = getelementptr inbounds float, float* %pState.0, i32 3
-  %i6 = load float, float* %arrayidx5, align 4
+  %i3 = load float, ptr %pState.0, align 4
+  %arrayidx3 = getelementptr inbounds float, ptr %pState.0, i32 1
+  %i4 = load float, ptr %arrayidx3, align 4
+  %arrayidx4 = getelementptr inbounds float, ptr %pState.0, i32 2
+  %i5 = load float, ptr %arrayidx4, align 4
+  %arrayidx5 = getelementptr inbounds float, ptr %pState.0, i32 3
+  %i6 = load float, ptr %arrayidx5, align 4
   br i1 %cmp201, label %while.end, label %while.body.lr.ph
 
 while.body.lr.ph:                                 ; preds = %do.body
-  %i7 = bitcast float* %pCoeffs.0 to <4 x float>*
-  %arrayidx9 = getelementptr inbounds float, float* %pCoeffs.0, i32 4
-  %i8 = bitcast float* %arrayidx9 to <4 x float>*
-  %arrayidx12 = getelementptr inbounds float, float* %pCoeffs.0, i32 8
-  %i9 = bitcast float* %arrayidx12 to <4 x float>*
-  %arrayidx15 = getelementptr inbounds float, float* %pCoeffs.0, i32 12
-  %i10 = bitcast float* %arrayidx15 to <4 x float>*
-  %arrayidx18 = getelementptr inbounds float, float* %pCoeffs.0, i32 16
-  %i11 = bitcast float* %arrayidx18 to <4 x float>*
-  %arrayidx21 = getelementptr inbounds float, float* %pCoeffs.0, i32 20
-  %i12 = bitcast float* %arrayidx21 to <4 x float>*
-  %arrayidx24 = getelementptr inbounds float, float* %pCoeffs.0, i32 24
-  %i13 = bitcast float* %arrayidx24 to <4 x float>*
-  %arrayidx27 = getelementptr inbounds float, float* %pCoeffs.0, i32 28
-  %i14 = bitcast float* %arrayidx27 to <4 x float>*
+  %arrayidx9 = getelementptr inbounds float, ptr %pCoeffs.0, i32 4
+  %arrayidx12 = getelementptr inbounds float, ptr %pCoeffs.0, i32 8
+  %arrayidx15 = getelementptr inbounds float, ptr %pCoeffs.0, i32 12
+  %arrayidx18 = getelementptr inbounds float, ptr %pCoeffs.0, i32 16
+  %arrayidx21 = getelementptr inbounds float, ptr %pCoeffs.0, i32 20
+  %arrayidx24 = getelementptr inbounds float, ptr %pCoeffs.0, i32 24
+  %arrayidx27 = getelementptr inbounds float, ptr %pCoeffs.0, i32 28
   br label %while.body
 
 while.body:                                       ; preds = %while.body, %while.body.lr.ph
   %sample.0208 = phi i32 [ %shr, %while.body.lr.ph ], [ %dec, %while.body ]
-  %pIn.1207 = phi float* [ %pIn.0, %while.body.lr.ph ], [ %incdec.ptr8, %while.body ]
-  %pOut.1206 = phi float* [ %pDst, %while.body.lr.ph ], [ %add.ptr, %while.body ]
+  %pIn.1207 = phi ptr [ %pIn.0, %while.body.lr.ph ], [ %incdec.ptr8, %while.body ]
+  %pOut.1206 = phi ptr [ %pDst, %while.body.lr.ph ], [ %add.ptr, %while.body ]
   %Yn2.0205 = phi float [ %i6, %while.body.lr.ph ], [ %i37, %while.body ]
   %Yn1.0204 = phi float [ %i5, %while.body.lr.ph ], [ %i36, %while.body ]
   %Xn2.0203 = phi float [ %i4, %while.body.lr.ph ], [ %i17, %while.body ]
   %Xn1.0202 = phi float [ %i3, %while.body.lr.ph ], [ %i18, %while.body ]
-  %incdec.ptr = getelementptr inbounds float, float* %pIn.1207, i32 1
-  %i15 = load float, float* %pIn.1207, align 4
-  %incdec.ptr6 = getelementptr inbounds float, float* %pIn.1207, i32 2
-  %i16 = load float, float* %incdec.ptr, align 4
-  %incdec.ptr7 = getelementptr inbounds float, float* %pIn.1207, i32 3
-  %i17 = load float, float* %incdec.ptr6, align 4
-  %incdec.ptr8 = getelementptr inbounds float, float* %pIn.1207, i32 4
-  %i18 = load float, float* %incdec.ptr7, align 4
-  %i19 = load <4 x float>, <4 x float>* %i7, align 4
+  %incdec.ptr = getelementptr inbounds float, ptr %pIn.1207, i32 1
+  %i15 = load float, ptr %pIn.1207, align 4
+  %incdec.ptr6 = getelementptr inbounds float, ptr %pIn.1207, i32 2
+  %i16 = load float, ptr %incdec.ptr, align 4
+  %incdec.ptr7 = getelementptr inbounds float, ptr %pIn.1207, i32 3
+  %i17 = load float, ptr %incdec.ptr6, align 4
+  %incdec.ptr8 = getelementptr inbounds float, ptr %pIn.1207, i32 4
+  %i18 = load float, ptr %incdec.ptr7, align 4
+  %i19 = load <4 x float>, ptr %pCoeffs.0, align 4
   %.splatinsert = insertelement <4 x float> undef, float %i18, i32 0
   %.splat = shufflevector <4 x float> %.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
   %i20 = fmul fast <4 x float> %.splat, %i19
-  %i21 = load <4 x float>, <4 x float>* %i8, align 4
+  %i21 = load <4 x float>, ptr %arrayidx9, align 4
   %.splatinsert10 = insertelement <4 x float> undef, float %i17, i32 0
   %.splat11 = shufflevector <4 x float> %.splatinsert10, <4 x float> undef, <4 x i32> zeroinitializer
   %i22 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i21, <4 x float> %.splat11, <4 x float> %i20)
-  %i23 = load <4 x float>, <4 x float>* %i9, align 4
+  %i23 = load <4 x float>, ptr %arrayidx12, align 4
   %.splatinsert13 = insertelement <4 x float> undef, float %i16, i32 0
   %.splat14 = shufflevector <4 x float> %.splatinsert13, <4 x float> undef, <4 x i32> zeroinitializer
   %i24 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i23, <4 x float> %.splat14, <4 x float> %i22)
-  %i25 = load <4 x float>, <4 x float>* %i10, align 4
+  %i25 = load <4 x float>, ptr %arrayidx15, align 4
   %.splatinsert16 = insertelement <4 x float> undef, float %i15, i32 0
   %.splat17 = shufflevector <4 x float> %.splatinsert16, <4 x float> undef, <4 x i32> zeroinitializer
   %i26 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i25, <4 x float> %.splat17, <4 x float> %i24)
-  %i27 = load <4 x float>, <4 x float>* %i11, align 4
+  %i27 = load <4 x float>, ptr %arrayidx18, align 4
   %.splatinsert19 = insertelement <4 x float> undef, float %Xn1.0202, i32 0
   %.splat20 = shufflevector <4 x float> %.splatinsert19, <4 x float> undef, <4 x i32> zeroinitializer
   %i28 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i27, <4 x float> %.splat20, <4 x float> %i26)
-  %i29 = load <4 x float>, <4 x float>* %i12, align 4
+  %i29 = load <4 x float>, ptr %arrayidx21, align 4
   %.splatinsert22 = insertelement <4 x float> undef, float %Xn2.0203, i32 0
   %.splat23 = shufflevector <4 x float> %.splatinsert22, <4 x float> undef, <4 x i32> zeroinitializer
   %i30 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i29, <4 x float> %.splat23, <4 x float> %i28)
-  %i31 = load <4 x float>, <4 x float>* %i13, align 4
+  %i31 = load <4 x float>, ptr %arrayidx24, align 4
   %.splatinsert25 = insertelement <4 x float> undef, float %Yn1.0204, i32 0
   %.splat26 = shufflevector <4 x float> %.splatinsert25, <4 x float> undef, <4 x i32> zeroinitializer
   %i32 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i31, <4 x float> %.splat26, <4 x float> %i30)
-  %i33 = load <4 x float>, <4 x float>* %i14, align 4
+  %i33 = load <4 x float>, ptr %arrayidx27, align 4
   %.splatinsert28 = insertelement <4 x float> undef, float %Yn2.0205, i32 0
   %.splat29 = shufflevector <4 x float> %.splatinsert28, <4 x float> undef, <4 x i32> zeroinitializer
   %i34 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i33, <4 x float> %.splat29, <4 x float> %i32)
-  %i35 = bitcast float* %pOut.1206 to <4 x float>*
-  store <4 x float> %i34, <4 x float>* %i35, align 4
-  %add.ptr = getelementptr inbounds float, float* %pOut.1206, i32 4
+  store <4 x float> %i34, ptr %pOut.1206, align 4
+  %add.ptr = getelementptr inbounds float, ptr %pOut.1206, i32 4
   %i36 = extractelement <4 x float> %i34, i32 3
   %i37 = extractelement <4 x float> %i34, i32 2
   %dec = add nsw i32 %sample.0208, -1
@@ -1910,63 +1814,55 @@ while.end:                                        ; preds = %while.body, %do.bod
   %Xn2.0.lcssa = phi float [ %i4, %do.body ], [ %i17, %while.body ]
   %Yn1.0.lcssa = phi float [ %i5, %do.body ], [ %i36, %while.body ]
   %Yn2.0.lcssa = phi float [ %i6, %do.body ], [ %i37, %while.body ]
-  %pOut.1.lcssa = phi float* [ %pDst, %do.body ], [ %add.ptr, %while.body ]
-  %pIn.1.lcssa = phi float* [ %pIn.0, %do.body ], [ %incdec.ptr8, %while.body ]
+  %pOut.1.lcssa = phi ptr [ %pDst, %do.body ], [ %add.ptr, %while.body ]
+  %pIn.1.lcssa = phi ptr [ %pIn.0, %do.body ], [ %incdec.ptr8, %while.body ]
   %X3.1.lcssa = phi float [ %X3.0, %do.body ], [ %i18, %while.body ]
   br i1 %tobool, label %if.end69, label %if.then
 
 if.then:                                          ; preds = %while.end
-  %incdec.ptr30 = getelementptr inbounds float, float* %pIn.1.lcssa, i32 1
-  %i38 = load float, float* %pIn.1.lcssa, align 4
-  %incdec.ptr31 = getelementptr inbounds float, float* %pIn.1.lcssa, i32 2
-  %i39 = load float, float* %incdec.ptr30, align 4
-  %incdec.ptr32 = getelementptr inbounds float, float* %pIn.1.lcssa, i32 3
-  %i40 = load float, float* %incdec.ptr31, align 4
-  %i41 = load float, float* %incdec.ptr32, align 4
-  %i42 = bitcast float* %pCoeffs.0 to <4 x float>*
-  %i43 = load <4 x float>, <4 x float>* %i42, align 4
+  %incdec.ptr30 = getelementptr inbounds float, ptr %pIn.1.lcssa, i32 1
+  %i38 = load float, ptr %pIn.1.lcssa, align 4
+  %incdec.ptr31 = getelementptr inbounds float, ptr %pIn.1.lcssa, i32 2
+  %i39 = load float, ptr %incdec.ptr30, align 4
+  %incdec.ptr32 = getelementptr inbounds float, ptr %pIn.1.lcssa, i32 3
+  %i40 = load float, ptr %incdec.ptr31, align 4
+  %i41 = load float, ptr %incdec.ptr32, align 4
+  %i43 = load <4 x float>, ptr %pCoeffs.0, align 4
   %.splatinsert34 = insertelement <4 x float> undef, float %i41, i32 0
   %.splat35 = shufflevector <4 x float> %.splatinsert34, <4 x float> undef, <4 x i32> zeroinitializer
   %i44 = fmul fast <4 x float> %.splat35, %i43
-  %arrayidx36 = getelementptr inbounds float, float* %pCoeffs.0, i32 4
-  %i45 = bitcast float* %arrayidx36 to <4 x float>*
-  %i46 = load <4 x float>, <4 x float>* %i45, align 4
+  %arrayidx36 = getelementptr inbounds float, ptr %pCoeffs.0, i32 4
+  %i46 = load <4 x float>, ptr %arrayidx36, align 4
   %.splatinsert37 = insertelement <4 x float> undef, float %i40, i32 0
   %.splat38 = shufflevector <4 x float> %.splatinsert37, <4 x float> undef, <4 x i32> zeroinitializer
   %i47 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i46, <4 x float> %.splat38, <4 x float> %i44)
-  %arrayidx39 = getelementptr inbounds float, float* %pCoeffs.0, i32 8
-  %i48 = bitcast float* %arrayidx39 to <4 x float>*
-  %i49 = load <4 x float>, <4 x float>* %i48, align 4
+  %arrayidx39 = getelementptr inbounds float, ptr %pCoeffs.0, i32 8
+  %i49 = load <4 x float>, ptr %arrayidx39, align 4
   %.splatinsert40 = insertelement <4 x float> undef, float %i39, i32 0
   %.splat41 = shufflevector <4 x float> %.splatinsert40, <4 x float> undef, <4 x i32> zeroinitializer
   %i50 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i49, <4 x float> %.splat41, <4 x float> %i47)
-  %arrayidx42 = getelementptr inbounds float, float* %pCoeffs.0, i32 12
-  %i51 = bitcast float* %arrayidx42 to <4 x float>*
-  %i52 = load <4 x float>, <4 x float>* %i51, align 4
+  %arrayidx42 = getelementptr inbounds float, ptr %pCoeffs.0, i32 12
+  %i52 = load <4 x float>, ptr %arrayidx42, align 4
   %.splatinsert43 = insertelement <4 x float> undef, float %i38, i32 0
   %.splat44 = shufflevector <4 x float> %.splatinsert43, <4 x float> undef, <4 x i32> zeroinitializer
   %i53 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i52, <4 x float> %.splat44, <4 x float> %i50)
-  %arrayidx45 = getelementptr inbounds float, float* %pCoeffs.0, i32 16
-  %i54 = bitcast float* %arrayidx45 to <4 x float>*
-  %i55 = load <4 x float>, <4 x float>* %i54, align 4
+  %arrayidx45 = getelementptr inbounds float, ptr %pCoeffs.0, i32 16
+  %i55 = load <4 x float>, ptr %arrayidx45, align 4
   %.splatinsert46 = insertelement <4 x float> undef, float %Xn1.0.lcssa, i32 0
   %.splat47 = shufflevector <4 x float> %.splatinsert46, <4 x float> undef, <4 x i32> zeroinitializer
   %i56 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i55, <4 x float> %.splat47, <4 x float> %i53)
-  %arrayidx48 = getelementptr inbounds float, float* %pCoeffs.0, i32 20
-  %i57 = bitcast float* %arrayidx48 to <4 x float>*
-  %i58 = load <4 x float>, <4 x float>* %i57, align 4
+  %arrayidx48 = getelementptr inbounds float, ptr %pCoeffs.0, i32 20
+  %i58 = load <4 x float>, ptr %arrayidx48, align 4
   %.splatinsert49 = insertelement <4 x float> undef, float %Xn2.0.lcssa, i32 0
   %.splat50 = shufflevector <4 x float> %.splatinsert49, <4 x float> undef, <4 x i32> zeroinitializer
   %i59 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i58, <4 x float> %.splat50, <4 x float> %i56)
-  %arrayidx51 = getelementptr inbounds float, float* %pCoeffs.0, i32 24
-  %i60 = bitcast float* %arrayidx51 to <4 x float>*
-  %i61 = load <4 x float>, <4 x float>* %i60, align 4
+  %arrayidx51 = getelementptr inbounds float, ptr %pCoeffs.0, i32 24
+  %i61 = load <4 x float>, ptr %arrayidx51, align 4
   %.splatinsert52 = insertelement <4 x float> undef, float %Yn1.0.lcssa, i32 0
   %.splat53 = shufflevector <4 x float> %.splatinsert52, <4 x float> undef, <4 x i32> zeroinitializer
   %i62 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i61, <4 x float> %.splat53, <4 x float> %i59)
-  %arrayidx54 = getelementptr inbounds float, float* %pCoeffs.0, i32 28
-  %i63 = bitcast float* %arrayidx54 to <4 x float>*
-  %i64 = load <4 x float>, <4 x float>* %i63, align 4
+  %arrayidx54 = getelementptr inbounds float, ptr %pCoeffs.0, i32 28
+  %i64 = load <4 x float>, ptr %arrayidx54, align 4
   %.splatinsert55 = insertelement <4 x float> undef, float %Yn2.0.lcssa, i32 0
   %.splat56 = shufflevector <4 x float> %.splatinsert55, <4 x float> undef, <4 x i32> zeroinitializer
   %i65 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i64, <4 x float> %.splat56, <4 x float> %i62)
@@ -1974,20 +1870,20 @@ if.then:                                          ; preds = %while.end
   br i1 %cmp57, label %if.then58, label %if.else
 
 if.then58:                                        ; preds = %if.then
-  store float %i66, float* %pOut.1.lcssa, align 4
+  store float %i66, ptr %pOut.1.lcssa, align 4
   br label %if.end69
 
 if.else:                                          ; preds = %if.then
-  %incdec.ptr62 = getelementptr inbounds float, float* %pOut.1.lcssa, i32 1
-  store float %i66, float* %pOut.1.lcssa, align 4
+  %incdec.ptr62 = getelementptr inbounds float, ptr %pOut.1.lcssa, i32 1
+  store float %i66, ptr %pOut.1.lcssa, align 4
   %i67 = extractelement <4 x float> %i65, i32 1
-  store float %i67, float* %incdec.ptr62, align 4
+  store float %i67, ptr %incdec.ptr62, align 4
   br i1 %cmp60, label %if.end69, label %if.else64
 
 if.else64:                                        ; preds = %if.else
-  %incdec.ptr63 = getelementptr inbounds float, float* %pOut.1.lcssa, i32 2
+  %incdec.ptr63 = getelementptr inbounds float, ptr %pOut.1.lcssa, i32 2
   %i68 = extractelement <4 x float> %i65, i32 2
-  store float %i68, float* %incdec.ptr63, align 4
+  store float %i68, ptr %incdec.ptr63, align 4
   br label %if.end69
 
 if.end69:                                         ; preds = %if.else64, %if.else, %if.then58, %while.end
@@ -1996,12 +1892,12 @@ if.end69:                                         ; preds = %if.else64, %if.else
   %Yn1.1 = phi float [ %i66, %if.then58 ], [ %i68, %if.else64 ], [ %Yn1.0.lcssa, %while.end ], [ %i67, %if.else ]
   %Yn2.1 = phi float [ %Yn1.0.lcssa, %if.then58 ], [ %i67, %if.else64 ], [ %Yn2.0.lcssa, %while.end ], [ %i66, %if.else ]
   %X3.2 = phi float [ %i41, %if.then58 ], [ %i41, %if.else64 ], [ %X3.1.lcssa, %while.end ], [ %i41, %if.else ]
-  store float %Xn1.1, float* %pState.0, align 4
-  store float %Xn2.1, float* %arrayidx3, align 4
-  store float %Yn1.1, float* %arrayidx4, align 4
-  %incdec.ptr73 = getelementptr inbounds float, float* %pState.0, i32 4
-  store float %Yn2.1, float* %arrayidx5, align 4
-  %add.ptr74 = getelementptr inbounds float, float* %pCoeffs.0, i32 32
+  store float %Xn1.1, ptr %pState.0, align 4
+  store float %Xn2.1, ptr %arrayidx3, align 4
+  store float %Yn1.1, ptr %arrayidx4, align 4
+  %incdec.ptr73 = getelementptr inbounds float, ptr %pState.0, i32 4
+  store float %Yn2.1, ptr %arrayidx5, align 4
+  %add.ptr74 = getelementptr inbounds float, ptr %pCoeffs.0, i32 32
   %dec75 = add i32 %stage.0, -1
   %cmp76 = icmp eq i32 %dec75, 0
   br i1 %cmp76, label %do.end, label %do.body
@@ -2011,8 +1907,8 @@ do.end:                                           ; preds = %if.end69
 }
 
 
-%struct.arm_biquad_cascade_df2T_instance_f32 = type { i8, float*, float* }
-define void @arm_biquad_cascade_df2T_f32(%struct.arm_biquad_cascade_df2T_instance_f32* nocapture readonly %S, float* nocapture readonly %pSrc, float* nocapture %pDst, i32 %blockSize) {
+%struct.arm_biquad_cascade_df2T_instance_f32 = type { i8, ptr, ptr }
+define void @arm_biquad_cascade_df2T_f32(ptr nocapture readonly %S, ptr nocapture readonly %pSrc, ptr nocapture %pDst, i32 %blockSize) {
 ; CHECK-LABEL: arm_biquad_cascade_df2T_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
@@ -2096,13 +1992,13 @@ define void @arm_biquad_cascade_df2T_f32(%struct.arm_biquad_cascade_df2T_instanc
 ; CHECK-NEXT:  .LCPI20_0:
 ; CHECK-NEXT:    .long 0x00000000 @ float 0
 entry:
-  %pState1 = getelementptr inbounds %struct.arm_biquad_cascade_df2T_instance_f32, %struct.arm_biquad_cascade_df2T_instance_f32* %S, i32 0, i32 1
-  %i = load float*, float** %pState1, align 4
-  %numStages = getelementptr inbounds %struct.arm_biquad_cascade_df2T_instance_f32, %struct.arm_biquad_cascade_df2T_instance_f32* %S, i32 0, i32 0
-  %i1 = load i8, i8* %numStages, align 4
+  %pState1 = getelementptr inbounds %struct.arm_biquad_cascade_df2T_instance_f32, ptr %S, i32 0, i32 1
+  %i = load ptr, ptr %pState1, align 4
+  %numStages = getelementptr inbounds %struct.arm_biquad_cascade_df2T_instance_f32, ptr %S, i32 0, i32 0
+  %i1 = load i8, ptr %numStages, align 4
   %conv = zext i8 %i1 to i32
-  %pCoeffs = getelementptr inbounds %struct.arm_biquad_cascade_df2T_instance_f32, %struct.arm_biquad_cascade_df2T_instance_f32* %S, i32 0, i32 2
-  %i2 = load float*, float** %pCoeffs, align 4
+  %pCoeffs = getelementptr inbounds %struct.arm_biquad_cascade_df2T_instance_f32, ptr %S, i32 0, i32 2
+  %i2 = load ptr, ptr %pCoeffs, align 4
   %div = lshr i32 %blockSize, 1
   %cmp.not90 = icmp eq i32 %div, 0
   %and = and i32 %blockSize, 1
@@ -2111,17 +2007,14 @@ entry:
 
 do.body:                                          ; preds = %if.end, %entry
   %stage.0 = phi i32 [ %conv, %entry ], [ %dec23, %if.end ]
-  %pCurCoeffs.0 = phi float* [ %i2, %entry ], [ %add.ptr2, %if.end ]
-  %pState.0 = phi float* [ %i, %entry ], [ %pState.1, %if.end ]
-  %pIn.0 = phi float* [ %pSrc, %entry ], [ %pDst, %if.end ]
-  %i3 = bitcast float* %pCurCoeffs.0 to <4 x float>*
-  %i4 = load <4 x float>, <4 x float>* %i3, align 4
-  %add.ptr = getelementptr inbounds float, float* %pCurCoeffs.0, i32 2
-  %i5 = bitcast float* %add.ptr to <4 x float>*
-  %i6 = load <4 x float>, <4 x float>* %i5, align 4
-  %add.ptr2 = getelementptr inbounds float, float* %pCurCoeffs.0, i32 5
-  %i7 = bitcast float* %pState.0 to <4 x float>*
-  %i8 = load <4 x float>, <4 x float>* %i7, align 8
+  %pCurCoeffs.0 = phi ptr [ %i2, %entry ], [ %add.ptr2, %if.end ]
+  %pState.0 = phi ptr [ %i, %entry ], [ %pState.1, %if.end ]
+  %pIn.0 = phi ptr [ %pSrc, %entry ], [ %pDst, %if.end ]
+  %i4 = load <4 x float>, ptr %pCurCoeffs.0, align 4
+  %add.ptr = getelementptr inbounds float, ptr %pCurCoeffs.0, i32 2
+  %i6 = load <4 x float>, ptr %add.ptr, align 4
+  %add.ptr2 = getelementptr inbounds float, ptr %pCurCoeffs.0, i32 5
+  %i8 = load <4 x float>, ptr %pState.0, align 8
   %i9 = shufflevector <4 x float> %i8, <4 x float> <float poison, float poison, float 0.000000e+00, float 0.000000e+00>, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
   %i10 = bitcast <4 x float> %i4 to <4 x i32>
   %i11 = tail call { i32, <4 x i32> } @llvm.arm.mve.vshlc.v4i32(<4 x i32> %i10, i32 0, i32 32)
@@ -2135,14 +2028,14 @@ do.body:                                          ; preds = %if.end, %entry
   br i1 %cmp.not90, label %while.end, label %while.body
 
 while.body:                                       ; preds = %while.body, %do.body
-  %pIn.194 = phi float* [ %incdec.ptr4, %while.body ], [ %pIn.0, %do.body ]
+  %pIn.194 = phi ptr [ %incdec.ptr4, %while.body ], [ %pIn.0, %do.body ]
   %state.093 = phi <4 x float> [ %i30, %while.body ], [ %i9, %do.body ]
-  %pOut.192 = phi float* [ %incdec.ptr12, %while.body ], [ %pDst, %do.body ]
+  %pOut.192 = phi ptr [ %incdec.ptr12, %while.body ], [ %pDst, %do.body ]
   %sample.091 = phi i32 [ %dec, %while.body ], [ %div, %do.body ]
-  %incdec.ptr = getelementptr inbounds float, float* %pIn.194, i32 1
-  %i19 = load float, float* %pIn.194, align 4
-  %incdec.ptr4 = getelementptr inbounds float, float* %pIn.194, i32 2
-  %i20 = load float, float* %incdec.ptr, align 4
+  %incdec.ptr = getelementptr inbounds float, ptr %pIn.194, i32 1
+  %i19 = load float, ptr %pIn.194, align 4
+  %incdec.ptr4 = getelementptr inbounds float, ptr %pIn.194, i32 2
+  %i20 = load float, ptr %incdec.ptr, align 4
   %.splatinsert = insertelement <4 x float> poison, float %i19, i32 0
   %.splat = shufflevector <4 x float> %.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
   %i21 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i4, <4 x float> %.splat, <4 x float> %state.093)
@@ -2159,45 +2052,45 @@ while.body:                                       ; preds = %while.body, %do.bod
   %i28 = shufflevector <4 x float> %i27, <4 x float> undef, <4 x i32> <i32 2, i32 undef, i32 undef, i32 3>
   %i29 = insertelement <4 x float> %i28, float 0.000000e+00, i32 2
   %i30 = shufflevector <4 x float> %i29, <4 x float> %i27, <4 x i32> <i32 0, i32 7, i32 2, i32 3>
-  %incdec.ptr11 = getelementptr inbounds float, float* %pOut.192, i32 1
-  store float %i22, float* %pOut.192, align 4
-  %incdec.ptr12 = getelementptr inbounds float, float* %pOut.192, i32 2
-  store float %i26, float* %incdec.ptr11, align 4
+  %incdec.ptr11 = getelementptr inbounds float, ptr %pOut.192, i32 1
+  store float %i22, ptr %pOut.192, align 4
+  %incdec.ptr12 = getelementptr inbounds float, ptr %pOut.192, i32 2
+  store float %i26, ptr %incdec.ptr11, align 4
   %dec = add nsw i32 %sample.091, -1
   %cmp.not = icmp eq i32 %dec, 0
   br i1 %cmp.not, label %while.end, label %while.body
 
 while.end:                                        ; preds = %while.body, %do.body
-  %pOut.1.lcssa = phi float* [ %pDst, %do.body ], [ %incdec.ptr12, %while.body ]
+  %pOut.1.lcssa = phi ptr [ %pDst, %do.body ], [ %incdec.ptr12, %while.body ]
   %state.0.lcssa = phi <4 x float> [ %i9, %do.body ], [ %i30, %while.body ]
-  %pIn.1.lcssa = phi float* [ %pIn.0, %do.body ], [ %incdec.ptr4, %while.body ]
+  %pIn.1.lcssa = phi ptr [ %pIn.0, %do.body ], [ %incdec.ptr4, %while.body ]
   br i1 %tobool.not, label %if.else, label %if.then
 
 if.then:                                          ; preds = %while.end
-  %i31 = load float, float* %pIn.1.lcssa, align 4
+  %i31 = load float, ptr %pIn.1.lcssa, align 4
   %.splatinsert14 = insertelement <4 x float> poison, float %i31, i32 0
   %.splat15 = shufflevector <4 x float> %.splatinsert14, <4 x float> poison, <4 x i32> zeroinitializer
   %i32 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i4, <4 x float> %.splat15, <4 x float> %state.0.lcssa)
   %i33 = extractelement <4 x float> %i32, i32 0
   %.splat17 = shufflevector <4 x float> %i32, <4 x float> poison, <4 x i32> zeroinitializer
   %i34 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i6, <4 x float> %.splat17, <4 x float> %i32)
-  store float %i33, float* %pOut.1.lcssa, align 4
+  store float %i33, ptr %pOut.1.lcssa, align 4
   %i35 = extractelement <4 x float> %i34, i32 1
-  store float %i35, float* %pState.0, align 4
+  store float %i35, ptr %pState.0, align 4
   %i36 = extractelement <4 x float> %i34, i32 2
   br label %if.end
 
 if.else:                                          ; preds = %while.end
   %i37 = extractelement <4 x float> %state.0.lcssa, i32 0
-  store float %i37, float* %pState.0, align 4
+  store float %i37, ptr %pState.0, align 4
   %i38 = extractelement <4 x float> %state.0.lcssa, i32 1
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then
   %.sink = phi float [ %i38, %if.else ], [ %i36, %if.then ]
-  %i39 = getelementptr inbounds float, float* %pState.0, i32 1
-  store float %.sink, float* %i39, align 4
-  %pState.1 = getelementptr inbounds float, float* %pState.0, i32 2
+  %i39 = getelementptr inbounds float, ptr %pState.0, i32 1
+  store float %.sink, ptr %i39, align 4
+  %pState.1 = getelementptr inbounds float, ptr %pState.0, i32 2
   %dec23 = add i32 %stage.0, -1
   %cmp24.not = icmp eq i32 %dec23, 0
   br i1 %cmp24.not, label %do.end, label %do.body
@@ -2226,11 +2119,11 @@ entry:
 
 
 declare { i32, <4 x i32> } @llvm.arm.mve.vshlc.v4i32(<4 x i32>, i32, i32) #1
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
 declare { <4 x i32>, i32 } @llvm.arm.mve.viwdup.v4i32(i32, i32, i32)
-declare <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32*, <4 x i32>, i32, i32, i32)
+declare <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0.v4i32(ptr, <4 x i32>, i32, i32, i32)
 declare void @llvm.assume(i1)
 declare <4 x i1> @llvm.arm.mve.vctp32(i32)
 declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
-declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32 immarg, <4 x i1>)
+declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr, i32 immarg, <4 x i1>)

diff  --git a/llvm/test/CodeGen/Thumb2/mve-gather-ptrs.ll b/llvm/test/CodeGen/Thumb2/mve-gather-ptrs.ll
index 061354b1c15c..e80ffe053b15 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-ptrs.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-ptrs.ll
@@ -1,10 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -opaque-pointers=0 -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -enable-arm-maskedldst %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-STD
-; RUN: llc -opaque-pointers=0 -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -enable-arm-maskedldst -opaque-pointers %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-OPAQ
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -enable-arm-maskedldst %s -o - | FileCheck %s
 
 ; i32
 
-define arm_aapcs_vfpcc <2 x i32> @ptr_v2i32(<2 x i32*>* %offptr) {
+define arm_aapcs_vfpcc <2 x i32> @ptr_v2i32(ptr %offptr) {
 ; CHECK-LABEL: ptr_v2i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldrd r1, r0, [r0]
@@ -13,24 +12,24 @@ define arm_aapcs_vfpcc <2 x i32> @ptr_v2i32(<2 x i32*>* %offptr) {
 ; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <2 x i32*>, <2 x i32*>* %offptr, align 4
-  %gather = call <2 x i32> @llvm.masked.gather.v2i32.v2p0i32(<2 x i32*> %offs, i32 4, <2 x i1> <i1 true, i1 true>, <2 x i32> undef)
+  %offs = load <2 x ptr>, ptr %offptr, align 4
+  %gather = call <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr> %offs, i32 4, <2 x i1> <i1 true, i1 true>, <2 x i32> undef)
   ret <2 x i32> %gather
 }
 
-define arm_aapcs_vfpcc <4 x i32> @ptr_v4i32(<4 x i32*>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @ptr_v4i32(ptr %offptr) {
 ; CHECK-LABEL: ptr_v4i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
 ; CHECK-NEXT:    vldrw.u32 q0, [q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i32*>, <4 x i32*>* %offptr, align 4
-  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %offs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %offs = load <4 x ptr>, ptr %offptr, align 4
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %offs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   ret <4 x i32> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i32> @ptr_v8i32(<8 x i32*>* %offptr) {
+define arm_aapcs_vfpcc <8 x i32> @ptr_v8i32(ptr %offptr) {
 ; CHECK-LABEL: ptr_v8i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -55,12 +54,12 @@ define arm_aapcs_vfpcc <8 x i32> @ptr_v8i32(<8 x i32*>* %offptr) {
 ; CHECK-NEXT:    vmov q0[3], q0[1], r4, r5
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-  %offs = load <8 x i32*>, <8 x i32*>* %offptr, align 4
-  %gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0i32(<8 x i32*> %offs, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+  %offs = load <8 x ptr>, ptr %offptr, align 4
+  %gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %offs, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
   ret <8 x i32> %gather
 }
 
-define arm_aapcs_vfpcc <16 x i32> @ptr_v16i32(<16 x i32*>* %offptr) {
+define arm_aapcs_vfpcc <16 x i32> @ptr_v16i32(ptr %offptr) {
 ; CHECK-LABEL: ptr_v16i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -103,14 +102,14 @@ define arm_aapcs_vfpcc <16 x i32> @ptr_v16i32(<16 x i32*>* %offptr) {
 ; CHECK-NEXT:    vmov q2[3], q2[1], r5, r2
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-  %offs = load <16 x i32*>, <16 x i32*>* %offptr, align 4
-  %gather = call <16 x i32> @llvm.masked.gather.v16i32.v16p0i32(<16 x i32*> %offs, i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i32> undef)
+  %offs = load <16 x ptr>, ptr %offptr, align 4
+  %gather = call <16 x i32> @llvm.masked.gather.v16i32.v16p0(<16 x ptr> %offs, i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i32> undef)
   ret <16 x i32> %gather
 }
 
 ; f32
 
-define arm_aapcs_vfpcc <2 x float> @ptr_v2f32(<2 x float*>* %offptr) {
+define arm_aapcs_vfpcc <2 x float> @ptr_v2f32(ptr %offptr) {
 ; CHECK-LABEL: ptr_v2f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldrd r1, r0, [r0]
@@ -118,24 +117,24 @@ define arm_aapcs_vfpcc <2 x float> @ptr_v2f32(<2 x float*>* %offptr) {
 ; CHECK-NEXT:    vldr s0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <2 x float*>, <2 x float*>* %offptr, align 4
-  %gather = call <2 x float> @llvm.masked.gather.v2f32.v2p0f32(<2 x float*> %offs, i32 4, <2 x i1> <i1 true, i1 true>, <2 x float> undef)
+  %offs = load <2 x ptr>, ptr %offptr, align 4
+  %gather = call <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr> %offs, i32 4, <2 x i1> <i1 true, i1 true>, <2 x float> undef)
   ret <2 x float> %gather
 }
 
-define arm_aapcs_vfpcc <4 x float> @ptr_v4f32(<4 x float*>* %offptr) {
+define arm_aapcs_vfpcc <4 x float> @ptr_v4f32(ptr %offptr) {
 ; CHECK-LABEL: ptr_v4f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
 ; CHECK-NEXT:    vldrw.u32 q0, [q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x float*>, <4 x float*>* %offptr, align 4
-  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %offs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  %offs = load <4 x ptr>, ptr %offptr, align 4
+  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> %offs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
   ret <4 x float> %gather
 }
 
-define arm_aapcs_vfpcc <8 x float> @ptr_v8f32(<8 x float*>* %offptr) {
+define arm_aapcs_vfpcc <8 x float> @ptr_v8f32(ptr %offptr) {
 ; CHECK-LABEL: ptr_v8f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -156,14 +155,14 @@ define arm_aapcs_vfpcc <8 x float> @ptr_v8f32(<8 x float*>* %offptr) {
 ; CHECK-NEXT:    vldr s4, [r4]
 ; CHECK-NEXT:    pop {r4, r5, r7, pc}
 entry:
-  %offs = load <8 x float*>, <8 x float*>* %offptr, align 4
-  %gather = call <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*> %offs, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x float> undef)
+  %offs = load <8 x ptr>, ptr %offptr, align 4
+  %gather = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %offs, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x float> undef)
   ret <8 x float> %gather
 }
 
 ; i16
 
-define arm_aapcs_vfpcc <8 x i16> @ptr_i16(<8 x i16*>* %offptr) {
+define arm_aapcs_vfpcc <8 x i16> @ptr_i16(ptr %offptr) {
 ; CHECK-LABEL: ptr_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -192,12 +191,12 @@ define arm_aapcs_vfpcc <8 x i16> @ptr_i16(<8 x i16*>* %offptr) {
 ; CHECK-NEXT:    vmov.16 q0[7], r12
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}
 entry:
-  %offs = load <8 x i16*>, <8 x i16*>* %offptr, align 4
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %offs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %offs = load <8 x ptr>, ptr %offptr, align 4
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %offs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   ret <8 x i16> %gather
 }
 
-define arm_aapcs_vfpcc <2 x i32> @ptr_v2i16_sext(<2 x i16*>* %offptr) {
+define arm_aapcs_vfpcc <2 x i32> @ptr_v2i16_sext(ptr %offptr) {
 ; CHECK-LABEL: ptr_v2i16_sext:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldrd r1, r0, [r0]
@@ -209,13 +208,13 @@ define arm_aapcs_vfpcc <2 x i32> @ptr_v2i16_sext(<2 x i16*>* %offptr) {
 ; CHECK-NEXT:    vmov q0[3], q0[1], r1, r0
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <2 x i16*>, <2 x i16*>* %offptr, align 4
-  %gather = call <2 x i16> @llvm.masked.gather.v2i16.v2p0i16(<2 x i16*> %offs, i32 2, <2 x i1> <i1 true, i1 true>, <2 x i16> undef)
+  %offs = load <2 x ptr>, ptr %offptr, align 4
+  %gather = call <2 x i16> @llvm.masked.gather.v2i16.v2p0(<2 x ptr> %offs, i32 2, <2 x i1> <i1 true, i1 true>, <2 x i16> undef)
   %ext = sext <2 x i16> %gather to <2 x i32>
   ret <2 x i32> %ext
 }
 
-define arm_aapcs_vfpcc <2 x i32> @ptr_v2i16_zext(<2 x i16*>* %offptr) {
+define arm_aapcs_vfpcc <2 x i32> @ptr_v2i16_zext(ptr %offptr) {
 ; CHECK-LABEL: ptr_v2i16_zext:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldrd r1, r0, [r0]
@@ -226,13 +225,13 @@ define arm_aapcs_vfpcc <2 x i32> @ptr_v2i16_zext(<2 x i16*>* %offptr) {
 ; CHECK-NEXT:    vand q0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <2 x i16*>, <2 x i16*>* %offptr, align 4
-  %gather = call <2 x i16> @llvm.masked.gather.v2i16.v2p0i16(<2 x i16*> %offs, i32 2, <2 x i1> <i1 true, i1 true>, <2 x i16> undef)
+  %offs = load <2 x ptr>, ptr %offptr, align 4
+  %gather = call <2 x i16> @llvm.masked.gather.v2i16.v2p0(<2 x ptr> %offs, i32 2, <2 x i1> <i1 true, i1 true>, <2 x i16> undef)
   %ext = zext <2 x i16> %gather to <2 x i32>
   ret <2 x i32> %ext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @ptr_v4i16_sext(<4 x i16*>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @ptr_v4i16_sext(ptr %offptr) {
 ; CHECK-LABEL: ptr_v4i16_sext:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -240,13 +239,13 @@ define arm_aapcs_vfpcc <4 x i32> @ptr_v4i16_sext(<4 x i16*>* %offptr) {
 ; CHECK-NEXT:    vldrh.s32 q0, [r1, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16*>, <4 x i16*>* %offptr, align 4
-  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %offs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %offs = load <4 x ptr>, ptr %offptr, align 4
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %offs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
   %ext = sext <4 x i16> %gather to <4 x i32>
   ret <4 x i32> %ext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @ptr_v4i16_zext(<4 x i16*>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @ptr_v4i16_zext(ptr %offptr) {
 ; CHECK-LABEL: ptr_v4i16_zext:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -254,13 +253,13 @@ define arm_aapcs_vfpcc <4 x i32> @ptr_v4i16_zext(<4 x i16*>* %offptr) {
 ; CHECK-NEXT:    vldrh.u32 q0, [r1, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16*>, <4 x i16*>* %offptr, align 4
-  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %offs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %offs = load <4 x ptr>, ptr %offptr, align 4
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %offs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
   %ext = zext <4 x i16> %gather to <4 x i32>
   ret <4 x i32> %ext
 }
 
-define arm_aapcs_vfpcc <4 x i16> @ptr_v4i16(<4 x i16*>* %offptr) {
+define arm_aapcs_vfpcc <4 x i16> @ptr_v4i16(ptr %offptr) {
 ; CHECK-LABEL: ptr_v4i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -268,12 +267,12 @@ define arm_aapcs_vfpcc <4 x i16> @ptr_v4i16(<4 x i16*>* %offptr) {
 ; CHECK-NEXT:    vldrh.u32 q0, [r1, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16*>, <4 x i16*>* %offptr, align 4
-  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %offs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %offs = load <4 x ptr>, ptr %offptr, align 4
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %offs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
   ret <4 x i16> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i32> @ptr_v8i16_sext(<8 x i16*>* %offptr) {
+define arm_aapcs_vfpcc <8 x i32> @ptr_v8i16_sext(ptr %offptr) {
 ; CHECK-LABEL: ptr_v8i16_sext:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -309,13 +308,13 @@ define arm_aapcs_vfpcc <8 x i32> @ptr_v8i16_sext(<8 x i16*>* %offptr) {
 ; CHECK-NEXT:    add sp, #16
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-  %offs = load <8 x i16*>, <8 x i16*>* %offptr, align 4
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %offs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %offs = load <8 x ptr>, ptr %offptr, align 4
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %offs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   %ext = sext <8 x i16> %gather to <8 x i32>
   ret <8 x i32> %ext
 }
 
-define arm_aapcs_vfpcc <8 x i32> @ptr_v8i16_zext(<8 x i16*>* %offptr) {
+define arm_aapcs_vfpcc <8 x i32> @ptr_v8i16_zext(ptr %offptr) {
 ; CHECK-LABEL: ptr_v8i16_zext:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -351,15 +350,15 @@ define arm_aapcs_vfpcc <8 x i32> @ptr_v8i16_zext(<8 x i16*>* %offptr) {
 ; CHECK-NEXT:    add sp, #16
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-  %offs = load <8 x i16*>, <8 x i16*>* %offptr, align 4
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %offs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %offs = load <8 x ptr>, ptr %offptr, align 4
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %offs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   %ext = zext <8 x i16> %gather to <8 x i32>
   ret <8 x i32> %ext
 }
 
 ; f16
 
-define arm_aapcs_vfpcc <8 x half> @ptr_f16(<8 x half*>* %offptr) {
+define arm_aapcs_vfpcc <8 x half> @ptr_f16(ptr %offptr) {
 ; CHECK-LABEL: ptr_f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
@@ -382,12 +381,12 @@ define arm_aapcs_vfpcc <8 x half> @ptr_f16(<8 x half*>* %offptr) {
 ; CHECK-NEXT:    vins.f16 s3, s4
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x half*>, <8 x half*>* %offptr, align 4
-  %gather = call <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*> %offs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x half> undef)
+  %offs = load <8 x ptr>, ptr %offptr, align 4
+  %gather = call <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr> %offs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x half> undef)
   ret <8 x half> %gather
 }
 
-define arm_aapcs_vfpcc <4 x half> @ptr_v4f16(<4 x half*>* %offptr) {
+define arm_aapcs_vfpcc <4 x half> @ptr_v4f16(ptr %offptr) {
 ; CHECK-LABEL: ptr_v4f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
@@ -401,14 +400,14 @@ define arm_aapcs_vfpcc <4 x half> @ptr_v4f16(<4 x half*>* %offptr) {
 ; CHECK-NEXT:    vins.f16 s1, s2
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x half*>, <4 x half*>* %offptr, align 4
-  %gather = call <4 x half> @llvm.masked.gather.v4f16.v4p0f16(<4 x half*> %offs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x half> undef)
+  %offs = load <4 x ptr>, ptr %offptr, align 4
+  %gather = call <4 x half> @llvm.masked.gather.v4f16.v4p0(<4 x ptr> %offs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x half> undef)
   ret <4 x half> %gather
 }
 
 ; i8
 
-define arm_aapcs_vfpcc <16 x i8> @ptr_i8(<16 x i8*>* %offptr) {
+define arm_aapcs_vfpcc <16 x i8> @ptr_i8(ptr %offptr) {
 ; CHECK-LABEL: ptr_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -459,12 +458,12 @@ define arm_aapcs_vfpcc <16 x i8> @ptr_i8(<16 x i8*>* %offptr) {
 ; CHECK-NEXT:    vmov.8 q0[15], r12
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-  %offs = load <16 x i8*>, <16 x i8*>* %offptr, align 4
-  %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %offs, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+  %offs = load <16 x ptr>, ptr %offptr, align 4
+  %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %offs, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
   ret <16 x i8> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i16> @ptr_v8i8_sext16(<8 x i8*>* %offptr) {
+define arm_aapcs_vfpcc <8 x i16> @ptr_v8i8_sext16(ptr %offptr) {
 ; CHECK-LABEL: ptr_v8i8_sext16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -494,13 +493,13 @@ define arm_aapcs_vfpcc <8 x i16> @ptr_v8i8_sext16(<8 x i8*>* %offptr) {
 ; CHECK-NEXT:    vmovlb.s8 q0, q0
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-  %offs = load <8 x i8*>, <8 x i8*>* %offptr, align 4
-  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %offs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
+  %offs = load <8 x ptr>, ptr %offptr, align 4
+  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr> %offs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
   %ext = sext <8 x i8> %gather to <8 x i16>
   ret <8 x i16> %ext
 }
 
-define arm_aapcs_vfpcc <8 x i16> @ptr_v8i8_zext16(<8 x i8*>* %offptr) {
+define arm_aapcs_vfpcc <8 x i16> @ptr_v8i8_zext16(ptr %offptr) {
 ; CHECK-LABEL: ptr_v8i8_zext16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -530,13 +529,13 @@ define arm_aapcs_vfpcc <8 x i16> @ptr_v8i8_zext16(<8 x i8*>* %offptr) {
 ; CHECK-NEXT:    vmovlb.u8 q0, q0
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-  %offs = load <8 x i8*>, <8 x i8*>* %offptr, align 4
-  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %offs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
+  %offs = load <8 x ptr>, ptr %offptr, align 4
+  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr> %offs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
   %ext = zext <8 x i8> %gather to <8 x i16>
   ret <8 x i16> %ext
 }
 
-define arm_aapcs_vfpcc <8 x i8> @ptr_v8i8(<8 x i8*>* %offptr) {
+define arm_aapcs_vfpcc <8 x i8> @ptr_v8i8(ptr %offptr) {
 ; CHECK-LABEL: ptr_v8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -565,12 +564,12 @@ define arm_aapcs_vfpcc <8 x i8> @ptr_v8i8(<8 x i8*>* %offptr) {
 ; CHECK-NEXT:    vmov.16 q0[7], r12
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}
 entry:
-  %offs = load <8 x i8*>, <8 x i8*>* %offptr, align 4
-  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %offs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
+  %offs = load <8 x ptr>, ptr %offptr, align 4
+  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr> %offs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
   ret <8 x i8> %gather
 }
 
-define arm_aapcs_vfpcc <4 x i32> @ptr_v4i8_sext32(<4 x i8*>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @ptr_v4i8_sext32(ptr %offptr) {
 ; CHECK-LABEL: ptr_v4i8_sext32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -578,13 +577,13 @@ define arm_aapcs_vfpcc <4 x i32> @ptr_v4i8_sext32(<4 x i8*>* %offptr) {
 ; CHECK-NEXT:    vldrb.s32 q0, [r1, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8*>, <4 x i8*>* %offptr, align 4
-  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %offs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %offs = load <4 x ptr>, ptr %offptr, align 4
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> %offs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
   %ext = sext <4 x i8> %gather to <4 x i32>
   ret <4 x i32> %ext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @ptr_v4i8_zext32(<4 x i8*>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @ptr_v4i8_zext32(ptr %offptr) {
 ; CHECK-LABEL: ptr_v4i8_zext32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -592,13 +591,13 @@ define arm_aapcs_vfpcc <4 x i32> @ptr_v4i8_zext32(<4 x i8*>* %offptr) {
 ; CHECK-NEXT:    vldrb.u32 q0, [r1, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8*>, <4 x i8*>* %offptr, align 4
-  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %offs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %offs = load <4 x ptr>, ptr %offptr, align 4
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> %offs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
   %ext = zext <4 x i8> %gather to <4 x i32>
   ret <4 x i32> %ext
 }
 
-define arm_aapcs_vfpcc <4 x i8> @ptr_v4i8(<4 x i8*>* %offptr) {
+define arm_aapcs_vfpcc <4 x i8> @ptr_v4i8(ptr %offptr) {
 ; CHECK-LABEL: ptr_v4i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -606,12 +605,12 @@ define arm_aapcs_vfpcc <4 x i8> @ptr_v4i8(<4 x i8*>* %offptr) {
 ; CHECK-NEXT:    vldrb.u32 q0, [r1, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8*>, <4 x i8*>* %offptr, align 4
-  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %offs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %offs = load <4 x ptr>, ptr %offptr, align 4
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> %offs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
   ret <4 x i8> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i32> @ptr_v8i8_sext32(<8 x i8*>* %offptr) {
+define arm_aapcs_vfpcc <8 x i32> @ptr_v8i8_sext32(ptr %offptr) {
 ; CHECK-LABEL: ptr_v8i8_sext32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -640,13 +639,13 @@ define arm_aapcs_vfpcc <8 x i32> @ptr_v8i8_sext32(<8 x i8*>* %offptr) {
 ; CHECK-NEXT:    vmovlb.s16 q0, q0
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-  %offs = load <8 x i8*>, <8 x i8*>* %offptr, align 4
-  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %offs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
+  %offs = load <8 x ptr>, ptr %offptr, align 4
+  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr> %offs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
   %ext = sext <8 x i8> %gather to <8 x i32>
   ret <8 x i32> %ext
 }
 
-define arm_aapcs_vfpcc <8 x i32> @ptr_v8i8_zext32(<8 x i8*>* %offptr) {
+define arm_aapcs_vfpcc <8 x i32> @ptr_v8i8_zext32(ptr %offptr) {
 ; CHECK-LABEL: ptr_v8i8_zext32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -674,15 +673,15 @@ define arm_aapcs_vfpcc <8 x i32> @ptr_v8i8_zext32(<8 x i8*>* %offptr) {
 ; CHECK-NEXT:    vand q1, q2, q1
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-  %offs = load <8 x i8*>, <8 x i8*>* %offptr, align 4
-  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %offs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
+  %offs = load <8 x ptr>, ptr %offptr, align 4
+  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr> %offs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
   %ext = zext <8 x i8> %gather to <8 x i32>
   ret <8 x i32> %ext
 }
 
 ; loops
 
-define void @foo_ptr_p_int32_t(i32* %dest, i32** %src, i32 %n) {
+define void @foo_ptr_p_int32_t(ptr %dest, ptr %src, i32 %n) {
 ; CHECK-LABEL: foo_ptr_p_int32_t:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -711,14 +710,12 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ %index.next, %vector.body ], [ 0, %entry ]
-  %i = getelementptr inbounds i32*, i32** %src, i32 %index
-  %i1 = bitcast i32** %i to <4 x i32*>*
-  %wide.load = load <4 x i32*>, <4 x i32*>* %i1, align 4
-  %i2 = icmp ne <4 x i32*> %wide.load, zeroinitializer
-  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %wide.load, i32 4, <4 x i1> %i2, <4 x i32> undef)
-  %i3 = getelementptr inbounds i32, i32* %dest, i32 %index
-  %i4 = bitcast i32* %i3 to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %wide.masked.gather, <4 x i32>* %i4, i32 4, <4 x i1> %i2)
+  %i = getelementptr inbounds ptr, ptr %src, i32 %index
+  %wide.load = load <4 x ptr>, ptr %i, align 4
+  %i2 = icmp ne <4 x ptr> %wide.load, zeroinitializer
+  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %wide.load, i32 4, <4 x i1> %i2, <4 x i32> undef)
+  %i3 = getelementptr inbounds i32, ptr %dest, i32 %index
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %wide.masked.gather, ptr %i3, i32 4, <4 x i1> %i2)
   %index.next = add i32 %index, 4
   %i5 = icmp eq i32 %index.next, %and
   br i1 %i5, label %for.end, label %vector.body
@@ -727,7 +724,7 @@ for.end:                                          ; preds = %vector.body, %entry
   ret void
 }
 
-define void @foo_ptr_p_float(float* %dest, float** %src, i32 %n) {
+define void @foo_ptr_p_float(ptr %dest, ptr %src, i32 %n) {
 ; CHECK-LABEL: foo_ptr_p_float:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -756,15 +753,13 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ %index.next, %vector.body ], [ 0, %entry ]
-  %i = getelementptr inbounds float*, float** %src, i32 %index
-  %i1 = bitcast float** %i to <4 x float*>*
-  %wide.load = load <4 x float*>, <4 x float*>* %i1, align 4
-  %i2 = icmp ne <4 x float*> %wide.load, zeroinitializer
-  %i3 = bitcast <4 x float*> %wide.load to <4 x i32*>
-  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %i3, i32 4, <4 x i1> %i2, <4 x i32> undef)
-  %i4 = getelementptr inbounds float, float* %dest, i32 %index
-  %i5 = bitcast float* %i4 to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %wide.masked.gather, <4 x i32>* %i5, i32 4, <4 x i1> %i2)
+  %i = getelementptr inbounds ptr, ptr %src, i32 %index
+  %wide.load = load <4 x ptr>, ptr %i, align 4
+  %i2 = icmp ne <4 x ptr> %wide.load, zeroinitializer
+  %i3 = bitcast <4 x ptr> %wide.load to <4 x ptr>
+  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %i3, i32 4, <4 x i1> %i2, <4 x i32> undef)
+  %i4 = getelementptr inbounds float, ptr %dest, i32 %index
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %wide.masked.gather, ptr %i4, i32 4, <4 x i1> %i2)
   %index.next = add i32 %index, 4
   %i6 = icmp eq i32 %index.next, %and
   br i1 %i6, label %for.end, label %vector.body
@@ -773,7 +768,7 @@ for.end:                                          ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc <4 x i32> @qi4(<4 x i32*> %p) {
+define arm_aapcs_vfpcc <4 x i32> @qi4(<4 x ptr> %p) {
 ; CHECK-LABEL: qi4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    movs r0, #16
@@ -781,12 +776,12 @@ define arm_aapcs_vfpcc <4 x i32> @qi4(<4 x i32*> %p) {
 ; CHECK-NEXT:    vldrw.u32 q0, [q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %g = getelementptr inbounds i32, <4 x i32*> %p, i32 4
-  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %g, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %g = getelementptr inbounds i32, <4 x ptr> %p, i32 4
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %g, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   ret <4 x i32> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i32> @sext_unsigned_unscaled_i8_i8_toi64(i8* %base, <8 x i8>* %offptr) {
+define arm_aapcs_vfpcc <8 x i32> @sext_unsigned_unscaled_i8_i8_toi64(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: sext_unsigned_unscaled_i8_i8_toi64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r1]
@@ -809,15 +804,15 @@ define arm_aapcs_vfpcc <8 x i32> @sext_unsigned_unscaled_i8_i8_toi64(i8* %base,
 ; CHECK-NEXT:    vmovlb.s16 q1, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i8>, <8 x i8>* %offptr, align 1
+  %offs = load <8 x i8>, ptr %offptr, align 1
   %offs.zext = zext <8 x i8> %offs to <8 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
-  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %ptrs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
+  %ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs.zext
+  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr> %ptrs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
   %gather.sext = sext <8 x i8> %gather to <8 x i32>
   ret <8 x i32> %gather.sext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @gepconstoff_i32(i32* %base) {
+define arm_aapcs_vfpcc <4 x i32> @gepconstoff_i32(ptr %base) {
 ; CHECK-LABEL: gepconstoff_i32:
 ; CHECK:       @ %bb.0: @ %bb
 ; CHECK-NEXT:    adr r1, .LCPI30_0
@@ -832,103 +827,73 @@ define arm_aapcs_vfpcc <4 x i32> @gepconstoff_i32(i32* %base) {
 ; CHECK-NEXT:    .long 8 @ 0x8
 ; CHECK-NEXT:    .long 12 @ 0xc
 bb:
-  %a = getelementptr i32, i32* %base, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
-  %g = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %a, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> poison)
+  %a = getelementptr i32, ptr %base, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+  %g = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %a, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> poison)
   ret <4 x i32> %g
 }
 
-define arm_aapcs_vfpcc <4 x i32> @gepconstoff_i8(i8* %base) {
-; CHECK-STD-LABEL: gepconstoff_i8:
-; CHECK-STD:       @ %bb.0: @ %bb
-; CHECK-STD-NEXT:    adr r1, .LCPI31_0
-; CHECK-STD-NEXT:    vldrw.u32 q0, [r1]
-; CHECK-STD-NEXT:    vadd.i32 q1, q0, r0
-; CHECK-STD-NEXT:    vldrw.u32 q0, [q1]
-; CHECK-STD-NEXT:    bx lr
-; CHECK-STD-NEXT:    .p2align 4
-; CHECK-STD-NEXT:  @ %bb.1:
-; CHECK-STD-NEXT:  .LCPI31_0:
-; CHECK-STD-NEXT:    .long 4294967292 @ 0xfffffffc
-; CHECK-STD-NEXT:    .long 12 @ 0xc
-; CHECK-STD-NEXT:    .long 28 @ 0x1c
-; CHECK-STD-NEXT:    .long 44 @ 0x2c
-;
-; CHECK-OPAQ-LABEL: gepconstoff_i8:
-; CHECK-OPAQ:       @ %bb.0: @ %bb
-; CHECK-OPAQ-NEXT:    adr r1, .LCPI31_0
-; CHECK-OPAQ-NEXT:    vldrw.u32 q1, [r1]
-; CHECK-OPAQ-NEXT:    vldrw.u32 q0, [r0, q1]
-; CHECK-OPAQ-NEXT:    bx lr
-; CHECK-OPAQ-NEXT:    .p2align 4
-; CHECK-OPAQ-NEXT:  @ %bb.1:
-; CHECK-OPAQ-NEXT:  .LCPI31_0:
-; CHECK-OPAQ-NEXT:    .long 4294967292 @ 0xfffffffc
-; CHECK-OPAQ-NEXT:    .long 12 @ 0xc
-; CHECK-OPAQ-NEXT:    .long 28 @ 0x1c
-; CHECK-OPAQ-NEXT:    .long 44 @ 0x2c
+define arm_aapcs_vfpcc <4 x i32> @gepconstoff_i8(ptr %base) {
+; CHECK-LABEL: gepconstoff_i8:
+; CHECK:       @ %bb.0: @ %bb
+; CHECK-NEXT:    adr r1, .LCPI31_0
+; CHECK-NEXT:    vldrw.u32 q1, [r1]
+; CHECK-NEXT:    vldrw.u32 q0, [r0, q1]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI31_0:
+; CHECK-NEXT:    .long 4294967292 @ 0xfffffffc
+; CHECK-NEXT:    .long 12 @ 0xc
+; CHECK-NEXT:    .long 28 @ 0x1c
+; CHECK-NEXT:    .long 44 @ 0x2c
 bb:
-  %a = getelementptr i8, i8* %base, <4 x i32> <i32 0, i32 16, i32 32, i32 48>
-  %b = bitcast <4 x i8*> %a to <4 x i32*>
-  %c = getelementptr inbounds i32, <4 x i32*> %b, i32 -1
-  %g = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %c, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> poison)
+  %a = getelementptr i8, ptr %base, <4 x i32> <i32 0, i32 16, i32 32, i32 48>
+  %b = bitcast <4 x ptr> %a to <4 x ptr>
+  %c = getelementptr inbounds i32, <4 x ptr> %b, i32 -1
+  %g = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %c, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> poison)
   ret <4 x i32> %g
 }
 
-define arm_aapcs_vfpcc <4 x i32> @gepconstoff3_i16(i16* %base) {
-; CHECK-STD-LABEL: gepconstoff3_i16:
-; CHECK-STD:       @ %bb.0: @ %bb
-; CHECK-STD-NEXT:    adr r1, .LCPI32_0
-; CHECK-STD-NEXT:    vldrw.u32 q0, [r1]
-; CHECK-STD-NEXT:    vadd.i32 q1, q0, r0
-; CHECK-STD-NEXT:    vldrw.u32 q0, [q1]
-; CHECK-STD-NEXT:    bx lr
-; CHECK-STD-NEXT:    .p2align 4
-; CHECK-STD-NEXT:  @ %bb.1:
-; CHECK-STD-NEXT:  .LCPI32_0:
-; CHECK-STD-NEXT:    .long 12 @ 0xc
-; CHECK-STD-NEXT:    .long 18 @ 0x12
-; CHECK-STD-NEXT:    .long 58 @ 0x3a
-; CHECK-STD-NEXT:    .long 280 @ 0x118
-;
-; CHECK-OPAQ-LABEL: gepconstoff3_i16:
-; CHECK-OPAQ:       @ %bb.0: @ %bb
-; CHECK-OPAQ-NEXT:    adr r1, .LCPI32_0
-; CHECK-OPAQ-NEXT:    vldrw.u32 q1, [r1]
-; CHECK-OPAQ-NEXT:    vldrw.u32 q0, [r0, q1]
-; CHECK-OPAQ-NEXT:    bx lr
-; CHECK-OPAQ-NEXT:    .p2align 4
-; CHECK-OPAQ-NEXT:  @ %bb.1:
-; CHECK-OPAQ-NEXT:  .LCPI32_0:
-; CHECK-OPAQ-NEXT:    .long 12 @ 0xc
-; CHECK-OPAQ-NEXT:    .long 18 @ 0x12
-; CHECK-OPAQ-NEXT:    .long 58 @ 0x3a
-; CHECK-OPAQ-NEXT:    .long 280 @ 0x118
+define arm_aapcs_vfpcc <4 x i32> @gepconstoff3_i16(ptr %base) {
+; CHECK-LABEL: gepconstoff3_i16:
+; CHECK:       @ %bb.0: @ %bb
+; CHECK-NEXT:    adr r1, .LCPI32_0
+; CHECK-NEXT:    vldrw.u32 q1, [r1]
+; CHECK-NEXT:    vldrw.u32 q0, [r0, q1]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI32_0:
+; CHECK-NEXT:    .long 12 @ 0xc
+; CHECK-NEXT:    .long 18 @ 0x12
+; CHECK-NEXT:    .long 58 @ 0x3a
+; CHECK-NEXT:    .long 280 @ 0x118
 bb:
-  %a = getelementptr i16, i16* %base, <4 x i32> <i32 0, i32 16, i32 32, i32 48>
-  %b = bitcast <4 x i16*> %a to <4 x i8*>
-  %c = getelementptr i8, <4 x i8*> %b, <4 x i32> <i32 16, i32 -10, i32 -2, i32 188>
-  %d = bitcast <4 x i8*> %c to <4 x i32*>
-  %e = getelementptr inbounds i32, <4 x i32*> %d, i32 -1
-  %g = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %e, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> poison)
+  %a = getelementptr i16, ptr %base, <4 x i32> <i32 0, i32 16, i32 32, i32 48>
+  %b = bitcast <4 x ptr> %a to <4 x ptr>
+  %c = getelementptr i8, <4 x ptr> %b, <4 x i32> <i32 16, i32 -10, i32 -2, i32 188>
+  %d = bitcast <4 x ptr> %c to <4 x ptr>
+  %e = getelementptr inbounds i32, <4 x ptr> %d, i32 -1
+  %g = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %e, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> poison)
   ret <4 x i32> %g
 }
 
-declare <2 x i32> @llvm.masked.gather.v2i32.v2p0i32(<2 x i32*>, i32, <2 x i1>, <2 x i32>)
-declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
-declare <8 x i32> @llvm.masked.gather.v8i32.v8p0i32(<8 x i32*>, i32, <8 x i1>, <8 x i32>)
-declare <16 x i32> @llvm.masked.gather.v16i32.v16p0i32(<16 x i32*>, i32, <16 x i1>, <16 x i32>)
-declare <2 x float> @llvm.masked.gather.v2f32.v2p0f32(<2 x float*>, i32, <2 x i1>, <2 x float>)
-declare <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*>, i32, <4 x i1>, <4 x float>)
-declare <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*>, i32, <8 x i1>, <8 x float>)
-declare <2 x i16> @llvm.masked.gather.v2i16.v2p0i16(<2 x i16*>, i32, <2 x i1>, <2 x i16>)
-declare <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*>, i32, <4 x i1>, <4 x i16>)
-declare <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*>, i32, <8 x i1>, <8 x i16>)
-declare <16 x i16> @llvm.masked.gather.v16i16.v16p0i16(<16 x i16*>, i32, <16 x i1>, <16 x i16>)
-declare <4 x half> @llvm.masked.gather.v4f16.v4p0f16(<4 x half*>, i32, <4 x i1>, <4 x half>)
-declare <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*>, i32, <8 x i1>, <8 x half>)
-declare <16 x half> @llvm.masked.gather.v16f16.v16p0f16(<16 x half*>, i32, <16 x i1>, <16 x half>)
-declare <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*>, i32, <4 x i1>, <4 x i8>)
-declare <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*>, i32, <8 x i1>, <8 x i8>)
-declare <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*>, i32, <16 x i1>, <16 x i8>)
-declare <32 x i8> @llvm.masked.gather.v32i8.v32p0i8(<32 x i8*>, i32, <32 x i1>, <32 x i8>)
-declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
+declare <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i32>)
+declare <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i32>)
+declare <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i32>)
+declare <16 x i32> @llvm.masked.gather.v16i32.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x i32>)
+declare <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x float>)
+declare <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x float>)
+declare <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x float>)
+declare <2 x i16> @llvm.masked.gather.v2i16.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i16>)
+declare <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i16>)
+declare <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i16>)
+declare <16 x i16> @llvm.masked.gather.v16i16.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x i16>)
+declare <4 x half> @llvm.masked.gather.v4f16.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x half>)
+declare <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x half>)
+declare <16 x half> @llvm.masked.gather.v16f16.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x half>)
+declare <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i8>)
+declare <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i8>)
+declare <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x i8>)
+declare <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr>, i32, <32 x i1>, <32 x i8>)
+declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32, <4 x i1>)

diff  --git a/llvm/test/CodeGen/Thumb2/mve-postinc-dct.ll b/llvm/test/CodeGen/Thumb2/mve-postinc-dct.ll
index 569699cf66cb..7e059ae726fc 100644
--- a/llvm/test/CodeGen/Thumb2/mve-postinc-dct.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-postinc-dct.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -opaque-pointers=0 -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - | FileCheck %s
 
-%struct.DCT_InstanceTypeDef = type { float*, i32, i32 }
+%struct.DCT_InstanceTypeDef = type { ptr, i32, i32 }
 
-define void @DCT_mve1(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float* nocapture readonly %pIn, float* nocapture %pOut) {
+define void @DCT_mve1(ptr nocapture readonly %S, ptr nocapture readonly %pIn, ptr nocapture %pOut) {
 ; CHECK-LABEL: DCT_mve1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, lr}
@@ -46,12 +46,12 @@ define void @DCT_mve1(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float*
 ; CHECK-NEXT:  .LBB0_5: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, pc}
 entry:
-  %NumInputs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 2
-  %i = load i32, i32* %NumInputs, align 4
-  %NumFilters = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 1
-  %i1 = load i32, i32* %NumFilters, align 4
-  %pDCTCoefs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 0
-  %i2 = load float*, float** %pDCTCoefs, align 4
+  %NumInputs = getelementptr inbounds %struct.DCT_InstanceTypeDef, ptr %S, i32 0, i32 2
+  %i = load i32, ptr %NumInputs, align 4
+  %NumFilters = getelementptr inbounds %struct.DCT_InstanceTypeDef, ptr %S, i32 0, i32 1
+  %i1 = load i32, ptr %NumFilters, align 4
+  %pDCTCoefs = getelementptr inbounds %struct.DCT_InstanceTypeDef, ptr %S, i32 0, i32 0
+  %i2 = load ptr, ptr %pDCTCoefs, align 4
   %cmp = icmp ugt i32 %i, 1
   tail call void @llvm.assume(i1 %cmp)
   %sub = add i32 %i1, -1
@@ -75,13 +75,11 @@ vector.body:                                      ; preds = %vector.body, %for.b
   %index = phi i32 [ 0, %for.body ], [ %index.next, %vector.body ]
   %vec.phi = phi <4 x float> [ zeroinitializer, %for.body ], [ %i10, %vector.body ]
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %i)
-  %i3 = getelementptr inbounds float, float* %pIn, i32 %index
-  %i4 = bitcast float* %i3 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i4, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i3 = getelementptr inbounds float, ptr %pIn, i32 %index
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i3, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i5 = add i32 %index, %mul4
-  %i6 = getelementptr inbounds float, float* %i2, i32 %i5
-  %i7 = bitcast float* %i6 to <4 x float>*
-  %wide.masked.load53 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i7, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i6 = getelementptr inbounds float, ptr %i2, i32 %i5
+  %wide.masked.load53 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i6, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i8 = fmul fast <4 x float> %wide.masked.load53, %wide.masked.load
   %i9 = fadd fast <4 x float> %i8, %vec.phi
   %i10 = select <4 x i1> %active.lane.mask, <4 x float> %i9, <4 x float> %vec.phi
@@ -91,14 +89,14 @@ vector.body:                                      ; preds = %vector.body, %for.b
 
 middle.block:                                     ; preds = %vector.body
   %i12 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i10)
-  %arrayidx14 = getelementptr inbounds float, float* %pOut, i32 %k2.051
-  store float %i12, float* %arrayidx14, align 4
+  %arrayidx14 = getelementptr inbounds float, ptr %pOut, i32 %k2.051
+  store float %i12, ptr %arrayidx14, align 4
   %add16 = add nuw i32 %k2.051, 1
   %exitcond52.not = icmp eq i32 %add16, %sub
   br i1 %exitcond52.not, label %for.cond.cleanup, label %for.body
 }
 
-define void @DCT_mve2(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float* nocapture readonly %pIn, float* nocapture %pOut) {
+define void @DCT_mve2(ptr nocapture readonly %S, ptr nocapture readonly %pIn, ptr nocapture %pOut) {
 ; CHECK-LABEL: DCT_mve2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
@@ -157,12 +155,12 @@ define void @DCT_mve2(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float*
 ; CHECK-NEXT:    add sp, #4
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
-  %NumInputs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 2
-  %i = load i32, i32* %NumInputs, align 4
-  %NumFilters = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 1
-  %i1 = load i32, i32* %NumFilters, align 4
-  %pDCTCoefs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 0
-  %i2 = load float*, float** %pDCTCoefs, align 4
+  %NumInputs = getelementptr inbounds %struct.DCT_InstanceTypeDef, ptr %S, i32 0, i32 2
+  %i = load i32, ptr %NumInputs, align 4
+  %NumFilters = getelementptr inbounds %struct.DCT_InstanceTypeDef, ptr %S, i32 0, i32 1
+  %i1 = load i32, ptr %NumFilters, align 4
+  %pDCTCoefs = getelementptr inbounds %struct.DCT_InstanceTypeDef, ptr %S, i32 0, i32 0
+  %i2 = load ptr, ptr %pDCTCoefs, align 4
   %cmp = icmp ugt i32 %i, 1
   tail call void @llvm.assume(i1 %cmp)
   %sub = add i32 %i1, -2
@@ -189,19 +187,16 @@ vector.body:                                      ; preds = %vector.body, %for.b
   %vec.phi = phi <4 x float> [ zeroinitializer, %for.body ], [ %i15, %vector.body ]
   %vec.phi73 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i16, %vector.body ]
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %i)
-  %i3 = getelementptr inbounds float, float* %pIn, i32 %index
-  %i4 = bitcast float* %i3 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i4, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i3 = getelementptr inbounds float, ptr %pIn, i32 %index
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i3, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i5 = add i32 %index, %mul4
-  %i6 = getelementptr inbounds float, float* %i2, i32 %i5
-  %i7 = bitcast float* %i6 to <4 x float>*
-  %wide.masked.load74 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i7, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i6 = getelementptr inbounds float, ptr %i2, i32 %i5
+  %wide.masked.load74 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i6, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i8 = fmul fast <4 x float> %wide.masked.load74, %wide.masked.load
   %i9 = fadd fast <4 x float> %i8, %vec.phi73
   %i10 = add i32 %index, %mul5
-  %i11 = getelementptr inbounds float, float* %i2, i32 %i10
-  %i12 = bitcast float* %i11 to <4 x float>*
-  %wide.masked.load75 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i12, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i11 = getelementptr inbounds float, ptr %i2, i32 %i10
+  %wide.masked.load75 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i11, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i13 = fmul fast <4 x float> %wide.masked.load75, %wide.masked.load
   %i14 = fadd fast <4 x float> %i13, %vec.phi
   %i15 = select <4 x i1> %active.lane.mask, <4 x float> %i14, <4 x float> %vec.phi
@@ -213,16 +208,16 @@ vector.body:                                      ; preds = %vector.body, %for.b
 middle.block:                                     ; preds = %vector.body
   %i18 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i16)
   %i19 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i15)
-  %arrayidx21 = getelementptr inbounds float, float* %pOut, i32 %k2.072
-  store float %i18, float* %arrayidx21, align 4
-  %arrayidx23 = getelementptr inbounds float, float* %pOut, i32 %add
-  store float %i19, float* %arrayidx23, align 4
+  %arrayidx21 = getelementptr inbounds float, ptr %pOut, i32 %k2.072
+  store float %i18, ptr %arrayidx21, align 4
+  %arrayidx23 = getelementptr inbounds float, ptr %pOut, i32 %add
+  store float %i19, ptr %arrayidx23, align 4
   %add25 = add i32 %k2.072, 2
   %cmp3 = icmp ult i32 %add25, %sub
   br i1 %cmp3, label %for.body, label %for.cond.cleanup
 }
 
-define void @DCT_mve3(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float* nocapture readonly %pIn, float* nocapture %pOut) {
+define void @DCT_mve3(ptr nocapture readonly %S, ptr nocapture readonly %pIn, ptr nocapture %pOut) {
 ; CHECK-LABEL: DCT_mve3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
@@ -314,12 +309,12 @@ define void @DCT_mve3(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float*
 ; CHECK-NEXT:    add sp, #4
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
-  %NumInputs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 2
-  %i = load i32, i32* %NumInputs, align 4
-  %NumFilters = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 1
-  %i1 = load i32, i32* %NumFilters, align 4
-  %pDCTCoefs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 0
-  %i2 = load float*, float** %pDCTCoefs, align 4
+  %NumInputs = getelementptr inbounds %struct.DCT_InstanceTypeDef, ptr %S, i32 0, i32 2
+  %i = load i32, ptr %NumInputs, align 4
+  %NumFilters = getelementptr inbounds %struct.DCT_InstanceTypeDef, ptr %S, i32 0, i32 1
+  %i1 = load i32, ptr %NumFilters, align 4
+  %pDCTCoefs = getelementptr inbounds %struct.DCT_InstanceTypeDef, ptr %S, i32 0, i32 0
+  %i2 = load ptr, ptr %pDCTCoefs, align 4
   %cmp = icmp ugt i32 %i, 1
   tail call void @llvm.assume(i1 %cmp)
   %sub = add i32 %i1, -3
@@ -349,25 +344,21 @@ vector.body:                                      ; preds = %vector.body, %for.b
   %vec.phi94 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i21, %vector.body ]
   %vec.phi95 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i22, %vector.body ]
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %i)
-  %i3 = getelementptr inbounds float, float* %pIn, i32 %index
-  %i4 = bitcast float* %i3 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i4, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i3 = getelementptr inbounds float, ptr %pIn, i32 %index
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i3, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i5 = add i32 %index, %mul4
-  %i6 = getelementptr inbounds float, float* %i2, i32 %i5
-  %i7 = bitcast float* %i6 to <4 x float>*
-  %wide.masked.load96 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i7, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i6 = getelementptr inbounds float, ptr %i2, i32 %i5
+  %wide.masked.load96 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i6, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i8 = fmul fast <4 x float> %wide.masked.load96, %wide.masked.load
   %i9 = fadd fast <4 x float> %i8, %vec.phi95
   %i10 = add i32 %index, %mul5
-  %i11 = getelementptr inbounds float, float* %i2, i32 %i10
-  %i12 = bitcast float* %i11 to <4 x float>*
-  %wide.masked.load97 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i12, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i11 = getelementptr inbounds float, ptr %i2, i32 %i10
+  %wide.masked.load97 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i11, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i13 = fmul fast <4 x float> %wide.masked.load97, %wide.masked.load
   %i14 = fadd fast <4 x float> %i13, %vec.phi94
   %i15 = add i32 %index, %mul7
-  %i16 = getelementptr inbounds float, float* %i2, i32 %i15
-  %i17 = bitcast float* %i16 to <4 x float>*
-  %wide.masked.load98 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i17, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i16 = getelementptr inbounds float, ptr %i2, i32 %i15
+  %wide.masked.load98 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i16, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i18 = fmul fast <4 x float> %wide.masked.load98, %wide.masked.load
   %i19 = fadd fast <4 x float> %i18, %vec.phi
   %i20 = select <4 x i1> %active.lane.mask, <4 x float> %i19, <4 x float> %vec.phi
@@ -381,18 +372,18 @@ middle.block:                                     ; preds = %vector.body
   %i24 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i22)
   %i25 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i21)
   %i26 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i20)
-  %arrayidx28 = getelementptr inbounds float, float* %pOut, i32 %k2.093
-  store float %i24, float* %arrayidx28, align 4
-  %arrayidx30 = getelementptr inbounds float, float* %pOut, i32 %add
-  store float %i25, float* %arrayidx30, align 4
-  %arrayidx32 = getelementptr inbounds float, float* %pOut, i32 %add6
-  store float %i26, float* %arrayidx32, align 4
+  %arrayidx28 = getelementptr inbounds float, ptr %pOut, i32 %k2.093
+  store float %i24, ptr %arrayidx28, align 4
+  %arrayidx30 = getelementptr inbounds float, ptr %pOut, i32 %add
+  store float %i25, ptr %arrayidx30, align 4
+  %arrayidx32 = getelementptr inbounds float, ptr %pOut, i32 %add6
+  store float %i26, ptr %arrayidx32, align 4
   %add34 = add i32 %k2.093, 3
   %cmp3 = icmp ult i32 %add34, %sub
   br i1 %cmp3, label %for.body, label %for.cond.cleanup
 }
 
-define void @DCT_mve4(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float* nocapture readonly %pIn, float* nocapture %pOut) {
+define void @DCT_mve4(ptr nocapture readonly %S, ptr nocapture readonly %pIn, ptr nocapture %pOut) {
 ; CHECK-LABEL: DCT_mve4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
@@ -500,12 +491,12 @@ define void @DCT_mve4(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float*
 ; CHECK-NEXT:    add sp, #4
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
-  %NumInputs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 2
-  %i = load i32, i32* %NumInputs, align 4
-  %NumFilters = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 1
-  %i1 = load i32, i32* %NumFilters, align 4
-  %pDCTCoefs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 0
-  %i2 = load float*, float** %pDCTCoefs, align 4
+  %NumInputs = getelementptr inbounds %struct.DCT_InstanceTypeDef, ptr %S, i32 0, i32 2
+  %i = load i32, ptr %NumInputs, align 4
+  %NumFilters = getelementptr inbounds %struct.DCT_InstanceTypeDef, ptr %S, i32 0, i32 1
+  %i1 = load i32, ptr %NumFilters, align 4
+  %pDCTCoefs = getelementptr inbounds %struct.DCT_InstanceTypeDef, ptr %S, i32 0, i32 0
+  %i2 = load ptr, ptr %pDCTCoefs, align 4
   %cmp = icmp ugt i32 %i, 1
   tail call void @llvm.assume(i1 %cmp)
   %sub = add i32 %i1, -4
@@ -538,31 +529,26 @@ vector.body:                                      ; preds = %vector.body, %for.b
   %vec.phi116 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i27, %vector.body ]
   %vec.phi117 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i28, %vector.body ]
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %i)
-  %i3 = getelementptr inbounds float, float* %pIn, i32 %index
-  %i4 = bitcast float* %i3 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i4, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i3 = getelementptr inbounds float, ptr %pIn, i32 %index
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i3, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i5 = add i32 %index, %mul4
-  %i6 = getelementptr inbounds float, float* %i2, i32 %i5
-  %i7 = bitcast float* %i6 to <4 x float>*
-  %wide.masked.load118 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i7, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i6 = getelementptr inbounds float, ptr %i2, i32 %i5
+  %wide.masked.load118 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i6, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i8 = fmul fast <4 x float> %wide.masked.load118, %wide.masked.load
   %i9 = fadd fast <4 x float> %i8, %vec.phi116
   %i10 = add i32 %index, %mul5
-  %i11 = getelementptr inbounds float, float* %i2, i32 %i10
-  %i12 = bitcast float* %i11 to <4 x float>*
-  %wide.masked.load119 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i12, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i11 = getelementptr inbounds float, ptr %i2, i32 %i10
+  %wide.masked.load119 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i11, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i13 = fmul fast <4 x float> %wide.masked.load119, %wide.masked.load
   %i14 = fadd fast <4 x float> %i13, %vec.phi117
   %i15 = add i32 %index, %mul7
-  %i16 = getelementptr inbounds float, float* %i2, i32 %i15
-  %i17 = bitcast float* %i16 to <4 x float>*
-  %wide.masked.load120 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i17, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i16 = getelementptr inbounds float, ptr %i2, i32 %i15
+  %wide.masked.load120 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i16, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i18 = fmul fast <4 x float> %wide.masked.load120, %wide.masked.load
   %i19 = fadd fast <4 x float> %i18, %vec.phi115
   %i20 = add i32 %index, %mul9
-  %i21 = getelementptr inbounds float, float* %i2, i32 %i20
-  %i22 = bitcast float* %i21 to <4 x float>*
-  %wide.masked.load121 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i22, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i21 = getelementptr inbounds float, ptr %i2, i32 %i20
+  %wide.masked.load121 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i21, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i23 = fmul fast <4 x float> %wide.masked.load121, %wide.masked.load
   %i24 = fadd fast <4 x float> %i23, %vec.phi
   %i25 = select <4 x i1> %active.lane.mask, <4 x float> %i24, <4 x float> %vec.phi
@@ -578,20 +564,20 @@ middle.block:                                     ; preds = %vector.body
   %i31 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i27)
   %i32 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i26)
   %i33 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i25)
-  %arrayidx35 = getelementptr inbounds float, float* %pOut, i32 %k2.0114
-  store float %i31, float* %arrayidx35, align 4
-  %arrayidx37 = getelementptr inbounds float, float* %pOut, i32 %add
-  store float %i30, float* %arrayidx37, align 4
-  %arrayidx39 = getelementptr inbounds float, float* %pOut, i32 %add6
-  store float %i32, float* %arrayidx39, align 4
-  %arrayidx41 = getelementptr inbounds float, float* %pOut, i32 %add8
-  store float %i33, float* %arrayidx41, align 4
+  %arrayidx35 = getelementptr inbounds float, ptr %pOut, i32 %k2.0114
+  store float %i31, ptr %arrayidx35, align 4
+  %arrayidx37 = getelementptr inbounds float, ptr %pOut, i32 %add
+  store float %i30, ptr %arrayidx37, align 4
+  %arrayidx39 = getelementptr inbounds float, ptr %pOut, i32 %add6
+  store float %i32, ptr %arrayidx39, align 4
+  %arrayidx41 = getelementptr inbounds float, ptr %pOut, i32 %add8
+  store float %i33, ptr %arrayidx41, align 4
   %add43 = add i32 %k2.0114, 4
   %cmp3 = icmp ult i32 %add43, %sub
   br i1 %cmp3, label %for.body, label %for.cond.cleanup
 }
 
-define void @DCT_mve5(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float* nocapture readonly %pIn, float* nocapture %pOut) {
+define void @DCT_mve5(ptr nocapture readonly %S, ptr nocapture readonly %pIn, ptr nocapture %pOut) {
 ; CHECK-LABEL: DCT_mve5:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
@@ -702,12 +688,12 @@ define void @DCT_mve5(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float*
 ; CHECK-NEXT:    add sp, #4
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
-  %NumInputs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 2
-  %i = load i32, i32* %NumInputs, align 4
-  %NumFilters = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 1
-  %i1 = load i32, i32* %NumFilters, align 4
-  %pDCTCoefs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 0
-  %i2 = load float*, float** %pDCTCoefs, align 4
+  %NumInputs = getelementptr inbounds %struct.DCT_InstanceTypeDef, ptr %S, i32 0, i32 2
+  %i = load i32, ptr %NumInputs, align 4
+  %NumFilters = getelementptr inbounds %struct.DCT_InstanceTypeDef, ptr %S, i32 0, i32 1
+  %i1 = load i32, ptr %NumFilters, align 4
+  %pDCTCoefs = getelementptr inbounds %struct.DCT_InstanceTypeDef, ptr %S, i32 0, i32 0
+  %i2 = load ptr, ptr %pDCTCoefs, align 4
   %cmp = icmp ugt i32 %i, 1
   tail call void @llvm.assume(i1 %cmp)
   %sub = add i32 %i1, -5
@@ -743,37 +729,31 @@ vector.body:                                      ; preds = %vector.body, %for.b
   %vec.phi138 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i33, %vector.body ]
   %vec.phi139 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i34, %vector.body ]
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %i)
-  %i3 = getelementptr inbounds float, float* %pIn, i32 %index
-  %i4 = bitcast float* %i3 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i4, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i3 = getelementptr inbounds float, ptr %pIn, i32 %index
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i3, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i5 = add i32 %index, %mul4
-  %i6 = getelementptr inbounds float, float* %i2, i32 %i5
-  %i7 = bitcast float* %i6 to <4 x float>*
-  %wide.masked.load140 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i7, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i6 = getelementptr inbounds float, ptr %i2, i32 %i5
+  %wide.masked.load140 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i6, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i8 = fmul fast <4 x float> %wide.masked.load140, %wide.masked.load
   %i9 = fadd fast <4 x float> %i8, %vec.phi137
   %i10 = add i32 %index, %mul5
-  %i11 = getelementptr inbounds float, float* %i2, i32 %i10
-  %i12 = bitcast float* %i11 to <4 x float>*
-  %wide.masked.load141 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i12, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i11 = getelementptr inbounds float, ptr %i2, i32 %i10
+  %wide.masked.load141 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i11, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i13 = fmul fast <4 x float> %wide.masked.load141, %wide.masked.load
   %i14 = fadd fast <4 x float> %i13, %vec.phi139
   %i15 = add i32 %index, %mul7
-  %i16 = getelementptr inbounds float, float* %i2, i32 %i15
-  %i17 = bitcast float* %i16 to <4 x float>*
-  %wide.masked.load142 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i17, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i16 = getelementptr inbounds float, ptr %i2, i32 %i15
+  %wide.masked.load142 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i16, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i18 = fmul fast <4 x float> %wide.masked.load142, %wide.masked.load
   %i19 = fadd fast <4 x float> %i18, %vec.phi138
   %i20 = add i32 %index, %mul9
-  %i21 = getelementptr inbounds float, float* %i2, i32 %i20
-  %i22 = bitcast float* %i21 to <4 x float>*
-  %wide.masked.load143 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i22, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i21 = getelementptr inbounds float, ptr %i2, i32 %i20
+  %wide.masked.load143 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i21, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i23 = fmul fast <4 x float> %wide.masked.load143, %wide.masked.load
   %i24 = fadd fast <4 x float> %i23, %vec.phi136
   %i25 = add i32 %index, %mul11
-  %i26 = getelementptr inbounds float, float* %i2, i32 %i25
-  %i27 = bitcast float* %i26 to <4 x float>*
-  %wide.masked.load144 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i27, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i26 = getelementptr inbounds float, ptr %i2, i32 %i25
+  %wide.masked.load144 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i26, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i28 = fmul fast <4 x float> %wide.masked.load144, %wide.masked.load
   %i29 = fadd fast <4 x float> %i28, %vec.phi
   %i30 = select <4 x i1> %active.lane.mask, <4 x float> %i29, <4 x float> %vec.phi
@@ -791,22 +771,22 @@ middle.block:                                     ; preds = %vector.body
   %i38 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i32)
   %i39 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i31)
   %i40 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i30)
-  %arrayidx42 = getelementptr inbounds float, float* %pOut, i32 %k2.0135
-  store float %i38, float* %arrayidx42, align 4
-  %arrayidx44 = getelementptr inbounds float, float* %pOut, i32 %add
-  store float %i36, float* %arrayidx44, align 4
-  %arrayidx46 = getelementptr inbounds float, float* %pOut, i32 %add6
-  store float %i37, float* %arrayidx46, align 4
-  %arrayidx48 = getelementptr inbounds float, float* %pOut, i32 %add8
-  store float %i39, float* %arrayidx48, align 4
-  %arrayidx50 = getelementptr inbounds float, float* %pOut, i32 %add10
-  store float %i40, float* %arrayidx50, align 4
+  %arrayidx42 = getelementptr inbounds float, ptr %pOut, i32 %k2.0135
+  store float %i38, ptr %arrayidx42, align 4
+  %arrayidx44 = getelementptr inbounds float, ptr %pOut, i32 %add
+  store float %i36, ptr %arrayidx44, align 4
+  %arrayidx46 = getelementptr inbounds float, ptr %pOut, i32 %add6
+  store float %i37, ptr %arrayidx46, align 4
+  %arrayidx48 = getelementptr inbounds float, ptr %pOut, i32 %add8
+  store float %i39, ptr %arrayidx48, align 4
+  %arrayidx50 = getelementptr inbounds float, ptr %pOut, i32 %add10
+  store float %i40, ptr %arrayidx50, align 4
   %add52 = add i32 %k2.0135, 5
   %cmp3 = icmp ult i32 %add52, %sub
   br i1 %cmp3, label %for.body, label %for.cond.cleanup
 }
 
-define void @DCT_mve6(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float* nocapture readonly %pIn, float* nocapture %pOut) {
+define void @DCT_mve6(ptr nocapture readonly %S, ptr nocapture readonly %pIn, ptr nocapture %pOut) {
 ; CHECK-LABEL: DCT_mve6:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
@@ -929,12 +909,12 @@ define void @DCT_mve6(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float*
 ; CHECK-NEXT:    add sp, #4
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
-  %NumInputs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 2
-  %i = load i32, i32* %NumInputs, align 4
-  %NumFilters = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 1
-  %i1 = load i32, i32* %NumFilters, align 4
-  %pDCTCoefs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 0
-  %i2 = load float*, float** %pDCTCoefs, align 4
+  %NumInputs = getelementptr inbounds %struct.DCT_InstanceTypeDef, ptr %S, i32 0, i32 2
+  %i = load i32, ptr %NumInputs, align 4
+  %NumFilters = getelementptr inbounds %struct.DCT_InstanceTypeDef, ptr %S, i32 0, i32 1
+  %i1 = load i32, ptr %NumFilters, align 4
+  %pDCTCoefs = getelementptr inbounds %struct.DCT_InstanceTypeDef, ptr %S, i32 0, i32 0
+  %i2 = load ptr, ptr %pDCTCoefs, align 4
   %cmp = icmp ugt i32 %i, 1
   tail call void @llvm.assume(i1 %cmp)
   %sub = add i32 %i1, -6
@@ -973,43 +953,36 @@ vector.body:                                      ; preds = %vector.body, %for.b
   %vec.phi160 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i39, %vector.body ]
   %vec.phi161 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i40, %vector.body ]
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %i)
-  %i3 = getelementptr inbounds float, float* %pIn, i32 %index
-  %i4 = bitcast float* %i3 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i4, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i3 = getelementptr inbounds float, ptr %pIn, i32 %index
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i3, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i5 = add i32 %index, %mul4
-  %i6 = getelementptr inbounds float, float* %i2, i32 %i5
-  %i7 = bitcast float* %i6 to <4 x float>*
-  %wide.masked.load162 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i7, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i6 = getelementptr inbounds float, ptr %i2, i32 %i5
+  %wide.masked.load162 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i6, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i8 = fmul fast <4 x float> %wide.masked.load162, %wide.masked.load
   %i9 = fadd fast <4 x float> %i8, %vec.phi158
   %i10 = add i32 %index, %mul5
-  %i11 = getelementptr inbounds float, float* %i2, i32 %i10
-  %i12 = bitcast float* %i11 to <4 x float>*
-  %wide.masked.load163 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i12, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i11 = getelementptr inbounds float, ptr %i2, i32 %i10
+  %wide.masked.load163 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i11, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i13 = fmul fast <4 x float> %wide.masked.load163, %wide.masked.load
   %i14 = fadd fast <4 x float> %i13, %vec.phi160
   %i15 = add i32 %index, %mul7
-  %i16 = getelementptr inbounds float, float* %i2, i32 %i15
-  %i17 = bitcast float* %i16 to <4 x float>*
-  %wide.masked.load164 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i17, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i16 = getelementptr inbounds float, ptr %i2, i32 %i15
+  %wide.masked.load164 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i16, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i18 = fmul fast <4 x float> %wide.masked.load164, %wide.masked.load
   %i19 = fadd fast <4 x float> %i18, %vec.phi161
   %i20 = add i32 %index, %mul9
-  %i21 = getelementptr inbounds float, float* %i2, i32 %i20
-  %i22 = bitcast float* %i21 to <4 x float>*
-  %wide.masked.load165 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i22, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i21 = getelementptr inbounds float, ptr %i2, i32 %i20
+  %wide.masked.load165 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i21, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i23 = fmul fast <4 x float> %wide.masked.load165, %wide.masked.load
   %i24 = fadd fast <4 x float> %i23, %vec.phi159
   %i25 = add i32 %index, %mul11
-  %i26 = getelementptr inbounds float, float* %i2, i32 %i25
-  %i27 = bitcast float* %i26 to <4 x float>*
-  %wide.masked.load166 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i27, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i26 = getelementptr inbounds float, ptr %i2, i32 %i25
+  %wide.masked.load166 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i26, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i28 = fmul fast <4 x float> %wide.masked.load166, %wide.masked.load
   %i29 = fadd fast <4 x float> %i28, %vec.phi157
   %i30 = add i32 %index, %mul13
-  %i31 = getelementptr inbounds float, float* %i2, i32 %i30
-  %i32 = bitcast float* %i31 to <4 x float>*
-  %wide.masked.load167 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i32, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i31 = getelementptr inbounds float, ptr %i2, i32 %i30
+  %wide.masked.load167 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i31, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i33 = fmul fast <4 x float> %wide.masked.load167, %wide.masked.load
   %i34 = fadd fast <4 x float> %i33, %vec.phi
   %i35 = select <4 x i1> %active.lane.mask, <4 x float> %i34, <4 x float> %vec.phi
@@ -1029,24 +1002,24 @@ middle.block:                                     ; preds = %vector.body
   %i45 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i37)
   %i46 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i36)
   %i47 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i35)
-  %arrayidx49 = getelementptr inbounds float, float* %pOut, i32 %k2.0156
-  store float %i45, float* %arrayidx49, align 4
-  %arrayidx51 = getelementptr inbounds float, float* %pOut, i32 %add
-  store float %i43, float* %arrayidx51, align 4
-  %arrayidx53 = getelementptr inbounds float, float* %pOut, i32 %add6
-  store float %i42, float* %arrayidx53, align 4
-  %arrayidx55 = getelementptr inbounds float, float* %pOut, i32 %add8
-  store float %i44, float* %arrayidx55, align 4
-  %arrayidx57 = getelementptr inbounds float, float* %pOut, i32 %add10
-  store float %i46, float* %arrayidx57, align 4
-  %arrayidx59 = getelementptr inbounds float, float* %pOut, i32 %add12
-  store float %i47, float* %arrayidx59, align 4
+  %arrayidx49 = getelementptr inbounds float, ptr %pOut, i32 %k2.0156
+  store float %i45, ptr %arrayidx49, align 4
+  %arrayidx51 = getelementptr inbounds float, ptr %pOut, i32 %add
+  store float %i43, ptr %arrayidx51, align 4
+  %arrayidx53 = getelementptr inbounds float, ptr %pOut, i32 %add6
+  store float %i42, ptr %arrayidx53, align 4
+  %arrayidx55 = getelementptr inbounds float, ptr %pOut, i32 %add8
+  store float %i44, ptr %arrayidx55, align 4
+  %arrayidx57 = getelementptr inbounds float, ptr %pOut, i32 %add10
+  store float %i46, ptr %arrayidx57, align 4
+  %arrayidx59 = getelementptr inbounds float, ptr %pOut, i32 %add12
+  store float %i47, ptr %arrayidx59, align 4
   %add61 = add i32 %k2.0156, 6
   %cmp3 = icmp ult i32 %add61, %sub
   br i1 %cmp3, label %for.body, label %for.cond.cleanup
 }
 
-define void @DCT_mve7(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float* nocapture readonly %pIn, float* nocapture %pOut) {
+define void @DCT_mve7(ptr nocapture readonly %S, ptr nocapture readonly %pIn, ptr nocapture %pOut) {
 ; CHECK-LABEL: DCT_mve7:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
@@ -1206,12 +1179,12 @@ define void @DCT_mve7(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float*
 ; CHECK-NEXT:    add sp, #4
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
-  %NumInputs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 2
-  %i = load i32, i32* %NumInputs, align 4
-  %NumFilters = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 1
-  %i1 = load i32, i32* %NumFilters, align 4
-  %pDCTCoefs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 0
-  %i2 = load float*, float** %pDCTCoefs, align 4
+  %NumInputs = getelementptr inbounds %struct.DCT_InstanceTypeDef, ptr %S, i32 0, i32 2
+  %i = load i32, ptr %NumInputs, align 4
+  %NumFilters = getelementptr inbounds %struct.DCT_InstanceTypeDef, ptr %S, i32 0, i32 1
+  %i1 = load i32, ptr %NumFilters, align 4
+  %pDCTCoefs = getelementptr inbounds %struct.DCT_InstanceTypeDef, ptr %S, i32 0, i32 0
+  %i2 = load ptr, ptr %pDCTCoefs, align 4
   %cmp = icmp ugt i32 %i, 1
   tail call void @llvm.assume(i1 %cmp)
   %sub = add i32 %i1, -7
@@ -1253,49 +1226,41 @@ vector.body:                                      ; preds = %vector.body, %for.b
   %vec.phi182 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i45, %vector.body ]
   %vec.phi183 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i46, %vector.body ]
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %i)
-  %i3 = getelementptr inbounds float, float* %pIn, i32 %index
-  %i4 = bitcast float* %i3 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i4, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i3 = getelementptr inbounds float, ptr %pIn, i32 %index
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i3, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i5 = add i32 %index, %mul4
-  %i6 = getelementptr inbounds float, float* %i2, i32 %i5
-  %i7 = bitcast float* %i6 to <4 x float>*
-  %wide.masked.load184 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i7, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i6 = getelementptr inbounds float, ptr %i2, i32 %i5
+  %wide.masked.load184 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i6, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i8 = fmul fast <4 x float> %wide.masked.load184, %wide.masked.load
   %i9 = fadd fast <4 x float> %i8, %vec.phi179
   %i10 = add i32 %index, %mul5
-  %i11 = getelementptr inbounds float, float* %i2, i32 %i10
-  %i12 = bitcast float* %i11 to <4 x float>*
-  %wide.masked.load185 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i12, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i11 = getelementptr inbounds float, ptr %i2, i32 %i10
+  %wide.masked.load185 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i11, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i13 = fmul fast <4 x float> %wide.masked.load185, %wide.masked.load
   %i14 = fadd fast <4 x float> %i13, %vec.phi181
   %i15 = add i32 %index, %mul7
-  %i16 = getelementptr inbounds float, float* %i2, i32 %i15
-  %i17 = bitcast float* %i16 to <4 x float>*
-  %wide.masked.load186 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i17, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i16 = getelementptr inbounds float, ptr %i2, i32 %i15
+  %wide.masked.load186 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i16, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i18 = fmul fast <4 x float> %wide.masked.load186, %wide.masked.load
   %i19 = fadd fast <4 x float> %i18, %vec.phi183
   %i20 = add i32 %index, %mul9
-  %i21 = getelementptr inbounds float, float* %i2, i32 %i20
-  %i22 = bitcast float* %i21 to <4 x float>*
-  %wide.masked.load187 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i22, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i21 = getelementptr inbounds float, ptr %i2, i32 %i20
+  %wide.masked.load187 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i21, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i23 = fmul fast <4 x float> %wide.masked.load187, %wide.masked.load
   %i24 = fadd fast <4 x float> %i23, %vec.phi182
   %i25 = add i32 %index, %mul11
-  %i26 = getelementptr inbounds float, float* %i2, i32 %i25
-  %i27 = bitcast float* %i26 to <4 x float>*
-  %wide.masked.load188 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i27, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i26 = getelementptr inbounds float, ptr %i2, i32 %i25
+  %wide.masked.load188 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i26, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i28 = fmul fast <4 x float> %wide.masked.load188, %wide.masked.load
   %i29 = fadd fast <4 x float> %i28, %vec.phi180
   %i30 = add i32 %index, %mul13
-  %i31 = getelementptr inbounds float, float* %i2, i32 %i30
-  %i32 = bitcast float* %i31 to <4 x float>*
-  %wide.masked.load189 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i32, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i31 = getelementptr inbounds float, ptr %i2, i32 %i30
+  %wide.masked.load189 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i31, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i33 = fmul fast <4 x float> %wide.masked.load189, %wide.masked.load
   %i34 = fadd fast <4 x float> %i33, %vec.phi178
   %i35 = add i32 %index, %mul15
-  %i36 = getelementptr inbounds float, float* %i2, i32 %i35
-  %i37 = bitcast float* %i36 to <4 x float>*
-  %wide.masked.load190 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i37, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i36 = getelementptr inbounds float, ptr %i2, i32 %i35
+  %wide.masked.load190 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i36, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i38 = fmul fast <4 x float> %wide.masked.load190, %wide.masked.load
   %i39 = fadd fast <4 x float> %i38, %vec.phi
   %i40 = select <4 x i1> %active.lane.mask, <4 x float> %i39, <4 x float> %vec.phi
@@ -1317,26 +1282,26 @@ middle.block:                                     ; preds = %vector.body
   %i52 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i42)
   %i53 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i41)
   %i54 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i40)
-  %arrayidx56 = getelementptr inbounds float, float* %pOut, i32 %k2.0177
-  store float %i52, float* %arrayidx56, align 4
-  %arrayidx58 = getelementptr inbounds float, float* %pOut, i32 %add
-  store float %i50, float* %arrayidx58, align 4
-  %arrayidx60 = getelementptr inbounds float, float* %pOut, i32 %add6
-  store float %i48, float* %arrayidx60, align 4
-  %arrayidx62 = getelementptr inbounds float, float* %pOut, i32 %add8
-  store float %i49, float* %arrayidx62, align 4
-  %arrayidx64 = getelementptr inbounds float, float* %pOut, i32 %add10
-  store float %i51, float* %arrayidx64, align 4
-  %arrayidx66 = getelementptr inbounds float, float* %pOut, i32 %add12
-  store float %i53, float* %arrayidx66, align 4
-  %arrayidx68 = getelementptr inbounds float, float* %pOut, i32 %add14
-  store float %i54, float* %arrayidx68, align 4
+  %arrayidx56 = getelementptr inbounds float, ptr %pOut, i32 %k2.0177
+  store float %i52, ptr %arrayidx56, align 4
+  %arrayidx58 = getelementptr inbounds float, ptr %pOut, i32 %add
+  store float %i50, ptr %arrayidx58, align 4
+  %arrayidx60 = getelementptr inbounds float, ptr %pOut, i32 %add6
+  store float %i48, ptr %arrayidx60, align 4
+  %arrayidx62 = getelementptr inbounds float, ptr %pOut, i32 %add8
+  store float %i49, ptr %arrayidx62, align 4
+  %arrayidx64 = getelementptr inbounds float, ptr %pOut, i32 %add10
+  store float %i51, ptr %arrayidx64, align 4
+  %arrayidx66 = getelementptr inbounds float, ptr %pOut, i32 %add12
+  store float %i53, ptr %arrayidx66, align 4
+  %arrayidx68 = getelementptr inbounds float, ptr %pOut, i32 %add14
+  store float %i54, ptr %arrayidx68, align 4
   %add70 = add i32 %k2.0177, 7
   %cmp3 = icmp ult i32 %add70, %sub
   br i1 %cmp3, label %for.body, label %for.cond.cleanup
 }
 
-define void @DCT_mve8(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float* nocapture readonly %pIn, float* nocapture %pOut) {
+define void @DCT_mve8(ptr nocapture readonly %S, ptr nocapture readonly %pIn, ptr nocapture %pOut) {
 ; CHECK-LABEL: DCT_mve8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
@@ -1362,7 +1327,7 @@ define void @DCT_mve8(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float*
 ; CHECK-NEXT:    add.w r12, r1, r3, lsl #2
 ; CHECK-NEXT:    subs r1, r0, #4
 ; CHECK-NEXT:    movs r0, #1
-; CHECK-NEXT:    lsls r5, r3, #2
+; CHECK-NEXT:    lsls r6, r3, #2
 ; CHECK-NEXT:    add.w r1, r0, r1, lsr #2
 ; CHECK-NEXT:    str r1, [sp, #8] @ 4-byte Spill
 ; CHECK-NEXT:    lsls r1, r3, #5
@@ -1380,7 +1345,7 @@ define void @DCT_mve8(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float*
 ; CHECK-NEXT:    adds r1, r0, #4
 ; CHECK-NEXT:    ldr.w r9, [sp, #20] @ 4-byte Reload
 ; CHECK-NEXT:    vmov.i32 q3, #0x0
-; CHECK-NEXT:    ldr r6, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    ldr r5, [sp, #8] @ 4-byte Reload
 ; CHECK-NEXT:    adds r4, r0, #3
 ; CHECK-NEXT:    str r1, [sp, #24] @ 4-byte Spill
 ; CHECK-NEXT:    add.w r8, r0, #2
@@ -1394,16 +1359,16 @@ define void @DCT_mve8(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float*
 ; CHECK-NEXT:    mov r10, r7
 ; CHECK-NEXT:    vstrw.32 q3, [sp, #56] @ 16-byte Spill
 ; CHECK-NEXT:    vstrw.32 q3, [sp, #72] @ 16-byte Spill
-; CHECK-NEXT:    dls lr, r6
+; CHECK-NEXT:    dls lr, r5
 ; CHECK-NEXT:  .LBB7_3: @ %vector.body
 ; CHECK-NEXT:    @ Parent Loop BB7_2 Depth=1
 ; CHECK-NEXT:    @ => This Inner Loop Header: Depth=2
 ; CHECK-NEXT:    vctp.32 r10
-; CHECK-NEXT:    add.w r11, r3, r5
+; CHECK-NEXT:    add.w r11, r3, r6
 ; CHECK-NEXT:    vpstt
 ; CHECK-NEXT:    vldrwt.u32 q0, [r9], #16
 ; CHECK-NEXT:    vldrwt.u32 q1, [r3], #16
-; CHECK-NEXT:    add.w r6, r11, r5
+; CHECK-NEXT:    add.w r5, r11, r6
 ; CHECK-NEXT:    sub.w r10, r10, #4
 ; CHECK-NEXT:    vpstt
 ; CHECK-NEXT:    vfmat.f32 q6, q1, q0
@@ -1416,35 +1381,35 @@ define void @DCT_mve8(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float*
 ; CHECK-NEXT:    vmov q3, q4
 ; CHECK-NEXT:    vmov q4, q2
 ; CHECK-NEXT:    vpst
-; CHECK-NEXT:    vldrwt.u32 q1, [r6]
+; CHECK-NEXT:    vldrwt.u32 q1, [r5]
 ; CHECK-NEXT:    vldrw.u32 q2, [sp, #56] @ 16-byte Reload
-; CHECK-NEXT:    adds r7, r6, r5
+; CHECK-NEXT:    adds r7, r5, r6
 ; CHECK-NEXT:    vpstt
 ; CHECK-NEXT:    vfmat.f32 q2, q1, q0
 ; CHECK-NEXT:    vldrwt.u32 q1, [r7]
 ; CHECK-NEXT:    vstrw.32 q2, [sp, #56] @ 16-byte Spill
 ; CHECK-NEXT:    vldrw.u32 q2, [sp, #72] @ 16-byte Reload
-; CHECK-NEXT:    adds r6, r7, r5
+; CHECK-NEXT:    adds r5, r7, r6
 ; CHECK-NEXT:    vpstt
 ; CHECK-NEXT:    vfmat.f32 q2, q1, q0
-; CHECK-NEXT:    vldrwt.u32 q1, [r6]
-; CHECK-NEXT:    adds r7, r6, r5
+; CHECK-NEXT:    vldrwt.u32 q1, [r5]
+; CHECK-NEXT:    adds r7, r5, r6
 ; CHECK-NEXT:    vstrw.32 q2, [sp, #72] @ 16-byte Spill
 ; CHECK-NEXT:    vmov q2, q4
 ; CHECK-NEXT:    vmov q4, q3
 ; CHECK-NEXT:    vpstt
 ; CHECK-NEXT:    vfmat.f32 q2, q1, q0
 ; CHECK-NEXT:    vldrwt.u32 q1, [r7]
-; CHECK-NEXT:    adds r6, r7, r5
+; CHECK-NEXT:    adds r5, r7, r6
 ; CHECK-NEXT:    vmov q3, q5
 ; CHECK-NEXT:    vpstt
 ; CHECK-NEXT:    vfmat.f32 q4, q1, q0
-; CHECK-NEXT:    vldrwt.u32 q1, [r6]
+; CHECK-NEXT:    vldrwt.u32 q1, [r5]
 ; CHECK-NEXT:    vmov q5, q6
-; CHECK-NEXT:    add r6, r5
+; CHECK-NEXT:    add r5, r6
 ; CHECK-NEXT:    vpstt
 ; CHECK-NEXT:    vfmat.f32 q5, q1, q0
-; CHECK-NEXT:    vldrwt.u32 q1, [r6]
+; CHECK-NEXT:    vldrwt.u32 q1, [r5]
 ; CHECK-NEXT:    vldrw.u32 q6, [sp, #40] @ 16-byte Reload
 ; CHECK-NEXT:    vpst
 ; CHECK-NEXT:    vfmat.f32 q3, q1, q0
@@ -1509,12 +1474,12 @@ define void @DCT_mve8(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float*
 ; CHECK-NEXT:    add sp, #4
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
-  %NumInputs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 2
-  %i = load i32, i32* %NumInputs, align 4
-  %NumFilters = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 1
-  %i1 = load i32, i32* %NumFilters, align 4
-  %pDCTCoefs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 0
-  %i2 = load float*, float** %pDCTCoefs, align 4
+  %NumInputs = getelementptr inbounds %struct.DCT_InstanceTypeDef, ptr %S, i32 0, i32 2
+  %i = load i32, ptr %NumInputs, align 4
+  %NumFilters = getelementptr inbounds %struct.DCT_InstanceTypeDef, ptr %S, i32 0, i32 1
+  %i1 = load i32, ptr %NumFilters, align 4
+  %pDCTCoefs = getelementptr inbounds %struct.DCT_InstanceTypeDef, ptr %S, i32 0, i32 0
+  %i2 = load ptr, ptr %pDCTCoefs, align 4
   %cmp = icmp ugt i32 %i, 1
   tail call void @llvm.assume(i1 %cmp)
   %sub = add i32 %i1, -8
@@ -1559,55 +1524,46 @@ vector.body:                                      ; preds = %vector.body, %for.b
   %vec.phi204 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i51, %vector.body ]
   %vec.phi205 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i52, %vector.body ]
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %i)
-  %i3 = getelementptr inbounds float, float* %pIn, i32 %index
-  %i4 = bitcast float* %i3 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i4, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i3 = getelementptr inbounds float, ptr %pIn, i32 %index
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i3, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i5 = add i32 %index, %mul4
-  %i6 = getelementptr inbounds float, float* %i2, i32 %i5
-  %i7 = bitcast float* %i6 to <4 x float>*
-  %wide.masked.load206 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i7, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i6 = getelementptr inbounds float, ptr %i2, i32 %i5
+  %wide.masked.load206 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i6, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i8 = fmul fast <4 x float> %wide.masked.load206, %wide.masked.load
   %i9 = fadd fast <4 x float> %i8, %vec.phi200
   %i10 = add i32 %index, %mul5
-  %i11 = getelementptr inbounds float, float* %i2, i32 %i10
-  %i12 = bitcast float* %i11 to <4 x float>*
-  %wide.masked.load207 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i12, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i11 = getelementptr inbounds float, ptr %i2, i32 %i10
+  %wide.masked.load207 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i11, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i13 = fmul fast <4 x float> %wide.masked.load207, %wide.masked.load
   %i14 = fadd fast <4 x float> %i13, %vec.phi202
   %i15 = add i32 %index, %mul7
-  %i16 = getelementptr inbounds float, float* %i2, i32 %i15
-  %i17 = bitcast float* %i16 to <4 x float>*
-  %wide.masked.load208 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i17, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i16 = getelementptr inbounds float, ptr %i2, i32 %i15
+  %wide.masked.load208 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i16, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i18 = fmul fast <4 x float> %wide.masked.load208, %wide.masked.load
   %i19 = fadd fast <4 x float> %i18, %vec.phi204
   %i20 = add i32 %index, %mul9
-  %i21 = getelementptr inbounds float, float* %i2, i32 %i20
-  %i22 = bitcast float* %i21 to <4 x float>*
-  %wide.masked.load209 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i22, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i21 = getelementptr inbounds float, ptr %i2, i32 %i20
+  %wide.masked.load209 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i21, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i23 = fmul fast <4 x float> %wide.masked.load209, %wide.masked.load
   %i24 = fadd fast <4 x float> %i23, %vec.phi205
   %i25 = add i32 %index, %mul11
-  %i26 = getelementptr inbounds float, float* %i2, i32 %i25
-  %i27 = bitcast float* %i26 to <4 x float>*
-  %wide.masked.load210 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i27, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i26 = getelementptr inbounds float, ptr %i2, i32 %i25
+  %wide.masked.load210 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i26, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i28 = fmul fast <4 x float> %wide.masked.load210, %wide.masked.load
   %i29 = fadd fast <4 x float> %i28, %vec.phi203
   %i30 = add i32 %index, %mul13
-  %i31 = getelementptr inbounds float, float* %i2, i32 %i30
-  %i32 = bitcast float* %i31 to <4 x float>*
-  %wide.masked.load211 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i32, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i31 = getelementptr inbounds float, ptr %i2, i32 %i30
+  %wide.masked.load211 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i31, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i33 = fmul fast <4 x float> %wide.masked.load211, %wide.masked.load
   %i34 = fadd fast <4 x float> %i33, %vec.phi201
   %i35 = add i32 %index, %mul15
-  %i36 = getelementptr inbounds float, float* %i2, i32 %i35
-  %i37 = bitcast float* %i36 to <4 x float>*
-  %wide.masked.load212 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i37, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i36 = getelementptr inbounds float, ptr %i2, i32 %i35
+  %wide.masked.load212 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i36, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i38 = fmul fast <4 x float> %wide.masked.load212, %wide.masked.load
   %i39 = fadd fast <4 x float> %i38, %vec.phi199
   %i40 = add i32 %index, %mul17
-  %i41 = getelementptr inbounds float, float* %i2, i32 %i40
-  %i42 = bitcast float* %i41 to <4 x float>*
-  %wide.masked.load213 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i42, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i41 = getelementptr inbounds float, ptr %i2, i32 %i40
+  %wide.masked.load213 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %i41, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
   %i43 = fmul fast <4 x float> %wide.masked.load213, %wide.masked.load
   %i44 = fadd fast <4 x float> %i43, %vec.phi
   %i45 = select <4 x i1> %active.lane.mask, <4 x float> %i44, <4 x float> %vec.phi
@@ -1631,22 +1587,22 @@ middle.block:                                     ; preds = %vector.body
   %i59 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i47)
   %i60 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i46)
   %i61 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i45)
-  %arrayidx63 = getelementptr inbounds float, float* %pOut, i32 %k2.0198
-  store float %i59, float* %arrayidx63, align 4
-  %arrayidx65 = getelementptr inbounds float, float* %pOut, i32 %add
-  store float %i57, float* %arrayidx65, align 4
-  %arrayidx67 = getelementptr inbounds float, float* %pOut, i32 %add6
-  store float %i55, float* %arrayidx67, align 4
-  %arrayidx69 = getelementptr inbounds float, float* %pOut, i32 %add8
-  store float %i54, float* %arrayidx69, align 4
-  %arrayidx71 = getelementptr inbounds float, float* %pOut, i32 %add10
-  store float %i56, float* %arrayidx71, align 4
-  %arrayidx73 = getelementptr inbounds float, float* %pOut, i32 %add12
-  store float %i58, float* %arrayidx73, align 4
-  %arrayidx75 = getelementptr inbounds float, float* %pOut, i32 %add14
-  store float %i60, float* %arrayidx75, align 4
-  %arrayidx77 = getelementptr inbounds float, float* %pOut, i32 %add16
-  store float %i61, float* %arrayidx77, align 4
+  %arrayidx63 = getelementptr inbounds float, ptr %pOut, i32 %k2.0198
+  store float %i59, ptr %arrayidx63, align 4
+  %arrayidx65 = getelementptr inbounds float, ptr %pOut, i32 %add
+  store float %i57, ptr %arrayidx65, align 4
+  %arrayidx67 = getelementptr inbounds float, ptr %pOut, i32 %add6
+  store float %i55, ptr %arrayidx67, align 4
+  %arrayidx69 = getelementptr inbounds float, ptr %pOut, i32 %add8
+  store float %i54, ptr %arrayidx69, align 4
+  %arrayidx71 = getelementptr inbounds float, ptr %pOut, i32 %add10
+  store float %i56, ptr %arrayidx71, align 4
+  %arrayidx73 = getelementptr inbounds float, ptr %pOut, i32 %add12
+  store float %i58, ptr %arrayidx73, align 4
+  %arrayidx75 = getelementptr inbounds float, ptr %pOut, i32 %add14
+  store float %i60, ptr %arrayidx75, align 4
+  %arrayidx77 = getelementptr inbounds float, ptr %pOut, i32 %add16
+  store float %i61, ptr %arrayidx77, align 4
   %add79 = add i32 %k2.0198, 8
   %cmp3 = icmp ult i32 %add79, %sub
   br i1 %cmp3, label %for.body, label %for.cond.cleanup
@@ -1654,5 +1610,5 @@ middle.block:                                     ; preds = %vector.body
 
 declare void @llvm.assume(i1 noundef)
 declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
-declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32 immarg, <4 x i1>, <4 x float>)
+declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32 immarg, <4 x i1>, <4 x float>)
 declare float @llvm.vector.reduce.fadd.v4f32(float, <4 x float>)
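
For anyone skimming the hunks rather than the full tests: every change above is the same mechanical rewrite, so a minimal sketch may help. The function name below is made up for illustration and does not appear in the modified tests; the intrinsic signatures are the ones used throughout this commit.

; Typed-pointer form (before this commit): the vector load went through a
; bitcast and a pointee-mangled intrinsic:
;   %cast = bitcast float* %addr to <4 x float>*
;   %v = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %cast, i32 4, <4 x i1> %mask, <4 x float> undef)

; Opaque-pointer form (after this commit): the bitcast disappears and the
; intrinsic is mangled on the plain pointer type instead.
define <4 x float> @opaque_masked_load_sketch(ptr %addr, <4 x i1> %mask) {
  %v = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %addr, i32 4, <4 x i1> %mask, <4 x float> undef)
  ret <4 x float> %v
}

declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32 immarg, <4 x i1>, <4 x float>)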
