[llvm] 496a694 - [Thumb2] Name instructions in tests (NFC)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 4 03:25:25 PDT 2023


Author: Nikita Popov
Date: 2023-04-04T12:25:16+02:00
New Revision: 496a69471c85e2bcd4be9f38c91ea2bd81750f62

URL: https://github.com/llvm/llvm-project/commit/496a69471c85e2bcd4be9f38c91ea2bd81750f62
DIFF: https://github.com/llvm/llvm-project/commit/496a69471c85e2bcd4be9f38c91ea2bd81750f62.diff

LOG: [Thumb2] Name instructions in tests (NFC)

Added: 
    

Modified: 
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/fast-fp-loops.ll
    llvm/test/CodeGen/Thumb2/mve-float16regloops.ll
    llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
    llvm/test/CodeGen/Thumb2/mve-gather-ptrs.ll
    llvm/test/CodeGen/Thumb2/mve-postinc-dct.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/fast-fp-loops.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/fast-fp-loops.ll
index c2b946abfbf78..b669370b0e526 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/fast-fp-loops.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/fast-fp-loops.ll
@@ -105,10 +105,10 @@ vector.memcheck:                                  ; preds = %entry
   br i1 %conflict.rdx, label %for.body.preheader, label %vector.ph
 
 for.body.preheader:                               ; preds = %vector.memcheck
-  %0 = add i32 %N, -1
+  %i = add i32 %N, -1
   %xtraiter = and i32 %N, 3
-  %1 = icmp ult i32 %0, 3
-  br i1 %1, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body.preheader.new
+  %i1 = icmp ult i32 %i, 3
+  br i1 %i1, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body.preheader.new
 
 for.body.preheader.new:                           ; preds = %for.body.preheader
   %unroll_iter = sub i32 %N, %xtraiter
@@ -121,34 +121,34 @@ vector.ph:                                        ; preds = %vector.memcheck
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %2 = getelementptr inbounds float, float* %b, i32 %index
-  %3 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
-  %4 = bitcast float* %2 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %4, i32 4, <4 x i1> %3, <4 x float> undef)
-  %5 = getelementptr inbounds float, float* %c, i32 %index
-  %6 = bitcast float* %5 to <4 x float>*
-  %wide.masked.load23 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %6, i32 4, <4 x i1> %3, <4 x float> undef)
-  %7 = fmul fast <4 x float> %wide.masked.load23, %wide.masked.load
-  %8 = getelementptr inbounds float, float* %a, i32 %index
-  %9 = bitcast float* %8 to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %7, <4 x float>* %9, i32 4, <4 x i1> %3)
+  %i2 = getelementptr inbounds float, float* %b, i32 %index
+  %i3 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
+  %i4 = bitcast float* %i2 to <4 x float>*
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i4, i32 4, <4 x i1> %i3, <4 x float> undef)
+  %i5 = getelementptr inbounds float, float* %c, i32 %index
+  %i6 = bitcast float* %i5 to <4 x float>*
+  %wide.masked.load23 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i6, i32 4, <4 x i1> %i3, <4 x float> undef)
+  %i7 = fmul fast <4 x float> %wide.masked.load23, %wide.masked.load
+  %i8 = getelementptr inbounds float, float* %a, i32 %index
+  %i9 = bitcast float* %i8 to <4 x float>*
+  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %i7, <4 x float>* %i9, i32 4, <4 x i1> %i3)
   %index.next = add i32 %index, 4
-  %10 = icmp eq i32 %index.next, %n.vec
-  br i1 %10, label %for.cond.cleanup, label %vector.body
+  %i10 = icmp eq i32 %index.next, %n.vec
+  br i1 %i10, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup.loopexit.unr-lcssa:              ; preds = %for.body, %for.body.preheader
   %i.09.unr = phi i32 [ 0, %for.body.preheader ], [ %inc.3, %for.body ]
   %lcmp.mod = icmp eq i32 %xtraiter, 0
   br i1 %lcmp.mod, label %for.cond.cleanup, label %for.body.epil
 
-for.body.epil:                                    ; preds = %for.cond.cleanup.loopexit.unr-lcssa, %for.body.epil
+for.body.epil:                                    ; preds = %for.body.epil, %for.cond.cleanup.loopexit.unr-lcssa
   %i.09.epil = phi i32 [ %inc.epil, %for.body.epil ], [ %i.09.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
   %epil.iter = phi i32 [ %epil.iter.sub, %for.body.epil ], [ %xtraiter, %for.cond.cleanup.loopexit.unr-lcssa ]
   %arrayidx.epil = getelementptr inbounds float, float* %b, i32 %i.09.epil
-  %11 = load float, float* %arrayidx.epil, align 4
+  %i11 = load float, float* %arrayidx.epil, align 4
   %arrayidx1.epil = getelementptr inbounds float, float* %c, i32 %i.09.epil
-  %12 = load float, float* %arrayidx1.epil, align 4
-  %mul.epil = fmul fast float %12, %11
+  %i12 = load float, float* %arrayidx1.epil, align 4
+  %mul.epil = fmul fast float %i12, %i11
   %arrayidx2.epil = getelementptr inbounds float, float* %a, i32 %i.09.epil
   store float %mul.epil, float* %arrayidx2.epil, align 4
   %inc.epil = add nuw i32 %i.09.epil, 1
@@ -156,41 +156,41 @@ for.body.epil:                                    ; preds = %for.cond.cleanup.lo
   %epil.iter.cmp = icmp eq i32 %epil.iter.sub, 0
   br i1 %epil.iter.cmp, label %for.cond.cleanup, label %for.body.epil
 
-for.cond.cleanup:                                 ; preds = %vector.body, %for.cond.cleanup.loopexit.unr-lcssa, %for.body.epil, %entry
+for.cond.cleanup:                                 ; preds = %for.body.epil, %for.cond.cleanup.loopexit.unr-lcssa, %vector.body, %entry
   ret void
 
 for.body:                                         ; preds = %for.body, %for.body.preheader.new
   %i.09 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
   %niter = phi i32 [ %unroll_iter, %for.body.preheader.new ], [ %niter.nsub.3, %for.body ]
   %arrayidx = getelementptr inbounds float, float* %b, i32 %i.09
-  %13 = load float, float* %arrayidx, align 4
+  %i13 = load float, float* %arrayidx, align 4
   %arrayidx1 = getelementptr inbounds float, float* %c, i32 %i.09
-  %14 = load float, float* %arrayidx1, align 4
-  %mul = fmul fast float %14, %13
+  %i14 = load float, float* %arrayidx1, align 4
+  %mul = fmul fast float %i14, %i13
   %arrayidx2 = getelementptr inbounds float, float* %a, i32 %i.09
   store float %mul, float* %arrayidx2, align 4
   %inc = or i32 %i.09, 1
   %arrayidx.1 = getelementptr inbounds float, float* %b, i32 %inc
-  %15 = load float, float* %arrayidx.1, align 4
+  %i15 = load float, float* %arrayidx.1, align 4
   %arrayidx1.1 = getelementptr inbounds float, float* %c, i32 %inc
-  %16 = load float, float* %arrayidx1.1, align 4
-  %mul.1 = fmul fast float %16, %15
+  %i16 = load float, float* %arrayidx1.1, align 4
+  %mul.1 = fmul fast float %i16, %i15
   %arrayidx2.1 = getelementptr inbounds float, float* %a, i32 %inc
   store float %mul.1, float* %arrayidx2.1, align 4
   %inc.1 = or i32 %i.09, 2
   %arrayidx.2 = getelementptr inbounds float, float* %b, i32 %inc.1
-  %17 = load float, float* %arrayidx.2, align 4
+  %i17 = load float, float* %arrayidx.2, align 4
   %arrayidx1.2 = getelementptr inbounds float, float* %c, i32 %inc.1
-  %18 = load float, float* %arrayidx1.2, align 4
-  %mul.2 = fmul fast float %18, %17
+  %i18 = load float, float* %arrayidx1.2, align 4
+  %mul.2 = fmul fast float %i18, %i17
   %arrayidx2.2 = getelementptr inbounds float, float* %a, i32 %inc.1
   store float %mul.2, float* %arrayidx2.2, align 4
   %inc.2 = or i32 %i.09, 3
   %arrayidx.3 = getelementptr inbounds float, float* %b, i32 %inc.2
-  %19 = load float, float* %arrayidx.3, align 4
+  %i19 = load float, float* %arrayidx.3, align 4
   %arrayidx1.3 = getelementptr inbounds float, float* %c, i32 %inc.2
-  %20 = load float, float* %arrayidx1.3, align 4
-  %mul.3 = fmul fast float %20, %19
+  %i20 = load float, float* %arrayidx1.3, align 4
+  %mul.3 = fmul fast float %i20, %i19
   %arrayidx2.3 = getelementptr inbounds float, float* %a, i32 %inc.2
   store float %mul.3, float* %arrayidx2.3, align 4
   %inc.3 = add nuw i32 %i.09, 4
@@ -248,31 +248,31 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %vec.phi = phi <4 x float> [ zeroinitializer, %vector.ph ], [ %6, %vector.body ]
-  %0 = getelementptr inbounds float, float* %b, i32 %index
-  %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
-  %2 = bitcast float* %0 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %2, i32 4, <4 x i1> %1, <4 x float> undef)
-  %3 = getelementptr inbounds float, float* %c, i32 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.masked.load13 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %4, i32 4, <4 x i1> %1, <4 x float> undef)
-  %5 = fmul fast <4 x float> %wide.masked.load13, %wide.masked.load
-  %6 = fadd fast <4 x float> %5, %vec.phi
+  %vec.phi = phi <4 x float> [ zeroinitializer, %vector.ph ], [ %i6, %vector.body ]
+  %i = getelementptr inbounds float, float* %b, i32 %index
+  %i1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
+  %i2 = bitcast float* %i to <4 x float>*
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i2, i32 4, <4 x i1> %i1, <4 x float> undef)
+  %i3 = getelementptr inbounds float, float* %c, i32 %index
+  %i4 = bitcast float* %i3 to <4 x float>*
+  %wide.masked.load13 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i4, i32 4, <4 x i1> %i1, <4 x float> undef)
+  %i5 = fmul fast <4 x float> %wide.masked.load13, %wide.masked.load
+  %i6 = fadd fast <4 x float> %i5, %vec.phi
   %index.next = add i32 %index, 4
-  %7 = icmp eq i32 %index.next, %n.vec
-  br i1 %7, label %middle.block, label %vector.body
+  %i7 = icmp eq i32 %index.next, %n.vec
+  br i1 %i7, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
-  %8 = select <4 x i1> %1, <4 x float> %6, <4 x float> %vec.phi
-  %rdx.shuf = shufflevector <4 x float> %8, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
-  %bin.rdx = fadd fast <4 x float> %8, %rdx.shuf
+  %i8 = select <4 x i1> %i1, <4 x float> %i6, <4 x float> %vec.phi
+  %rdx.shuf = shufflevector <4 x float> %i8, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+  %bin.rdx = fadd fast <4 x float> %i8, %rdx.shuf
   %rdx.shuf14 = shufflevector <4 x float> %bin.rdx, <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
   %bin.rdx15 = fadd fast <4 x float> %bin.rdx, %rdx.shuf14
-  %9 = extractelement <4 x float> %bin.rdx15, i32 0
+  %i9 = extractelement <4 x float> %bin.rdx15, i32 0
   br label %for.cond.cleanup
 
 for.cond.cleanup:                                 ; preds = %middle.block, %entry
-  %a.0.lcssa = phi float [ 0.000000e+00, %entry ], [ %9, %middle.block ]
+  %a.0.lcssa = phi float [ 0.000000e+00, %entry ], [ %i9, %middle.block ]
   ret float %a.0.lcssa
 }
 
@@ -465,35 +465,35 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %vec.phi = phi <4 x float> [ zeroinitializer, %vector.ph ], [ %7, %vector.body ]
+  %vec.phi = phi <4 x float> [ zeroinitializer, %vector.ph ], [ %i7, %vector.body ]
   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
-  %0 = getelementptr inbounds half, half* %b, i32 %index
-  %1 = icmp ule <4 x i32> %induction, %broadcast.splat12
-  %2 = bitcast half* %0 to <4 x half>*
-  %wide.masked.load = call <4 x half> @llvm.masked.load.v4f16.p0v4f16(<4 x half>* %2, i32 2, <4 x i1> %1, <4 x half> undef)
-  %3 = getelementptr inbounds half, half* %c, i32 %index
-  %4 = bitcast half* %3 to <4 x half>*
-  %wide.masked.load13 = call <4 x half> @llvm.masked.load.v4f16.p0v4f16(<4 x half>* %4, i32 2, <4 x i1> %1, <4 x half> undef)
-  %5 = fmul fast <4 x half> %wide.masked.load13, %wide.masked.load
-  %6 = fpext <4 x half> %5 to <4 x float>
-  %7 = fadd fast <4 x float> %vec.phi, %6
+  %i = getelementptr inbounds half, half* %b, i32 %index
+  %i1 = icmp ule <4 x i32> %induction, %broadcast.splat12
+  %i2 = bitcast half* %i to <4 x half>*
+  %wide.masked.load = call <4 x half> @llvm.masked.load.v4f16.p0v4f16(<4 x half>* %i2, i32 2, <4 x i1> %i1, <4 x half> undef)
+  %i3 = getelementptr inbounds half, half* %c, i32 %index
+  %i4 = bitcast half* %i3 to <4 x half>*
+  %wide.masked.load13 = call <4 x half> @llvm.masked.load.v4f16.p0v4f16(<4 x half>* %i4, i32 2, <4 x i1> %i1, <4 x half> undef)
+  %i5 = fmul fast <4 x half> %wide.masked.load13, %wide.masked.load
+  %i6 = fpext <4 x half> %i5 to <4 x float>
+  %i7 = fadd fast <4 x float> %vec.phi, %i6
   %index.next = add i32 %index, 4
-  %8 = icmp eq i32 %index.next, %n.vec
-  br i1 %8, label %middle.block, label %vector.body
+  %i8 = icmp eq i32 %index.next, %n.vec
+  br i1 %i8, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
-  %9 = select <4 x i1> %1, <4 x float> %7, <4 x float> %vec.phi
-  %rdx.shuf = shufflevector <4 x float> %9, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
-  %bin.rdx = fadd fast <4 x float> %9, %rdx.shuf
+  %i9 = select <4 x i1> %i1, <4 x float> %i7, <4 x float> %vec.phi
+  %rdx.shuf = shufflevector <4 x float> %i9, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+  %bin.rdx = fadd fast <4 x float> %i9, %rdx.shuf
   %rdx.shuf14 = shufflevector <4 x float> %bin.rdx, <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
   %bin.rdx15 = fadd fast <4 x float> %bin.rdx, %rdx.shuf14
-  %10 = extractelement <4 x float> %bin.rdx15, i32 0
+  %i10 = extractelement <4 x float> %bin.rdx15, i32 0
   br label %for.cond.cleanup
 
 for.cond.cleanup:                                 ; preds = %middle.block, %entry
-  %a.0.lcssa = phi float [ 0.000000e+00, %entry ], [ %10, %middle.block ]
+  %a.0.lcssa = phi float [ 0.000000e+00, %entry ], [ %i10, %middle.block ]
   ret float %a.0.lcssa
 }
 

diff --git a/llvm/test/CodeGen/Thumb2/mve-float16regloops.ll b/llvm/test/CodeGen/Thumb2/mve-float16regloops.ll
index 15d43f3ed0161..61e595517f5e3 100644
--- a/llvm/test/CodeGen/Thumb2/mve-float16regloops.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-float16regloops.ll
@@ -19,8 +19,8 @@ define arm_aapcs_vfpcc void @test_fadd(half* noalias nocapture readonly %A, half
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp18 = icmp sgt i32 %n, 0
   br i1 %cmp18, label %vector.ph, label %for.cond.cleanup
@@ -32,16 +32,16 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds half, half* %A, i32 %index
-  %2 = bitcast half* %1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %2, align 4
-  %3 = fadd fast <8 x half> %wide.load, %broadcast.splat11
-  %4 = getelementptr inbounds half, half* %C, i32 %index
-  %5 = bitcast half* %4 to <8 x half>*
-  store <8 x half> %3, <8 x half>* %5, align 4
+  %i1 = getelementptr inbounds half, half* %A, i32 %index
+  %i2 = bitcast half* %i1 to <8 x half>*
+  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
+  %i3 = fadd fast <8 x half> %wide.load, %broadcast.splat11
+  %i4 = getelementptr inbounds half, half* %C, i32 %index
+  %i5 = bitcast half* %i4 to <8 x half>*
+  store <8 x half> %i3, <8 x half>* %i5, align 4
   %index.next = add i32 %index, 8
-  %6 = icmp eq i32 %index.next, %n
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %i6 = icmp eq i32 %index.next, %n
+  br i1 %i6, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -65,8 +65,8 @@ define arm_aapcs_vfpcc void @test_fadd_r(half* noalias nocapture readonly %A, ha
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp18 = icmp sgt i32 %n, 0
   br i1 %cmp18, label %vector.ph, label %for.cond.cleanup
@@ -78,16 +78,16 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds half, half* %A, i32 %index
-  %2 = bitcast half* %1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %2, align 4
-  %3 = fadd fast <8 x half> %broadcast.splat11, %wide.load
-  %4 = getelementptr inbounds half, half* %C, i32 %index
-  %5 = bitcast half* %4 to <8 x half>*
-  store <8 x half> %3, <8 x half>* %5, align 4
+  %i1 = getelementptr inbounds half, half* %A, i32 %index
+  %i2 = bitcast half* %i1 to <8 x half>*
+  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
+  %i3 = fadd fast <8 x half> %broadcast.splat11, %wide.load
+  %i4 = getelementptr inbounds half, half* %C, i32 %index
+  %i5 = bitcast half* %i4 to <8 x half>*
+  store <8 x half> %i3, <8 x half>* %i5, align 4
   %index.next = add i32 %index, 8
-  %6 = icmp eq i32 %index.next, %n
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %i6 = icmp eq i32 %index.next, %n
+  br i1 %i6, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -111,8 +111,8 @@ define arm_aapcs_vfpcc void @test_fmul(half* noalias nocapture readonly %A, half
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp18 = icmp sgt i32 %n, 0
   br i1 %cmp18, label %vector.ph, label %for.cond.cleanup
@@ -124,16 +124,16 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds half, half* %A, i32 %index
-  %2 = bitcast half* %1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %2, align 4
-  %3 = fmul fast <8 x half> %wide.load, %broadcast.splat11
-  %4 = getelementptr inbounds half, half* %C, i32 %index
-  %5 = bitcast half* %4 to <8 x half>*
-  store <8 x half> %3, <8 x half>* %5, align 4
+  %i1 = getelementptr inbounds half, half* %A, i32 %index
+  %i2 = bitcast half* %i1 to <8 x half>*
+  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
+  %i3 = fmul fast <8 x half> %wide.load, %broadcast.splat11
+  %i4 = getelementptr inbounds half, half* %C, i32 %index
+  %i5 = bitcast half* %i4 to <8 x half>*
+  store <8 x half> %i3, <8 x half>* %i5, align 4
   %index.next = add i32 %index, 8
-  %6 = icmp eq i32 %index.next, %n
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %i6 = icmp eq i32 %index.next, %n
+  br i1 %i6, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -157,8 +157,8 @@ define arm_aapcs_vfpcc void @test_fmul_r(half* noalias nocapture readonly %A, ha
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp18 = icmp sgt i32 %n, 0
   br i1 %cmp18, label %vector.ph, label %for.cond.cleanup
@@ -170,16 +170,16 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds half, half* %A, i32 %index
-  %2 = bitcast half* %1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %2, align 4
-  %3 = fmul fast <8 x half> %broadcast.splat11, %wide.load
-  %4 = getelementptr inbounds half, half* %C, i32 %index
-  %5 = bitcast half* %4 to <8 x half>*
-  store <8 x half> %3, <8 x half>* %5, align 4
+  %i1 = getelementptr inbounds half, half* %A, i32 %index
+  %i2 = bitcast half* %i1 to <8 x half>*
+  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
+  %i3 = fmul fast <8 x half> %broadcast.splat11, %wide.load
+  %i4 = getelementptr inbounds half, half* %C, i32 %index
+  %i5 = bitcast half* %i4 to <8 x half>*
+  store <8 x half> %i3, <8 x half>* %i5, align 4
   %index.next = add i32 %index, 8
-  %6 = icmp eq i32 %index.next, %n
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %i6 = icmp eq i32 %index.next, %n
+  br i1 %i6, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -203,8 +203,8 @@ define arm_aapcs_vfpcc void @test_fsub(half* noalias nocapture readonly %A, half
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp18 = icmp sgt i32 %n, 0
   br i1 %cmp18, label %vector.ph, label %for.cond.cleanup
@@ -216,16 +216,16 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds half, half* %A, i32 %index
-  %2 = bitcast half* %1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %2, align 4
-  %3 = fsub fast <8 x half> %wide.load, %broadcast.splat11
-  %4 = getelementptr inbounds half, half* %C, i32 %index
-  %5 = bitcast half* %4 to <8 x half>*
-  store <8 x half> %3, <8 x half>* %5, align 4
+  %i1 = getelementptr inbounds half, half* %A, i32 %index
+  %i2 = bitcast half* %i1 to <8 x half>*
+  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
+  %i3 = fsub fast <8 x half> %wide.load, %broadcast.splat11
+  %i4 = getelementptr inbounds half, half* %C, i32 %index
+  %i5 = bitcast half* %i4 to <8 x half>*
+  store <8 x half> %i3, <8 x half>* %i5, align 4
   %index.next = add i32 %index, 8
-  %6 = icmp eq i32 %index.next, %n
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %i6 = icmp eq i32 %index.next, %n
+  br i1 %i6, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -250,8 +250,8 @@ define arm_aapcs_vfpcc void @test_fsub_r(half* noalias nocapture readonly %A, ha
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp18 = icmp sgt i32 %n, 0
   br i1 %cmp18, label %vector.ph, label %for.cond.cleanup
@@ -263,16 +263,16 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds half, half* %A, i32 %index
-  %2 = bitcast half* %1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %2, align 4
-  %3 = fsub fast <8 x half> %broadcast.splat11, %wide.load
-  %4 = getelementptr inbounds half, half* %C, i32 %index
-  %5 = bitcast half* %4 to <8 x half>*
-  store <8 x half> %3, <8 x half>* %5, align 4
+  %i1 = getelementptr inbounds half, half* %A, i32 %index
+  %i2 = bitcast half* %i1 to <8 x half>*
+  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
+  %i3 = fsub fast <8 x half> %broadcast.splat11, %wide.load
+  %i4 = getelementptr inbounds half, half* %C, i32 %index
+  %i5 = bitcast half* %i4 to <8 x half>*
+  store <8 x half> %i3, <8 x half>* %i5, align 4
   %index.next = add i32 %index, 8
-  %6 = icmp eq i32 %index.next, %n
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %i6 = icmp eq i32 %index.next, %n
+  br i1 %i6, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -298,8 +298,8 @@ define arm_aapcs_vfpcc void @test_fmas(half* noalias nocapture readonly %A, half
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp110 = icmp sgt i32 %n, 0
   br i1 %cmp110, label %vector.ph, label %for.cond.cleanup
@@ -311,20 +311,20 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds half, half* %A, i32 %index
-  %2 = bitcast half* %1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %2, align 4
-  %3 = getelementptr inbounds half, half* %B, i32 %index
-  %4 = bitcast half* %3 to <8 x half>*
-  %wide.load12 = load <8 x half>, <8 x half>* %4, align 4
-  %5 = fmul fast <8 x half> %wide.load12, %wide.load
-  %6 = fadd fast <8 x half> %5, %broadcast.splat14
-  %7 = getelementptr inbounds half, half* %D, i32 %index
-  %8 = bitcast half* %7 to <8 x half>*
-  store <8 x half> %6, <8 x half>* %8, align 4
+  %i1 = getelementptr inbounds half, half* %A, i32 %index
+  %i2 = bitcast half* %i1 to <8 x half>*
+  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
+  %i3 = getelementptr inbounds half, half* %B, i32 %index
+  %i4 = bitcast half* %i3 to <8 x half>*
+  %wide.load12 = load <8 x half>, <8 x half>* %i4, align 4
+  %i5 = fmul fast <8 x half> %wide.load12, %wide.load
+  %i6 = fadd fast <8 x half> %i5, %broadcast.splat14
+  %i7 = getelementptr inbounds half, half* %D, i32 %index
+  %i8 = bitcast half* %i7 to <8 x half>*
+  store <8 x half> %i6, <8 x half>* %i8, align 4
   %index.next = add i32 %index, 8
-  %9 = icmp eq i32 %index.next, %n
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %i9 = icmp eq i32 %index.next, %n
+  br i1 %i9, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -349,8 +349,8 @@ define arm_aapcs_vfpcc void @test_fmas_r(half* noalias nocapture readonly %A, ha
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp110 = icmp sgt i32 %n, 0
   br i1 %cmp110, label %vector.ph, label %for.cond.cleanup
@@ -362,20 +362,20 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds half, half* %A, i32 %index
-  %2 = bitcast half* %1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %2, align 4
-  %3 = getelementptr inbounds half, half* %B, i32 %index
-  %4 = bitcast half* %3 to <8 x half>*
-  %wide.load12 = load <8 x half>, <8 x half>* %4, align 4
-  %5 = fmul fast <8 x half> %wide.load12, %wide.load
-  %6 = fadd fast <8 x half> %broadcast.splat14, %5
-  %7 = getelementptr inbounds half, half* %D, i32 %index
-  %8 = bitcast half* %7 to <8 x half>*
-  store <8 x half> %6, <8 x half>* %8, align 4
+  %i1 = getelementptr inbounds half, half* %A, i32 %index
+  %i2 = bitcast half* %i1 to <8 x half>*
+  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
+  %i3 = getelementptr inbounds half, half* %B, i32 %index
+  %i4 = bitcast half* %i3 to <8 x half>*
+  %wide.load12 = load <8 x half>, <8 x half>* %i4, align 4
+  %i5 = fmul fast <8 x half> %wide.load12, %wide.load
+  %i6 = fadd fast <8 x half> %broadcast.splat14, %i5
+  %i7 = getelementptr inbounds half, half* %D, i32 %index
+  %i8 = bitcast half* %i7 to <8 x half>*
+  store <8 x half> %i6, <8 x half>* %i8, align 4
   %index.next = add i32 %index, 8
-  %9 = icmp eq i32 %index.next, %n
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %i9 = icmp eq i32 %index.next, %n
+  br i1 %i9, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -400,8 +400,8 @@ define arm_aapcs_vfpcc void @test_fma(half* noalias nocapture readonly %A, half*
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp110 = icmp sgt i32 %n, 0
   br i1 %cmp110, label %vector.ph, label %for.cond.cleanup
@@ -413,20 +413,20 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds half, half* %A, i32 %index
-  %2 = bitcast half* %1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %2, align 4
-  %3 = fmul fast <8 x half> %wide.load, %broadcast.splat13
-  %4 = getelementptr inbounds half, half* %B, i32 %index
-  %5 = bitcast half* %4 to <8 x half>*
-  %wide.load14 = load <8 x half>, <8 x half>* %5, align 4
-  %6 = fadd fast <8 x half> %3, %wide.load14
-  %7 = getelementptr inbounds half, half* %D, i32 %index
-  %8 = bitcast half* %7 to <8 x half>*
-  store <8 x half> %6, <8 x half>* %8, align 4
+  %i1 = getelementptr inbounds half, half* %A, i32 %index
+  %i2 = bitcast half* %i1 to <8 x half>*
+  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
+  %i3 = fmul fast <8 x half> %wide.load, %broadcast.splat13
+  %i4 = getelementptr inbounds half, half* %B, i32 %index
+  %i5 = bitcast half* %i4 to <8 x half>*
+  %wide.load14 = load <8 x half>, <8 x half>* %i5, align 4
+  %i6 = fadd fast <8 x half> %i3, %wide.load14
+  %i7 = getelementptr inbounds half, half* %D, i32 %index
+  %i8 = bitcast half* %i7 to <8 x half>*
+  store <8 x half> %i6, <8 x half>* %i8, align 4
   %index.next = add i32 %index, 8
-  %9 = icmp eq i32 %index.next, %n
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %i9 = icmp eq i32 %index.next, %n
+  br i1 %i9, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -451,8 +451,8 @@ define arm_aapcs_vfpcc void @test_fma_r(half* noalias nocapture readonly %A, hal
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp110 = icmp sgt i32 %n, 0
   br i1 %cmp110, label %vector.ph, label %for.cond.cleanup
@@ -464,20 +464,20 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds half, half* %A, i32 %index
-  %2 = bitcast half* %1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %2, align 4
-  %3 = fmul fast <8 x half> %broadcast.splat13, %wide.load
-  %4 = getelementptr inbounds half, half* %B, i32 %index
-  %5 = bitcast half* %4 to <8 x half>*
-  %wide.load14 = load <8 x half>, <8 x half>* %5, align 4
-  %6 = fadd fast <8 x half> %3, %wide.load14
-  %7 = getelementptr inbounds half, half* %D, i32 %index
-  %8 = bitcast half* %7 to <8 x half>*
-  store <8 x half> %6, <8 x half>* %8, align 4
+  %i1 = getelementptr inbounds half, half* %A, i32 %index
+  %i2 = bitcast half* %i1 to <8 x half>*
+  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
+  %i3 = fmul fast <8 x half> %broadcast.splat13, %wide.load
+  %i4 = getelementptr inbounds half, half* %B, i32 %index
+  %i5 = bitcast half* %i4 to <8 x half>*
+  %wide.load14 = load <8 x half>, <8 x half>* %i5, align 4
+  %i6 = fadd fast <8 x half> %i3, %wide.load14
+  %i7 = getelementptr inbounds half, half* %D, i32 %index
+  %i8 = bitcast half* %i7 to <8 x half>*
+  store <8 x half> %i6, <8 x half>* %i8, align 4
   %index.next = add i32 %index, 8
-  %9 = icmp eq i32 %index.next, %n
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %i9 = icmp eq i32 %index.next, %n
+  br i1 %i9, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -506,8 +506,8 @@ define arm_aapcs_vfpcc void @test_fmss(half* noalias nocapture readonly %A, half
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp110 = icmp sgt i32 %n, 0
   br i1 %cmp110, label %vector.ph, label %for.cond.cleanup
@@ -519,20 +519,20 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds half, half* %A, i32 %index
-  %2 = bitcast half* %1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %2, align 4
-  %3 = getelementptr inbounds half, half* %B, i32 %index
-  %4 = bitcast half* %3 to <8 x half>*
-  %wide.load12 = load <8 x half>, <8 x half>* %4, align 4
-  %5 = fmul fast <8 x half> %wide.load12, %wide.load
-  %6 = fsub fast <8 x half> %5, %broadcast.splat14
-  %7 = getelementptr inbounds half, half* %D, i32 %index
-  %8 = bitcast half* %7 to <8 x half>*
-  store <8 x half> %6, <8 x half>* %8, align 4
+  %i1 = getelementptr inbounds half, half* %A, i32 %index
+  %i2 = bitcast half* %i1 to <8 x half>*
+  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
+  %i3 = getelementptr inbounds half, half* %B, i32 %index
+  %i4 = bitcast half* %i3 to <8 x half>*
+  %wide.load12 = load <8 x half>, <8 x half>* %i4, align 4
+  %i5 = fmul fast <8 x half> %wide.load12, %wide.load
+  %i6 = fsub fast <8 x half> %i5, %broadcast.splat14
+  %i7 = getelementptr inbounds half, half* %D, i32 %index
+  %i8 = bitcast half* %i7 to <8 x half>*
+  store <8 x half> %i6, <8 x half>* %i8, align 4
   %index.next = add i32 %index, 8
-  %9 = icmp eq i32 %index.next, %n
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %i9 = icmp eq i32 %index.next, %n
+  br i1 %i9, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -559,8 +559,8 @@ define arm_aapcs_vfpcc void @test_fmss_r(half* noalias nocapture readonly %A, ha
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp110 = icmp sgt i32 %n, 0
   br i1 %cmp110, label %vector.ph, label %for.cond.cleanup
@@ -572,20 +572,20 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds half, half* %A, i32 %index
-  %2 = bitcast half* %1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %2, align 4
-  %3 = getelementptr inbounds half, half* %B, i32 %index
-  %4 = bitcast half* %3 to <8 x half>*
-  %wide.load12 = load <8 x half>, <8 x half>* %4, align 4
-  %5 = fmul fast <8 x half> %wide.load12, %wide.load
-  %6 = fsub fast <8 x half> %broadcast.splat14, %5
-  %7 = getelementptr inbounds half, half* %D, i32 %index
-  %8 = bitcast half* %7 to <8 x half>*
-  store <8 x half> %6, <8 x half>* %8, align 4
+  %i1 = getelementptr inbounds half, half* %A, i32 %index
+  %i2 = bitcast half* %i1 to <8 x half>*
+  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
+  %i3 = getelementptr inbounds half, half* %B, i32 %index
+  %i4 = bitcast half* %i3 to <8 x half>*
+  %wide.load12 = load <8 x half>, <8 x half>* %i4, align 4
+  %i5 = fmul fast <8 x half> %wide.load12, %wide.load
+  %i6 = fsub fast <8 x half> %broadcast.splat14, %i5
+  %i7 = getelementptr inbounds half, half* %D, i32 %index
+  %i8 = bitcast half* %i7 to <8 x half>*
+  store <8 x half> %i6, <8 x half>* %i8, align 4
   %index.next = add i32 %index, 8
-  %9 = icmp eq i32 %index.next, %n
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %i9 = icmp eq i32 %index.next, %n
+  br i1 %i9, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -611,8 +611,8 @@ define arm_aapcs_vfpcc void @test_fms(half* noalias nocapture readonly %A, half*
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp110 = icmp sgt i32 %n, 0
   br i1 %cmp110, label %vector.ph, label %for.cond.cleanup
@@ -624,20 +624,20 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds half, half* %A, i32 %index
-  %2 = bitcast half* %1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %2, align 4
-  %3 = fmul fast <8 x half> %wide.load, %broadcast.splat13
-  %4 = getelementptr inbounds half, half* %B, i32 %index
-  %5 = bitcast half* %4 to <8 x half>*
-  %wide.load14 = load <8 x half>, <8 x half>* %5, align 4
-  %6 = fsub fast <8 x half> %3, %wide.load14
-  %7 = getelementptr inbounds half, half* %D, i32 %index
-  %8 = bitcast half* %7 to <8 x half>*
-  store <8 x half> %6, <8 x half>* %8, align 4
+  %i1 = getelementptr inbounds half, half* %A, i32 %index
+  %i2 = bitcast half* %i1 to <8 x half>*
+  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
+  %i3 = fmul fast <8 x half> %wide.load, %broadcast.splat13
+  %i4 = getelementptr inbounds half, half* %B, i32 %index
+  %i5 = bitcast half* %i4 to <8 x half>*
+  %wide.load14 = load <8 x half>, <8 x half>* %i5, align 4
+  %i6 = fsub fast <8 x half> %i3, %wide.load14
+  %i7 = getelementptr inbounds half, half* %D, i32 %index
+  %i8 = bitcast half* %i7 to <8 x half>*
+  store <8 x half> %i6, <8 x half>* %i8, align 4
   %index.next = add i32 %index, 8
-  %9 = icmp eq i32 %index.next, %n
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %i9 = icmp eq i32 %index.next, %n
+  br i1 %i9, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -663,8 +663,8 @@ define arm_aapcs_vfpcc void @test_fms_r(half* noalias nocapture readonly %A, hal
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp110 = icmp sgt i32 %n, 0
   br i1 %cmp110, label %vector.ph, label %for.cond.cleanup
@@ -676,27 +676,27 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds half, half* %A, i32 %index
-  %2 = bitcast half* %1 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %2, align 4
-  %3 = fmul fast <8 x half> %broadcast.splat13, %wide.load
-  %4 = getelementptr inbounds half, half* %B, i32 %index
-  %5 = bitcast half* %4 to <8 x half>*
-  %wide.load14 = load <8 x half>, <8 x half>* %5, align 4
-  %6 = fsub fast <8 x half> %3, %wide.load14
-  %7 = getelementptr inbounds half, half* %D, i32 %index
-  %8 = bitcast half* %7 to <8 x half>*
-  store <8 x half> %6, <8 x half>* %8, align 4
+  %i1 = getelementptr inbounds half, half* %A, i32 %index
+  %i2 = bitcast half* %i1 to <8 x half>*
+  %wide.load = load <8 x half>, <8 x half>* %i2, align 4
+  %i3 = fmul fast <8 x half> %broadcast.splat13, %wide.load
+  %i4 = getelementptr inbounds half, half* %B, i32 %index
+  %i5 = bitcast half* %i4 to <8 x half>*
+  %wide.load14 = load <8 x half>, <8 x half>* %i5, align 4
+  %i6 = fsub fast <8 x half> %i3, %wide.load14
+  %i7 = getelementptr inbounds half, half* %D, i32 %index
+  %i8 = bitcast half* %i7 to <8 x half>*
+  store <8 x half> %i6, <8 x half>* %i8, align 4
   %index.next = add i32 %index, 8
-  %9 = icmp eq i32 %index.next, %n
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %i9 = icmp eq i32 %index.next, %n
+  br i1 %i9, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
 
-define dso_local void @test_nested(half* noalias nocapture %pInT1, half* noalias nocapture readonly %pOutT1, half* noalias nocapture readonly %pPRT_in, half* noalias nocapture readnone %pPRT_pDst, i32 %numRows, i32 %numCols, i32 %l) local_unnamed_addr #0 {
+define dso_local void @test_nested(half* noalias nocapture %pInT1, half* noalias nocapture readonly %pOutT1, half* noalias nocapture readonly %pPRT_in, half* noalias nocapture readnone %pPRT_pDst, i32 %numRows, i32 %numCols, i32 %l) local_unnamed_addr {
 ; CHECK-LABEL: test_nested:
 ; CHECK:       @ %bb.0: @ %for.body.us.preheader
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -746,8 +746,8 @@ for.body.us:                                      ; preds = %for.cond6.for.end_c
   %pOutT1.addr.036.us = phi half* [ %incdec.ptr.us, %for.cond6.for.end_crit_edge.us ], [ %pOutT1, %for.body.us.preheader ]
   %pPRT_in.addr.035.us = phi half* [ %scevgep, %for.cond6.for.end_crit_edge.us ], [ %pPRT_in, %for.body.us.preheader ]
   %scevgep = getelementptr half, half* %pPRT_in.addr.035.us, i32 %numCols
-  %0 = load half, half* %pOutT1.addr.036.us, align 4
-  %broadcast.splatinsert47 = insertelement <8 x half> undef, half %0, i32 0
+  %i = load half, half* %pOutT1.addr.036.us, align 4
+  %broadcast.splatinsert47 = insertelement <8 x half> undef, half %i, i32 0
   %broadcast.splat48 = shufflevector <8 x half> %broadcast.splatinsert47, <8 x half> undef, <8 x i32> zeroinitializer
   br label %vector.body
 
@@ -755,16 +755,16 @@ vector.body:                                      ; preds = %vector.body, %for.b
   %index = phi i32 [ 0, %for.body.us ], [ %index.next, %vector.body ]
   %next.gep = getelementptr half, half* %pInT1.addr.038.us, i32 %index
   %next.gep45 = getelementptr half, half* %pPRT_in.addr.035.us, i32 %index
-  %1 = bitcast half* %next.gep to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %1, align 4
-  %2 = bitcast half* %next.gep45 to <8 x half>*
-  %wide.load46 = load <8 x half>, <8 x half>* %2, align 4
-  %3 = fmul fast <8 x half> %wide.load46, %broadcast.splat48
-  %4 = fsub fast <8 x half> %wide.load, %3
-  store <8 x half> %4, <8 x half>* %1, align 4
+  %i1 = bitcast half* %next.gep to <8 x half>*
+  %wide.load = load <8 x half>, <8 x half>* %i1, align 4
+  %i2 = bitcast half* %next.gep45 to <8 x half>*
+  %wide.load46 = load <8 x half>, <8 x half>* %i2, align 4
+  %i3 = fmul fast <8 x half> %wide.load46, %broadcast.splat48
+  %i4 = fsub fast <8 x half> %wide.load, %i3
+  store <8 x half> %i4, <8 x half>* %i1, align 4
   %index.next = add i32 %index, 8
-  %5 = icmp eq i32 %index.next, %numCols
-  br i1 %5, label %for.cond6.for.end_crit_edge.us, label %vector.body
+  %i5 = icmp eq i32 %index.next, %numCols
+  br i1 %i5, label %for.cond6.for.end_crit_edge.us, label %vector.body
 
 for.cond6.for.end_crit_edge.us:                   ; preds = %vector.body
   %incdec.ptr.us = getelementptr inbounds half, half* %pOutT1.addr.036.us, i32 1
@@ -880,74 +880,74 @@ define void @arm_fir_f32_1_4_mve(%struct.arm_fir_instance_f32* nocapture readonl
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
   %pState1 = getelementptr inbounds %struct.arm_fir_instance_f32, %struct.arm_fir_instance_f32* %S, i32 0, i32 1
-  %0 = load half*, half** %pState1, align 4
+  %i = load half*, half** %pState1, align 4
   %pCoeffs2 = getelementptr inbounds %struct.arm_fir_instance_f32, %struct.arm_fir_instance_f32* %S, i32 0, i32 2
-  %1 = load half*, half** %pCoeffs2, align 4
+  %i1 = load half*, half** %pCoeffs2, align 4
   %numTaps3 = getelementptr inbounds %struct.arm_fir_instance_f32, %struct.arm_fir_instance_f32* %S, i32 0, i32 0
-  %2 = load i16, i16* %numTaps3, align 4
-  %conv = zext i16 %2 to i32
+  %i2 = load i16, i16* %numTaps3, align 4
+  %conv = zext i16 %i2 to i32
   %sub = add nsw i32 %conv, -1
   %cmp = icmp ult i32 %sub, 4
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  %arrayidx = getelementptr inbounds half, half* %0, i32 %sub
-  %incdec.ptr = getelementptr inbounds half, half* %1, i32 1
-  %3 = load half, half* %1, align 4
-  %incdec.ptr6 = getelementptr inbounds half, half* %1, i32 2
-  %4 = load half, half* %incdec.ptr, align 4
-  %incdec.ptr7 = getelementptr inbounds half, half* %1, i32 3
-  %5 = load half, half* %incdec.ptr6, align 4
-  %6 = load half, half* %incdec.ptr7, align 4
+  %arrayidx = getelementptr inbounds half, half* %i, i32 %sub
+  %incdec.ptr = getelementptr inbounds half, half* %i1, i32 1
+  %i3 = load half, half* %i1, align 4
+  %incdec.ptr6 = getelementptr inbounds half, half* %i1, i32 2
+  %i4 = load half, half* %incdec.ptr, align 4
+  %incdec.ptr7 = getelementptr inbounds half, half* %i1, i32 3
+  %i5 = load half, half* %incdec.ptr6, align 4
+  %i6 = load half, half* %incdec.ptr7, align 4
   %shr = lshr i32 %blockSize, 2
   %cmp9146 = icmp eq i32 %shr, 0
-  %.pre161 = insertelement <8 x half> undef, half %3, i32 0
+  %.pre161 = insertelement <8 x half> undef, half %i3, i32 0
   %.pre162 = shufflevector <8 x half> %.pre161, <8 x half> undef, <8 x i32> zeroinitializer
-  %.pre163 = insertelement <8 x half> undef, half %4, i32 0
+  %.pre163 = insertelement <8 x half> undef, half %i4, i32 0
   %.pre164 = shufflevector <8 x half> %.pre163, <8 x half> undef, <8 x i32> zeroinitializer
-  %.pre165 = insertelement <8 x half> undef, half %5, i32 0
+  %.pre165 = insertelement <8 x half> undef, half %i5, i32 0
   %.pre166 = shufflevector <8 x half> %.pre165, <8 x half> undef, <8 x i32> zeroinitializer
-  %.pre167 = insertelement <8 x half> undef, half %6, i32 0
+  %.pre167 = insertelement <8 x half> undef, half %i6, i32 0
   %.pre168 = shufflevector <8 x half> %.pre167, <8 x half> undef, <8 x i32> zeroinitializer
   br i1 %cmp9146, label %while.end, label %while.body.lr.ph
 
 while.body.lr.ph:                                 ; preds = %if.then
-  %7 = and i32 %blockSize, -4
-  %scevgep158 = getelementptr half, half* %pDst, i32 %7
+  %i7 = and i32 %blockSize, -4
+  %scevgep158 = getelementptr half, half* %pDst, i32 %i7
   br label %while.body
 
-while.body:                                       ; preds = %while.body.lr.ph, %while.body
+while.body:                                       ; preds = %while.body, %while.body.lr.ph
   %pStateCur.0151 = phi half* [ %arrayidx, %while.body.lr.ph ], [ %add.ptr, %while.body ]
-  %pSamples.0150 = phi half* [ %0, %while.body.lr.ph ], [ %add.ptr24, %while.body ]
+  %pSamples.0150 = phi half* [ %i, %while.body.lr.ph ], [ %add.ptr24, %while.body ]
   %pOutput.0149 = phi half* [ %pDst, %while.body.lr.ph ], [ %add.ptr23, %while.body ]
   %pTempSrc.0148 = phi half* [ %pSrc, %while.body.lr.ph ], [ %add.ptr11, %while.body ]
   %blkCnt.0147 = phi i32 [ %shr, %while.body.lr.ph ], [ %dec, %while.body ]
-  %8 = bitcast half* %pTempSrc.0148 to <8 x half>*
-  %9 = load <8 x half>, <8 x half>* %8, align 4
-  %10 = bitcast half* %pStateCur.0151 to <8 x half>*
-  store <8 x half> %9, <8 x half>* %10, align 4
+  %i8 = bitcast half* %pTempSrc.0148 to <8 x half>*
+  %i9 = load <8 x half>, <8 x half>* %i8, align 4
+  %i10 = bitcast half* %pStateCur.0151 to <8 x half>*
+  store <8 x half> %i9, <8 x half>* %i10, align 4
   %add.ptr = getelementptr inbounds half, half* %pStateCur.0151, i32 4
   %add.ptr11 = getelementptr inbounds half, half* %pTempSrc.0148, i32 4
-  %11 = bitcast half* %pSamples.0150 to <8 x half>*
-  %12 = load <8 x half>, <8 x half>* %11, align 4
-  %13 = fmul fast <8 x half> %12, %.pre162
+  %i11 = bitcast half* %pSamples.0150 to <8 x half>*
+  %i12 = load <8 x half>, <8 x half>* %i11, align 4
+  %i13 = fmul fast <8 x half> %i12, %.pre162
   %arrayidx12 = getelementptr inbounds half, half* %pSamples.0150, i32 1
-  %14 = bitcast half* %arrayidx12 to <8 x half>*
-  %15 = load <8 x half>, <8 x half>* %14, align 4
-  %mul = fmul fast <8 x half> %15, %.pre164
-  %add = fadd fast <8 x half> %mul, %13
+  %i14 = bitcast half* %arrayidx12 to <8 x half>*
+  %i15 = load <8 x half>, <8 x half>* %i14, align 4
+  %mul = fmul fast <8 x half> %i15, %.pre164
+  %add = fadd fast <8 x half> %mul, %i13
   %arrayidx13 = getelementptr inbounds half, half* %pSamples.0150, i32 2
-  %16 = bitcast half* %arrayidx13 to <8 x half>*
-  %17 = load <8 x half>, <8 x half>* %16, align 4
-  %mul16 = fmul fast <8 x half> %17, %.pre166
+  %i16 = bitcast half* %arrayidx13 to <8 x half>*
+  %i17 = load <8 x half>, <8 x half>* %i16, align 4
+  %mul16 = fmul fast <8 x half> %i17, %.pre166
   %add17 = fadd fast <8 x half> %add, %mul16
   %arrayidx18 = getelementptr inbounds half, half* %pSamples.0150, i32 3
-  %18 = bitcast half* %arrayidx18 to <8 x half>*
-  %19 = load <8 x half>, <8 x half>* %18, align 4
-  %mul21 = fmul fast <8 x half> %19, %.pre168
+  %i18 = bitcast half* %arrayidx18 to <8 x half>*
+  %i19 = load <8 x half>, <8 x half>* %i18, align 4
+  %mul21 = fmul fast <8 x half> %i19, %.pre168
   %add22 = fadd fast <8 x half> %add17, %mul21
-  %20 = bitcast half* %pOutput.0149 to <8 x half>*
-  store <8 x half> %add22, <8 x half>* %20, align 4
+  %i20 = bitcast half* %pOutput.0149 to <8 x half>*
+  store <8 x half> %add22, <8 x half>* %i20, align 4
   %add.ptr23 = getelementptr inbounds half, half* %pOutput.0149, i32 4
   %add.ptr24 = getelementptr inbounds half, half* %pSamples.0150, i32 4
   %dec = add nsw i32 %blkCnt.0147, -1
@@ -955,65 +955,65 @@ while.body:                                       ; preds = %while.body.lr.ph, %
   br i1 %cmp9, label %while.end.loopexit, label %while.body
 
 while.end.loopexit:                               ; preds = %while.body
-  %scevgep157 = getelementptr half, half* %pSrc, i32 %7
-  %scevgep159 = getelementptr half, half* %0, i32 %7
+  %scevgep157 = getelementptr half, half* %pSrc, i32 %i7
+  %scevgep159 = getelementptr half, half* %i, i32 %i7
   br label %while.end
 
-while.end:                                        ; preds = %if.then, %while.end.loopexit
+while.end:                                        ; preds = %while.end.loopexit, %if.then
   %pTempSrc.0.lcssa = phi half* [ %scevgep157, %while.end.loopexit ], [ %pSrc, %if.then ]
   %pOutput.0.lcssa = phi half* [ %scevgep158, %while.end.loopexit ], [ %pDst, %if.then ]
-  %pSamples.0.lcssa = phi half* [ %scevgep159, %while.end.loopexit ], [ %0, %if.then ]
+  %pSamples.0.lcssa = phi half* [ %scevgep159, %while.end.loopexit ], [ %i, %if.then ]
   %pStateCur.0.lcssa = phi half* [ %add.ptr, %while.end.loopexit ], [ %arrayidx, %if.then ]
   %and = and i32 %blockSize, 3
-  %21 = tail call <8 x i1> @llvm.arm.mve.vctp16(i32 %and)
-  %22 = bitcast half* %pTempSrc.0.lcssa to <8 x half>*
-  %23 = load <8 x half>, <8 x half>* %22, align 4
-  %24 = bitcast half* %pStateCur.0.lcssa to <8 x half>*
-  tail call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %23, <8 x half>* %24, i32 4, <8 x i1> %21)
-  %25 = bitcast half* %pSamples.0.lcssa to <8 x half>*
-  %26 = load <8 x half>, <8 x half>* %25, align 4
-  %27 = fmul fast <8 x half> %26, %.pre162
+  %i21 = tail call <8 x i1> @llvm.arm.mve.vctp16(i32 %and)
+  %i22 = bitcast half* %pTempSrc.0.lcssa to <8 x half>*
+  %i23 = load <8 x half>, <8 x half>* %i22, align 4
+  %i24 = bitcast half* %pStateCur.0.lcssa to <8 x half>*
+  tail call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %i23, <8 x half>* %i24, i32 4, <8 x i1> %i21)
+  %i25 = bitcast half* %pSamples.0.lcssa to <8 x half>*
+  %i26 = load <8 x half>, <8 x half>* %i25, align 4
+  %i27 = fmul fast <8 x half> %i26, %.pre162
   %arrayidx29 = getelementptr inbounds half, half* %pSamples.0.lcssa, i32 1
-  %28 = bitcast half* %arrayidx29 to <8 x half>*
-  %29 = load <8 x half>, <8 x half>* %28, align 4
-  %mul32 = fmul fast <8 x half> %29, %.pre164
-  %add33 = fadd fast <8 x half> %mul32, %27
+  %i28 = bitcast half* %arrayidx29 to <8 x half>*
+  %i29 = load <8 x half>, <8 x half>* %i28, align 4
+  %mul32 = fmul fast <8 x half> %i29, %.pre164
+  %add33 = fadd fast <8 x half> %mul32, %i27
   %arrayidx34 = getelementptr inbounds half, half* %pSamples.0.lcssa, i32 2
-  %30 = bitcast half* %arrayidx34 to <8 x half>*
-  %31 = load <8 x half>, <8 x half>* %30, align 4
-  %mul37 = fmul fast <8 x half> %31, %.pre166
+  %i30 = bitcast half* %arrayidx34 to <8 x half>*
+  %i31 = load <8 x half>, <8 x half>* %i30, align 4
+  %mul37 = fmul fast <8 x half> %i31, %.pre166
   %add38 = fadd fast <8 x half> %add33, %mul37
   %arrayidx39 = getelementptr inbounds half, half* %pSamples.0.lcssa, i32 3
-  %32 = bitcast half* %arrayidx39 to <8 x half>*
-  %33 = load <8 x half>, <8 x half>* %32, align 4
-  %mul42 = fmul fast <8 x half> %33, %.pre168
+  %i32 = bitcast half* %arrayidx39 to <8 x half>*
+  %i33 = load <8 x half>, <8 x half>* %i32, align 4
+  %mul42 = fmul fast <8 x half> %i33, %.pre168
   %add43 = fadd fast <8 x half> %add38, %mul42
-  %34 = bitcast half* %pOutput.0.lcssa to <8 x half>*
-  tail call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %add43, <8 x half>* %34, i32 4, <8 x i1> %21)
+  %i34 = bitcast half* %pOutput.0.lcssa to <8 x half>*
+  tail call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %add43, <8 x half>* %i34, i32 4, <8 x i1> %i21)
   %.pre = load half*, half** %pState1, align 4
   br label %if.end
 
 if.end:                                           ; preds = %while.end, %entry
-  %35 = phi half* [ %.pre, %while.end ], [ %0, %entry ]
-  %arrayidx45 = getelementptr inbounds half, half* %35, i32 %blockSize
+  %i35 = phi half* [ %.pre, %while.end ], [ %i, %entry ]
+  %arrayidx45 = getelementptr inbounds half, half* %i35, i32 %blockSize
   %shr47 = lshr i32 %conv, 2
   %cmp49141 = icmp eq i32 %shr47, 0
   br i1 %cmp49141, label %while.end55, label %while.body51.preheader
 
 while.body51.preheader:                           ; preds = %if.end
-  %36 = and i32 %conv, 65532
-  %37 = add i32 %36, %blockSize
-  %scevgep = getelementptr half, half* %35, i32 %37
+  %i36 = and i32 %conv, 65532
+  %i37 = add i32 %i36, %blockSize
+  %scevgep = getelementptr half, half* %i35, i32 %i37
   br label %while.body51
 
-while.body51:                                     ; preds = %while.body51.preheader, %while.body51
+while.body51:                                     ; preds = %while.body51, %while.body51.preheader
   %pTempSrc.1144 = phi half* [ %add.ptr52, %while.body51 ], [ %arrayidx45, %while.body51.preheader ]
-  %pTempDest.0143 = phi half* [ %add.ptr53, %while.body51 ], [ %35, %while.body51.preheader ]
+  %pTempDest.0143 = phi half* [ %add.ptr53, %while.body51 ], [ %i35, %while.body51.preheader ]
   %blkCnt.1142 = phi i32 [ %dec54, %while.body51 ], [ %shr47, %while.body51.preheader ]
-  %38 = bitcast half* %pTempSrc.1144 to <8 x half>*
-  %39 = load <8 x half>, <8 x half>* %38, align 4
-  %40 = bitcast half* %pTempDest.0143 to <8 x half>*
-  store <8 x half> %39, <8 x half>* %40, align 4
+  %i38 = bitcast half* %pTempSrc.1144 to <8 x half>*
+  %i39 = load <8 x half>, <8 x half>* %i38, align 4
+  %i40 = bitcast half* %pTempDest.0143 to <8 x half>*
+  store <8 x half> %i39, <8 x half>* %i40, align 4
   %add.ptr52 = getelementptr inbounds half, half* %pTempSrc.1144, i32 4
   %add.ptr53 = getelementptr inbounds half, half* %pTempDest.0143, i32 4
   %dec54 = add nsw i32 %blkCnt.1142, -1
@@ -1021,25 +1021,25 @@ while.body51:                                     ; preds = %while.body51.prehea
   br i1 %cmp49, label %while.end55.loopexit, label %while.body51
 
 while.end55.loopexit:                             ; preds = %while.body51
-  %scevgep156 = getelementptr half, half* %35, i32 %36
+  %scevgep156 = getelementptr half, half* %i35, i32 %i36
   br label %while.end55
 
 while.end55:                                      ; preds = %while.end55.loopexit, %if.end
-  %pTempDest.0.lcssa = phi half* [ %35, %if.end ], [ %scevgep156, %while.end55.loopexit ]
+  %pTempDest.0.lcssa = phi half* [ %i35, %if.end ], [ %scevgep156, %while.end55.loopexit ]
   %pTempSrc.1.lcssa = phi half* [ %arrayidx45, %if.end ], [ %scevgep, %while.end55.loopexit ]
   %and56 = and i32 %conv, 3
   %cmp57 = icmp eq i32 %and56, 0
   br i1 %cmp57, label %if.end61, label %if.then59
 
 if.then59:                                        ; preds = %while.end55
-  %41 = tail call <8 x i1> @llvm.arm.mve.vctp16(i32 %and56)
-  %42 = bitcast half* %pTempSrc.1.lcssa to <8 x half>*
-  %43 = load <8 x half>, <8 x half>* %42, align 4
-  %44 = bitcast half* %pTempDest.0.lcssa to <8 x half>*
-  tail call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %43, <8 x half>* %44, i32 4, <8 x i1> %41)
+  %i41 = tail call <8 x i1> @llvm.arm.mve.vctp16(i32 %and56)
+  %i42 = bitcast half* %pTempSrc.1.lcssa to <8 x half>*
+  %i43 = load <8 x half>, <8 x half>* %i42, align 4
+  %i44 = bitcast half* %pTempDest.0.lcssa to <8 x half>*
+  tail call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %i43, <8 x half>* %i44, i32 4, <8 x i1> %i41)
   br label %if.end61
 
-if.end61:                                         ; preds = %while.end55, %if.then59
+if.end61:                                         ; preds = %if.then59, %while.end55
   ret void
 }
 
@@ -1193,12 +1193,12 @@ define void @fir(%struct.arm_fir_instance_f32* nocapture readonly %S, half* noca
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
   %pState1 = getelementptr inbounds %struct.arm_fir_instance_f32, %struct.arm_fir_instance_f32* %S, i32 0, i32 1
-  %0 = load half*, half** %pState1, align 4
+  %i = load half*, half** %pState1, align 4
   %pCoeffs2 = getelementptr inbounds %struct.arm_fir_instance_f32, %struct.arm_fir_instance_f32* %S, i32 0, i32 2
-  %1 = load half*, half** %pCoeffs2, align 4
+  %i1 = load half*, half** %pCoeffs2, align 4
   %numTaps3 = getelementptr inbounds %struct.arm_fir_instance_f32, %struct.arm_fir_instance_f32* %S, i32 0, i32 0
-  %2 = load i16, i16* %numTaps3, align 4
-  %conv = zext i16 %2 to i32
+  %i2 = load i16, i16* %numTaps3, align 4
+  %conv = zext i16 %i2 to i32
   %cmp = icmp ugt i32 %blockSize, 7
   br i1 %cmp, label %if.then, label %if.end
 
@@ -1209,164 +1209,164 @@ if.then:                                          ; preds = %entry
 
 while.body.lr.ph:                                 ; preds = %if.then
   %sub = add nsw i32 %conv, -1
-  %arrayidx = getelementptr inbounds half, half* %0, i32 %sub
-  %incdec.ptr = getelementptr inbounds half, half* %1, i32 1
-  %incdec.ptr7 = getelementptr inbounds half, half* %1, i32 2
-  %incdec.ptr8 = getelementptr inbounds half, half* %1, i32 3
-  %incdec.ptr9 = getelementptr inbounds half, half* %1, i32 4
-  %incdec.ptr10 = getelementptr inbounds half, half* %1, i32 5
-  %incdec.ptr11 = getelementptr inbounds half, half* %1, i32 6
-  %incdec.ptr12 = getelementptr inbounds half, half* %1, i32 7
+  %arrayidx = getelementptr inbounds half, half* %i, i32 %sub
+  %incdec.ptr = getelementptr inbounds half, half* %i1, i32 1
+  %incdec.ptr7 = getelementptr inbounds half, half* %i1, i32 2
+  %incdec.ptr8 = getelementptr inbounds half, half* %i1, i32 3
+  %incdec.ptr9 = getelementptr inbounds half, half* %i1, i32 4
+  %incdec.ptr10 = getelementptr inbounds half, half* %i1, i32 5
+  %incdec.ptr11 = getelementptr inbounds half, half* %i1, i32 6
+  %incdec.ptr12 = getelementptr inbounds half, half* %i1, i32 7
   %sub37 = add nsw i32 %conv, -8
   %div = sdiv i32 %sub37, 8
-  %pCoeffsCur.0199 = getelementptr inbounds half, half* %1, i32 8
-  %cmp38201 = icmp ugt i16 %2, 15
+  %pCoeffsCur.0199 = getelementptr inbounds half, half* %i1, i32 8
+  %cmp38201 = icmp ugt i16 %i2, 15
   %and = and i32 %sub37, 7
   %cmp74210 = icmp eq i32 %and, 0
   %idx.neg = sub nsw i32 0, %conv
-  %3 = icmp sgt i32 %div, 1
-  %smax = select i1 %3, i32 %div, i32 1
+  %i3 = icmp sgt i32 %div, 1
+  %smax = select i1 %i3, i32 %div, i32 1
   br label %while.body
 
-while.body:                                       ; preds = %while.body.lr.ph, %while.end
+while.body:                                       ; preds = %while.end, %while.body.lr.ph
   %blkCnt.0222 = phi i32 [ %shr, %while.body.lr.ph ], [ %dec84, %while.end ]
   %pStateCur.0221 = phi half* [ %arrayidx, %while.body.lr.ph ], [ %add.ptr, %while.end ]
-  %pSamples.0220 = phi half* [ %0, %while.body.lr.ph ], [ %add.ptr83, %while.end ]
+  %pSamples.0220 = phi half* [ %i, %while.body.lr.ph ], [ %add.ptr83, %while.end ]
   %pTempSrc.0219 = phi half* [ %pSrc, %while.body.lr.ph ], [ %add.ptr14, %while.end ]
   %pOutput.0218 = phi half* [ %pDst, %while.body.lr.ph ], [ %add.ptr81, %while.end ]
-  %4 = load half, half* %1, align 4
-  %5 = load half, half* %incdec.ptr, align 4
-  %6 = load half, half* %incdec.ptr7, align 4
-  %7 = load half, half* %incdec.ptr8, align 4
-  %8 = load half, half* %incdec.ptr9, align 4
-  %9 = load half, half* %incdec.ptr10, align 4
-  %10 = load half, half* %incdec.ptr11, align 4
-  %11 = load half, half* %incdec.ptr12, align 4
-  %12 = bitcast half* %pTempSrc.0219 to <8 x half>*
-  %13 = load <8 x half>, <8 x half>* %12, align 4
-  %14 = bitcast half* %pStateCur.0221 to <8 x half>*
-  store <8 x half> %13, <8 x half>* %14, align 4
+  %i4 = load half, half* %i1, align 4
+  %i5 = load half, half* %incdec.ptr, align 4
+  %i6 = load half, half* %incdec.ptr7, align 4
+  %i7 = load half, half* %incdec.ptr8, align 4
+  %i8 = load half, half* %incdec.ptr9, align 4
+  %i9 = load half, half* %incdec.ptr10, align 4
+  %i10 = load half, half* %incdec.ptr11, align 4
+  %i11 = load half, half* %incdec.ptr12, align 4
+  %i12 = bitcast half* %pTempSrc.0219 to <8 x half>*
+  %i13 = load <8 x half>, <8 x half>* %i12, align 4
+  %i14 = bitcast half* %pStateCur.0221 to <8 x half>*
+  store <8 x half> %i13, <8 x half>* %i14, align 4
   %add.ptr = getelementptr inbounds half, half* %pStateCur.0221, i32 4
   %add.ptr14 = getelementptr inbounds half, half* %pTempSrc.0219, i32 4
-  %15 = bitcast half* %pSamples.0220 to <8 x half>*
-  %16 = load <8 x half>, <8 x half>* %15, align 4
-  %.splatinsert = insertelement <8 x half> undef, half %4, i32 0
+  %i15 = bitcast half* %pSamples.0220 to <8 x half>*
+  %i16 = load <8 x half>, <8 x half>* %i15, align 4
+  %.splatinsert = insertelement <8 x half> undef, half %i4, i32 0
   %.splat = shufflevector <8 x half> %.splatinsert, <8 x half> undef, <8 x i32> zeroinitializer
-  %17 = fmul fast <8 x half> %16, %.splat
+  %i17 = fmul fast <8 x half> %i16, %.splat
   %arrayidx15 = getelementptr inbounds half, half* %pSamples.0220, i32 1
-  %18 = bitcast half* %arrayidx15 to <8 x half>*
-  %19 = load <8 x half>, <8 x half>* %18, align 4
-  %.splatinsert16 = insertelement <8 x half> undef, half %5, i32 0
+  %i18 = bitcast half* %arrayidx15 to <8 x half>*
+  %i19 = load <8 x half>, <8 x half>* %i18, align 4
+  %.splatinsert16 = insertelement <8 x half> undef, half %i5, i32 0
   %.splat17 = shufflevector <8 x half> %.splatinsert16, <8 x half> undef, <8 x i32> zeroinitializer
-  %20 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %19, <8 x half> %.splat17, <8 x half> %17)
+  %i20 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i19, <8 x half> %.splat17, <8 x half> %i17)
   %arrayidx18 = getelementptr inbounds half, half* %pSamples.0220, i32 2
-  %21 = bitcast half* %arrayidx18 to <8 x half>*
-  %22 = load <8 x half>, <8 x half>* %21, align 4
-  %.splatinsert19 = insertelement <8 x half> undef, half %6, i32 0
+  %i21 = bitcast half* %arrayidx18 to <8 x half>*
+  %i22 = load <8 x half>, <8 x half>* %i21, align 4
+  %.splatinsert19 = insertelement <8 x half> undef, half %i6, i32 0
   %.splat20 = shufflevector <8 x half> %.splatinsert19, <8 x half> undef, <8 x i32> zeroinitializer
-  %23 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %22, <8 x half> %.splat20, <8 x half> %20)
+  %i23 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i22, <8 x half> %.splat20, <8 x half> %i20)
   %arrayidx21 = getelementptr inbounds half, half* %pSamples.0220, i32 3
-  %24 = bitcast half* %arrayidx21 to <8 x half>*
-  %25 = load <8 x half>, <8 x half>* %24, align 4
-  %.splatinsert22 = insertelement <8 x half> undef, half %7, i32 0
+  %i24 = bitcast half* %arrayidx21 to <8 x half>*
+  %i25 = load <8 x half>, <8 x half>* %i24, align 4
+  %.splatinsert22 = insertelement <8 x half> undef, half %i7, i32 0
   %.splat23 = shufflevector <8 x half> %.splatinsert22, <8 x half> undef, <8 x i32> zeroinitializer
-  %26 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %25, <8 x half> %.splat23, <8 x half> %23)
+  %i26 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i25, <8 x half> %.splat23, <8 x half> %i23)
   %arrayidx24 = getelementptr inbounds half, half* %pSamples.0220, i32 4
-  %27 = bitcast half* %arrayidx24 to <8 x half>*
-  %28 = load <8 x half>, <8 x half>* %27, align 4
-  %.splatinsert25 = insertelement <8 x half> undef, half %8, i32 0
+  %i27 = bitcast half* %arrayidx24 to <8 x half>*
+  %i28 = load <8 x half>, <8 x half>* %i27, align 4
+  %.splatinsert25 = insertelement <8 x half> undef, half %i8, i32 0
   %.splat26 = shufflevector <8 x half> %.splatinsert25, <8 x half> undef, <8 x i32> zeroinitializer
-  %29 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %28, <8 x half> %.splat26, <8 x half> %26)
+  %i29 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i28, <8 x half> %.splat26, <8 x half> %i26)
   %arrayidx27 = getelementptr inbounds half, half* %pSamples.0220, i32 5
-  %30 = bitcast half* %arrayidx27 to <8 x half>*
-  %31 = load <8 x half>, <8 x half>* %30, align 4
-  %.splatinsert28 = insertelement <8 x half> undef, half %9, i32 0
+  %i30 = bitcast half* %arrayidx27 to <8 x half>*
+  %i31 = load <8 x half>, <8 x half>* %i30, align 4
+  %.splatinsert28 = insertelement <8 x half> undef, half %i9, i32 0
   %.splat29 = shufflevector <8 x half> %.splatinsert28, <8 x half> undef, <8 x i32> zeroinitializer
-  %32 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %31, <8 x half> %.splat29, <8 x half> %29)
+  %i32 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i31, <8 x half> %.splat29, <8 x half> %i29)
   %arrayidx30 = getelementptr inbounds half, half* %pSamples.0220, i32 6
-  %33 = bitcast half* %arrayidx30 to <8 x half>*
-  %34 = load <8 x half>, <8 x half>* %33, align 4
-  %.splatinsert31 = insertelement <8 x half> undef, half %10, i32 0
+  %i33 = bitcast half* %arrayidx30 to <8 x half>*
+  %i34 = load <8 x half>, <8 x half>* %i33, align 4
+  %.splatinsert31 = insertelement <8 x half> undef, half %i10, i32 0
   %.splat32 = shufflevector <8 x half> %.splatinsert31, <8 x half> undef, <8 x i32> zeroinitializer
-  %35 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %34, <8 x half> %.splat32, <8 x half> %32)
+  %i35 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i34, <8 x half> %.splat32, <8 x half> %i32)
   %arrayidx33 = getelementptr inbounds half, half* %pSamples.0220, i32 7
-  %36 = bitcast half* %arrayidx33 to <8 x half>*
-  %37 = load <8 x half>, <8 x half>* %36, align 4
-  %.splatinsert34 = insertelement <8 x half> undef, half %11, i32 0
+  %i36 = bitcast half* %arrayidx33 to <8 x half>*
+  %i37 = load <8 x half>, <8 x half>* %i36, align 4
+  %.splatinsert34 = insertelement <8 x half> undef, half %i11, i32 0
   %.splat35 = shufflevector <8 x half> %.splatinsert34, <8 x half> undef, <8 x i32> zeroinitializer
-  %38 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %37, <8 x half> %.splat35, <8 x half> %35)
+  %i38 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i37, <8 x half> %.splat35, <8 x half> %i35)
   %pSamples.1200 = getelementptr inbounds half, half* %pSamples.0220, i32 8
   br i1 %cmp38201, label %for.body, label %for.end
 
-for.body:                                         ; preds = %while.body, %for.body
+for.body:                                         ; preds = %for.body, %while.body
   %pSamples.1207 = phi half* [ %pSamples.1, %for.body ], [ %pSamples.1200, %while.body ]
   %pCoeffsCur.0206 = phi half* [ %pCoeffsCur.0, %for.body ], [ %pCoeffsCur.0199, %while.body ]
-  %.pn205 = phi half* [ %pCoeffsCur.0206, %for.body ], [ %1, %while.body ]
+  %.pn205 = phi half* [ %pCoeffsCur.0206, %for.body ], [ %i1, %while.body ]
   %i.0204 = phi i32 [ %inc, %for.body ], [ 0, %while.body ]
-  %vecAcc0.0203 = phi <8 x half> [ %70, %for.body ], [ %38, %while.body ]
+  %vecAcc0.0203 = phi <8 x half> [ %i70, %for.body ], [ %i38, %while.body ]
   %pSamples.0.pn202 = phi half* [ %pSamples.1207, %for.body ], [ %pSamples.0220, %while.body ]
   %incdec.ptr40 = getelementptr inbounds half, half* %.pn205, i32 9
-  %39 = load half, half* %pCoeffsCur.0206, align 4
+  %i39 = load half, half* %pCoeffsCur.0206, align 4
   %incdec.ptr41 = getelementptr inbounds half, half* %.pn205, i32 10
-  %40 = load half, half* %incdec.ptr40, align 4
+  %i40 = load half, half* %incdec.ptr40, align 4
   %incdec.ptr42 = getelementptr inbounds half, half* %.pn205, i32 11
-  %41 = load half, half* %incdec.ptr41, align 4
+  %i41 = load half, half* %incdec.ptr41, align 4
   %incdec.ptr43 = getelementptr inbounds half, half* %.pn205, i32 12
-  %42 = load half, half* %incdec.ptr42, align 4
+  %i42 = load half, half* %incdec.ptr42, align 4
   %incdec.ptr44 = getelementptr inbounds half, half* %.pn205, i32 13
-  %43 = load half, half* %incdec.ptr43, align 4
+  %i43 = load half, half* %incdec.ptr43, align 4
   %incdec.ptr45 = getelementptr inbounds half, half* %.pn205, i32 14
-  %44 = load half, half* %incdec.ptr44, align 4
+  %i44 = load half, half* %incdec.ptr44, align 4
   %incdec.ptr46 = getelementptr inbounds half, half* %.pn205, i32 15
-  %45 = load half, half* %incdec.ptr45, align 4
-  %46 = load half, half* %incdec.ptr46, align 4
-  %47 = bitcast half* %pSamples.1207 to <8 x half>*
-  %48 = load <8 x half>, <8 x half>* %47, align 4
-  %.splatinsert48 = insertelement <8 x half> undef, half %39, i32 0
+  %i45 = load half, half* %incdec.ptr45, align 4
+  %i46 = load half, half* %incdec.ptr46, align 4
+  %i47 = bitcast half* %pSamples.1207 to <8 x half>*
+  %i48 = load <8 x half>, <8 x half>* %i47, align 4
+  %.splatinsert48 = insertelement <8 x half> undef, half %i39, i32 0
   %.splat49 = shufflevector <8 x half> %.splatinsert48, <8 x half> undef, <8 x i32> zeroinitializer
-  %49 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %48, <8 x half> %.splat49, <8 x half> %vecAcc0.0203)
+  %i49 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i48, <8 x half> %.splat49, <8 x half> %vecAcc0.0203)
   %arrayidx50 = getelementptr inbounds half, half* %pSamples.0.pn202, i32 9
-  %50 = bitcast half* %arrayidx50 to <8 x half>*
-  %51 = load <8 x half>, <8 x half>* %50, align 4
-  %.splatinsert51 = insertelement <8 x half> undef, half %40, i32 0
+  %i50 = bitcast half* %arrayidx50 to <8 x half>*
+  %i51 = load <8 x half>, <8 x half>* %i50, align 4
+  %.splatinsert51 = insertelement <8 x half> undef, half %i40, i32 0
   %.splat52 = shufflevector <8 x half> %.splatinsert51, <8 x half> undef, <8 x i32> zeroinitializer
-  %52 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %51, <8 x half> %.splat52, <8 x half> %49)
+  %i52 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i51, <8 x half> %.splat52, <8 x half> %i49)
   %arrayidx53 = getelementptr inbounds half, half* %pSamples.0.pn202, i32 10
-  %53 = bitcast half* %arrayidx53 to <8 x half>*
-  %54 = load <8 x half>, <8 x half>* %53, align 4
-  %.splatinsert54 = insertelement <8 x half> undef, half %41, i32 0
+  %i53 = bitcast half* %arrayidx53 to <8 x half>*
+  %i54 = load <8 x half>, <8 x half>* %i53, align 4
+  %.splatinsert54 = insertelement <8 x half> undef, half %i41, i32 0
   %.splat55 = shufflevector <8 x half> %.splatinsert54, <8 x half> undef, <8 x i32> zeroinitializer
-  %55 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %54, <8 x half> %.splat55, <8 x half> %52)
+  %i55 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i54, <8 x half> %.splat55, <8 x half> %i52)
   %arrayidx56 = getelementptr inbounds half, half* %pSamples.0.pn202, i32 11
-  %56 = bitcast half* %arrayidx56 to <8 x half>*
-  %57 = load <8 x half>, <8 x half>* %56, align 4
-  %.splatinsert57 = insertelement <8 x half> undef, half %42, i32 0
+  %i56 = bitcast half* %arrayidx56 to <8 x half>*
+  %i57 = load <8 x half>, <8 x half>* %i56, align 4
+  %.splatinsert57 = insertelement <8 x half> undef, half %i42, i32 0
   %.splat58 = shufflevector <8 x half> %.splatinsert57, <8 x half> undef, <8 x i32> zeroinitializer
-  %58 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %57, <8 x half> %.splat58, <8 x half> %55)
+  %i58 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i57, <8 x half> %.splat58, <8 x half> %i55)
   %arrayidx59 = getelementptr inbounds half, half* %pSamples.0.pn202, i32 12
-  %59 = bitcast half* %arrayidx59 to <8 x half>*
-  %60 = load <8 x half>, <8 x half>* %59, align 4
-  %.splatinsert60 = insertelement <8 x half> undef, half %43, i32 0
+  %i59 = bitcast half* %arrayidx59 to <8 x half>*
+  %i60 = load <8 x half>, <8 x half>* %i59, align 4
+  %.splatinsert60 = insertelement <8 x half> undef, half %i43, i32 0
   %.splat61 = shufflevector <8 x half> %.splatinsert60, <8 x half> undef, <8 x i32> zeroinitializer
-  %61 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %60, <8 x half> %.splat61, <8 x half> %58)
+  %i61 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i60, <8 x half> %.splat61, <8 x half> %i58)
   %arrayidx62 = getelementptr inbounds half, half* %pSamples.0.pn202, i32 13
-  %62 = bitcast half* %arrayidx62 to <8 x half>*
-  %63 = load <8 x half>, <8 x half>* %62, align 4
-  %.splatinsert63 = insertelement <8 x half> undef, half %44, i32 0
+  %i62 = bitcast half* %arrayidx62 to <8 x half>*
+  %i63 = load <8 x half>, <8 x half>* %i62, align 4
+  %.splatinsert63 = insertelement <8 x half> undef, half %i44, i32 0
   %.splat64 = shufflevector <8 x half> %.splatinsert63, <8 x half> undef, <8 x i32> zeroinitializer
-  %64 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %63, <8 x half> %.splat64, <8 x half> %61)
+  %i64 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i63, <8 x half> %.splat64, <8 x half> %i61)
   %arrayidx65 = getelementptr inbounds half, half* %pSamples.0.pn202, i32 14
-  %65 = bitcast half* %arrayidx65 to <8 x half>*
-  %66 = load <8 x half>, <8 x half>* %65, align 4
-  %.splatinsert66 = insertelement <8 x half> undef, half %45, i32 0
+  %i65 = bitcast half* %arrayidx65 to <8 x half>*
+  %i66 = load <8 x half>, <8 x half>* %i65, align 4
+  %.splatinsert66 = insertelement <8 x half> undef, half %i45, i32 0
   %.splat67 = shufflevector <8 x half> %.splatinsert66, <8 x half> undef, <8 x i32> zeroinitializer
-  %67 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %66, <8 x half> %.splat67, <8 x half> %64)
+  %i67 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i66, <8 x half> %.splat67, <8 x half> %i64)
   %arrayidx68 = getelementptr inbounds half, half* %pSamples.0.pn202, i32 15
-  %68 = bitcast half* %arrayidx68 to <8 x half>*
-  %69 = load <8 x half>, <8 x half>* %68, align 4
-  %.splatinsert69 = insertelement <8 x half> undef, half %46, i32 0
+  %i68 = bitcast half* %arrayidx68 to <8 x half>*
+  %i69 = load <8 x half>, <8 x half>* %i68, align 4
+  %.splatinsert69 = insertelement <8 x half> undef, half %i46, i32 0
   %.splat70 = shufflevector <8 x half> %.splatinsert69, <8 x half> undef, <8 x i32> zeroinitializer
-  %70 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %69, <8 x half> %.splat70, <8 x half> %67)
+  %i70 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i69, <8 x half> %.splat70, <8 x half> %i67)
   %inc = add nuw nsw i32 %i.0204, 1
   %pCoeffsCur.0 = getelementptr inbounds half, half* %pCoeffsCur.0206, i32 8
   %pSamples.1 = getelementptr inbounds half, half* %pSamples.1207, i32 8
@@ -1374,23 +1374,23 @@ for.body:                                         ; preds = %while.body, %for.bo
   br i1 %exitcond, label %for.end, label %for.body
 
 for.end:                                          ; preds = %for.body, %while.body
-  %vecAcc0.0.lcssa = phi <8 x half> [ %38, %while.body ], [ %70, %for.body ]
+  %vecAcc0.0.lcssa = phi <8 x half> [ %i38, %while.body ], [ %i70, %for.body ]
   %pCoeffsCur.0.lcssa = phi half* [ %pCoeffsCur.0199, %while.body ], [ %pCoeffsCur.0, %for.body ]
   %pSamples.1.lcssa = phi half* [ %pSamples.1200, %while.body ], [ %pSamples.1, %for.body ]
   br i1 %cmp74210, label %while.end, label %while.body76
 
-while.body76:                                     ; preds = %for.end, %while.body76
+while.body76:                                     ; preds = %while.body76, %for.end
   %pCoeffsCur.1214 = phi half* [ %incdec.ptr77, %while.body76 ], [ %pCoeffsCur.0.lcssa, %for.end ]
-  %vecAcc0.1213 = phi <8 x half> [ %74, %while.body76 ], [ %vecAcc0.0.lcssa, %for.end ]
+  %vecAcc0.1213 = phi <8 x half> [ %i74, %while.body76 ], [ %vecAcc0.0.lcssa, %for.end ]
   %numCnt.0212 = phi i32 [ %dec, %while.body76 ], [ %and, %for.end ]
   %pSamples.2211 = phi half* [ %incdec.ptr80, %while.body76 ], [ %pSamples.1.lcssa, %for.end ]
   %incdec.ptr77 = getelementptr inbounds half, half* %pCoeffsCur.1214, i32 1
-  %71 = load half, half* %pCoeffsCur.1214, align 4
-  %72 = bitcast half* %pSamples.2211 to <8 x half>*
-  %73 = load <8 x half>, <8 x half>* %72, align 4
-  %.splatinsert78 = insertelement <8 x half> undef, half %71, i32 0
+  %i71 = load half, half* %pCoeffsCur.1214, align 4
+  %i72 = bitcast half* %pSamples.2211 to <8 x half>*
+  %i73 = load <8 x half>, <8 x half>* %i72, align 4
+  %.splatinsert78 = insertelement <8 x half> undef, half %i71, i32 0
   %.splat79 = shufflevector <8 x half> %.splatinsert78, <8 x half> undef, <8 x i32> zeroinitializer
-  %74 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %73, <8 x half> %.splat79, <8 x half> %vecAcc0.1213)
+  %i74 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i73, <8 x half> %.splat79, <8 x half> %vecAcc0.1213)
   %incdec.ptr80 = getelementptr inbounds half, half* %pSamples.2211, i32 1
   %dec = add nsw i32 %numCnt.0212, -1
   %cmp74 = icmp sgt i32 %numCnt.0212, 1
@@ -1402,9 +1402,9 @@ while.end.loopexit:                               ; preds = %while.body76
 
 while.end:                                        ; preds = %while.end.loopexit, %for.end
   %pSamples.2.lcssa = phi half* [ %pSamples.1.lcssa, %for.end ], [ %scevgep, %while.end.loopexit ]
-  %vecAcc0.1.lcssa = phi <8 x half> [ %vecAcc0.0.lcssa, %for.end ], [ %74, %while.end.loopexit ]
-  %75 = bitcast half* %pOutput.0218 to <8 x half>*
-  store <8 x half> %vecAcc0.1.lcssa, <8 x half>* %75, align 4
+  %vecAcc0.1.lcssa = phi <8 x half> [ %vecAcc0.0.lcssa, %for.end ], [ %i74, %while.end.loopexit ]
+  %i75 = bitcast half* %pOutput.0218 to <8 x half>*
+  store <8 x half> %vecAcc0.1.lcssa, <8 x half>* %i75, align 4
   %add.ptr81 = getelementptr inbounds half, half* %pOutput.0218, i32 4
   %add.ptr82 = getelementptr inbounds half, half* %pSamples.2.lcssa, i32 4
   %add.ptr83 = getelementptr inbounds half, half* %add.ptr82, i32 %idx.neg
@@ -1503,12 +1503,12 @@ define void @arm_biquad_cascade_df2T_f16(%struct.arm_biquad_cascade_df2T_instanc
 ; CHECK-NEXT:    .short 0x0000 @ half 0
 entry:
   %pState1 = getelementptr inbounds %struct.arm_biquad_cascade_df2T_instance_f16, %struct.arm_biquad_cascade_df2T_instance_f16* %S, i32 0, i32 1
-  %0 = load half*, half** %pState1, align 4
+  %i = load half*, half** %pState1, align 4
   %numStages = getelementptr inbounds %struct.arm_biquad_cascade_df2T_instance_f16, %struct.arm_biquad_cascade_df2T_instance_f16* %S, i32 0, i32 0
-  %1 = load i8, i8* %numStages, align 4
-  %conv = zext i8 %1 to i32
+  %i1 = load i8, i8* %numStages, align 4
+  %conv = zext i8 %i1 to i32
   %pCoeffs = getelementptr inbounds %struct.arm_biquad_cascade_df2T_instance_f16, %struct.arm_biquad_cascade_df2T_instance_f16* %S, i32 0, i32 2
-  %2 = load half*, half** %pCoeffs, align 4
+  %i2 = load half*, half** %pCoeffs, align 4
   %div = lshr i32 %blockSize, 1
   %cmp.not90 = icmp eq i32 %div, 0
   %and = and i32 %blockSize, 1
@@ -1517,92 +1517,92 @@ entry:
 
 do.body:                                          ; preds = %if.end, %entry
   %stage.0 = phi i32 [ %conv, %entry ], [ %dec23, %if.end ]
-  %pCurCoeffs.0 = phi half* [ %2, %entry ], [ %add.ptr2, %if.end ]
-  %pState.0 = phi half* [ %0, %entry ], [ %pState.1, %if.end ]
+  %pCurCoeffs.0 = phi half* [ %i2, %entry ], [ %add.ptr2, %if.end ]
+  %pState.0 = phi half* [ %i, %entry ], [ %pState.1, %if.end ]
   %pIn.0 = phi half* [ %pSrc, %entry ], [ %pDst, %if.end ]
-  %3 = bitcast half* %pCurCoeffs.0 to <8 x half>*
-  %4 = load <8 x half>, <8 x half>* %3, align 2
+  %i3 = bitcast half* %pCurCoeffs.0 to <8 x half>*
+  %i4 = load <8 x half>, <8 x half>* %i3, align 2
   %add.ptr = getelementptr inbounds half, half* %pCurCoeffs.0, i32 2
-  %5 = bitcast half* %add.ptr to <8 x half>*
-  %6 = load <8 x half>, <8 x half>* %5, align 2
+  %i5 = bitcast half* %add.ptr to <8 x half>*
+  %i6 = load <8 x half>, <8 x half>* %i5, align 2
   %add.ptr2 = getelementptr inbounds half, half* %pCurCoeffs.0, i32 5
-  %7 = bitcast half* %pState.0 to <8 x half>*
-  %8 = load <8 x half>, <8 x half>* %7, align 2
-  %9 = shufflevector <8 x half> %8, <8 x half> <half poison, half poison, half 0xH0000, half 0xH0000, half poison, half poison, half poison, half poison>, <8 x i32> <i32 0, i32 1, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7>
-  %10 = bitcast <8 x half> %4 to <8 x i16>
-  %11 = tail call { i32, <8 x i16> } @llvm.arm.mve.vshlc.v8i16(<8 x i16> %10, i32 0, i32 16)
-  %12 = extractvalue { i32, <8 x i16> } %11, 0
-  %13 = extractvalue { i32, <8 x i16> } %11, 1
-  %14 = bitcast <8 x i16> %13 to <8 x half>
-  %15 = bitcast <8 x half> %6 to <8 x i16>
-  %16 = tail call { i32, <8 x i16> } @llvm.arm.mve.vshlc.v8i16(<8 x i16> %15, i32 %12, i32 16)
-  %17 = extractvalue { i32, <8 x i16> } %16, 1
-  %18 = bitcast <8 x i16> %17 to <8 x half>
+  %i7 = bitcast half* %pState.0 to <8 x half>*
+  %i8 = load <8 x half>, <8 x half>* %i7, align 2
+  %i9 = shufflevector <8 x half> %i8, <8 x half> <half poison, half poison, half 0xH0000, half 0xH0000, half poison, half poison, half poison, half poison>, <8 x i32> <i32 0, i32 1, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7>
+  %i10 = bitcast <8 x half> %i4 to <8 x i16>
+  %i11 = tail call { i32, <8 x i16> } @llvm.arm.mve.vshlc.v8i16(<8 x i16> %i10, i32 0, i32 16)
+  %i12 = extractvalue { i32, <8 x i16> } %i11, 0
+  %i13 = extractvalue { i32, <8 x i16> } %i11, 1
+  %i14 = bitcast <8 x i16> %i13 to <8 x half>
+  %i15 = bitcast <8 x half> %i6 to <8 x i16>
+  %i16 = tail call { i32, <8 x i16> } @llvm.arm.mve.vshlc.v8i16(<8 x i16> %i15, i32 %i12, i32 16)
+  %i17 = extractvalue { i32, <8 x i16> } %i16, 1
+  %i18 = bitcast <8 x i16> %i17 to <8 x half>
   br i1 %cmp.not90, label %while.end, label %while.body
 
-while.body:                                       ; preds = %do.body, %while.body
+while.body:                                       ; preds = %while.body, %do.body
   %pIn.194 = phi half* [ %incdec.ptr4, %while.body ], [ %pIn.0, %do.body ]
-  %state.093 = phi <8 x half> [ %30, %while.body ], [ %9, %do.body ]
+  %state.093 = phi <8 x half> [ %i30, %while.body ], [ %i9, %do.body ]
   %pOut.192 = phi half* [ %incdec.ptr12, %while.body ], [ %pDst, %do.body ]
   %sample.091 = phi i32 [ %dec, %while.body ], [ %div, %do.body ]
   %incdec.ptr = getelementptr inbounds half, half* %pIn.194, i32 1
-  %19 = load half, half* %pIn.194, align 2
+  %i19 = load half, half* %pIn.194, align 2
   %incdec.ptr4 = getelementptr inbounds half, half* %pIn.194, i32 2
-  %20 = load half, half* %incdec.ptr, align 2
-  %.splatinsert = insertelement <8 x half> poison, half %19, i32 0
+  %i20 = load half, half* %incdec.ptr, align 2
+  %.splatinsert = insertelement <8 x half> poison, half %i19, i32 0
   %.splat = shufflevector <8 x half> %.splatinsert, <8 x half> poison, <8 x i32> zeroinitializer
-  %21 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %4, <8 x half> %.splat, <8 x half> %state.093)
-  %22 = extractelement <8 x half> %21, i32 0
-  %.splat6 = shufflevector <8 x half> %21, <8 x half> poison, <8 x i32> zeroinitializer
-  %23 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %6, <8 x half> %.splat6, <8 x half> %21)
-  %24 = insertelement <8 x half> %23, half 0xH0000, i32 3
-  %.splatinsert7 = insertelement <8 x half> poison, half %20, i32 0
+  %i21 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i4, <8 x half> %.splat, <8 x half> %state.093)
+  %i22 = extractelement <8 x half> %i21, i32 0
+  %.splat6 = shufflevector <8 x half> %i21, <8 x half> poison, <8 x i32> zeroinitializer
+  %i23 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i6, <8 x half> %.splat6, <8 x half> %i21)
+  %i24 = insertelement <8 x half> %i23, half 0xH0000, i32 3
+  %.splatinsert7 = insertelement <8 x half> poison, half %i20, i32 0
   %.splat8 = shufflevector <8 x half> %.splatinsert7, <8 x half> poison, <8 x i32> zeroinitializer
-  %25 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %14, <8 x half> %.splat8, <8 x half> %24)
-  %26 = extractelement <8 x half> %25, i32 1
-  %.splat10 = shufflevector <8 x half> %25, <8 x half> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
-  %27 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %18, <8 x half> %.splat10, <8 x half> %25)
-  %28 = shufflevector <8 x half> %27, <8 x half> undef, <8 x i32> <i32 2, i32 undef, i32 undef, i32 3, i32 4, i32 5, i32 6, i32 7>
-  %29 = insertelement <8 x half> %28, half 0xH0000, i32 2
-  %30 = shufflevector <8 x half> %29, <8 x half> %27, <8 x i32> <i32 0, i32 11, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %i25 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i14, <8 x half> %.splat8, <8 x half> %i24)
+  %i26 = extractelement <8 x half> %i25, i32 1
+  %.splat10 = shufflevector <8 x half> %i25, <8 x half> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %i27 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i18, <8 x half> %.splat10, <8 x half> %i25)
+  %i28 = shufflevector <8 x half> %i27, <8 x half> undef, <8 x i32> <i32 2, i32 undef, i32 undef, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %i29 = insertelement <8 x half> %i28, half 0xH0000, i32 2
+  %i30 = shufflevector <8 x half> %i29, <8 x half> %i27, <8 x i32> <i32 0, i32 11, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %incdec.ptr11 = getelementptr inbounds half, half* %pOut.192, i32 1
-  store half %22, half* %pOut.192, align 2
+  store half %i22, half* %pOut.192, align 2
   %incdec.ptr12 = getelementptr inbounds half, half* %pOut.192, i32 2
-  store half %26, half* %incdec.ptr11, align 2
+  store half %i26, half* %incdec.ptr11, align 2
   %dec = add nsw i32 %sample.091, -1
   %cmp.not = icmp eq i32 %dec, 0
   br i1 %cmp.not, label %while.end, label %while.body
 
 while.end:                                        ; preds = %while.body, %do.body
   %pOut.1.lcssa = phi half* [ %pDst, %do.body ], [ %incdec.ptr12, %while.body ]
-  %state.0.lcssa = phi <8 x half> [ %9, %do.body ], [ %30, %while.body ]
+  %state.0.lcssa = phi <8 x half> [ %i9, %do.body ], [ %i30, %while.body ]
   %pIn.1.lcssa = phi half* [ %pIn.0, %do.body ], [ %incdec.ptr4, %while.body ]
   br i1 %tobool.not, label %if.else, label %if.then
 
 if.then:                                          ; preds = %while.end
-  %31 = load half, half* %pIn.1.lcssa, align 2
-  %.splatinsert14 = insertelement <8 x half> poison, half %31, i32 0
+  %i31 = load half, half* %pIn.1.lcssa, align 2
+  %.splatinsert14 = insertelement <8 x half> poison, half %i31, i32 0
   %.splat15 = shufflevector <8 x half> %.splatinsert14, <8 x half> poison, <8 x i32> zeroinitializer
-  %32 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %4, <8 x half> %.splat15, <8 x half> %state.0.lcssa)
-  %33 = extractelement <8 x half> %32, i32 0
-  %.splat17 = shufflevector <8 x half> %32, <8 x half> poison, <8 x i32> zeroinitializer
-  %34 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %6, <8 x half> %.splat17, <8 x half> %32)
-  store half %33, half* %pOut.1.lcssa, align 2
-  %35 = extractelement <8 x half> %34, i32 1
-  store half %35, half* %pState.0, align 2
-  %36 = extractelement <8 x half> %34, i32 2
+  %i32 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i4, <8 x half> %.splat15, <8 x half> %state.0.lcssa)
+  %i33 = extractelement <8 x half> %i32, i32 0
+  %.splat17 = shufflevector <8 x half> %i32, <8 x half> poison, <8 x i32> zeroinitializer
+  %i34 = tail call fast <8 x half> @llvm.fma.v8f16(<8 x half> %i6, <8 x half> %.splat17, <8 x half> %i32)
+  store half %i33, half* %pOut.1.lcssa, align 2
+  %i35 = extractelement <8 x half> %i34, i32 1
+  store half %i35, half* %pState.0, align 2
+  %i36 = extractelement <8 x half> %i34, i32 2
   br label %if.end
 
 if.else:                                          ; preds = %while.end
-  %37 = extractelement <8 x half> %state.0.lcssa, i32 0
-  store half %37, half* %pState.0, align 2
-  %38 = extractelement <8 x half> %state.0.lcssa, i32 1
+  %i37 = extractelement <8 x half> %state.0.lcssa, i32 0
+  store half %i37, half* %pState.0, align 2
+  %i38 = extractelement <8 x half> %state.0.lcssa, i32 1
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then
-  %.sink = phi half [ %38, %if.else ], [ %36, %if.then ]
-  %39 = getelementptr inbounds half, half* %pState.0, i32 1
-  store half %.sink, half* %39, align 2
+  %.sink = phi half [ %i38, %if.else ], [ %i36, %if.then ]
+  %i39 = getelementptr inbounds half, half* %pState.0, i32 1
+  store half %.sink, half* %i39, align 2
   %pState.1 = getelementptr inbounds half, half* %pState.0, i32 2
   %dec23 = add i32 %stage.0, -1
   %cmp24.not = icmp eq i32 %dec23, 0
@@ -1622,15 +1622,15 @@ define arm_aapcs_vfpcc half @vecAddAcrossF16Mve(<8 x half> %in) {
 ; CHECK-NEXT:    vadd.f16 s0, s0, s2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = shufflevector <8 x half> %in, <8 x half> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
-  %1 = fadd fast <8 x half> %0, %in
-  %2 = bitcast <8 x half> %1 to <4 x i32>
-  %3 = shufflevector <4 x i32> %2, <4 x i32> poison, <4 x i32> <i32 1, i32 undef, i32 3, i32 undef>
-  %4 = bitcast <4 x i32> %3 to <8 x half>
-  %5 = fadd fast <8 x half> %1, %4
-  %6 = extractelement <8 x half> %5, i32 0
-  %7 = extractelement <8 x half> %5, i32 4
-  %add = fadd fast half %6, %7
+  %i = shufflevector <8 x half> %in, <8 x half> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+  %i1 = fadd fast <8 x half> %i, %in
+  %i2 = bitcast <8 x half> %i1 to <4 x i32>
+  %i3 = shufflevector <4 x i32> %i2, <4 x i32> poison, <4 x i32> <i32 1, i32 undef, i32 3, i32 undef>
+  %i4 = bitcast <4 x i32> %i3 to <8 x half>
+  %i5 = fadd fast <8 x half> %i1, %i4
+  %i6 = extractelement <8 x half> %i5, i32 0
+  %i7 = extractelement <8 x half> %i5, i32 4
+  %add = fadd fast half %i6, %i7
   ret half %add
 }
 

diff  --git a/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll b/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
index bedbae10213d2..e5f831cd49638 100644
--- a/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
@@ -19,8 +19,8 @@ define arm_aapcs_vfpcc void @test_fadd(float* noalias nocapture readonly %A, flo
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp18 = icmp sgt i32 %n, 0
   br i1 %cmp18, label %vector.ph, label %for.cond.cleanup
@@ -32,16 +32,16 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds float, float* %A, i32 %index
-  %2 = bitcast float* %1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %2, align 4
-  %3 = fadd fast <4 x float> %wide.load, %broadcast.splat11
-  %4 = getelementptr inbounds float, float* %C, i32 %index
-  %5 = bitcast float* %4 to <4 x float>*
-  store <4 x float> %3, <4 x float>* %5, align 4
+  %i1 = getelementptr inbounds float, float* %A, i32 %index
+  %i2 = bitcast float* %i1 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
+  %i3 = fadd fast <4 x float> %wide.load, %broadcast.splat11
+  %i4 = getelementptr inbounds float, float* %C, i32 %index
+  %i5 = bitcast float* %i4 to <4 x float>*
+  store <4 x float> %i3, <4 x float>* %i5, align 4
   %index.next = add i32 %index, 4
-  %6 = icmp eq i32 %index.next, %n
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %i6 = icmp eq i32 %index.next, %n
+  br i1 %i6, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -65,8 +65,8 @@ define arm_aapcs_vfpcc void @test_fadd_r(float* noalias nocapture readonly %A, f
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp18 = icmp sgt i32 %n, 0
   br i1 %cmp18, label %vector.ph, label %for.cond.cleanup
@@ -78,16 +78,16 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds float, float* %A, i32 %index
-  %2 = bitcast float* %1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %2, align 4
-  %3 = fadd fast <4 x float> %broadcast.splat11, %wide.load
-  %4 = getelementptr inbounds float, float* %C, i32 %index
-  %5 = bitcast float* %4 to <4 x float>*
-  store <4 x float> %3, <4 x float>* %5, align 4
+  %i1 = getelementptr inbounds float, float* %A, i32 %index
+  %i2 = bitcast float* %i1 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
+  %i3 = fadd fast <4 x float> %broadcast.splat11, %wide.load
+  %i4 = getelementptr inbounds float, float* %C, i32 %index
+  %i5 = bitcast float* %i4 to <4 x float>*
+  store <4 x float> %i3, <4 x float>* %i5, align 4
   %index.next = add i32 %index, 4
-  %6 = icmp eq i32 %index.next, %n
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %i6 = icmp eq i32 %index.next, %n
+  br i1 %i6, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -111,8 +111,8 @@ define arm_aapcs_vfpcc void @test_fmul(float* noalias nocapture readonly %A, flo
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp18 = icmp sgt i32 %n, 0
   br i1 %cmp18, label %vector.ph, label %for.cond.cleanup
@@ -124,16 +124,16 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds float, float* %A, i32 %index
-  %2 = bitcast float* %1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %2, align 4
-  %3 = fmul fast <4 x float> %wide.load, %broadcast.splat11
-  %4 = getelementptr inbounds float, float* %C, i32 %index
-  %5 = bitcast float* %4 to <4 x float>*
-  store <4 x float> %3, <4 x float>* %5, align 4
+  %i1 = getelementptr inbounds float, float* %A, i32 %index
+  %i2 = bitcast float* %i1 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
+  %i3 = fmul fast <4 x float> %wide.load, %broadcast.splat11
+  %i4 = getelementptr inbounds float, float* %C, i32 %index
+  %i5 = bitcast float* %i4 to <4 x float>*
+  store <4 x float> %i3, <4 x float>* %i5, align 4
   %index.next = add i32 %index, 4
-  %6 = icmp eq i32 %index.next, %n
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %i6 = icmp eq i32 %index.next, %n
+  br i1 %i6, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -157,8 +157,8 @@ define arm_aapcs_vfpcc void @test_fmul_r(float* noalias nocapture readonly %A, f
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp18 = icmp sgt i32 %n, 0
   br i1 %cmp18, label %vector.ph, label %for.cond.cleanup
@@ -170,16 +170,16 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds float, float* %A, i32 %index
-  %2 = bitcast float* %1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %2, align 4
-  %3 = fmul fast <4 x float> %broadcast.splat11, %wide.load
-  %4 = getelementptr inbounds float, float* %C, i32 %index
-  %5 = bitcast float* %4 to <4 x float>*
-  store <4 x float> %3, <4 x float>* %5, align 4
+  %i1 = getelementptr inbounds float, float* %A, i32 %index
+  %i2 = bitcast float* %i1 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
+  %i3 = fmul fast <4 x float> %broadcast.splat11, %wide.load
+  %i4 = getelementptr inbounds float, float* %C, i32 %index
+  %i5 = bitcast float* %i4 to <4 x float>*
+  store <4 x float> %i3, <4 x float>* %i5, align 4
   %index.next = add i32 %index, 4
-  %6 = icmp eq i32 %index.next, %n
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %i6 = icmp eq i32 %index.next, %n
+  br i1 %i6, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -203,8 +203,8 @@ define arm_aapcs_vfpcc void @test_fsub(float* noalias nocapture readonly %A, flo
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp18 = icmp sgt i32 %n, 0
   br i1 %cmp18, label %vector.ph, label %for.cond.cleanup
@@ -216,16 +216,16 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds float, float* %A, i32 %index
-  %2 = bitcast float* %1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %2, align 4
-  %3 = fsub fast <4 x float> %wide.load, %broadcast.splat11
-  %4 = getelementptr inbounds float, float* %C, i32 %index
-  %5 = bitcast float* %4 to <4 x float>*
-  store <4 x float> %3, <4 x float>* %5, align 4
+  %i1 = getelementptr inbounds float, float* %A, i32 %index
+  %i2 = bitcast float* %i1 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
+  %i3 = fsub fast <4 x float> %wide.load, %broadcast.splat11
+  %i4 = getelementptr inbounds float, float* %C, i32 %index
+  %i5 = bitcast float* %i4 to <4 x float>*
+  store <4 x float> %i3, <4 x float>* %i5, align 4
   %index.next = add i32 %index, 4
-  %6 = icmp eq i32 %index.next, %n
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %i6 = icmp eq i32 %index.next, %n
+  br i1 %i6, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -250,8 +250,8 @@ define arm_aapcs_vfpcc void @test_fsub_r(float* noalias nocapture readonly %A, f
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp18 = icmp sgt i32 %n, 0
   br i1 %cmp18, label %vector.ph, label %for.cond.cleanup
@@ -263,16 +263,16 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds float, float* %A, i32 %index
-  %2 = bitcast float* %1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %2, align 4
-  %3 = fsub fast <4 x float> %broadcast.splat11, %wide.load
-  %4 = getelementptr inbounds float, float* %C, i32 %index
-  %5 = bitcast float* %4 to <4 x float>*
-  store <4 x float> %3, <4 x float>* %5, align 4
+  %i1 = getelementptr inbounds float, float* %A, i32 %index
+  %i2 = bitcast float* %i1 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
+  %i3 = fsub fast <4 x float> %broadcast.splat11, %wide.load
+  %i4 = getelementptr inbounds float, float* %C, i32 %index
+  %i5 = bitcast float* %i4 to <4 x float>*
+  store <4 x float> %i3, <4 x float>* %i5, align 4
   %index.next = add i32 %index, 4
-  %6 = icmp eq i32 %index.next, %n
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %i6 = icmp eq i32 %index.next, %n
+  br i1 %i6, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -298,8 +298,8 @@ define arm_aapcs_vfpcc void @test_fmas(float* noalias nocapture readonly %A, flo
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp110 = icmp sgt i32 %n, 0
   br i1 %cmp110, label %vector.ph, label %for.cond.cleanup
@@ -311,20 +311,20 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds float, float* %A, i32 %index
-  %2 = bitcast float* %1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %2, align 4
-  %3 = getelementptr inbounds float, float* %B, i32 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.load12 = load <4 x float>, <4 x float>* %4, align 4
-  %5 = fmul fast <4 x float> %wide.load12, %wide.load
-  %6 = fadd fast <4 x float> %5, %broadcast.splat14
-  %7 = getelementptr inbounds float, float* %D, i32 %index
-  %8 = bitcast float* %7 to <4 x float>*
-  store <4 x float> %6, <4 x float>* %8, align 4
+  %i1 = getelementptr inbounds float, float* %A, i32 %index
+  %i2 = bitcast float* %i1 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
+  %i3 = getelementptr inbounds float, float* %B, i32 %index
+  %i4 = bitcast float* %i3 to <4 x float>*
+  %wide.load12 = load <4 x float>, <4 x float>* %i4, align 4
+  %i5 = fmul fast <4 x float> %wide.load12, %wide.load
+  %i6 = fadd fast <4 x float> %i5, %broadcast.splat14
+  %i7 = getelementptr inbounds float, float* %D, i32 %index
+  %i8 = bitcast float* %i7 to <4 x float>*
+  store <4 x float> %i6, <4 x float>* %i8, align 4
   %index.next = add i32 %index, 4
-  %9 = icmp eq i32 %index.next, %n
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %i9 = icmp eq i32 %index.next, %n
+  br i1 %i9, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -349,8 +349,8 @@ define arm_aapcs_vfpcc void @test_fmas_r(float* noalias nocapture readonly %A, f
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp110 = icmp sgt i32 %n, 0
   br i1 %cmp110, label %vector.ph, label %for.cond.cleanup
@@ -362,20 +362,20 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds float, float* %A, i32 %index
-  %2 = bitcast float* %1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %2, align 4
-  %3 = getelementptr inbounds float, float* %B, i32 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.load12 = load <4 x float>, <4 x float>* %4, align 4
-  %5 = fmul fast <4 x float> %wide.load12, %wide.load
-  %6 = fadd fast <4 x float> %broadcast.splat14, %5
-  %7 = getelementptr inbounds float, float* %D, i32 %index
-  %8 = bitcast float* %7 to <4 x float>*
-  store <4 x float> %6, <4 x float>* %8, align 4
+  %i1 = getelementptr inbounds float, float* %A, i32 %index
+  %i2 = bitcast float* %i1 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
+  %i3 = getelementptr inbounds float, float* %B, i32 %index
+  %i4 = bitcast float* %i3 to <4 x float>*
+  %wide.load12 = load <4 x float>, <4 x float>* %i4, align 4
+  %i5 = fmul fast <4 x float> %wide.load12, %wide.load
+  %i6 = fadd fast <4 x float> %broadcast.splat14, %i5
+  %i7 = getelementptr inbounds float, float* %D, i32 %index
+  %i8 = bitcast float* %i7 to <4 x float>*
+  store <4 x float> %i6, <4 x float>* %i8, align 4
   %index.next = add i32 %index, 4
-  %9 = icmp eq i32 %index.next, %n
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %i9 = icmp eq i32 %index.next, %n
+  br i1 %i9, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -400,8 +400,8 @@ define arm_aapcs_vfpcc void @test_fma(float* noalias nocapture readonly %A, floa
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp110 = icmp sgt i32 %n, 0
   br i1 %cmp110, label %vector.ph, label %for.cond.cleanup
@@ -413,20 +413,20 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds float, float* %A, i32 %index
-  %2 = bitcast float* %1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %2, align 4
-  %3 = fmul fast <4 x float> %wide.load, %broadcast.splat13
-  %4 = getelementptr inbounds float, float* %B, i32 %index
-  %5 = bitcast float* %4 to <4 x float>*
-  %wide.load14 = load <4 x float>, <4 x float>* %5, align 4
-  %6 = fadd fast <4 x float> %3, %wide.load14
-  %7 = getelementptr inbounds float, float* %D, i32 %index
-  %8 = bitcast float* %7 to <4 x float>*
-  store <4 x float> %6, <4 x float>* %8, align 4
+  %i1 = getelementptr inbounds float, float* %A, i32 %index
+  %i2 = bitcast float* %i1 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
+  %i3 = fmul fast <4 x float> %wide.load, %broadcast.splat13
+  %i4 = getelementptr inbounds float, float* %B, i32 %index
+  %i5 = bitcast float* %i4 to <4 x float>*
+  %wide.load14 = load <4 x float>, <4 x float>* %i5, align 4
+  %i6 = fadd fast <4 x float> %i3, %wide.load14
+  %i7 = getelementptr inbounds float, float* %D, i32 %index
+  %i8 = bitcast float* %i7 to <4 x float>*
+  store <4 x float> %i6, <4 x float>* %i8, align 4
   %index.next = add i32 %index, 4
-  %9 = icmp eq i32 %index.next, %n
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %i9 = icmp eq i32 %index.next, %n
+  br i1 %i9, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -451,8 +451,8 @@ define arm_aapcs_vfpcc void @test_fma_r(float* noalias nocapture readonly %A, fl
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp110 = icmp sgt i32 %n, 0
   br i1 %cmp110, label %vector.ph, label %for.cond.cleanup
@@ -464,20 +464,20 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds float, float* %A, i32 %index
-  %2 = bitcast float* %1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %2, align 4
-  %3 = fmul fast <4 x float> %broadcast.splat13, %wide.load
-  %4 = getelementptr inbounds float, float* %B, i32 %index
-  %5 = bitcast float* %4 to <4 x float>*
-  %wide.load14 = load <4 x float>, <4 x float>* %5, align 4
-  %6 = fadd fast <4 x float> %3, %wide.load14
-  %7 = getelementptr inbounds float, float* %D, i32 %index
-  %8 = bitcast float* %7 to <4 x float>*
-  store <4 x float> %6, <4 x float>* %8, align 4
+  %i1 = getelementptr inbounds float, float* %A, i32 %index
+  %i2 = bitcast float* %i1 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
+  %i3 = fmul fast <4 x float> %broadcast.splat13, %wide.load
+  %i4 = getelementptr inbounds float, float* %B, i32 %index
+  %i5 = bitcast float* %i4 to <4 x float>*
+  %wide.load14 = load <4 x float>, <4 x float>* %i5, align 4
+  %i6 = fadd fast <4 x float> %i3, %wide.load14
+  %i7 = getelementptr inbounds float, float* %D, i32 %index
+  %i8 = bitcast float* %i7 to <4 x float>*
+  store <4 x float> %i6, <4 x float>* %i8, align 4
   %index.next = add i32 %index, 4
-  %9 = icmp eq i32 %index.next, %n
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %i9 = icmp eq i32 %index.next, %n
+  br i1 %i9, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -506,8 +506,8 @@ define arm_aapcs_vfpcc void @test_fmss(float* noalias nocapture readonly %A, flo
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp110 = icmp sgt i32 %n, 0
   br i1 %cmp110, label %vector.ph, label %for.cond.cleanup
@@ -519,20 +519,20 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds float, float* %A, i32 %index
-  %2 = bitcast float* %1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %2, align 4
-  %3 = getelementptr inbounds float, float* %B, i32 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.load12 = load <4 x float>, <4 x float>* %4, align 4
-  %5 = fmul fast <4 x float> %wide.load12, %wide.load
-  %6 = fsub fast <4 x float> %5, %broadcast.splat14
-  %7 = getelementptr inbounds float, float* %D, i32 %index
-  %8 = bitcast float* %7 to <4 x float>*
-  store <4 x float> %6, <4 x float>* %8, align 4
+  %i1 = getelementptr inbounds float, float* %A, i32 %index
+  %i2 = bitcast float* %i1 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
+  %i3 = getelementptr inbounds float, float* %B, i32 %index
+  %i4 = bitcast float* %i3 to <4 x float>*
+  %wide.load12 = load <4 x float>, <4 x float>* %i4, align 4
+  %i5 = fmul fast <4 x float> %wide.load12, %wide.load
+  %i6 = fsub fast <4 x float> %i5, %broadcast.splat14
+  %i7 = getelementptr inbounds float, float* %D, i32 %index
+  %i8 = bitcast float* %i7 to <4 x float>*
+  store <4 x float> %i6, <4 x float>* %i8, align 4
   %index.next = add i32 %index, 4
-  %9 = icmp eq i32 %index.next, %n
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %i9 = icmp eq i32 %index.next, %n
+  br i1 %i9, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -559,8 +559,8 @@ define arm_aapcs_vfpcc void @test_fmss_r(float* noalias nocapture readonly %A, f
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp110 = icmp sgt i32 %n, 0
   br i1 %cmp110, label %vector.ph, label %for.cond.cleanup
@@ -572,20 +572,20 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds float, float* %A, i32 %index
-  %2 = bitcast float* %1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %2, align 4
-  %3 = getelementptr inbounds float, float* %B, i32 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.load12 = load <4 x float>, <4 x float>* %4, align 4
-  %5 = fmul fast <4 x float> %wide.load12, %wide.load
-  %6 = fsub fast <4 x float> %broadcast.splat14, %5
-  %7 = getelementptr inbounds float, float* %D, i32 %index
-  %8 = bitcast float* %7 to <4 x float>*
-  store <4 x float> %6, <4 x float>* %8, align 4
+  %i1 = getelementptr inbounds float, float* %A, i32 %index
+  %i2 = bitcast float* %i1 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
+  %i3 = getelementptr inbounds float, float* %B, i32 %index
+  %i4 = bitcast float* %i3 to <4 x float>*
+  %wide.load12 = load <4 x float>, <4 x float>* %i4, align 4
+  %i5 = fmul fast <4 x float> %wide.load12, %wide.load
+  %i6 = fsub fast <4 x float> %broadcast.splat14, %i5
+  %i7 = getelementptr inbounds float, float* %D, i32 %index
+  %i8 = bitcast float* %i7 to <4 x float>*
+  store <4 x float> %i6, <4 x float>* %i8, align 4
   %index.next = add i32 %index, 4
-  %9 = icmp eq i32 %index.next, %n
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %i9 = icmp eq i32 %index.next, %n
+  br i1 %i9, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -611,8 +611,8 @@ define arm_aapcs_vfpcc void @test_fms(float* noalias nocapture readonly %A, floa
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp110 = icmp sgt i32 %n, 0
   br i1 %cmp110, label %vector.ph, label %for.cond.cleanup
@@ -624,20 +624,20 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds float, float* %A, i32 %index
-  %2 = bitcast float* %1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %2, align 4
-  %3 = fmul fast <4 x float> %wide.load, %broadcast.splat13
-  %4 = getelementptr inbounds float, float* %B, i32 %index
-  %5 = bitcast float* %4 to <4 x float>*
-  %wide.load14 = load <4 x float>, <4 x float>* %5, align 4
-  %6 = fsub fast <4 x float> %3, %wide.load14
-  %7 = getelementptr inbounds float, float* %D, i32 %index
-  %8 = bitcast float* %7 to <4 x float>*
-  store <4 x float> %6, <4 x float>* %8, align 4
+  %i1 = getelementptr inbounds float, float* %A, i32 %index
+  %i2 = bitcast float* %i1 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
+  %i3 = fmul fast <4 x float> %wide.load, %broadcast.splat13
+  %i4 = getelementptr inbounds float, float* %B, i32 %index
+  %i5 = bitcast float* %i4 to <4 x float>*
+  %wide.load14 = load <4 x float>, <4 x float>* %i5, align 4
+  %i6 = fsub fast <4 x float> %i3, %wide.load14
+  %i7 = getelementptr inbounds float, float* %D, i32 %index
+  %i8 = bitcast float* %i7 to <4 x float>*
+  store <4 x float> %i6, <4 x float>* %i8, align 4
   %index.next = add i32 %index, 4
-  %9 = icmp eq i32 %index.next, %n
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %i9 = icmp eq i32 %index.next, %n
+  br i1 %i9, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -663,8 +663,8 @@ define arm_aapcs_vfpcc void @test_fms_r(float* noalias nocapture readonly %A, fl
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = and i32 %n, 7
-  %cmp = icmp eq i32 %0, 0
+  %i = and i32 %n, 7
+  %cmp = icmp eq i32 %i, 0
   tail call void @llvm.assume(i1 %cmp)
   %cmp110 = icmp sgt i32 %n, 0
   br i1 %cmp110, label %vector.ph, label %for.cond.cleanup
@@ -676,27 +676,27 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %1 = getelementptr inbounds float, float* %A, i32 %index
-  %2 = bitcast float* %1 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %2, align 4
-  %3 = fmul fast <4 x float> %broadcast.splat13, %wide.load
-  %4 = getelementptr inbounds float, float* %B, i32 %index
-  %5 = bitcast float* %4 to <4 x float>*
-  %wide.load14 = load <4 x float>, <4 x float>* %5, align 4
-  %6 = fsub fast <4 x float> %3, %wide.load14
-  %7 = getelementptr inbounds float, float* %D, i32 %index
-  %8 = bitcast float* %7 to <4 x float>*
-  store <4 x float> %6, <4 x float>* %8, align 4
+  %i1 = getelementptr inbounds float, float* %A, i32 %index
+  %i2 = bitcast float* %i1 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %i2, align 4
+  %i3 = fmul fast <4 x float> %broadcast.splat13, %wide.load
+  %i4 = getelementptr inbounds float, float* %B, i32 %index
+  %i5 = bitcast float* %i4 to <4 x float>*
+  %wide.load14 = load <4 x float>, <4 x float>* %i5, align 4
+  %i6 = fsub fast <4 x float> %i3, %wide.load14
+  %i7 = getelementptr inbounds float, float* %D, i32 %index
+  %i8 = bitcast float* %i7 to <4 x float>*
+  store <4 x float> %i6, <4 x float>* %i8, align 4
   %index.next = add i32 %index, 4
-  %9 = icmp eq i32 %index.next, %n
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %i9 = icmp eq i32 %index.next, %n
+  br i1 %i9, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
 
-define dso_local void @test_nested(float* noalias nocapture %pInT1, float* noalias nocapture readonly %pOutT1, float* noalias nocapture readonly %pPRT_in, float* noalias nocapture readnone %pPRT_pDst, i32 %numRows, i32 %numCols, i32 %l) local_unnamed_addr #0 {
+define dso_local void @test_nested(float* noalias nocapture %pInT1, float* noalias nocapture readonly %pOutT1, float* noalias nocapture readonly %pPRT_in, float* noalias nocapture readnone %pPRT_pDst, i32 %numRows, i32 %numCols, i32 %l) local_unnamed_addr {
 ; CHECK-LABEL: test_nested:
 ; CHECK:       @ %bb.0: @ %for.body.us.preheader
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -746,8 +746,8 @@ for.body.us:                                      ; preds = %for.cond6.for.end_c
   %pOutT1.addr.036.us = phi float* [ %incdec.ptr.us, %for.cond6.for.end_crit_edge.us ], [ %pOutT1, %for.body.us.preheader ]
   %pPRT_in.addr.035.us = phi float* [ %scevgep, %for.cond6.for.end_crit_edge.us ], [ %pPRT_in, %for.body.us.preheader ]
   %scevgep = getelementptr float, float* %pPRT_in.addr.035.us, i32 %numCols
-  %0 = load float, float* %pOutT1.addr.036.us, align 4
-  %broadcast.splatinsert47 = insertelement <4 x float> undef, float %0, i32 0
+  %i = load float, float* %pOutT1.addr.036.us, align 4
+  %broadcast.splatinsert47 = insertelement <4 x float> undef, float %i, i32 0
   %broadcast.splat48 = shufflevector <4 x float> %broadcast.splatinsert47, <4 x float> undef, <4 x i32> zeroinitializer
   br label %vector.body
 
@@ -755,16 +755,16 @@ vector.body:                                      ; preds = %vector.body, %for.b
   %index = phi i32 [ 0, %for.body.us ], [ %index.next, %vector.body ]
   %next.gep = getelementptr float, float* %pInT1.addr.038.us, i32 %index
   %next.gep45 = getelementptr float, float* %pPRT_in.addr.035.us, i32 %index
-  %1 = bitcast float* %next.gep to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %1, align 4
-  %2 = bitcast float* %next.gep45 to <4 x float>*
-  %wide.load46 = load <4 x float>, <4 x float>* %2, align 4
-  %3 = fmul fast <4 x float> %wide.load46, %broadcast.splat48
-  %4 = fsub fast <4 x float> %wide.load, %3
-  store <4 x float> %4, <4 x float>* %1, align 4
+  %i1 = bitcast float* %next.gep to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %i1, align 4
+  %i2 = bitcast float* %next.gep45 to <4 x float>*
+  %wide.load46 = load <4 x float>, <4 x float>* %i2, align 4
+  %i3 = fmul fast <4 x float> %wide.load46, %broadcast.splat48
+  %i4 = fsub fast <4 x float> %wide.load, %i3
+  store <4 x float> %i4, <4 x float>* %i1, align 4
   %index.next = add i32 %index, 4
-  %5 = icmp eq i32 %index.next, %numCols
-  br i1 %5, label %for.cond6.for.end_crit_edge.us, label %vector.body
+  %i5 = icmp eq i32 %index.next, %numCols
+  br i1 %i5, label %for.cond6.for.end_crit_edge.us, label %vector.body
 
 for.cond6.for.end_crit_edge.us:                   ; preds = %vector.body
   %incdec.ptr.us = getelementptr inbounds float, float* %pOutT1.addr.036.us, i32 1
@@ -871,74 +871,74 @@ define void @arm_fir_f32_1_4_mve(%struct.arm_fir_instance_f32* nocapture readonl
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
   %pState1 = getelementptr inbounds %struct.arm_fir_instance_f32, %struct.arm_fir_instance_f32* %S, i32 0, i32 1
-  %0 = load float*, float** %pState1, align 4
+  %i = load float*, float** %pState1, align 4
   %pCoeffs2 = getelementptr inbounds %struct.arm_fir_instance_f32, %struct.arm_fir_instance_f32* %S, i32 0, i32 2
-  %1 = load float*, float** %pCoeffs2, align 4
+  %i1 = load float*, float** %pCoeffs2, align 4
   %numTaps3 = getelementptr inbounds %struct.arm_fir_instance_f32, %struct.arm_fir_instance_f32* %S, i32 0, i32 0
-  %2 = load i16, i16* %numTaps3, align 4
-  %conv = zext i16 %2 to i32
+  %i2 = load i16, i16* %numTaps3, align 4
+  %conv = zext i16 %i2 to i32
   %sub = add nsw i32 %conv, -1
   %cmp = icmp ult i32 %sub, 4
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  %arrayidx = getelementptr inbounds float, float* %0, i32 %sub
-  %incdec.ptr = getelementptr inbounds float, float* %1, i32 1
-  %3 = load float, float* %1, align 4
-  %incdec.ptr6 = getelementptr inbounds float, float* %1, i32 2
-  %4 = load float, float* %incdec.ptr, align 4
-  %incdec.ptr7 = getelementptr inbounds float, float* %1, i32 3
-  %5 = load float, float* %incdec.ptr6, align 4
-  %6 = load float, float* %incdec.ptr7, align 4
+  %arrayidx = getelementptr inbounds float, float* %i, i32 %sub
+  %incdec.ptr = getelementptr inbounds float, float* %i1, i32 1
+  %i3 = load float, float* %i1, align 4
+  %incdec.ptr6 = getelementptr inbounds float, float* %i1, i32 2
+  %i4 = load float, float* %incdec.ptr, align 4
+  %incdec.ptr7 = getelementptr inbounds float, float* %i1, i32 3
+  %i5 = load float, float* %incdec.ptr6, align 4
+  %i6 = load float, float* %incdec.ptr7, align 4
   %shr = lshr i32 %blockSize, 2
   %cmp9146 = icmp eq i32 %shr, 0
-  %.pre161 = insertelement <4 x float> undef, float %3, i32 0
+  %.pre161 = insertelement <4 x float> undef, float %i3, i32 0
   %.pre162 = shufflevector <4 x float> %.pre161, <4 x float> undef, <4 x i32> zeroinitializer
-  %.pre163 = insertelement <4 x float> undef, float %4, i32 0
+  %.pre163 = insertelement <4 x float> undef, float %i4, i32 0
   %.pre164 = shufflevector <4 x float> %.pre163, <4 x float> undef, <4 x i32> zeroinitializer
-  %.pre165 = insertelement <4 x float> undef, float %5, i32 0
+  %.pre165 = insertelement <4 x float> undef, float %i5, i32 0
   %.pre166 = shufflevector <4 x float> %.pre165, <4 x float> undef, <4 x i32> zeroinitializer
-  %.pre167 = insertelement <4 x float> undef, float %6, i32 0
+  %.pre167 = insertelement <4 x float> undef, float %i6, i32 0
   %.pre168 = shufflevector <4 x float> %.pre167, <4 x float> undef, <4 x i32> zeroinitializer
   br i1 %cmp9146, label %while.end, label %while.body.lr.ph
 
 while.body.lr.ph:                                 ; preds = %if.then
-  %7 = and i32 %blockSize, -4
-  %scevgep158 = getelementptr float, float* %pDst, i32 %7
+  %i7 = and i32 %blockSize, -4
+  %scevgep158 = getelementptr float, float* %pDst, i32 %i7
   br label %while.body
 
-while.body:                                       ; preds = %while.body.lr.ph, %while.body
+while.body:                                       ; preds = %while.body, %while.body.lr.ph
   %pStateCur.0151 = phi float* [ %arrayidx, %while.body.lr.ph ], [ %add.ptr, %while.body ]
-  %pSamples.0150 = phi float* [ %0, %while.body.lr.ph ], [ %add.ptr24, %while.body ]
+  %pSamples.0150 = phi float* [ %i, %while.body.lr.ph ], [ %add.ptr24, %while.body ]
   %pOutput.0149 = phi float* [ %pDst, %while.body.lr.ph ], [ %add.ptr23, %while.body ]
   %pTempSrc.0148 = phi float* [ %pSrc, %while.body.lr.ph ], [ %add.ptr11, %while.body ]
   %blkCnt.0147 = phi i32 [ %shr, %while.body.lr.ph ], [ %dec, %while.body ]
-  %8 = bitcast float* %pTempSrc.0148 to <4 x float>*
-  %9 = load <4 x float>, <4 x float>* %8, align 4
-  %10 = bitcast float* %pStateCur.0151 to <4 x float>*
-  store <4 x float> %9, <4 x float>* %10, align 4
+  %i8 = bitcast float* %pTempSrc.0148 to <4 x float>*
+  %i9 = load <4 x float>, <4 x float>* %i8, align 4
+  %i10 = bitcast float* %pStateCur.0151 to <4 x float>*
+  store <4 x float> %i9, <4 x float>* %i10, align 4
   %add.ptr = getelementptr inbounds float, float* %pStateCur.0151, i32 4
   %add.ptr11 = getelementptr inbounds float, float* %pTempSrc.0148, i32 4
-  %11 = bitcast float* %pSamples.0150 to <4 x float>*
-  %12 = load <4 x float>, <4 x float>* %11, align 4
-  %13 = fmul fast <4 x float> %12, %.pre162
+  %i11 = bitcast float* %pSamples.0150 to <4 x float>*
+  %i12 = load <4 x float>, <4 x float>* %i11, align 4
+  %i13 = fmul fast <4 x float> %i12, %.pre162
   %arrayidx12 = getelementptr inbounds float, float* %pSamples.0150, i32 1
-  %14 = bitcast float* %arrayidx12 to <4 x float>*
-  %15 = load <4 x float>, <4 x float>* %14, align 4
-  %mul = fmul fast <4 x float> %15, %.pre164
-  %add = fadd fast <4 x float> %mul, %13
+  %i14 = bitcast float* %arrayidx12 to <4 x float>*
+  %i15 = load <4 x float>, <4 x float>* %i14, align 4
+  %mul = fmul fast <4 x float> %i15, %.pre164
+  %add = fadd fast <4 x float> %mul, %i13
   %arrayidx13 = getelementptr inbounds float, float* %pSamples.0150, i32 2
-  %16 = bitcast float* %arrayidx13 to <4 x float>*
-  %17 = load <4 x float>, <4 x float>* %16, align 4
-  %mul16 = fmul fast <4 x float> %17, %.pre166
+  %i16 = bitcast float* %arrayidx13 to <4 x float>*
+  %i17 = load <4 x float>, <4 x float>* %i16, align 4
+  %mul16 = fmul fast <4 x float> %i17, %.pre166
   %add17 = fadd fast <4 x float> %add, %mul16
   %arrayidx18 = getelementptr inbounds float, float* %pSamples.0150, i32 3
-  %18 = bitcast float* %arrayidx18 to <4 x float>*
-  %19 = load <4 x float>, <4 x float>* %18, align 4
-  %mul21 = fmul fast <4 x float> %19, %.pre168
+  %i18 = bitcast float* %arrayidx18 to <4 x float>*
+  %i19 = load <4 x float>, <4 x float>* %i18, align 4
+  %mul21 = fmul fast <4 x float> %i19, %.pre168
   %add22 = fadd fast <4 x float> %add17, %mul21
-  %20 = bitcast float* %pOutput.0149 to <4 x float>*
-  store <4 x float> %add22, <4 x float>* %20, align 4
+  %i20 = bitcast float* %pOutput.0149 to <4 x float>*
+  store <4 x float> %add22, <4 x float>* %i20, align 4
   %add.ptr23 = getelementptr inbounds float, float* %pOutput.0149, i32 4
   %add.ptr24 = getelementptr inbounds float, float* %pSamples.0150, i32 4
   %dec = add nsw i32 %blkCnt.0147, -1
@@ -946,65 +946,65 @@ while.body:                                       ; preds = %while.body.lr.ph, %
   br i1 %cmp9, label %while.end.loopexit, label %while.body
 
 while.end.loopexit:                               ; preds = %while.body
-  %scevgep157 = getelementptr float, float* %pSrc, i32 %7
-  %scevgep159 = getelementptr float, float* %0, i32 %7
+  %scevgep157 = getelementptr float, float* %pSrc, i32 %i7
+  %scevgep159 = getelementptr float, float* %i, i32 %i7
   br label %while.end
 
-while.end:                                        ; preds = %if.then, %while.end.loopexit
+while.end:                                        ; preds = %while.end.loopexit, %if.then
   %pTempSrc.0.lcssa = phi float* [ %scevgep157, %while.end.loopexit ], [ %pSrc, %if.then ]
   %pOutput.0.lcssa = phi float* [ %scevgep158, %while.end.loopexit ], [ %pDst, %if.then ]
-  %pSamples.0.lcssa = phi float* [ %scevgep159, %while.end.loopexit ], [ %0, %if.then ]
+  %pSamples.0.lcssa = phi float* [ %scevgep159, %while.end.loopexit ], [ %i, %if.then ]
   %pStateCur.0.lcssa = phi float* [ %add.ptr, %while.end.loopexit ], [ %arrayidx, %if.then ]
   %and = and i32 %blockSize, 3
-  %21 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %and)
-  %22 = bitcast float* %pTempSrc.0.lcssa to <4 x float>*
-  %23 = load <4 x float>, <4 x float>* %22, align 4
-  %24 = bitcast float* %pStateCur.0.lcssa to <4 x float>*
-  tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %23, <4 x float>* %24, i32 4, <4 x i1> %21)
-  %25 = bitcast float* %pSamples.0.lcssa to <4 x float>*
-  %26 = load <4 x float>, <4 x float>* %25, align 4
-  %27 = fmul fast <4 x float> %26, %.pre162
+  %i21 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %and)
+  %i22 = bitcast float* %pTempSrc.0.lcssa to <4 x float>*
+  %i23 = load <4 x float>, <4 x float>* %i22, align 4
+  %i24 = bitcast float* %pStateCur.0.lcssa to <4 x float>*
+  tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %i23, <4 x float>* %i24, i32 4, <4 x i1> %i21)
+  %i25 = bitcast float* %pSamples.0.lcssa to <4 x float>*
+  %i26 = load <4 x float>, <4 x float>* %i25, align 4
+  %i27 = fmul fast <4 x float> %i26, %.pre162
   %arrayidx29 = getelementptr inbounds float, float* %pSamples.0.lcssa, i32 1
-  %28 = bitcast float* %arrayidx29 to <4 x float>*
-  %29 = load <4 x float>, <4 x float>* %28, align 4
-  %mul32 = fmul fast <4 x float> %29, %.pre164
-  %add33 = fadd fast <4 x float> %mul32, %27
+  %i28 = bitcast float* %arrayidx29 to <4 x float>*
+  %i29 = load <4 x float>, <4 x float>* %i28, align 4
+  %mul32 = fmul fast <4 x float> %i29, %.pre164
+  %add33 = fadd fast <4 x float> %mul32, %i27
   %arrayidx34 = getelementptr inbounds float, float* %pSamples.0.lcssa, i32 2
-  %30 = bitcast float* %arrayidx34 to <4 x float>*
-  %31 = load <4 x float>, <4 x float>* %30, align 4
-  %mul37 = fmul fast <4 x float> %31, %.pre166
+  %i30 = bitcast float* %arrayidx34 to <4 x float>*
+  %i31 = load <4 x float>, <4 x float>* %i30, align 4
+  %mul37 = fmul fast <4 x float> %i31, %.pre166
   %add38 = fadd fast <4 x float> %add33, %mul37
   %arrayidx39 = getelementptr inbounds float, float* %pSamples.0.lcssa, i32 3
-  %32 = bitcast float* %arrayidx39 to <4 x float>*
-  %33 = load <4 x float>, <4 x float>* %32, align 4
-  %mul42 = fmul fast <4 x float> %33, %.pre168
+  %i32 = bitcast float* %arrayidx39 to <4 x float>*
+  %i33 = load <4 x float>, <4 x float>* %i32, align 4
+  %mul42 = fmul fast <4 x float> %i33, %.pre168
   %add43 = fadd fast <4 x float> %add38, %mul42
-  %34 = bitcast float* %pOutput.0.lcssa to <4 x float>*
-  tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %add43, <4 x float>* %34, i32 4, <4 x i1> %21)
+  %i34 = bitcast float* %pOutput.0.lcssa to <4 x float>*
+  tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %add43, <4 x float>* %i34, i32 4, <4 x i1> %i21)
   %.pre = load float*, float** %pState1, align 4
   br label %if.end
 
 if.end:                                           ; preds = %while.end, %entry
-  %35 = phi float* [ %.pre, %while.end ], [ %0, %entry ]
-  %arrayidx45 = getelementptr inbounds float, float* %35, i32 %blockSize
+  %i35 = phi float* [ %.pre, %while.end ], [ %i, %entry ]
+  %arrayidx45 = getelementptr inbounds float, float* %i35, i32 %blockSize
   %shr47 = lshr i32 %conv, 2
   %cmp49141 = icmp eq i32 %shr47, 0
   br i1 %cmp49141, label %while.end55, label %while.body51.preheader
 
 while.body51.preheader:                           ; preds = %if.end
-  %36 = and i32 %conv, 65532
-  %37 = add i32 %36, %blockSize
-  %scevgep = getelementptr float, float* %35, i32 %37
+  %i36 = and i32 %conv, 65532
+  %i37 = add i32 %i36, %blockSize
+  %scevgep = getelementptr float, float* %i35, i32 %i37
   br label %while.body51
 
-while.body51:                                     ; preds = %while.body51.preheader, %while.body51
+while.body51:                                     ; preds = %while.body51, %while.body51.preheader
   %pTempSrc.1144 = phi float* [ %add.ptr52, %while.body51 ], [ %arrayidx45, %while.body51.preheader ]
-  %pTempDest.0143 = phi float* [ %add.ptr53, %while.body51 ], [ %35, %while.body51.preheader ]
+  %pTempDest.0143 = phi float* [ %add.ptr53, %while.body51 ], [ %i35, %while.body51.preheader ]
   %blkCnt.1142 = phi i32 [ %dec54, %while.body51 ], [ %shr47, %while.body51.preheader ]
-  %38 = bitcast float* %pTempSrc.1144 to <4 x float>*
-  %39 = load <4 x float>, <4 x float>* %38, align 4
-  %40 = bitcast float* %pTempDest.0143 to <4 x float>*
-  store <4 x float> %39, <4 x float>* %40, align 4
+  %i38 = bitcast float* %pTempSrc.1144 to <4 x float>*
+  %i39 = load <4 x float>, <4 x float>* %i38, align 4
+  %i40 = bitcast float* %pTempDest.0143 to <4 x float>*
+  store <4 x float> %i39, <4 x float>* %i40, align 4
   %add.ptr52 = getelementptr inbounds float, float* %pTempSrc.1144, i32 4
   %add.ptr53 = getelementptr inbounds float, float* %pTempDest.0143, i32 4
   %dec54 = add nsw i32 %blkCnt.1142, -1
@@ -1012,25 +1012,25 @@ while.body51:                                     ; preds = %while.body51.prehea
   br i1 %cmp49, label %while.end55.loopexit, label %while.body51
 
 while.end55.loopexit:                             ; preds = %while.body51
-  %scevgep156 = getelementptr float, float* %35, i32 %36
+  %scevgep156 = getelementptr float, float* %i35, i32 %i36
   br label %while.end55
 
 while.end55:                                      ; preds = %while.end55.loopexit, %if.end
-  %pTempDest.0.lcssa = phi float* [ %35, %if.end ], [ %scevgep156, %while.end55.loopexit ]
+  %pTempDest.0.lcssa = phi float* [ %i35, %if.end ], [ %scevgep156, %while.end55.loopexit ]
   %pTempSrc.1.lcssa = phi float* [ %arrayidx45, %if.end ], [ %scevgep, %while.end55.loopexit ]
   %and56 = and i32 %conv, 3
   %cmp57 = icmp eq i32 %and56, 0
   br i1 %cmp57, label %if.end61, label %if.then59
 
 if.then59:                                        ; preds = %while.end55
-  %41 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %and56)
-  %42 = bitcast float* %pTempSrc.1.lcssa to <4 x float>*
-  %43 = load <4 x float>, <4 x float>* %42, align 4
-  %44 = bitcast float* %pTempDest.0.lcssa to <4 x float>*
-  tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %43, <4 x float>* %44, i32 4, <4 x i1> %41)
+  %i41 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %and56)
+  %i42 = bitcast float* %pTempSrc.1.lcssa to <4 x float>*
+  %i43 = load <4 x float>, <4 x float>* %i42, align 4
+  %i44 = bitcast float* %pTempDest.0.lcssa to <4 x float>*
+  tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %i43, <4 x float>* %i44, i32 4, <4 x i1> %i41)
   br label %if.end61
 
-if.end61:                                         ; preds = %while.end55, %if.then59
+if.end61:                                         ; preds = %if.then59, %while.end55
   ret void
 }
 
@@ -1170,12 +1170,12 @@ define void @fir(%struct.arm_fir_instance_f32* nocapture readonly %S, float* noc
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
   %pState1 = getelementptr inbounds %struct.arm_fir_instance_f32, %struct.arm_fir_instance_f32* %S, i32 0, i32 1
-  %0 = load float*, float** %pState1, align 4
+  %i = load float*, float** %pState1, align 4
   %pCoeffs2 = getelementptr inbounds %struct.arm_fir_instance_f32, %struct.arm_fir_instance_f32* %S, i32 0, i32 2
-  %1 = load float*, float** %pCoeffs2, align 4
+  %i1 = load float*, float** %pCoeffs2, align 4
   %numTaps3 = getelementptr inbounds %struct.arm_fir_instance_f32, %struct.arm_fir_instance_f32* %S, i32 0, i32 0
-  %2 = load i16, i16* %numTaps3, align 4
-  %conv = zext i16 %2 to i32
+  %i2 = load i16, i16* %numTaps3, align 4
+  %conv = zext i16 %i2 to i32
   %cmp = icmp ugt i32 %blockSize, 7
   br i1 %cmp, label %if.then, label %if.end
 
@@ -1186,164 +1186,164 @@ if.then:                                          ; preds = %entry
 
 while.body.lr.ph:                                 ; preds = %if.then
   %sub = add nsw i32 %conv, -1
-  %arrayidx = getelementptr inbounds float, float* %0, i32 %sub
-  %incdec.ptr = getelementptr inbounds float, float* %1, i32 1
-  %incdec.ptr7 = getelementptr inbounds float, float* %1, i32 2
-  %incdec.ptr8 = getelementptr inbounds float, float* %1, i32 3
-  %incdec.ptr9 = getelementptr inbounds float, float* %1, i32 4
-  %incdec.ptr10 = getelementptr inbounds float, float* %1, i32 5
-  %incdec.ptr11 = getelementptr inbounds float, float* %1, i32 6
-  %incdec.ptr12 = getelementptr inbounds float, float* %1, i32 7
+  %arrayidx = getelementptr inbounds float, float* %i, i32 %sub
+  %incdec.ptr = getelementptr inbounds float, float* %i1, i32 1
+  %incdec.ptr7 = getelementptr inbounds float, float* %i1, i32 2
+  %incdec.ptr8 = getelementptr inbounds float, float* %i1, i32 3
+  %incdec.ptr9 = getelementptr inbounds float, float* %i1, i32 4
+  %incdec.ptr10 = getelementptr inbounds float, float* %i1, i32 5
+  %incdec.ptr11 = getelementptr inbounds float, float* %i1, i32 6
+  %incdec.ptr12 = getelementptr inbounds float, float* %i1, i32 7
   %sub37 = add nsw i32 %conv, -8
   %div = sdiv i32 %sub37, 8
-  %pCoeffsCur.0199 = getelementptr inbounds float, float* %1, i32 8
-  %cmp38201 = icmp ugt i16 %2, 15
+  %pCoeffsCur.0199 = getelementptr inbounds float, float* %i1, i32 8
+  %cmp38201 = icmp ugt i16 %i2, 15
   %and = and i32 %sub37, 7
   %cmp74210 = icmp eq i32 %and, 0
   %idx.neg = sub nsw i32 0, %conv
-  %3 = icmp sgt i32 %div, 1
-  %smax = select i1 %3, i32 %div, i32 1
+  %i3 = icmp sgt i32 %div, 1
+  %smax = select i1 %i3, i32 %div, i32 1
   br label %while.body
 
-while.body:                                       ; preds = %while.body.lr.ph, %while.end
+while.body:                                       ; preds = %while.end, %while.body.lr.ph
   %blkCnt.0222 = phi i32 [ %shr, %while.body.lr.ph ], [ %dec84, %while.end ]
   %pStateCur.0221 = phi float* [ %arrayidx, %while.body.lr.ph ], [ %add.ptr, %while.end ]
-  %pSamples.0220 = phi float* [ %0, %while.body.lr.ph ], [ %add.ptr83, %while.end ]
+  %pSamples.0220 = phi float* [ %i, %while.body.lr.ph ], [ %add.ptr83, %while.end ]
   %pTempSrc.0219 = phi float* [ %pSrc, %while.body.lr.ph ], [ %add.ptr14, %while.end ]
   %pOutput.0218 = phi float* [ %pDst, %while.body.lr.ph ], [ %add.ptr81, %while.end ]
-  %4 = load float, float* %1, align 4
-  %5 = load float, float* %incdec.ptr, align 4
-  %6 = load float, float* %incdec.ptr7, align 4
-  %7 = load float, float* %incdec.ptr8, align 4
-  %8 = load float, float* %incdec.ptr9, align 4
-  %9 = load float, float* %incdec.ptr10, align 4
-  %10 = load float, float* %incdec.ptr11, align 4
-  %11 = load float, float* %incdec.ptr12, align 4
-  %12 = bitcast float* %pTempSrc.0219 to <4 x float>*
-  %13 = load <4 x float>, <4 x float>* %12, align 4
-  %14 = bitcast float* %pStateCur.0221 to <4 x float>*
-  store <4 x float> %13, <4 x float>* %14, align 4
+  %i4 = load float, float* %i1, align 4
+  %i5 = load float, float* %incdec.ptr, align 4
+  %i6 = load float, float* %incdec.ptr7, align 4
+  %i7 = load float, float* %incdec.ptr8, align 4
+  %i8 = load float, float* %incdec.ptr9, align 4
+  %i9 = load float, float* %incdec.ptr10, align 4
+  %i10 = load float, float* %incdec.ptr11, align 4
+  %i11 = load float, float* %incdec.ptr12, align 4
+  %i12 = bitcast float* %pTempSrc.0219 to <4 x float>*
+  %i13 = load <4 x float>, <4 x float>* %i12, align 4
+  %i14 = bitcast float* %pStateCur.0221 to <4 x float>*
+  store <4 x float> %i13, <4 x float>* %i14, align 4
   %add.ptr = getelementptr inbounds float, float* %pStateCur.0221, i32 4
   %add.ptr14 = getelementptr inbounds float, float* %pTempSrc.0219, i32 4
-  %15 = bitcast float* %pSamples.0220 to <4 x float>*
-  %16 = load <4 x float>, <4 x float>* %15, align 4
-  %.splatinsert = insertelement <4 x float> undef, float %4, i32 0
+  %i15 = bitcast float* %pSamples.0220 to <4 x float>*
+  %i16 = load <4 x float>, <4 x float>* %i15, align 4
+  %.splatinsert = insertelement <4 x float> undef, float %i4, i32 0
   %.splat = shufflevector <4 x float> %.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
-  %17 = fmul fast <4 x float> %16, %.splat
+  %i17 = fmul fast <4 x float> %i16, %.splat
   %arrayidx15 = getelementptr inbounds float, float* %pSamples.0220, i32 1
-  %18 = bitcast float* %arrayidx15 to <4 x float>*
-  %19 = load <4 x float>, <4 x float>* %18, align 4
-  %.splatinsert16 = insertelement <4 x float> undef, float %5, i32 0
+  %i18 = bitcast float* %arrayidx15 to <4 x float>*
+  %i19 = load <4 x float>, <4 x float>* %i18, align 4
+  %.splatinsert16 = insertelement <4 x float> undef, float %i5, i32 0
   %.splat17 = shufflevector <4 x float> %.splatinsert16, <4 x float> undef, <4 x i32> zeroinitializer
-  %20 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %19, <4 x float> %.splat17, <4 x float> %17)
+  %i20 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i19, <4 x float> %.splat17, <4 x float> %i17)
   %arrayidx18 = getelementptr inbounds float, float* %pSamples.0220, i32 2
-  %21 = bitcast float* %arrayidx18 to <4 x float>*
-  %22 = load <4 x float>, <4 x float>* %21, align 4
-  %.splatinsert19 = insertelement <4 x float> undef, float %6, i32 0
+  %i21 = bitcast float* %arrayidx18 to <4 x float>*
+  %i22 = load <4 x float>, <4 x float>* %i21, align 4
+  %.splatinsert19 = insertelement <4 x float> undef, float %i6, i32 0
   %.splat20 = shufflevector <4 x float> %.splatinsert19, <4 x float> undef, <4 x i32> zeroinitializer
-  %23 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %22, <4 x float> %.splat20, <4 x float> %20)
+  %i23 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i22, <4 x float> %.splat20, <4 x float> %i20)
   %arrayidx21 = getelementptr inbounds float, float* %pSamples.0220, i32 3
-  %24 = bitcast float* %arrayidx21 to <4 x float>*
-  %25 = load <4 x float>, <4 x float>* %24, align 4
-  %.splatinsert22 = insertelement <4 x float> undef, float %7, i32 0
+  %i24 = bitcast float* %arrayidx21 to <4 x float>*
+  %i25 = load <4 x float>, <4 x float>* %i24, align 4
+  %.splatinsert22 = insertelement <4 x float> undef, float %i7, i32 0
   %.splat23 = shufflevector <4 x float> %.splatinsert22, <4 x float> undef, <4 x i32> zeroinitializer
-  %26 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %25, <4 x float> %.splat23, <4 x float> %23)
+  %i26 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i25, <4 x float> %.splat23, <4 x float> %i23)
   %arrayidx24 = getelementptr inbounds float, float* %pSamples.0220, i32 4
-  %27 = bitcast float* %arrayidx24 to <4 x float>*
-  %28 = load <4 x float>, <4 x float>* %27, align 4
-  %.splatinsert25 = insertelement <4 x float> undef, float %8, i32 0
+  %i27 = bitcast float* %arrayidx24 to <4 x float>*
+  %i28 = load <4 x float>, <4 x float>* %i27, align 4
+  %.splatinsert25 = insertelement <4 x float> undef, float %i8, i32 0
   %.splat26 = shufflevector <4 x float> %.splatinsert25, <4 x float> undef, <4 x i32> zeroinitializer
-  %29 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %28, <4 x float> %.splat26, <4 x float> %26)
+  %i29 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i28, <4 x float> %.splat26, <4 x float> %i26)
   %arrayidx27 = getelementptr inbounds float, float* %pSamples.0220, i32 5
-  %30 = bitcast float* %arrayidx27 to <4 x float>*
-  %31 = load <4 x float>, <4 x float>* %30, align 4
-  %.splatinsert28 = insertelement <4 x float> undef, float %9, i32 0
+  %i30 = bitcast float* %arrayidx27 to <4 x float>*
+  %i31 = load <4 x float>, <4 x float>* %i30, align 4
+  %.splatinsert28 = insertelement <4 x float> undef, float %i9, i32 0
   %.splat29 = shufflevector <4 x float> %.splatinsert28, <4 x float> undef, <4 x i32> zeroinitializer
-  %32 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %31, <4 x float> %.splat29, <4 x float> %29)
+  %i32 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i31, <4 x float> %.splat29, <4 x float> %i29)
   %arrayidx30 = getelementptr inbounds float, float* %pSamples.0220, i32 6
-  %33 = bitcast float* %arrayidx30 to <4 x float>*
-  %34 = load <4 x float>, <4 x float>* %33, align 4
-  %.splatinsert31 = insertelement <4 x float> undef, float %10, i32 0
+  %i33 = bitcast float* %arrayidx30 to <4 x float>*
+  %i34 = load <4 x float>, <4 x float>* %i33, align 4
+  %.splatinsert31 = insertelement <4 x float> undef, float %i10, i32 0
   %.splat32 = shufflevector <4 x float> %.splatinsert31, <4 x float> undef, <4 x i32> zeroinitializer
-  %35 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %34, <4 x float> %.splat32, <4 x float> %32)
+  %i35 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i34, <4 x float> %.splat32, <4 x float> %i32)
   %arrayidx33 = getelementptr inbounds float, float* %pSamples.0220, i32 7
-  %36 = bitcast float* %arrayidx33 to <4 x float>*
-  %37 = load <4 x float>, <4 x float>* %36, align 4
-  %.splatinsert34 = insertelement <4 x float> undef, float %11, i32 0
+  %i36 = bitcast float* %arrayidx33 to <4 x float>*
+  %i37 = load <4 x float>, <4 x float>* %i36, align 4
+  %.splatinsert34 = insertelement <4 x float> undef, float %i11, i32 0
   %.splat35 = shufflevector <4 x float> %.splatinsert34, <4 x float> undef, <4 x i32> zeroinitializer
-  %38 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %37, <4 x float> %.splat35, <4 x float> %35)
+  %i38 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i37, <4 x float> %.splat35, <4 x float> %i35)
   %pSamples.1200 = getelementptr inbounds float, float* %pSamples.0220, i32 8
   br i1 %cmp38201, label %for.body, label %for.end
 
-for.body:                                         ; preds = %while.body, %for.body
+for.body:                                         ; preds = %for.body, %while.body
   %pSamples.1207 = phi float* [ %pSamples.1, %for.body ], [ %pSamples.1200, %while.body ]
   %pCoeffsCur.0206 = phi float* [ %pCoeffsCur.0, %for.body ], [ %pCoeffsCur.0199, %while.body ]
-  %.pn205 = phi float* [ %pCoeffsCur.0206, %for.body ], [ %1, %while.body ]
+  %.pn205 = phi float* [ %pCoeffsCur.0206, %for.body ], [ %i1, %while.body ]
   %i.0204 = phi i32 [ %inc, %for.body ], [ 0, %while.body ]
-  %vecAcc0.0203 = phi <4 x float> [ %70, %for.body ], [ %38, %while.body ]
+  %vecAcc0.0203 = phi <4 x float> [ %i70, %for.body ], [ %i38, %while.body ]
   %pSamples.0.pn202 = phi float* [ %pSamples.1207, %for.body ], [ %pSamples.0220, %while.body ]
   %incdec.ptr40 = getelementptr inbounds float, float* %.pn205, i32 9
-  %39 = load float, float* %pCoeffsCur.0206, align 4
+  %i39 = load float, float* %pCoeffsCur.0206, align 4
   %incdec.ptr41 = getelementptr inbounds float, float* %.pn205, i32 10
-  %40 = load float, float* %incdec.ptr40, align 4
+  %i40 = load float, float* %incdec.ptr40, align 4
   %incdec.ptr42 = getelementptr inbounds float, float* %.pn205, i32 11
-  %41 = load float, float* %incdec.ptr41, align 4
+  %i41 = load float, float* %incdec.ptr41, align 4
   %incdec.ptr43 = getelementptr inbounds float, float* %.pn205, i32 12
-  %42 = load float, float* %incdec.ptr42, align 4
+  %i42 = load float, float* %incdec.ptr42, align 4
   %incdec.ptr44 = getelementptr inbounds float, float* %.pn205, i32 13
-  %43 = load float, float* %incdec.ptr43, align 4
+  %i43 = load float, float* %incdec.ptr43, align 4
   %incdec.ptr45 = getelementptr inbounds float, float* %.pn205, i32 14
-  %44 = load float, float* %incdec.ptr44, align 4
+  %i44 = load float, float* %incdec.ptr44, align 4
   %incdec.ptr46 = getelementptr inbounds float, float* %.pn205, i32 15
-  %45 = load float, float* %incdec.ptr45, align 4
-  %46 = load float, float* %incdec.ptr46, align 4
-  %47 = bitcast float* %pSamples.1207 to <4 x float>*
-  %48 = load <4 x float>, <4 x float>* %47, align 4
-  %.splatinsert48 = insertelement <4 x float> undef, float %39, i32 0
+  %i45 = load float, float* %incdec.ptr45, align 4
+  %i46 = load float, float* %incdec.ptr46, align 4
+  %i47 = bitcast float* %pSamples.1207 to <4 x float>*
+  %i48 = load <4 x float>, <4 x float>* %i47, align 4
+  %.splatinsert48 = insertelement <4 x float> undef, float %i39, i32 0
   %.splat49 = shufflevector <4 x float> %.splatinsert48, <4 x float> undef, <4 x i32> zeroinitializer
-  %49 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %48, <4 x float> %.splat49, <4 x float> %vecAcc0.0203)
+  %i49 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i48, <4 x float> %.splat49, <4 x float> %vecAcc0.0203)
   %arrayidx50 = getelementptr inbounds float, float* %pSamples.0.pn202, i32 9
-  %50 = bitcast float* %arrayidx50 to <4 x float>*
-  %51 = load <4 x float>, <4 x float>* %50, align 4
-  %.splatinsert51 = insertelement <4 x float> undef, float %40, i32 0
+  %i50 = bitcast float* %arrayidx50 to <4 x float>*
+  %i51 = load <4 x float>, <4 x float>* %i50, align 4
+  %.splatinsert51 = insertelement <4 x float> undef, float %i40, i32 0
   %.splat52 = shufflevector <4 x float> %.splatinsert51, <4 x float> undef, <4 x i32> zeroinitializer
-  %52 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %51, <4 x float> %.splat52, <4 x float> %49)
+  %i52 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i51, <4 x float> %.splat52, <4 x float> %i49)
   %arrayidx53 = getelementptr inbounds float, float* %pSamples.0.pn202, i32 10
-  %53 = bitcast float* %arrayidx53 to <4 x float>*
-  %54 = load <4 x float>, <4 x float>* %53, align 4
-  %.splatinsert54 = insertelement <4 x float> undef, float %41, i32 0
+  %i53 = bitcast float* %arrayidx53 to <4 x float>*
+  %i54 = load <4 x float>, <4 x float>* %i53, align 4
+  %.splatinsert54 = insertelement <4 x float> undef, float %i41, i32 0
   %.splat55 = shufflevector <4 x float> %.splatinsert54, <4 x float> undef, <4 x i32> zeroinitializer
-  %55 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %54, <4 x float> %.splat55, <4 x float> %52)
+  %i55 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i54, <4 x float> %.splat55, <4 x float> %i52)
   %arrayidx56 = getelementptr inbounds float, float* %pSamples.0.pn202, i32 11
-  %56 = bitcast float* %arrayidx56 to <4 x float>*
-  %57 = load <4 x float>, <4 x float>* %56, align 4
-  %.splatinsert57 = insertelement <4 x float> undef, float %42, i32 0
+  %i56 = bitcast float* %arrayidx56 to <4 x float>*
+  %i57 = load <4 x float>, <4 x float>* %i56, align 4
+  %.splatinsert57 = insertelement <4 x float> undef, float %i42, i32 0
   %.splat58 = shufflevector <4 x float> %.splatinsert57, <4 x float> undef, <4 x i32> zeroinitializer
-  %58 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %57, <4 x float> %.splat58, <4 x float> %55)
+  %i58 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i57, <4 x float> %.splat58, <4 x float> %i55)
   %arrayidx59 = getelementptr inbounds float, float* %pSamples.0.pn202, i32 12
-  %59 = bitcast float* %arrayidx59 to <4 x float>*
-  %60 = load <4 x float>, <4 x float>* %59, align 4
-  %.splatinsert60 = insertelement <4 x float> undef, float %43, i32 0
+  %i59 = bitcast float* %arrayidx59 to <4 x float>*
+  %i60 = load <4 x float>, <4 x float>* %i59, align 4
+  %.splatinsert60 = insertelement <4 x float> undef, float %i43, i32 0
   %.splat61 = shufflevector <4 x float> %.splatinsert60, <4 x float> undef, <4 x i32> zeroinitializer
-  %61 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %60, <4 x float> %.splat61, <4 x float> %58)
+  %i61 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i60, <4 x float> %.splat61, <4 x float> %i58)
   %arrayidx62 = getelementptr inbounds float, float* %pSamples.0.pn202, i32 13
-  %62 = bitcast float* %arrayidx62 to <4 x float>*
-  %63 = load <4 x float>, <4 x float>* %62, align 4
-  %.splatinsert63 = insertelement <4 x float> undef, float %44, i32 0
+  %i62 = bitcast float* %arrayidx62 to <4 x float>*
+  %i63 = load <4 x float>, <4 x float>* %i62, align 4
+  %.splatinsert63 = insertelement <4 x float> undef, float %i44, i32 0
   %.splat64 = shufflevector <4 x float> %.splatinsert63, <4 x float> undef, <4 x i32> zeroinitializer
-  %64 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %63, <4 x float> %.splat64, <4 x float> %61)
+  %i64 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i63, <4 x float> %.splat64, <4 x float> %i61)
   %arrayidx65 = getelementptr inbounds float, float* %pSamples.0.pn202, i32 14
-  %65 = bitcast float* %arrayidx65 to <4 x float>*
-  %66 = load <4 x float>, <4 x float>* %65, align 4
-  %.splatinsert66 = insertelement <4 x float> undef, float %45, i32 0
+  %i65 = bitcast float* %arrayidx65 to <4 x float>*
+  %i66 = load <4 x float>, <4 x float>* %i65, align 4
+  %.splatinsert66 = insertelement <4 x float> undef, float %i45, i32 0
   %.splat67 = shufflevector <4 x float> %.splatinsert66, <4 x float> undef, <4 x i32> zeroinitializer
-  %67 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %66, <4 x float> %.splat67, <4 x float> %64)
+  %i67 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i66, <4 x float> %.splat67, <4 x float> %i64)
   %arrayidx68 = getelementptr inbounds float, float* %pSamples.0.pn202, i32 15
-  %68 = bitcast float* %arrayidx68 to <4 x float>*
-  %69 = load <4 x float>, <4 x float>* %68, align 4
-  %.splatinsert69 = insertelement <4 x float> undef, float %46, i32 0
+  %i68 = bitcast float* %arrayidx68 to <4 x float>*
+  %i69 = load <4 x float>, <4 x float>* %i68, align 4
+  %.splatinsert69 = insertelement <4 x float> undef, float %i46, i32 0
   %.splat70 = shufflevector <4 x float> %.splatinsert69, <4 x float> undef, <4 x i32> zeroinitializer
-  %70 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %69, <4 x float> %.splat70, <4 x float> %67)
+  %i70 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i69, <4 x float> %.splat70, <4 x float> %i67)
   %inc = add nuw nsw i32 %i.0204, 1
   %pCoeffsCur.0 = getelementptr inbounds float, float* %pCoeffsCur.0206, i32 8
   %pSamples.1 = getelementptr inbounds float, float* %pSamples.1207, i32 8
@@ -1351,23 +1351,23 @@ for.body:                                         ; preds = %while.body, %for.bo
   br i1 %exitcond, label %for.end, label %for.body
 
 for.end:                                          ; preds = %for.body, %while.body
-  %vecAcc0.0.lcssa = phi <4 x float> [ %38, %while.body ], [ %70, %for.body ]
+  %vecAcc0.0.lcssa = phi <4 x float> [ %i38, %while.body ], [ %i70, %for.body ]
   %pCoeffsCur.0.lcssa = phi float* [ %pCoeffsCur.0199, %while.body ], [ %pCoeffsCur.0, %for.body ]
   %pSamples.1.lcssa = phi float* [ %pSamples.1200, %while.body ], [ %pSamples.1, %for.body ]
   br i1 %cmp74210, label %while.end, label %while.body76
 
-while.body76:                                     ; preds = %for.end, %while.body76
+while.body76:                                     ; preds = %while.body76, %for.end
   %pCoeffsCur.1214 = phi float* [ %incdec.ptr77, %while.body76 ], [ %pCoeffsCur.0.lcssa, %for.end ]
-  %vecAcc0.1213 = phi <4 x float> [ %74, %while.body76 ], [ %vecAcc0.0.lcssa, %for.end ]
+  %vecAcc0.1213 = phi <4 x float> [ %i74, %while.body76 ], [ %vecAcc0.0.lcssa, %for.end ]
   %numCnt.0212 = phi i32 [ %dec, %while.body76 ], [ %and, %for.end ]
   %pSamples.2211 = phi float* [ %incdec.ptr80, %while.body76 ], [ %pSamples.1.lcssa, %for.end ]
   %incdec.ptr77 = getelementptr inbounds float, float* %pCoeffsCur.1214, i32 1
-  %71 = load float, float* %pCoeffsCur.1214, align 4
-  %72 = bitcast float* %pSamples.2211 to <4 x float>*
-  %73 = load <4 x float>, <4 x float>* %72, align 4
-  %.splatinsert78 = insertelement <4 x float> undef, float %71, i32 0
+  %i71 = load float, float* %pCoeffsCur.1214, align 4
+  %i72 = bitcast float* %pSamples.2211 to <4 x float>*
+  %i73 = load <4 x float>, <4 x float>* %i72, align 4
+  %.splatinsert78 = insertelement <4 x float> undef, float %i71, i32 0
   %.splat79 = shufflevector <4 x float> %.splatinsert78, <4 x float> undef, <4 x i32> zeroinitializer
-  %74 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %73, <4 x float> %.splat79, <4 x float> %vecAcc0.1213)
+  %i74 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i73, <4 x float> %.splat79, <4 x float> %vecAcc0.1213)
   %incdec.ptr80 = getelementptr inbounds float, float* %pSamples.2211, i32 1
   %dec = add nsw i32 %numCnt.0212, -1
   %cmp74 = icmp sgt i32 %numCnt.0212, 1
@@ -1379,9 +1379,9 @@ while.end.loopexit:                               ; preds = %while.body76
 
 while.end:                                        ; preds = %while.end.loopexit, %for.end
   %pSamples.2.lcssa = phi float* [ %pSamples.1.lcssa, %for.end ], [ %scevgep, %while.end.loopexit ]
-  %vecAcc0.1.lcssa = phi <4 x float> [ %vecAcc0.0.lcssa, %for.end ], [ %74, %while.end.loopexit ]
-  %75 = bitcast float* %pOutput.0218 to <4 x float>*
-  store <4 x float> %vecAcc0.1.lcssa, <4 x float>* %75, align 4
+  %vecAcc0.1.lcssa = phi <4 x float> [ %vecAcc0.0.lcssa, %for.end ], [ %i74, %while.end.loopexit ]
+  %i75 = bitcast float* %pOutput.0218 to <4 x float>*
+  store <4 x float> %vecAcc0.1.lcssa, <4 x float>* %i75, align 4
   %add.ptr81 = getelementptr inbounds float, float* %pOutput.0218, i32 4
   %add.ptr82 = getelementptr inbounds float, float* %pSamples.2.lcssa, i32 4
   %add.ptr83 = getelementptr inbounds float, float* %add.ptr82, i32 %idx.neg
@@ -1394,9 +1394,9 @@ if.end:                                           ; preds = %while.end, %if.then
 }
 
 %struct.arm_biquad_cascade_stereo_df2T_instance_f32 = type { i8, float*, float* }
-define arm_aapcs_vfpcc void @arm_biquad_cascade_stereo_df2T_f32(%struct.arm_biquad_cascade_stereo_df2T_instance_f32* nocapture readonly %0, float* %1, float* %2, i32 %3) {
+define arm_aapcs_vfpcc void @arm_biquad_cascade_stereo_df2T_f32(%struct.arm_biquad_cascade_stereo_df2T_instance_f32* nocapture readonly %arg, float* %arg1, float* %arg2, i32 %arg3) {
 ; CHECK-LABEL: arm_biquad_cascade_stereo_df2T_f32:
-; CHECK:       @ %bb.0:
+; CHECK:       @ %bb.0: @ %bb
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
 ; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
@@ -1414,7 +1414,8 @@ define arm_aapcs_vfpcc void @arm_biquad_cascade_stereo_df2T_f32(%struct.arm_biqu
 ; CHECK-NEXT:    movs r5, #2
 ; CHECK-NEXT:    viwdup.u32 q0, r4, r5, #1
 ; CHECK-NEXT:    mov r4, sp
-; CHECK-NEXT:  .LBB17_2: @ =>This Loop Header: Depth=1
+; CHECK-NEXT:  .LBB17_2: @ %bb29
+; CHECK-NEXT:    @ =>This Loop Header: Depth=1
 ; CHECK-NEXT:    @ Child Loop BB17_3 Depth 2
 ; CHECK-NEXT:    ldrd r5, r7, [r0]
 ; CHECK-NEXT:    vldrw.u32 q1, [r3]
@@ -1430,7 +1431,8 @@ define arm_aapcs_vfpcc void @arm_biquad_cascade_stereo_df2T_f32(%struct.arm_biqu
 ; CHECK-NEXT:    vmov.f32 s10, s12
 ; CHECK-NEXT:    mov r7, r2
 ; CHECK-NEXT:    vmov.f32 s11, s12
-; CHECK-NEXT:  .LBB17_3: @ Parent Loop BB17_2 Depth=1
+; CHECK-NEXT:  .LBB17_3: @ %bb55
+; CHECK-NEXT:    @ Parent Loop BB17_2 Depth=1
 ; CHECK-NEXT:    @ => This Inner Loop Header: Depth=2
 ; CHECK-NEXT:    vldrw.u32 q4, [r1, q0, uxtw #2]
 ; CHECK-NEXT:    vldrw.u32 q5, [r4, q0, uxtw #2]
@@ -1442,122 +1444,125 @@ define arm_aapcs_vfpcc void @arm_biquad_cascade_stereo_df2T_f32(%struct.arm_biqu
 ; CHECK-NEXT:    vfma.f32 q3, q4, q1
 ; CHECK-NEXT:    vstrw.32 q3, [r4]
 ; CHECK-NEXT:    le lr, .LBB17_3
-; CHECK-NEXT:  @ %bb.4: @ in Loop: Header=BB17_2 Depth=1
+; CHECK-NEXT:  @ %bb.4: @ %bb75
+; CHECK-NEXT:    @ in Loop: Header=BB17_2 Depth=1
 ; CHECK-NEXT:    subs.w r12, r12, #1
 ; CHECK-NEXT:    add.w r0, r0, #20
 ; CHECK-NEXT:    vstrb.8 q3, [r3], #16
 ; CHECK-NEXT:    mov r1, r2
 ; CHECK-NEXT:    bne .LBB17_2
 ; CHECK-NEXT:    b .LBB17_7
-; CHECK-NEXT:  .LBB17_5: @ %.preheader
+; CHECK-NEXT:  .LBB17_5: @ %bb21.preheader
 ; CHECK-NEXT:    dls lr, r12
 ; CHECK-NEXT:    mov r0, sp
-; CHECK-NEXT:  .LBB17_6: @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:  .LBB17_6: @ %bb21
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r3], #16
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    le lr, .LBB17_6
-; CHECK-NEXT:  .LBB17_7:
+; CHECK-NEXT:  .LBB17_7: @ %bb80
 ; CHECK-NEXT:    add sp, #24
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
-  %5 = alloca [6 x float], align 4
-  %6 = getelementptr inbounds %struct.arm_biquad_cascade_stereo_df2T_instance_f32, %struct.arm_biquad_cascade_stereo_df2T_instance_f32* %0, i32 0, i32 1
-  %7 = load float*, float** %6, align 4
-  %8 = getelementptr inbounds %struct.arm_biquad_cascade_stereo_df2T_instance_f32, %struct.arm_biquad_cascade_stereo_df2T_instance_f32* %0, i32 0, i32 2
-  %9 = load float*, float** %8, align 4
-  %10 = getelementptr inbounds %struct.arm_biquad_cascade_stereo_df2T_instance_f32, %struct.arm_biquad_cascade_stereo_df2T_instance_f32* %0, i32 0, i32 0
-  %11 = load i8, i8* %10, align 4
-  %12 = zext i8 %11 to i32
-  %13 = bitcast [6 x float]* %5 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 24, i8* nonnull %13) #5
-  %14 = tail call { <4 x i32>, i32 } @llvm.arm.mve.viwdup.v4i32(i32 0, i32 2, i32 1)
-  %15 = extractvalue { <4 x i32>, i32 } %14, 0
-  %16 = getelementptr inbounds [6 x float], [6 x float]* %5, i32 0, i32 4
-  store float 0.000000e+00, float* %16, align 4
-  %17 = getelementptr inbounds [6 x float], [6 x float]* %5, i32 0, i32 5
-  store float 0.000000e+00, float* %17, align 4
-  %18 = bitcast [6 x float]* %5 to <4 x float>*
-  %19 = icmp eq i32 %3, 0
-  %20 = bitcast [6 x float]* %5 to i32*
-  %21 = getelementptr inbounds [6 x float], [6 x float]* %5, i32 0, i32 2
-  %22 = bitcast float* %21 to <4 x float>*
-  br i1 %19, label %23, label %31
-
-23:                                               ; preds = %4, %23
-  %24 = phi i32 [ %29, %23 ], [ %12, %4 ]
-  %25 = phi float* [ %28, %23 ], [ %7, %4 ]
-  %26 = bitcast float* %25 to <4 x float>*
-  %27 = load <4 x float>, <4 x float>* %26, align 8
-  store <4 x float> %27, <4 x float>* %18, align 4
-  %28 = getelementptr inbounds float, float* %25, i32 4
-  %29 = add i32 %24, -1
-  %30 = icmp eq i32 %29, 0
-  br i1 %30, label %82, label %23
-
-31:                                               ; preds = %4, %77
-  %32 = phi i32 [ %80, %77 ], [ %12, %4 ]
-  %33 = phi float* [ %78, %77 ], [ %9, %4 ]
-  %34 = phi float* [ %79, %77 ], [ %7, %4 ]
-  %35 = phi float* [ %2, %77 ], [ %1, %4 ]
-  %36 = getelementptr inbounds float, float* %33, i32 1
-  %37 = load float, float* %33, align 4
-  %38 = getelementptr inbounds float, float* %33, i32 2
-  %39 = load float, float* %36, align 4
-  %40 = getelementptr inbounds float, float* %33, i32 3
-  %41 = load float, float* %38, align 4
-  %42 = getelementptr inbounds float, float* %33, i32 4
-  %43 = load float, float* %40, align 4
-  %44 = load float, float* %42, align 4
-  %45 = insertelement <4 x float> undef, float %43, i32 0
-  %46 = shufflevector <4 x float> %45, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 undef, i32 undef>
-  %47 = insertelement <4 x float> %46, float %44, i32 2
-  %48 = insertelement <4 x float> %47, float %44, i32 3
-  %49 = insertelement <4 x float> undef, float %39, i32 0
-  %50 = shufflevector <4 x float> %49, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 undef, i32 undef>
-  %51 = insertelement <4 x float> %50, float %41, i32 2
-  %52 = insertelement <4 x float> %51, float %41, i32 3
-  %53 = bitcast float* %34 to <4 x float>*
-  %54 = load <4 x float>, <4 x float>* %53, align 8
-  store <4 x float> %54, <4 x float>* %18, align 4
-  %55 = insertelement <4 x float> undef, float %37, i32 0
-  %56 = shufflevector <4 x float> %55, <4 x float> undef, <4 x i32> zeroinitializer
-  br label %57
-
-57:                                               ; preds = %31, %57
-  %58 = phi float* [ %35, %31 ], [ %74, %57 ]
-  %59 = phi float* [ %2, %31 ], [ %70, %57 ]
-  %60 = phi i32 [ %3, %31 ], [ %75, %57 ]
-  %61 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32* nonnull %20, <4 x i32> %15, i32 32, i32 2, i32 1)
-  %62 = bitcast <4 x i32> %61 to <4 x float>
-  %63 = bitcast float* %58 to i32*
-  %64 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32* %63, <4 x i32> %15, i32 32, i32 2, i32 1)
-  %65 = bitcast <4 x i32> %64 to <4 x float>
-  %66 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %65, <4 x float> %56, <4 x float> %62)
-  %67 = extractelement <4 x float> %66, i32 0
-  %68 = getelementptr inbounds float, float* %59, i32 1
-  store float %67, float* %59, align 4
-  %69 = extractelement <4 x float> %66, i32 1
-  %70 = getelementptr inbounds float, float* %59, i32 2
-  store float %69, float* %68, align 4
-  %71 = load <4 x float>, <4 x float>* %22, align 4
-  %72 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %66, <4 x float> %48, <4 x float> %71)
-  %73 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %65, <4 x float> %52, <4 x float> %72)
-  store <4 x float> %73, <4 x float>* %18, align 4
-  %74 = getelementptr inbounds float, float* %58, i32 2
-  %75 = add i32 %60, -1
-  %76 = icmp eq i32 %75, 0
-  br i1 %76, label %77, label %57
-
-77:                                               ; preds = %57
-  %78 = getelementptr inbounds float, float* %33, i32 5
-  store <4 x float> %73, <4 x float>* %53, align 4
-  %79 = getelementptr inbounds float, float* %34, i32 4
-  %80 = add i32 %32, -1
-  %81 = icmp eq i32 %80, 0
-  br i1 %81, label %82, label %31
-
-82:                                               ; preds = %77, %23
-  call void @llvm.lifetime.end.p0i8(i64 24, i8* nonnull %13) #5
+bb:
+  %i = alloca [6 x float], align 4
+  %i4 = getelementptr inbounds %struct.arm_biquad_cascade_stereo_df2T_instance_f32, %struct.arm_biquad_cascade_stereo_df2T_instance_f32* %arg, i32 0, i32 1
+  %i5 = load float*, float** %i4, align 4
+  %i6 = getelementptr inbounds %struct.arm_biquad_cascade_stereo_df2T_instance_f32, %struct.arm_biquad_cascade_stereo_df2T_instance_f32* %arg, i32 0, i32 2
+  %i7 = load float*, float** %i6, align 4
+  %i8 = getelementptr inbounds %struct.arm_biquad_cascade_stereo_df2T_instance_f32, %struct.arm_biquad_cascade_stereo_df2T_instance_f32* %arg, i32 0, i32 0
+  %i9 = load i8, i8* %i8, align 4
+  %i10 = zext i8 %i9 to i32
+  %i11 = bitcast [6 x float]* %i to i8*
+  call void @llvm.lifetime.start.p0i8(i64 24, i8* nonnull %i11)
+  %i12 = tail call { <4 x i32>, i32 } @llvm.arm.mve.viwdup.v4i32(i32 0, i32 2, i32 1)
+  %i13 = extractvalue { <4 x i32>, i32 } %i12, 0
+  %i14 = getelementptr inbounds [6 x float], [6 x float]* %i, i32 0, i32 4
+  store float 0.000000e+00, float* %i14, align 4
+  %i15 = getelementptr inbounds [6 x float], [6 x float]* %i, i32 0, i32 5
+  store float 0.000000e+00, float* %i15, align 4
+  %i16 = bitcast [6 x float]* %i to <4 x float>*
+  %i17 = icmp eq i32 %arg3, 0
+  %i18 = bitcast [6 x float]* %i to i32*
+  %i19 = getelementptr inbounds [6 x float], [6 x float]* %i, i32 0, i32 2
+  %i20 = bitcast float* %i19 to <4 x float>*
+  br i1 %i17, label %bb21, label %bb29
+
+bb21:                                             ; preds = %bb21, %bb
+  %i22 = phi i32 [ %i27, %bb21 ], [ %i10, %bb ]
+  %i23 = phi float* [ %i26, %bb21 ], [ %i5, %bb ]
+  %i24 = bitcast float* %i23 to <4 x float>*
+  %i25 = load <4 x float>, <4 x float>* %i24, align 8
+  store <4 x float> %i25, <4 x float>* %i16, align 4
+  %i26 = getelementptr inbounds float, float* %i23, i32 4
+  %i27 = add i32 %i22, -1
+  %i28 = icmp eq i32 %i27, 0
+  br i1 %i28, label %bb80, label %bb21
+
+bb29:                                             ; preds = %bb75, %bb
+  %i30 = phi i32 [ %i78, %bb75 ], [ %i10, %bb ]
+  %i31 = phi float* [ %i76, %bb75 ], [ %i7, %bb ]
+  %i32 = phi float* [ %i77, %bb75 ], [ %i5, %bb ]
+  %i33 = phi float* [ %arg2, %bb75 ], [ %arg1, %bb ]
+  %i34 = getelementptr inbounds float, float* %i31, i32 1
+  %i35 = load float, float* %i31, align 4
+  %i36 = getelementptr inbounds float, float* %i31, i32 2
+  %i37 = load float, float* %i34, align 4
+  %i38 = getelementptr inbounds float, float* %i31, i32 3
+  %i39 = load float, float* %i36, align 4
+  %i40 = getelementptr inbounds float, float* %i31, i32 4
+  %i41 = load float, float* %i38, align 4
+  %i42 = load float, float* %i40, align 4
+  %i43 = insertelement <4 x float> undef, float %i41, i32 0
+  %i44 = shufflevector <4 x float> %i43, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 undef, i32 undef>
+  %i45 = insertelement <4 x float> %i44, float %i42, i32 2
+  %i46 = insertelement <4 x float> %i45, float %i42, i32 3
+  %i47 = insertelement <4 x float> undef, float %i37, i32 0
+  %i48 = shufflevector <4 x float> %i47, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 undef, i32 undef>
+  %i49 = insertelement <4 x float> %i48, float %i39, i32 2
+  %i50 = insertelement <4 x float> %i49, float %i39, i32 3
+  %i51 = bitcast float* %i32 to <4 x float>*
+  %i52 = load <4 x float>, <4 x float>* %i51, align 8
+  store <4 x float> %i52, <4 x float>* %i16, align 4
+  %i53 = insertelement <4 x float> undef, float %i35, i32 0
+  %i54 = shufflevector <4 x float> %i53, <4 x float> undef, <4 x i32> zeroinitializer
+  br label %bb55
+
+bb55:                                             ; preds = %bb55, %bb29
+  %i56 = phi float* [ %i33, %bb29 ], [ %i72, %bb55 ]
+  %i57 = phi float* [ %arg2, %bb29 ], [ %i68, %bb55 ]
+  %i58 = phi i32 [ %arg3, %bb29 ], [ %i73, %bb55 ]
+  %i59 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32* nonnull %i18, <4 x i32> %i13, i32 32, i32 2, i32 1)
+  %i60 = bitcast <4 x i32> %i59 to <4 x float>
+  %i61 = bitcast float* %i56 to i32*
+  %i62 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32* %i61, <4 x i32> %i13, i32 32, i32 2, i32 1)
+  %i63 = bitcast <4 x i32> %i62 to <4 x float>
+  %i64 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i63, <4 x float> %i54, <4 x float> %i60)
+  %i65 = extractelement <4 x float> %i64, i32 0
+  %i66 = getelementptr inbounds float, float* %i57, i32 1
+  store float %i65, float* %i57, align 4
+  %i67 = extractelement <4 x float> %i64, i32 1
+  %i68 = getelementptr inbounds float, float* %i57, i32 2
+  store float %i67, float* %i66, align 4
+  %i69 = load <4 x float>, <4 x float>* %i20, align 4
+  %i70 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i64, <4 x float> %i46, <4 x float> %i69)
+  %i71 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i63, <4 x float> %i50, <4 x float> %i70)
+  store <4 x float> %i71, <4 x float>* %i16, align 4
+  %i72 = getelementptr inbounds float, float* %i56, i32 2
+  %i73 = add i32 %i58, -1
+  %i74 = icmp eq i32 %i73, 0
+  br i1 %i74, label %bb75, label %bb55
+
+bb75:                                             ; preds = %bb55
+  %i76 = getelementptr inbounds float, float* %i31, i32 5
+  store <4 x float> %i71, <4 x float>* %i51, align 4
+  %i77 = getelementptr inbounds float, float* %i32, i32 4
+  %i78 = add i32 %i30, -1
+  %i79 = icmp eq i32 %i78, 0
+  br i1 %i79, label %bb80, label %bb29
+
+bb80:                                             ; preds = %bb75, %bb21
+  call void @llvm.lifetime.end.p0i8(i64 24, i8* nonnull %i11)
   ret void
 }
 
@@ -1597,30 +1602,30 @@ entry:
   %cmp15 = icmp eq i32 %shr, 0
   br i1 %cmp15, label %do.end, label %do.body
 
-do.body:                                          ; preds = %entry, %while.end
+do.body:                                          ; preds = %while.end, %entry
   %pDst.addr.0 = phi float* [ %add.ptr2, %while.end ], [ %pDst, %entry ]
   %M.addr.0 = phi i32 [ %dec3, %while.end ], [ %M, %entry ]
   %pSrc3.addr.0 = phi float* [ %incdec.ptr, %while.end ], [ %pSrc3, %entry ]
   %pSrc2.addr.0 = phi float* [ %add.ptr1, %while.end ], [ %pSrc2, %entry ]
   %pSrc1.addr.0 = phi float* [ %add.ptr, %while.end ], [ %pSrc1, %entry ]
-  %0 = load float, float* %pSrc3.addr.0, align 4
-  %.splatinsert = insertelement <4 x float> undef, float %0, i32 0
+  %i = load float, float* %pSrc3.addr.0, align 4
+  %.splatinsert = insertelement <4 x float> undef, float %i, i32 0
   %.splat = shufflevector <4 x float> %.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
   br label %while.body
 
-while.body:                                       ; preds = %do.body, %while.body
+while.body:                                       ; preds = %while.body, %do.body
   %pSrc1.addr.119 = phi float* [ %pSrc1.addr.0, %do.body ], [ %add.ptr, %while.body ]
   %pSrc2.addr.118 = phi float* [ %pSrc2.addr.0, %do.body ], [ %add.ptr1, %while.body ]
   %blkCnt.017 = phi i32 [ %shr, %do.body ], [ %dec, %while.body ]
   %pDst.addr.116 = phi float* [ %pDst.addr.0, %do.body ], [ %add.ptr2, %while.body ]
-  %1 = bitcast float* %pSrc1.addr.119 to <4 x float>*
-  %2 = load <4 x float>, <4 x float>* %1, align 4
-  %3 = bitcast float* %pSrc2.addr.118 to <4 x float>*
-  %4 = load <4 x float>, <4 x float>* %3, align 4
-  %5 = fneg fast <4 x float> %4
-  %6 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %.splat, <4 x float> %5, <4 x float> %2)
-  %7 = bitcast float* %pDst.addr.116 to <4 x float>*
-  store <4 x float> %6, <4 x float>* %7, align 4
+  %i1 = bitcast float* %pSrc1.addr.119 to <4 x float>*
+  %i2 = load <4 x float>, <4 x float>* %i1, align 4
+  %i3 = bitcast float* %pSrc2.addr.118 to <4 x float>*
+  %i4 = load <4 x float>, <4 x float>* %i3, align 4
+  %i5 = fneg fast <4 x float> %i4
+  %i6 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %.splat, <4 x float> %i5, <4 x float> %i2)
+  %i7 = bitcast float* %pDst.addr.116 to <4 x float>*
+  store <4 x float> %i6, <4 x float>* %i7, align 4
   %add.ptr = getelementptr inbounds float, float* %pSrc1.addr.119, i32 4
   %add.ptr1 = getelementptr inbounds float, float* %pSrc2.addr.118, i32 4
   %add.ptr2 = getelementptr inbounds float, float* %pDst.addr.116, i32 4
@@ -1797,11 +1802,11 @@ define arm_aapcs_vfpcc void @arm_biquad_cascade_df1_f32(%struct.arm_biquad_casd_
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
   %pState1 = getelementptr inbounds %struct.arm_biquad_casd_df1_inst_f32, %struct.arm_biquad_casd_df1_inst_f32* %S, i32 0, i32 1
-  %0 = load float*, float** %pState1, align 4
+  %i = load float*, float** %pState1, align 4
   %pCoeffs2 = getelementptr inbounds %struct.arm_biquad_casd_df1_inst_f32, %struct.arm_biquad_casd_df1_inst_f32* %S, i32 0, i32 2
-  %1 = load float*, float** %pCoeffs2, align 4
+  %i1 = load float*, float** %pCoeffs2, align 4
   %numStages = getelementptr inbounds %struct.arm_biquad_casd_df1_inst_f32, %struct.arm_biquad_casd_df1_inst_f32* %S, i32 0, i32 0
-  %2 = load i32, i32* %numStages, align 4
+  %i2 = load i32, i32* %numStages, align 4
   %shr = lshr i32 %blockSize, 2
   %cmp201 = icmp eq i32 %shr, 0
   %and = and i32 %blockSize, 3
@@ -1811,186 +1816,186 @@ entry:
   br label %do.body
 
 do.body:                                          ; preds = %if.end69, %entry
-  %pState.0 = phi float* [ %0, %entry ], [ %incdec.ptr73, %if.end69 ]
-  %pCoeffs.0 = phi float* [ %1, %entry ], [ %add.ptr74, %if.end69 ]
+  %pState.0 = phi float* [ %i, %entry ], [ %incdec.ptr73, %if.end69 ]
+  %pCoeffs.0 = phi float* [ %i1, %entry ], [ %add.ptr74, %if.end69 ]
   %pIn.0 = phi float* [ %pSrc, %entry ], [ %pDst, %if.end69 ]
   %X3.0 = phi float [ undef, %entry ], [ %X3.2, %if.end69 ]
-  %stage.0 = phi i32 [ %2, %entry ], [ %dec75, %if.end69 ]
-  %3 = load float, float* %pState.0, align 4
+  %stage.0 = phi i32 [ %i2, %entry ], [ %dec75, %if.end69 ]
+  %i3 = load float, float* %pState.0, align 4
   %arrayidx3 = getelementptr inbounds float, float* %pState.0, i32 1
-  %4 = load float, float* %arrayidx3, align 4
+  %i4 = load float, float* %arrayidx3, align 4
   %arrayidx4 = getelementptr inbounds float, float* %pState.0, i32 2
-  %5 = load float, float* %arrayidx4, align 4
+  %i5 = load float, float* %arrayidx4, align 4
   %arrayidx5 = getelementptr inbounds float, float* %pState.0, i32 3
-  %6 = load float, float* %arrayidx5, align 4
+  %i6 = load float, float* %arrayidx5, align 4
   br i1 %cmp201, label %while.end, label %while.body.lr.ph
 
 while.body.lr.ph:                                 ; preds = %do.body
-  %7 = bitcast float* %pCoeffs.0 to <4 x float>*
+  %i7 = bitcast float* %pCoeffs.0 to <4 x float>*
   %arrayidx9 = getelementptr inbounds float, float* %pCoeffs.0, i32 4
-  %8 = bitcast float* %arrayidx9 to <4 x float>*
+  %i8 = bitcast float* %arrayidx9 to <4 x float>*
   %arrayidx12 = getelementptr inbounds float, float* %pCoeffs.0, i32 8
-  %9 = bitcast float* %arrayidx12 to <4 x float>*
+  %i9 = bitcast float* %arrayidx12 to <4 x float>*
   %arrayidx15 = getelementptr inbounds float, float* %pCoeffs.0, i32 12
-  %10 = bitcast float* %arrayidx15 to <4 x float>*
+  %i10 = bitcast float* %arrayidx15 to <4 x float>*
   %arrayidx18 = getelementptr inbounds float, float* %pCoeffs.0, i32 16
-  %11 = bitcast float* %arrayidx18 to <4 x float>*
+  %i11 = bitcast float* %arrayidx18 to <4 x float>*
   %arrayidx21 = getelementptr inbounds float, float* %pCoeffs.0, i32 20
-  %12 = bitcast float* %arrayidx21 to <4 x float>*
+  %i12 = bitcast float* %arrayidx21 to <4 x float>*
   %arrayidx24 = getelementptr inbounds float, float* %pCoeffs.0, i32 24
-  %13 = bitcast float* %arrayidx24 to <4 x float>*
+  %i13 = bitcast float* %arrayidx24 to <4 x float>*
   %arrayidx27 = getelementptr inbounds float, float* %pCoeffs.0, i32 28
-  %14 = bitcast float* %arrayidx27 to <4 x float>*
+  %i14 = bitcast float* %arrayidx27 to <4 x float>*
   br label %while.body
 
-while.body:                                       ; preds = %while.body.lr.ph, %while.body
+while.body:                                       ; preds = %while.body, %while.body.lr.ph
   %sample.0208 = phi i32 [ %shr, %while.body.lr.ph ], [ %dec, %while.body ]
   %pIn.1207 = phi float* [ %pIn.0, %while.body.lr.ph ], [ %incdec.ptr8, %while.body ]
   %pOut.1206 = phi float* [ %pDst, %while.body.lr.ph ], [ %add.ptr, %while.body ]
-  %Yn2.0205 = phi float [ %6, %while.body.lr.ph ], [ %37, %while.body ]
-  %Yn1.0204 = phi float [ %5, %while.body.lr.ph ], [ %36, %while.body ]
-  %Xn2.0203 = phi float [ %4, %while.body.lr.ph ], [ %17, %while.body ]
-  %Xn1.0202 = phi float [ %3, %while.body.lr.ph ], [ %18, %while.body ]
+  %Yn2.0205 = phi float [ %i6, %while.body.lr.ph ], [ %i37, %while.body ]
+  %Yn1.0204 = phi float [ %i5, %while.body.lr.ph ], [ %i36, %while.body ]
+  %Xn2.0203 = phi float [ %i4, %while.body.lr.ph ], [ %i17, %while.body ]
+  %Xn1.0202 = phi float [ %i3, %while.body.lr.ph ], [ %i18, %while.body ]
   %incdec.ptr = getelementptr inbounds float, float* %pIn.1207, i32 1
-  %15 = load float, float* %pIn.1207, align 4
+  %i15 = load float, float* %pIn.1207, align 4
   %incdec.ptr6 = getelementptr inbounds float, float* %pIn.1207, i32 2
-  %16 = load float, float* %incdec.ptr, align 4
+  %i16 = load float, float* %incdec.ptr, align 4
   %incdec.ptr7 = getelementptr inbounds float, float* %pIn.1207, i32 3
-  %17 = load float, float* %incdec.ptr6, align 4
+  %i17 = load float, float* %incdec.ptr6, align 4
   %incdec.ptr8 = getelementptr inbounds float, float* %pIn.1207, i32 4
-  %18 = load float, float* %incdec.ptr7, align 4
-  %19 = load <4 x float>, <4 x float>* %7, align 4
-  %.splatinsert = insertelement <4 x float> undef, float %18, i32 0
+  %i18 = load float, float* %incdec.ptr7, align 4
+  %i19 = load <4 x float>, <4 x float>* %i7, align 4
+  %.splatinsert = insertelement <4 x float> undef, float %i18, i32 0
   %.splat = shufflevector <4 x float> %.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
-  %20 = fmul fast <4 x float> %.splat, %19
-  %21 = load <4 x float>, <4 x float>* %8, align 4
-  %.splatinsert10 = insertelement <4 x float> undef, float %17, i32 0
+  %i20 = fmul fast <4 x float> %.splat, %i19
+  %i21 = load <4 x float>, <4 x float>* %i8, align 4
+  %.splatinsert10 = insertelement <4 x float> undef, float %i17, i32 0
   %.splat11 = shufflevector <4 x float> %.splatinsert10, <4 x float> undef, <4 x i32> zeroinitializer
-  %22 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %21, <4 x float> %.splat11, <4 x float> %20)
-  %23 = load <4 x float>, <4 x float>* %9, align 4
-  %.splatinsert13 = insertelement <4 x float> undef, float %16, i32 0
+  %i22 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i21, <4 x float> %.splat11, <4 x float> %i20)
+  %i23 = load <4 x float>, <4 x float>* %i9, align 4
+  %.splatinsert13 = insertelement <4 x float> undef, float %i16, i32 0
   %.splat14 = shufflevector <4 x float> %.splatinsert13, <4 x float> undef, <4 x i32> zeroinitializer
-  %24 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %23, <4 x float> %.splat14, <4 x float> %22)
-  %25 = load <4 x float>, <4 x float>* %10, align 4
-  %.splatinsert16 = insertelement <4 x float> undef, float %15, i32 0
+  %i24 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i23, <4 x float> %.splat14, <4 x float> %i22)
+  %i25 = load <4 x float>, <4 x float>* %i10, align 4
+  %.splatinsert16 = insertelement <4 x float> undef, float %i15, i32 0
   %.splat17 = shufflevector <4 x float> %.splatinsert16, <4 x float> undef, <4 x i32> zeroinitializer
-  %26 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %25, <4 x float> %.splat17, <4 x float> %24)
-  %27 = load <4 x float>, <4 x float>* %11, align 4
+  %i26 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i25, <4 x float> %.splat17, <4 x float> %i24)
+  %i27 = load <4 x float>, <4 x float>* %i11, align 4
   %.splatinsert19 = insertelement <4 x float> undef, float %Xn1.0202, i32 0
   %.splat20 = shufflevector <4 x float> %.splatinsert19, <4 x float> undef, <4 x i32> zeroinitializer
-  %28 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %27, <4 x float> %.splat20, <4 x float> %26)
-  %29 = load <4 x float>, <4 x float>* %12, align 4
+  %i28 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i27, <4 x float> %.splat20, <4 x float> %i26)
+  %i29 = load <4 x float>, <4 x float>* %i12, align 4
   %.splatinsert22 = insertelement <4 x float> undef, float %Xn2.0203, i32 0
   %.splat23 = shufflevector <4 x float> %.splatinsert22, <4 x float> undef, <4 x i32> zeroinitializer
-  %30 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %29, <4 x float> %.splat23, <4 x float> %28)
-  %31 = load <4 x float>, <4 x float>* %13, align 4
+  %i30 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i29, <4 x float> %.splat23, <4 x float> %i28)
+  %i31 = load <4 x float>, <4 x float>* %i13, align 4
   %.splatinsert25 = insertelement <4 x float> undef, float %Yn1.0204, i32 0
   %.splat26 = shufflevector <4 x float> %.splatinsert25, <4 x float> undef, <4 x i32> zeroinitializer
-  %32 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %31, <4 x float> %.splat26, <4 x float> %30)
-  %33 = load <4 x float>, <4 x float>* %14, align 4
+  %i32 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i31, <4 x float> %.splat26, <4 x float> %i30)
+  %i33 = load <4 x float>, <4 x float>* %i14, align 4
   %.splatinsert28 = insertelement <4 x float> undef, float %Yn2.0205, i32 0
   %.splat29 = shufflevector <4 x float> %.splatinsert28, <4 x float> undef, <4 x i32> zeroinitializer
-  %34 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %33, <4 x float> %.splat29, <4 x float> %32)
-  %35 = bitcast float* %pOut.1206 to <4 x float>*
-  store <4 x float> %34, <4 x float>* %35, align 4
+  %i34 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i33, <4 x float> %.splat29, <4 x float> %i32)
+  %i35 = bitcast float* %pOut.1206 to <4 x float>*
+  store <4 x float> %i34, <4 x float>* %i35, align 4
   %add.ptr = getelementptr inbounds float, float* %pOut.1206, i32 4
-  %36 = extractelement <4 x float> %34, i32 3
-  %37 = extractelement <4 x float> %34, i32 2
+  %i36 = extractelement <4 x float> %i34, i32 3
+  %i37 = extractelement <4 x float> %i34, i32 2
   %dec = add nsw i32 %sample.0208, -1
   %cmp = icmp eq i32 %dec, 0
   br i1 %cmp, label %while.end, label %while.body
 
 while.end:                                        ; preds = %while.body, %do.body
-  %Xn1.0.lcssa = phi float [ %3, %do.body ], [ %18, %while.body ]
-  %Xn2.0.lcssa = phi float [ %4, %do.body ], [ %17, %while.body ]
-  %Yn1.0.lcssa = phi float [ %5, %do.body ], [ %36, %while.body ]
-  %Yn2.0.lcssa = phi float [ %6, %do.body ], [ %37, %while.body ]
+  %Xn1.0.lcssa = phi float [ %i3, %do.body ], [ %i18, %while.body ]
+  %Xn2.0.lcssa = phi float [ %i4, %do.body ], [ %i17, %while.body ]
+  %Yn1.0.lcssa = phi float [ %i5, %do.body ], [ %i36, %while.body ]
+  %Yn2.0.lcssa = phi float [ %i6, %do.body ], [ %i37, %while.body ]
   %pOut.1.lcssa = phi float* [ %pDst, %do.body ], [ %add.ptr, %while.body ]
   %pIn.1.lcssa = phi float* [ %pIn.0, %do.body ], [ %incdec.ptr8, %while.body ]
-  %X3.1.lcssa = phi float [ %X3.0, %do.body ], [ %18, %while.body ]
+  %X3.1.lcssa = phi float [ %X3.0, %do.body ], [ %i18, %while.body ]
   br i1 %tobool, label %if.end69, label %if.then
 
 if.then:                                          ; preds = %while.end
   %incdec.ptr30 = getelementptr inbounds float, float* %pIn.1.lcssa, i32 1
-  %38 = load float, float* %pIn.1.lcssa, align 4
+  %i38 = load float, float* %pIn.1.lcssa, align 4
   %incdec.ptr31 = getelementptr inbounds float, float* %pIn.1.lcssa, i32 2
-  %39 = load float, float* %incdec.ptr30, align 4
+  %i39 = load float, float* %incdec.ptr30, align 4
   %incdec.ptr32 = getelementptr inbounds float, float* %pIn.1.lcssa, i32 3
-  %40 = load float, float* %incdec.ptr31, align 4
-  %41 = load float, float* %incdec.ptr32, align 4
-  %42 = bitcast float* %pCoeffs.0 to <4 x float>*
-  %43 = load <4 x float>, <4 x float>* %42, align 4
-  %.splatinsert34 = insertelement <4 x float> undef, float %41, i32 0
+  %i40 = load float, float* %incdec.ptr31, align 4
+  %i41 = load float, float* %incdec.ptr32, align 4
+  %i42 = bitcast float* %pCoeffs.0 to <4 x float>*
+  %i43 = load <4 x float>, <4 x float>* %i42, align 4
+  %.splatinsert34 = insertelement <4 x float> undef, float %i41, i32 0
   %.splat35 = shufflevector <4 x float> %.splatinsert34, <4 x float> undef, <4 x i32> zeroinitializer
-  %44 = fmul fast <4 x float> %.splat35, %43
+  %i44 = fmul fast <4 x float> %.splat35, %i43
   %arrayidx36 = getelementptr inbounds float, float* %pCoeffs.0, i32 4
-  %45 = bitcast float* %arrayidx36 to <4 x float>*
-  %46 = load <4 x float>, <4 x float>* %45, align 4
-  %.splatinsert37 = insertelement <4 x float> undef, float %40, i32 0
+  %i45 = bitcast float* %arrayidx36 to <4 x float>*
+  %i46 = load <4 x float>, <4 x float>* %i45, align 4
+  %.splatinsert37 = insertelement <4 x float> undef, float %i40, i32 0
   %.splat38 = shufflevector <4 x float> %.splatinsert37, <4 x float> undef, <4 x i32> zeroinitializer
-  %47 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %46, <4 x float> %.splat38, <4 x float> %44)
+  %i47 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i46, <4 x float> %.splat38, <4 x float> %i44)
   %arrayidx39 = getelementptr inbounds float, float* %pCoeffs.0, i32 8
-  %48 = bitcast float* %arrayidx39 to <4 x float>*
-  %49 = load <4 x float>, <4 x float>* %48, align 4
-  %.splatinsert40 = insertelement <4 x float> undef, float %39, i32 0
+  %i48 = bitcast float* %arrayidx39 to <4 x float>*
+  %i49 = load <4 x float>, <4 x float>* %i48, align 4
+  %.splatinsert40 = insertelement <4 x float> undef, float %i39, i32 0
   %.splat41 = shufflevector <4 x float> %.splatinsert40, <4 x float> undef, <4 x i32> zeroinitializer
-  %50 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %49, <4 x float> %.splat41, <4 x float> %47)
+  %i50 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i49, <4 x float> %.splat41, <4 x float> %i47)
   %arrayidx42 = getelementptr inbounds float, float* %pCoeffs.0, i32 12
-  %51 = bitcast float* %arrayidx42 to <4 x float>*
-  %52 = load <4 x float>, <4 x float>* %51, align 4
-  %.splatinsert43 = insertelement <4 x float> undef, float %38, i32 0
+  %i51 = bitcast float* %arrayidx42 to <4 x float>*
+  %i52 = load <4 x float>, <4 x float>* %i51, align 4
+  %.splatinsert43 = insertelement <4 x float> undef, float %i38, i32 0
   %.splat44 = shufflevector <4 x float> %.splatinsert43, <4 x float> undef, <4 x i32> zeroinitializer
-  %53 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %52, <4 x float> %.splat44, <4 x float> %50)
+  %i53 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i52, <4 x float> %.splat44, <4 x float> %i50)
   %arrayidx45 = getelementptr inbounds float, float* %pCoeffs.0, i32 16
-  %54 = bitcast float* %arrayidx45 to <4 x float>*
-  %55 = load <4 x float>, <4 x float>* %54, align 4
+  %i54 = bitcast float* %arrayidx45 to <4 x float>*
+  %i55 = load <4 x float>, <4 x float>* %i54, align 4
   %.splatinsert46 = insertelement <4 x float> undef, float %Xn1.0.lcssa, i32 0
   %.splat47 = shufflevector <4 x float> %.splatinsert46, <4 x float> undef, <4 x i32> zeroinitializer
-  %56 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %55, <4 x float> %.splat47, <4 x float> %53)
+  %i56 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i55, <4 x float> %.splat47, <4 x float> %i53)
   %arrayidx48 = getelementptr inbounds float, float* %pCoeffs.0, i32 20
-  %57 = bitcast float* %arrayidx48 to <4 x float>*
-  %58 = load <4 x float>, <4 x float>* %57, align 4
+  %i57 = bitcast float* %arrayidx48 to <4 x float>*
+  %i58 = load <4 x float>, <4 x float>* %i57, align 4
   %.splatinsert49 = insertelement <4 x float> undef, float %Xn2.0.lcssa, i32 0
   %.splat50 = shufflevector <4 x float> %.splatinsert49, <4 x float> undef, <4 x i32> zeroinitializer
-  %59 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %58, <4 x float> %.splat50, <4 x float> %56)
+  %i59 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i58, <4 x float> %.splat50, <4 x float> %i56)
   %arrayidx51 = getelementptr inbounds float, float* %pCoeffs.0, i32 24
-  %60 = bitcast float* %arrayidx51 to <4 x float>*
-  %61 = load <4 x float>, <4 x float>* %60, align 4
+  %i60 = bitcast float* %arrayidx51 to <4 x float>*
+  %i61 = load <4 x float>, <4 x float>* %i60, align 4
   %.splatinsert52 = insertelement <4 x float> undef, float %Yn1.0.lcssa, i32 0
   %.splat53 = shufflevector <4 x float> %.splatinsert52, <4 x float> undef, <4 x i32> zeroinitializer
-  %62 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %61, <4 x float> %.splat53, <4 x float> %59)
+  %i62 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i61, <4 x float> %.splat53, <4 x float> %i59)
   %arrayidx54 = getelementptr inbounds float, float* %pCoeffs.0, i32 28
-  %63 = bitcast float* %arrayidx54 to <4 x float>*
-  %64 = load <4 x float>, <4 x float>* %63, align 4
+  %i63 = bitcast float* %arrayidx54 to <4 x float>*
+  %i64 = load <4 x float>, <4 x float>* %i63, align 4
   %.splatinsert55 = insertelement <4 x float> undef, float %Yn2.0.lcssa, i32 0
   %.splat56 = shufflevector <4 x float> %.splatinsert55, <4 x float> undef, <4 x i32> zeroinitializer
-  %65 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %64, <4 x float> %.splat56, <4 x float> %62)
-  %66 = extractelement <4 x float> %65, i32 0
+  %i65 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i64, <4 x float> %.splat56, <4 x float> %i62)
+  %i66 = extractelement <4 x float> %i65, i32 0
   br i1 %cmp57, label %if.then58, label %if.else
 
 if.then58:                                        ; preds = %if.then
-  store float %66, float* %pOut.1.lcssa, align 4
+  store float %i66, float* %pOut.1.lcssa, align 4
   br label %if.end69
 
 if.else:                                          ; preds = %if.then
   %incdec.ptr62 = getelementptr inbounds float, float* %pOut.1.lcssa, i32 1
-  store float %66, float* %pOut.1.lcssa, align 4
-  %67 = extractelement <4 x float> %65, i32 1
-  store float %67, float* %incdec.ptr62, align 4
+  store float %i66, float* %pOut.1.lcssa, align 4
+  %i67 = extractelement <4 x float> %i65, i32 1
+  store float %i67, float* %incdec.ptr62, align 4
   br i1 %cmp60, label %if.end69, label %if.else64
 
 if.else64:                                        ; preds = %if.else
   %incdec.ptr63 = getelementptr inbounds float, float* %pOut.1.lcssa, i32 2
-  %68 = extractelement <4 x float> %65, i32 2
-  store float %68, float* %incdec.ptr63, align 4
+  %i68 = extractelement <4 x float> %i65, i32 2
+  store float %i68, float* %incdec.ptr63, align 4
   br label %if.end69
 
-if.end69:                                         ; preds = %if.else, %while.end, %if.then58, %if.else64
-  %Xn1.1 = phi float [ %38, %if.then58 ], [ %40, %if.else64 ], [ %Xn1.0.lcssa, %while.end ], [ %39, %if.else ]
-  %Xn2.1 = phi float [ %X3.1.lcssa, %if.then58 ], [ %39, %if.else64 ], [ %Xn2.0.lcssa, %while.end ], [ %38, %if.else ]
-  %Yn1.1 = phi float [ %66, %if.then58 ], [ %68, %if.else64 ], [ %Yn1.0.lcssa, %while.end ], [ %67, %if.else ]
-  %Yn2.1 = phi float [ %Yn1.0.lcssa, %if.then58 ], [ %67, %if.else64 ], [ %Yn2.0.lcssa, %while.end ], [ %66, %if.else ]
-  %X3.2 = phi float [ %41, %if.then58 ], [ %41, %if.else64 ], [ %X3.1.lcssa, %while.end ], [ %41, %if.else ]
+if.end69:                                         ; preds = %if.else64, %if.else, %if.then58, %while.end
+  %Xn1.1 = phi float [ %i38, %if.then58 ], [ %i40, %if.else64 ], [ %Xn1.0.lcssa, %while.end ], [ %i39, %if.else ]
+  %Xn2.1 = phi float [ %X3.1.lcssa, %if.then58 ], [ %i39, %if.else64 ], [ %Xn2.0.lcssa, %while.end ], [ %i38, %if.else ]
+  %Yn1.1 = phi float [ %i66, %if.then58 ], [ %i68, %if.else64 ], [ %Yn1.0.lcssa, %while.end ], [ %i67, %if.else ]
+  %Yn2.1 = phi float [ %Yn1.0.lcssa, %if.then58 ], [ %i67, %if.else64 ], [ %Yn2.0.lcssa, %while.end ], [ %i66, %if.else ]
+  %X3.2 = phi float [ %i41, %if.then58 ], [ %i41, %if.else64 ], [ %X3.1.lcssa, %while.end ], [ %i41, %if.else ]
   store float %Xn1.1, float* %pState.0, align 4
   store float %Xn2.1, float* %arrayidx3, align 4
   store float %Yn1.1, float* %arrayidx4, align 4
@@ -2092,12 +2097,12 @@ define void @arm_biquad_cascade_df2T_f32(%struct.arm_biquad_cascade_df2T_instanc
 ; CHECK-NEXT:    .long 0x00000000 @ float 0
 entry:
   %pState1 = getelementptr inbounds %struct.arm_biquad_cascade_df2T_instance_f32, %struct.arm_biquad_cascade_df2T_instance_f32* %S, i32 0, i32 1
-  %0 = load float*, float** %pState1, align 4
+  %i = load float*, float** %pState1, align 4
   %numStages = getelementptr inbounds %struct.arm_biquad_cascade_df2T_instance_f32, %struct.arm_biquad_cascade_df2T_instance_f32* %S, i32 0, i32 0
-  %1 = load i8, i8* %numStages, align 4
-  %conv = zext i8 %1 to i32
+  %i1 = load i8, i8* %numStages, align 4
+  %conv = zext i8 %i1 to i32
   %pCoeffs = getelementptr inbounds %struct.arm_biquad_cascade_df2T_instance_f32, %struct.arm_biquad_cascade_df2T_instance_f32* %S, i32 0, i32 2
-  %2 = load float*, float** %pCoeffs, align 4
+  %i2 = load float*, float** %pCoeffs, align 4
   %div = lshr i32 %blockSize, 1
   %cmp.not90 = icmp eq i32 %div, 0
   %and = and i32 %blockSize, 1
@@ -2106,92 +2111,92 @@ entry:
 
 do.body:                                          ; preds = %if.end, %entry
   %stage.0 = phi i32 [ %conv, %entry ], [ %dec23, %if.end ]
-  %pCurCoeffs.0 = phi float* [ %2, %entry ], [ %add.ptr2, %if.end ]
-  %pState.0 = phi float* [ %0, %entry ], [ %pState.1, %if.end ]
+  %pCurCoeffs.0 = phi float* [ %i2, %entry ], [ %add.ptr2, %if.end ]
+  %pState.0 = phi float* [ %i, %entry ], [ %pState.1, %if.end ]
   %pIn.0 = phi float* [ %pSrc, %entry ], [ %pDst, %if.end ]
-  %3 = bitcast float* %pCurCoeffs.0 to <4 x float>*
-  %4 = load <4 x float>, <4 x float>* %3, align 4
+  %i3 = bitcast float* %pCurCoeffs.0 to <4 x float>*
+  %i4 = load <4 x float>, <4 x float>* %i3, align 4
   %add.ptr = getelementptr inbounds float, float* %pCurCoeffs.0, i32 2
-  %5 = bitcast float* %add.ptr to <4 x float>*
-  %6 = load <4 x float>, <4 x float>* %5, align 4
+  %i5 = bitcast float* %add.ptr to <4 x float>*
+  %i6 = load <4 x float>, <4 x float>* %i5, align 4
   %add.ptr2 = getelementptr inbounds float, float* %pCurCoeffs.0, i32 5
-  %7 = bitcast float* %pState.0 to <4 x float>*
-  %8 = load <4 x float>, <4 x float>* %7, align 8
-  %9 = shufflevector <4 x float> %8, <4 x float> <float poison, float poison, float 0.000000e+00, float 0.000000e+00>, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
-  %10 = bitcast <4 x float> %4 to <4 x i32>
-  %11 = tail call { i32, <4 x i32> } @llvm.arm.mve.vshlc.v4i32(<4 x i32> %10, i32 0, i32 32)
-  %12 = extractvalue { i32, <4 x i32> } %11, 0
-  %13 = extractvalue { i32, <4 x i32> } %11, 1
-  %14 = bitcast <4 x i32> %13 to <4 x float>
-  %15 = bitcast <4 x float> %6 to <4 x i32>
-  %16 = tail call { i32, <4 x i32> } @llvm.arm.mve.vshlc.v4i32(<4 x i32> %15, i32 %12, i32 32)
-  %17 = extractvalue { i32, <4 x i32> } %16, 1
-  %18 = bitcast <4 x i32> %17 to <4 x float>
+  %i7 = bitcast float* %pState.0 to <4 x float>*
+  %i8 = load <4 x float>, <4 x float>* %i7, align 8
+  %i9 = shufflevector <4 x float> %i8, <4 x float> <float poison, float poison, float 0.000000e+00, float 0.000000e+00>, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+  %i10 = bitcast <4 x float> %i4 to <4 x i32>
+  %i11 = tail call { i32, <4 x i32> } @llvm.arm.mve.vshlc.v4i32(<4 x i32> %i10, i32 0, i32 32)
+  %i12 = extractvalue { i32, <4 x i32> } %i11, 0
+  %i13 = extractvalue { i32, <4 x i32> } %i11, 1
+  %i14 = bitcast <4 x i32> %i13 to <4 x float>
+  %i15 = bitcast <4 x float> %i6 to <4 x i32>
+  %i16 = tail call { i32, <4 x i32> } @llvm.arm.mve.vshlc.v4i32(<4 x i32> %i15, i32 %i12, i32 32)
+  %i17 = extractvalue { i32, <4 x i32> } %i16, 1
+  %i18 = bitcast <4 x i32> %i17 to <4 x float>
   br i1 %cmp.not90, label %while.end, label %while.body
 
-while.body:                                       ; preds = %do.body, %while.body
+while.body:                                       ; preds = %while.body, %do.body
   %pIn.194 = phi float* [ %incdec.ptr4, %while.body ], [ %pIn.0, %do.body ]
-  %state.093 = phi <4 x float> [ %30, %while.body ], [ %9, %do.body ]
+  %state.093 = phi <4 x float> [ %i30, %while.body ], [ %i9, %do.body ]
   %pOut.192 = phi float* [ %incdec.ptr12, %while.body ], [ %pDst, %do.body ]
   %sample.091 = phi i32 [ %dec, %while.body ], [ %div, %do.body ]
   %incdec.ptr = getelementptr inbounds float, float* %pIn.194, i32 1
-  %19 = load float, float* %pIn.194, align 4
+  %i19 = load float, float* %pIn.194, align 4
   %incdec.ptr4 = getelementptr inbounds float, float* %pIn.194, i32 2
-  %20 = load float, float* %incdec.ptr, align 4
-  %.splatinsert = insertelement <4 x float> poison, float %19, i32 0
+  %i20 = load float, float* %incdec.ptr, align 4
+  %.splatinsert = insertelement <4 x float> poison, float %i19, i32 0
   %.splat = shufflevector <4 x float> %.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
-  %21 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %4, <4 x float> %.splat, <4 x float> %state.093)
-  %22 = extractelement <4 x float> %21, i32 0
-  %.splat6 = shufflevector <4 x float> %21, <4 x float> poison, <4 x i32> zeroinitializer
-  %23 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %6, <4 x float> %.splat6, <4 x float> %21)
-  %24 = insertelement <4 x float> %23, float 0.000000e+00, i32 3
-  %.splatinsert7 = insertelement <4 x float> poison, float %20, i32 0
+  %i21 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i4, <4 x float> %.splat, <4 x float> %state.093)
+  %i22 = extractelement <4 x float> %i21, i32 0
+  %.splat6 = shufflevector <4 x float> %i21, <4 x float> poison, <4 x i32> zeroinitializer
+  %i23 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i6, <4 x float> %.splat6, <4 x float> %i21)
+  %i24 = insertelement <4 x float> %i23, float 0.000000e+00, i32 3
+  %.splatinsert7 = insertelement <4 x float> poison, float %i20, i32 0
   %.splat8 = shufflevector <4 x float> %.splatinsert7, <4 x float> poison, <4 x i32> zeroinitializer
-  %25 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %14, <4 x float> %.splat8, <4 x float> %24)
-  %26 = extractelement <4 x float> %25, i32 1
-  %.splat10 = shufflevector <4 x float> %25, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
-  %27 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %18, <4 x float> %.splat10, <4 x float> %25)
-  %28 = shufflevector <4 x float> %27, <4 x float> undef, <4 x i32> <i32 2, i32 undef, i32 undef, i32 3>
-  %29 = insertelement <4 x float> %28, float 0.000000e+00, i32 2
-  %30 = shufflevector <4 x float> %29, <4 x float> %27, <4 x i32> <i32 0, i32 7, i32 2, i32 3>
+  %i25 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i14, <4 x float> %.splat8, <4 x float> %i24)
+  %i26 = extractelement <4 x float> %i25, i32 1
+  %.splat10 = shufflevector <4 x float> %i25, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  %i27 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i18, <4 x float> %.splat10, <4 x float> %i25)
+  %i28 = shufflevector <4 x float> %i27, <4 x float> undef, <4 x i32> <i32 2, i32 undef, i32 undef, i32 3>
+  %i29 = insertelement <4 x float> %i28, float 0.000000e+00, i32 2
+  %i30 = shufflevector <4 x float> %i29, <4 x float> %i27, <4 x i32> <i32 0, i32 7, i32 2, i32 3>
   %incdec.ptr11 = getelementptr inbounds float, float* %pOut.192, i32 1
-  store float %22, float* %pOut.192, align 4
+  store float %i22, float* %pOut.192, align 4
   %incdec.ptr12 = getelementptr inbounds float, float* %pOut.192, i32 2
-  store float %26, float* %incdec.ptr11, align 4
+  store float %i26, float* %incdec.ptr11, align 4
   %dec = add nsw i32 %sample.091, -1
   %cmp.not = icmp eq i32 %dec, 0
   br i1 %cmp.not, label %while.end, label %while.body
 
 while.end:                                        ; preds = %while.body, %do.body
   %pOut.1.lcssa = phi float* [ %pDst, %do.body ], [ %incdec.ptr12, %while.body ]
-  %state.0.lcssa = phi <4 x float> [ %9, %do.body ], [ %30, %while.body ]
+  %state.0.lcssa = phi <4 x float> [ %i9, %do.body ], [ %i30, %while.body ]
   %pIn.1.lcssa = phi float* [ %pIn.0, %do.body ], [ %incdec.ptr4, %while.body ]
   br i1 %tobool.not, label %if.else, label %if.then
 
 if.then:                                          ; preds = %while.end
-  %31 = load float, float* %pIn.1.lcssa, align 4
-  %.splatinsert14 = insertelement <4 x float> poison, float %31, i32 0
+  %i31 = load float, float* %pIn.1.lcssa, align 4
+  %.splatinsert14 = insertelement <4 x float> poison, float %i31, i32 0
   %.splat15 = shufflevector <4 x float> %.splatinsert14, <4 x float> poison, <4 x i32> zeroinitializer
-  %32 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %4, <4 x float> %.splat15, <4 x float> %state.0.lcssa)
-  %33 = extractelement <4 x float> %32, i32 0
-  %.splat17 = shufflevector <4 x float> %32, <4 x float> poison, <4 x i32> zeroinitializer
-  %34 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %6, <4 x float> %.splat17, <4 x float> %32)
-  store float %33, float* %pOut.1.lcssa, align 4
-  %35 = extractelement <4 x float> %34, i32 1
-  store float %35, float* %pState.0, align 4
-  %36 = extractelement <4 x float> %34, i32 2
+  %i32 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i4, <4 x float> %.splat15, <4 x float> %state.0.lcssa)
+  %i33 = extractelement <4 x float> %i32, i32 0
+  %.splat17 = shufflevector <4 x float> %i32, <4 x float> poison, <4 x i32> zeroinitializer
+  %i34 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %i6, <4 x float> %.splat17, <4 x float> %i32)
+  store float %i33, float* %pOut.1.lcssa, align 4
+  %i35 = extractelement <4 x float> %i34, i32 1
+  store float %i35, float* %pState.0, align 4
+  %i36 = extractelement <4 x float> %i34, i32 2
   br label %if.end
 
 if.else:                                          ; preds = %while.end
-  %37 = extractelement <4 x float> %state.0.lcssa, i32 0
-  store float %37, float* %pState.0, align 4
-  %38 = extractelement <4 x float> %state.0.lcssa, i32 1
+  %i37 = extractelement <4 x float> %state.0.lcssa, i32 0
+  store float %i37, float* %pState.0, align 4
+  %i38 = extractelement <4 x float> %state.0.lcssa, i32 1
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then
-  %.sink = phi float [ %38, %if.else ], [ %36, %if.then ]
-  %39 = getelementptr inbounds float, float* %pState.0, i32 1
-  store float %.sink, float* %39, align 4
+  %.sink = phi float [ %i38, %if.else ], [ %i36, %if.then ]
+  %i39 = getelementptr inbounds float, float* %pState.0, i32 1
+  store float %.sink, float* %i39, align 4
   %pState.1 = getelementptr inbounds float, float* %pState.0, i32 2
   %dec23 = add i32 %stage.0, -1
   %cmp24.not = icmp eq i32 %dec23, 0
@@ -2209,13 +2214,13 @@ define arm_aapcs_vfpcc float @vecAddAcrossF32Mve(<4 x float> %in) {
 ; CHECK-NEXT:    vadd.f32 s0, s0, s3
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = extractelement <4 x float> %in, i32 0
-  %1 = extractelement <4 x float> %in, i32 1
-  %add = fadd fast float %0, %1
-  %2 = extractelement <4 x float> %in, i32 2
-  %add1 = fadd fast float %add, %2
-  %3 = extractelement <4 x float> %in, i32 3
-  %add2 = fadd fast float %add1, %3
+  %i = extractelement <4 x float> %in, i32 0
+  %i1 = extractelement <4 x float> %in, i32 1
+  %add = fadd fast float %i, %i1
+  %i2 = extractelement <4 x float> %in, i32 2
+  %add1 = fadd fast float %add, %i2
+  %i3 = extractelement <4 x float> %in, i32 3
+  %add2 = fadd fast float %add1, %i3
   ret float %add2
 }
 

diff  --git a/llvm/test/CodeGen/Thumb2/mve-gather-ptrs.ll b/llvm/test/CodeGen/Thumb2/mve-gather-ptrs.ll
index b57d1a774d4fe..061354b1c15ce 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-ptrs.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-ptrs.ll
@@ -709,19 +709,19 @@ entry:
   %cmp11 = icmp sgt i32 %and, 0
   br i1 %cmp11, label %vector.body, label %for.end
 
-vector.body:                                      ; preds = %entry, %vector.body
+vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ %index.next, %vector.body ], [ 0, %entry ]
-  %0 = getelementptr inbounds i32*, i32** %src, i32 %index
-  %1 = bitcast i32** %0 to <4 x i32*>*
-  %wide.load = load <4 x i32*>, <4 x i32*>* %1, align 4
-  %2 = icmp ne <4 x i32*> %wide.load, zeroinitializer
-  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %wide.load, i32 4, <4 x i1> %2, <4 x i32> undef)
-  %3 = getelementptr inbounds i32, i32* %dest, i32 %index
-  %4 = bitcast i32* %3 to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %wide.masked.gather, <4 x i32>* %4, i32 4, <4 x i1> %2)
+  %i = getelementptr inbounds i32*, i32** %src, i32 %index
+  %i1 = bitcast i32** %i to <4 x i32*>*
+  %wide.load = load <4 x i32*>, <4 x i32*>* %i1, align 4
+  %i2 = icmp ne <4 x i32*> %wide.load, zeroinitializer
+  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %wide.load, i32 4, <4 x i1> %i2, <4 x i32> undef)
+  %i3 = getelementptr inbounds i32, i32* %dest, i32 %index
+  %i4 = bitcast i32* %i3 to <4 x i32>*
+  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %wide.masked.gather, <4 x i32>* %i4, i32 4, <4 x i1> %i2)
   %index.next = add i32 %index, 4
-  %5 = icmp eq i32 %index.next, %and
-  br i1 %5, label %for.end, label %vector.body
+  %i5 = icmp eq i32 %index.next, %and
+  br i1 %i5, label %for.end, label %vector.body
 
 for.end:                                          ; preds = %vector.body, %entry
   ret void
@@ -754,20 +754,20 @@ entry:
   %cmp11 = icmp sgt i32 %and, 0
   br i1 %cmp11, label %vector.body, label %for.end
 
-vector.body:                                      ; preds = %entry, %vector.body
+vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ %index.next, %vector.body ], [ 0, %entry ]
-  %0 = getelementptr inbounds float*, float** %src, i32 %index
-  %1 = bitcast float** %0 to <4 x float*>*
-  %wide.load = load <4 x float*>, <4 x float*>* %1, align 4
-  %2 = icmp ne <4 x float*> %wide.load, zeroinitializer
-  %3 = bitcast <4 x float*> %wide.load to <4 x i32*>
-  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %3, i32 4, <4 x i1> %2, <4 x i32> undef)
-  %4 = getelementptr inbounds float, float* %dest, i32 %index
-  %5 = bitcast float* %4 to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %wide.masked.gather, <4 x i32>* %5, i32 4, <4 x i1> %2)
+  %i = getelementptr inbounds float*, float** %src, i32 %index
+  %i1 = bitcast float** %i to <4 x float*>*
+  %wide.load = load <4 x float*>, <4 x float*>* %i1, align 4
+  %i2 = icmp ne <4 x float*> %wide.load, zeroinitializer
+  %i3 = bitcast <4 x float*> %wide.load to <4 x i32*>
+  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %i3, i32 4, <4 x i1> %i2, <4 x i32> undef)
+  %i4 = getelementptr inbounds float, float* %dest, i32 %index
+  %i5 = bitcast float* %i4 to <4 x i32>*
+  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %wide.masked.gather, <4 x i32>* %i5, i32 4, <4 x i1> %i2)
   %index.next = add i32 %index, 4
-  %6 = icmp eq i32 %index.next, %and
-  br i1 %6, label %for.end, label %vector.body
+  %i6 = icmp eq i32 %index.next, %and
+  br i1 %i6, label %for.end, label %vector.body
 
 for.end:                                          ; preds = %vector.body, %entry
   ret void
@@ -819,7 +819,7 @@ entry:
 
 define arm_aapcs_vfpcc <4 x i32> @gepconstoff_i32(i32* %base) {
 ; CHECK-LABEL: gepconstoff_i32:
-; CHECK:       @ %bb.0:
+; CHECK:       @ %bb.0: @ %bb
 ; CHECK-NEXT:    adr r1, .LCPI30_0
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, q1, uxtw #2]
@@ -831,6 +831,7 @@ define arm_aapcs_vfpcc <4 x i32> @gepconstoff_i32(i32* %base) {
 ; CHECK-NEXT:    .long 4 @ 0x4
 ; CHECK-NEXT:    .long 8 @ 0x8
 ; CHECK-NEXT:    .long 12 @ 0xc
+bb:
   %a = getelementptr i32, i32* %base, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
   %g = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %a, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> poison)
   ret <4 x i32> %g
@@ -838,7 +839,7 @@ define arm_aapcs_vfpcc <4 x i32> @gepconstoff_i32(i32* %base) {
 
 define arm_aapcs_vfpcc <4 x i32> @gepconstoff_i8(i8* %base) {
 ; CHECK-STD-LABEL: gepconstoff_i8:
-; CHECK-STD:       @ %bb.0:
+; CHECK-STD:       @ %bb.0: @ %bb
 ; CHECK-STD-NEXT:    adr r1, .LCPI31_0
 ; CHECK-STD-NEXT:    vldrw.u32 q0, [r1]
 ; CHECK-STD-NEXT:    vadd.i32 q1, q0, r0
@@ -853,7 +854,7 @@ define arm_aapcs_vfpcc <4 x i32> @gepconstoff_i8(i8* %base) {
 ; CHECK-STD-NEXT:    .long 44 @ 0x2c
 ;
 ; CHECK-OPAQ-LABEL: gepconstoff_i8:
-; CHECK-OPAQ:       @ %bb.0:
+; CHECK-OPAQ:       @ %bb.0: @ %bb
 ; CHECK-OPAQ-NEXT:    adr r1, .LCPI31_0
 ; CHECK-OPAQ-NEXT:    vldrw.u32 q1, [r1]
 ; CHECK-OPAQ-NEXT:    vldrw.u32 q0, [r0, q1]
@@ -865,6 +866,7 @@ define arm_aapcs_vfpcc <4 x i32> @gepconstoff_i8(i8* %base) {
 ; CHECK-OPAQ-NEXT:    .long 12 @ 0xc
 ; CHECK-OPAQ-NEXT:    .long 28 @ 0x1c
 ; CHECK-OPAQ-NEXT:    .long 44 @ 0x2c
+bb:
   %a = getelementptr i8, i8* %base, <4 x i32> <i32 0, i32 16, i32 32, i32 48>
   %b = bitcast <4 x i8*> %a to <4 x i32*>
   %c = getelementptr inbounds i32, <4 x i32*> %b, i32 -1
@@ -874,7 +876,7 @@ define arm_aapcs_vfpcc <4 x i32> @gepconstoff_i8(i8* %base) {
 
 define arm_aapcs_vfpcc <4 x i32> @gepconstoff3_i16(i16* %base) {
 ; CHECK-STD-LABEL: gepconstoff3_i16:
-; CHECK-STD:       @ %bb.0:
+; CHECK-STD:       @ %bb.0: @ %bb
 ; CHECK-STD-NEXT:    adr r1, .LCPI32_0
 ; CHECK-STD-NEXT:    vldrw.u32 q0, [r1]
 ; CHECK-STD-NEXT:    vadd.i32 q1, q0, r0
@@ -889,7 +891,7 @@ define arm_aapcs_vfpcc <4 x i32> @gepconstoff3_i16(i16* %base) {
 ; CHECK-STD-NEXT:    .long 280 @ 0x118
 ;
 ; CHECK-OPAQ-LABEL: gepconstoff3_i16:
-; CHECK-OPAQ:       @ %bb.0:
+; CHECK-OPAQ:       @ %bb.0: @ %bb
 ; CHECK-OPAQ-NEXT:    adr r1, .LCPI32_0
 ; CHECK-OPAQ-NEXT:    vldrw.u32 q1, [r1]
 ; CHECK-OPAQ-NEXT:    vldrw.u32 q0, [r0, q1]
@@ -901,6 +903,7 @@ define arm_aapcs_vfpcc <4 x i32> @gepconstoff3_i16(i16* %base) {
 ; CHECK-OPAQ-NEXT:    .long 18 @ 0x12
 ; CHECK-OPAQ-NEXT:    .long 58 @ 0x3a
 ; CHECK-OPAQ-NEXT:    .long 280 @ 0x118
+bb:
   %a = getelementptr i16, i16* %base, <4 x i32> <i32 0, i32 16, i32 32, i32 48>
   %b = bitcast <4 x i16*> %a to <4 x i8*>
   %c = getelementptr i8, <4 x i8*> %b, <4 x i32> <i32 16, i32 -10, i32 -2, i32 188>

diff  --git a/llvm/test/CodeGen/Thumb2/mve-postinc-dct.ll b/llvm/test/CodeGen/Thumb2/mve-postinc-dct.ll
index 0285b11710987..569699cf66cb2 100644
--- a/llvm/test/CodeGen/Thumb2/mve-postinc-dct.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-postinc-dct.ll
@@ -47,52 +47,52 @@ define void @DCT_mve1(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float*
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, pc}
 entry:
   %NumInputs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 2
-  %0 = load i32, i32* %NumInputs, align 4
+  %i = load i32, i32* %NumInputs, align 4
   %NumFilters = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 1
-  %1 = load i32, i32* %NumFilters, align 4
+  %i1 = load i32, i32* %NumFilters, align 4
   %pDCTCoefs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 0
-  %2 = load float*, float** %pDCTCoefs, align 4
-  %cmp = icmp ugt i32 %0, 1
+  %i2 = load float*, float** %pDCTCoefs, align 4
+  %cmp = icmp ugt i32 %i, 1
   tail call void @llvm.assume(i1 %cmp)
-  %sub = add i32 %1, -1
+  %sub = add i32 %i1, -1
   %cmp350 = icmp ugt i32 %sub, 1
   br i1 %cmp350, label %for.body.preheader, label %for.cond.cleanup
 
 for.body.preheader:                               ; preds = %entry
-  %n.rnd.up = add i32 %0, 3
+  %n.rnd.up = add i32 %i, 3
   %n.vec = and i32 %n.rnd.up, -4
   br label %for.body
 
 for.cond.cleanup:                                 ; preds = %middle.block, %entry
   ret void
 
-for.body:                                         ; preds = %for.body.preheader, %middle.block
+for.body:                                         ; preds = %middle.block, %for.body.preheader
   %k2.051 = phi i32 [ %add16, %middle.block ], [ 1, %for.body.preheader ]
-  %mul4 = mul i32 %k2.051, %0
+  %mul4 = mul i32 %k2.051, %i
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %for.body
   %index = phi i32 [ 0, %for.body ], [ %index.next, %vector.body ]
-  %vec.phi = phi <4 x float> [ zeroinitializer, %for.body ], [ %10, %vector.body ]
-  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %0)
-  %3 = getelementptr inbounds float, float* %pIn, i32 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %4, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %5 = add i32 %index, %mul4
-  %6 = getelementptr inbounds float, float* %2, i32 %5
-  %7 = bitcast float* %6 to <4 x float>*
-  %wide.masked.load53 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %7, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %8 = fmul fast <4 x float> %wide.masked.load53, %wide.masked.load
-  %9 = fadd fast <4 x float> %8, %vec.phi
-  %10 = select <4 x i1> %active.lane.mask, <4 x float> %9, <4 x float> %vec.phi
+  %vec.phi = phi <4 x float> [ zeroinitializer, %for.body ], [ %i10, %vector.body ]
+  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %i)
+  %i3 = getelementptr inbounds float, float* %pIn, i32 %index
+  %i4 = bitcast float* %i3 to <4 x float>*
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i4, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i5 = add i32 %index, %mul4
+  %i6 = getelementptr inbounds float, float* %i2, i32 %i5
+  %i7 = bitcast float* %i6 to <4 x float>*
+  %wide.masked.load53 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i7, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i8 = fmul fast <4 x float> %wide.masked.load53, %wide.masked.load
+  %i9 = fadd fast <4 x float> %i8, %vec.phi
+  %i10 = select <4 x i1> %active.lane.mask, <4 x float> %i9, <4 x float> %vec.phi
   %index.next = add i32 %index, 4
-  %11 = icmp eq i32 %index.next, %n.vec
-  br i1 %11, label %middle.block, label %vector.body
+  %i11 = icmp eq i32 %index.next, %n.vec
+  br i1 %i11, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
-  %12 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %10)
+  %i12 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i10)
   %arrayidx14 = getelementptr inbounds float, float* %pOut, i32 %k2.051
-  store float %12, float* %arrayidx14, align 4
+  store float %i12, float* %arrayidx14, align 4
   %add16 = add nuw i32 %k2.051, 1
   %exitcond52.not = icmp eq i32 %add16, %sub
   br i1 %exitcond52.not, label %for.cond.cleanup, label %for.body
@@ -158,65 +158,65 @@ define void @DCT_mve2(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float*
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
   %NumInputs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 2
-  %0 = load i32, i32* %NumInputs, align 4
+  %i = load i32, i32* %NumInputs, align 4
   %NumFilters = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 1
-  %1 = load i32, i32* %NumFilters, align 4
+  %i1 = load i32, i32* %NumFilters, align 4
   %pDCTCoefs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 0
-  %2 = load float*, float** %pDCTCoefs, align 4
-  %cmp = icmp ugt i32 %0, 1
+  %i2 = load float*, float** %pDCTCoefs, align 4
+  %cmp = icmp ugt i32 %i, 1
   tail call void @llvm.assume(i1 %cmp)
-  %sub = add i32 %1, -2
+  %sub = add i32 %i1, -2
   %cmp371 = icmp ugt i32 %sub, 1
   br i1 %cmp371, label %for.body.preheader, label %for.cond.cleanup
 
 for.body.preheader:                               ; preds = %entry
-  %n.rnd.up = add i32 %0, 3
+  %n.rnd.up = add i32 %i, 3
   %n.vec = and i32 %n.rnd.up, -4
   br label %for.body
 
 for.cond.cleanup:                                 ; preds = %middle.block, %entry
   ret void
 
-for.body:                                         ; preds = %for.body.preheader, %middle.block
+for.body:                                         ; preds = %middle.block, %for.body.preheader
   %k2.072 = phi i32 [ %add25, %middle.block ], [ 1, %for.body.preheader ]
-  %mul4 = mul i32 %k2.072, %0
+  %mul4 = mul i32 %k2.072, %i
   %add = add nuw i32 %k2.072, 1
-  %mul5 = mul i32 %add, %0
+  %mul5 = mul i32 %add, %i
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %for.body
   %index = phi i32 [ 0, %for.body ], [ %index.next, %vector.body ]
-  %vec.phi = phi <4 x float> [ zeroinitializer, %for.body ], [ %15, %vector.body ]
-  %vec.phi73 = phi <4 x float> [ zeroinitializer, %for.body ], [ %16, %vector.body ]
-  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %0)
-  %3 = getelementptr inbounds float, float* %pIn, i32 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %4, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %5 = add i32 %index, %mul4
-  %6 = getelementptr inbounds float, float* %2, i32 %5
-  %7 = bitcast float* %6 to <4 x float>*
-  %wide.masked.load74 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %7, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %8 = fmul fast <4 x float> %wide.masked.load74, %wide.masked.load
-  %9 = fadd fast <4 x float> %8, %vec.phi73
-  %10 = add i32 %index, %mul5
-  %11 = getelementptr inbounds float, float* %2, i32 %10
-  %12 = bitcast float* %11 to <4 x float>*
-  %wide.masked.load75 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %12, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %13 = fmul fast <4 x float> %wide.masked.load75, %wide.masked.load
-  %14 = fadd fast <4 x float> %13, %vec.phi
-  %15 = select <4 x i1> %active.lane.mask, <4 x float> %14, <4 x float> %vec.phi
-  %16 = select <4 x i1> %active.lane.mask, <4 x float> %9, <4 x float> %vec.phi73
+  %vec.phi = phi <4 x float> [ zeroinitializer, %for.body ], [ %i15, %vector.body ]
+  %vec.phi73 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i16, %vector.body ]
+  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %i)
+  %i3 = getelementptr inbounds float, float* %pIn, i32 %index
+  %i4 = bitcast float* %i3 to <4 x float>*
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i4, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i5 = add i32 %index, %mul4
+  %i6 = getelementptr inbounds float, float* %i2, i32 %i5
+  %i7 = bitcast float* %i6 to <4 x float>*
+  %wide.masked.load74 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i7, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i8 = fmul fast <4 x float> %wide.masked.load74, %wide.masked.load
+  %i9 = fadd fast <4 x float> %i8, %vec.phi73
+  %i10 = add i32 %index, %mul5
+  %i11 = getelementptr inbounds float, float* %i2, i32 %i10
+  %i12 = bitcast float* %i11 to <4 x float>*
+  %wide.masked.load75 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i12, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i13 = fmul fast <4 x float> %wide.masked.load75, %wide.masked.load
+  %i14 = fadd fast <4 x float> %i13, %vec.phi
+  %i15 = select <4 x i1> %active.lane.mask, <4 x float> %i14, <4 x float> %vec.phi
+  %i16 = select <4 x i1> %active.lane.mask, <4 x float> %i9, <4 x float> %vec.phi73
   %index.next = add i32 %index, 4
-  %17 = icmp eq i32 %index.next, %n.vec
-  br i1 %17, label %middle.block, label %vector.body
+  %i17 = icmp eq i32 %index.next, %n.vec
+  br i1 %i17, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
-  %18 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %16)
-  %19 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %15)
+  %i18 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i16)
+  %i19 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i15)
   %arrayidx21 = getelementptr inbounds float, float* %pOut, i32 %k2.072
-  store float %18, float* %arrayidx21, align 4
+  store float %i18, float* %arrayidx21, align 4
   %arrayidx23 = getelementptr inbounds float, float* %pOut, i32 %add
-  store float %19, float* %arrayidx23, align 4
+  store float %i19, float* %arrayidx23, align 4
   %add25 = add i32 %k2.072, 2
   %cmp3 = icmp ult i32 %add25, %sub
   br i1 %cmp3, label %for.body, label %for.cond.cleanup
@@ -315,78 +315,78 @@ define void @DCT_mve3(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float*
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
   %NumInputs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 2
-  %0 = load i32, i32* %NumInputs, align 4
+  %i = load i32, i32* %NumInputs, align 4
   %NumFilters = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 1
-  %1 = load i32, i32* %NumFilters, align 4
+  %i1 = load i32, i32* %NumFilters, align 4
   %pDCTCoefs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 0
-  %2 = load float*, float** %pDCTCoefs, align 4
-  %cmp = icmp ugt i32 %0, 1
+  %i2 = load float*, float** %pDCTCoefs, align 4
+  %cmp = icmp ugt i32 %i, 1
   tail call void @llvm.assume(i1 %cmp)
-  %sub = add i32 %1, -3
+  %sub = add i32 %i1, -3
   %cmp392 = icmp ugt i32 %sub, 1
   br i1 %cmp392, label %for.body.preheader, label %for.cond.cleanup
 
 for.body.preheader:                               ; preds = %entry
-  %n.rnd.up = add i32 %0, 3
+  %n.rnd.up = add i32 %i, 3
   %n.vec = and i32 %n.rnd.up, -4
   br label %for.body
 
 for.cond.cleanup:                                 ; preds = %middle.block, %entry
   ret void
 
-for.body:                                         ; preds = %for.body.preheader, %middle.block
+for.body:                                         ; preds = %middle.block, %for.body.preheader
   %k2.093 = phi i32 [ %add34, %middle.block ], [ 1, %for.body.preheader ]
-  %mul4 = mul i32 %k2.093, %0
+  %mul4 = mul i32 %k2.093, %i
   %add = add nuw i32 %k2.093, 1
-  %mul5 = mul i32 %add, %0
+  %mul5 = mul i32 %add, %i
   %add6 = add i32 %k2.093, 2
-  %mul7 = mul i32 %add6, %0
+  %mul7 = mul i32 %add6, %i
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %for.body
   %index = phi i32 [ 0, %for.body ], [ %index.next, %vector.body ]
-  %vec.phi = phi <4 x float> [ zeroinitializer, %for.body ], [ %20, %vector.body ]
-  %vec.phi94 = phi <4 x float> [ zeroinitializer, %for.body ], [ %21, %vector.body ]
-  %vec.phi95 = phi <4 x float> [ zeroinitializer, %for.body ], [ %22, %vector.body ]
-  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %0)
-  %3 = getelementptr inbounds float, float* %pIn, i32 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %4, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %5 = add i32 %index, %mul4
-  %6 = getelementptr inbounds float, float* %2, i32 %5
-  %7 = bitcast float* %6 to <4 x float>*
-  %wide.masked.load96 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %7, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %8 = fmul fast <4 x float> %wide.masked.load96, %wide.masked.load
-  %9 = fadd fast <4 x float> %8, %vec.phi95
-  %10 = add i32 %index, %mul5
-  %11 = getelementptr inbounds float, float* %2, i32 %10
-  %12 = bitcast float* %11 to <4 x float>*
-  %wide.masked.load97 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %12, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %13 = fmul fast <4 x float> %wide.masked.load97, %wide.masked.load
-  %14 = fadd fast <4 x float> %13, %vec.phi94
-  %15 = add i32 %index, %mul7
-  %16 = getelementptr inbounds float, float* %2, i32 %15
-  %17 = bitcast float* %16 to <4 x float>*
-  %wide.masked.load98 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %17, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %18 = fmul fast <4 x float> %wide.masked.load98, %wide.masked.load
-  %19 = fadd fast <4 x float> %18, %vec.phi
-  %20 = select <4 x i1> %active.lane.mask, <4 x float> %19, <4 x float> %vec.phi
-  %21 = select <4 x i1> %active.lane.mask, <4 x float> %14, <4 x float> %vec.phi94
-  %22 = select <4 x i1> %active.lane.mask, <4 x float> %9, <4 x float> %vec.phi95
+  %vec.phi = phi <4 x float> [ zeroinitializer, %for.body ], [ %i20, %vector.body ]
+  %vec.phi94 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i21, %vector.body ]
+  %vec.phi95 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i22, %vector.body ]
+  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %i)
+  %i3 = getelementptr inbounds float, float* %pIn, i32 %index
+  %i4 = bitcast float* %i3 to <4 x float>*
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i4, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i5 = add i32 %index, %mul4
+  %i6 = getelementptr inbounds float, float* %i2, i32 %i5
+  %i7 = bitcast float* %i6 to <4 x float>*
+  %wide.masked.load96 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i7, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i8 = fmul fast <4 x float> %wide.masked.load96, %wide.masked.load
+  %i9 = fadd fast <4 x float> %i8, %vec.phi95
+  %i10 = add i32 %index, %mul5
+  %i11 = getelementptr inbounds float, float* %i2, i32 %i10
+  %i12 = bitcast float* %i11 to <4 x float>*
+  %wide.masked.load97 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i12, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i13 = fmul fast <4 x float> %wide.masked.load97, %wide.masked.load
+  %i14 = fadd fast <4 x float> %i13, %vec.phi94
+  %i15 = add i32 %index, %mul7
+  %i16 = getelementptr inbounds float, float* %i2, i32 %i15
+  %i17 = bitcast float* %i16 to <4 x float>*
+  %wide.masked.load98 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i17, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i18 = fmul fast <4 x float> %wide.masked.load98, %wide.masked.load
+  %i19 = fadd fast <4 x float> %i18, %vec.phi
+  %i20 = select <4 x i1> %active.lane.mask, <4 x float> %i19, <4 x float> %vec.phi
+  %i21 = select <4 x i1> %active.lane.mask, <4 x float> %i14, <4 x float> %vec.phi94
+  %i22 = select <4 x i1> %active.lane.mask, <4 x float> %i9, <4 x float> %vec.phi95
   %index.next = add i32 %index, 4
-  %23 = icmp eq i32 %index.next, %n.vec
-  br i1 %23, label %middle.block, label %vector.body
+  %i23 = icmp eq i32 %index.next, %n.vec
+  br i1 %i23, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
-  %24 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %22)
-  %25 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %21)
-  %26 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %20)
+  %i24 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i22)
+  %i25 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i21)
+  %i26 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i20)
   %arrayidx28 = getelementptr inbounds float, float* %pOut, i32 %k2.093
-  store float %24, float* %arrayidx28, align 4
+  store float %i24, float* %arrayidx28, align 4
   %arrayidx30 = getelementptr inbounds float, float* %pOut, i32 %add
-  store float %25, float* %arrayidx30, align 4
+  store float %i25, float* %arrayidx30, align 4
   %arrayidx32 = getelementptr inbounds float, float* %pOut, i32 %add6
-  store float %26, float* %arrayidx32, align 4
+  store float %i26, float* %arrayidx32, align 4
   %add34 = add i32 %k2.093, 3
   %cmp3 = icmp ult i32 %add34, %sub
   br i1 %cmp3, label %for.body, label %for.cond.cleanup
@@ -501,91 +501,91 @@ define void @DCT_mve4(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float*
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
   %NumInputs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 2
-  %0 = load i32, i32* %NumInputs, align 4
+  %i = load i32, i32* %NumInputs, align 4
   %NumFilters = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 1
-  %1 = load i32, i32* %NumFilters, align 4
+  %i1 = load i32, i32* %NumFilters, align 4
   %pDCTCoefs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 0
-  %2 = load float*, float** %pDCTCoefs, align 4
-  %cmp = icmp ugt i32 %0, 1
+  %i2 = load float*, float** %pDCTCoefs, align 4
+  %cmp = icmp ugt i32 %i, 1
   tail call void @llvm.assume(i1 %cmp)
-  %sub = add i32 %1, -4
+  %sub = add i32 %i1, -4
   %cmp3113 = icmp ugt i32 %sub, 1
   br i1 %cmp3113, label %for.body.preheader, label %for.cond.cleanup
 
 for.body.preheader:                               ; preds = %entry
-  %n.rnd.up = add i32 %0, 3
+  %n.rnd.up = add i32 %i, 3
   %n.vec = and i32 %n.rnd.up, -4
   br label %for.body
 
 for.cond.cleanup:                                 ; preds = %middle.block, %entry
   ret void
 
-for.body:                                         ; preds = %for.body.preheader, %middle.block
+for.body:                                         ; preds = %middle.block, %for.body.preheader
   %k2.0114 = phi i32 [ %add43, %middle.block ], [ 1, %for.body.preheader ]
-  %mul4 = mul i32 %k2.0114, %0
+  %mul4 = mul i32 %k2.0114, %i
   %add = add nuw nsw i32 %k2.0114, 1
-  %mul5 = mul i32 %add, %0
+  %mul5 = mul i32 %add, %i
   %add6 = add nuw nsw i32 %k2.0114, 2
-  %mul7 = mul i32 %add6, %0
+  %mul7 = mul i32 %add6, %i
   %add8 = add i32 %k2.0114, 3
-  %mul9 = mul i32 %add8, %0
+  %mul9 = mul i32 %add8, %i
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %for.body
   %index = phi i32 [ 0, %for.body ], [ %index.next, %vector.body ]
-  %vec.phi = phi <4 x float> [ zeroinitializer, %for.body ], [ %25, %vector.body ]
-  %vec.phi115 = phi <4 x float> [ zeroinitializer, %for.body ], [ %26, %vector.body ]
-  %vec.phi116 = phi <4 x float> [ zeroinitializer, %for.body ], [ %27, %vector.body ]
-  %vec.phi117 = phi <4 x float> [ zeroinitializer, %for.body ], [ %28, %vector.body ]
-  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %0)
-  %3 = getelementptr inbounds float, float* %pIn, i32 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %4, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %5 = add i32 %index, %mul4
-  %6 = getelementptr inbounds float, float* %2, i32 %5
-  %7 = bitcast float* %6 to <4 x float>*
-  %wide.masked.load118 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %7, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %8 = fmul fast <4 x float> %wide.masked.load118, %wide.masked.load
-  %9 = fadd fast <4 x float> %8, %vec.phi116
-  %10 = add i32 %index, %mul5
-  %11 = getelementptr inbounds float, float* %2, i32 %10
-  %12 = bitcast float* %11 to <4 x float>*
-  %wide.masked.load119 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %12, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %13 = fmul fast <4 x float> %wide.masked.load119, %wide.masked.load
-  %14 = fadd fast <4 x float> %13, %vec.phi117
-  %15 = add i32 %index, %mul7
-  %16 = getelementptr inbounds float, float* %2, i32 %15
-  %17 = bitcast float* %16 to <4 x float>*
-  %wide.masked.load120 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %17, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %18 = fmul fast <4 x float> %wide.masked.load120, %wide.masked.load
-  %19 = fadd fast <4 x float> %18, %vec.phi115
-  %20 = add i32 %index, %mul9
-  %21 = getelementptr inbounds float, float* %2, i32 %20
-  %22 = bitcast float* %21 to <4 x float>*
-  %wide.masked.load121 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %22, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %23 = fmul fast <4 x float> %wide.masked.load121, %wide.masked.load
-  %24 = fadd fast <4 x float> %23, %vec.phi
-  %25 = select <4 x i1> %active.lane.mask, <4 x float> %24, <4 x float> %vec.phi
-  %26 = select <4 x i1> %active.lane.mask, <4 x float> %19, <4 x float> %vec.phi115
-  %27 = select <4 x i1> %active.lane.mask, <4 x float> %9, <4 x float> %vec.phi116
-  %28 = select <4 x i1> %active.lane.mask, <4 x float> %14, <4 x float> %vec.phi117
+  %vec.phi = phi <4 x float> [ zeroinitializer, %for.body ], [ %i25, %vector.body ]
+  %vec.phi115 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i26, %vector.body ]
+  %vec.phi116 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i27, %vector.body ]
+  %vec.phi117 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i28, %vector.body ]
+  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %i)
+  %i3 = getelementptr inbounds float, float* %pIn, i32 %index
+  %i4 = bitcast float* %i3 to <4 x float>*
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i4, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i5 = add i32 %index, %mul4
+  %i6 = getelementptr inbounds float, float* %i2, i32 %i5
+  %i7 = bitcast float* %i6 to <4 x float>*
+  %wide.masked.load118 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i7, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i8 = fmul fast <4 x float> %wide.masked.load118, %wide.masked.load
+  %i9 = fadd fast <4 x float> %i8, %vec.phi116
+  %i10 = add i32 %index, %mul5
+  %i11 = getelementptr inbounds float, float* %i2, i32 %i10
+  %i12 = bitcast float* %i11 to <4 x float>*
+  %wide.masked.load119 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i12, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i13 = fmul fast <4 x float> %wide.masked.load119, %wide.masked.load
+  %i14 = fadd fast <4 x float> %i13, %vec.phi117
+  %i15 = add i32 %index, %mul7
+  %i16 = getelementptr inbounds float, float* %i2, i32 %i15
+  %i17 = bitcast float* %i16 to <4 x float>*
+  %wide.masked.load120 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i17, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i18 = fmul fast <4 x float> %wide.masked.load120, %wide.masked.load
+  %i19 = fadd fast <4 x float> %i18, %vec.phi115
+  %i20 = add i32 %index, %mul9
+  %i21 = getelementptr inbounds float, float* %i2, i32 %i20
+  %i22 = bitcast float* %i21 to <4 x float>*
+  %wide.masked.load121 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i22, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i23 = fmul fast <4 x float> %wide.masked.load121, %wide.masked.load
+  %i24 = fadd fast <4 x float> %i23, %vec.phi
+  %i25 = select <4 x i1> %active.lane.mask, <4 x float> %i24, <4 x float> %vec.phi
+  %i26 = select <4 x i1> %active.lane.mask, <4 x float> %i19, <4 x float> %vec.phi115
+  %i27 = select <4 x i1> %active.lane.mask, <4 x float> %i9, <4 x float> %vec.phi116
+  %i28 = select <4 x i1> %active.lane.mask, <4 x float> %i14, <4 x float> %vec.phi117
   %index.next = add i32 %index, 4
-  %29 = icmp eq i32 %index.next, %n.vec
-  br i1 %29, label %middle.block, label %vector.body
+  %i29 = icmp eq i32 %index.next, %n.vec
+  br i1 %i29, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
-  %30 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %28)
-  %31 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %27)
-  %32 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %26)
-  %33 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %25)
+  %i30 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i28)
+  %i31 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i27)
+  %i32 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i26)
+  %i33 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i25)
   %arrayidx35 = getelementptr inbounds float, float* %pOut, i32 %k2.0114
-  store float %31, float* %arrayidx35, align 4
+  store float %i31, float* %arrayidx35, align 4
   %arrayidx37 = getelementptr inbounds float, float* %pOut, i32 %add
-  store float %30, float* %arrayidx37, align 4
+  store float %i30, float* %arrayidx37, align 4
   %arrayidx39 = getelementptr inbounds float, float* %pOut, i32 %add6
-  store float %32, float* %arrayidx39, align 4
+  store float %i32, float* %arrayidx39, align 4
   %arrayidx41 = getelementptr inbounds float, float* %pOut, i32 %add8
-  store float %33, float* %arrayidx41, align 4
+  store float %i33, float* %arrayidx41, align 4
   %add43 = add i32 %k2.0114, 4
   %cmp3 = icmp ult i32 %add43, %sub
   br i1 %cmp3, label %for.body, label %for.cond.cleanup
@@ -703,104 +703,104 @@ define void @DCT_mve5(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float*
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
   %NumInputs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 2
-  %0 = load i32, i32* %NumInputs, align 4
+  %i = load i32, i32* %NumInputs, align 4
   %NumFilters = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 1
-  %1 = load i32, i32* %NumFilters, align 4
+  %i1 = load i32, i32* %NumFilters, align 4
   %pDCTCoefs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 0
-  %2 = load float*, float** %pDCTCoefs, align 4
-  %cmp = icmp ugt i32 %0, 1
+  %i2 = load float*, float** %pDCTCoefs, align 4
+  %cmp = icmp ugt i32 %i, 1
   tail call void @llvm.assume(i1 %cmp)
-  %sub = add i32 %1, -5
+  %sub = add i32 %i1, -5
   %cmp3134 = icmp ugt i32 %sub, 1
   br i1 %cmp3134, label %for.body.preheader, label %for.cond.cleanup
 
 for.body.preheader:                               ; preds = %entry
-  %n.rnd.up = add i32 %0, 3
+  %n.rnd.up = add i32 %i, 3
   %n.vec = and i32 %n.rnd.up, -4
   br label %for.body
 
 for.cond.cleanup:                                 ; preds = %middle.block, %entry
   ret void
 
-for.body:                                         ; preds = %for.body.preheader, %middle.block
+for.body:                                         ; preds = %middle.block, %for.body.preheader
   %k2.0135 = phi i32 [ %add52, %middle.block ], [ 1, %for.body.preheader ]
-  %mul4 = mul i32 %k2.0135, %0
+  %mul4 = mul i32 %k2.0135, %i
   %add = add nuw i32 %k2.0135, 1
-  %mul5 = mul i32 %add, %0
+  %mul5 = mul i32 %add, %i
   %add6 = add i32 %k2.0135, 2
-  %mul7 = mul i32 %add6, %0
+  %mul7 = mul i32 %add6, %i
   %add8 = add i32 %k2.0135, 3
-  %mul9 = mul i32 %add8, %0
+  %mul9 = mul i32 %add8, %i
   %add10 = add i32 %k2.0135, 4
-  %mul11 = mul i32 %add10, %0
+  %mul11 = mul i32 %add10, %i
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %for.body
   %index = phi i32 [ 0, %for.body ], [ %index.next, %vector.body ]
-  %vec.phi = phi <4 x float> [ zeroinitializer, %for.body ], [ %30, %vector.body ]
-  %vec.phi136 = phi <4 x float> [ zeroinitializer, %for.body ], [ %31, %vector.body ]
-  %vec.phi137 = phi <4 x float> [ zeroinitializer, %for.body ], [ %32, %vector.body ]
-  %vec.phi138 = phi <4 x float> [ zeroinitializer, %for.body ], [ %33, %vector.body ]
-  %vec.phi139 = phi <4 x float> [ zeroinitializer, %for.body ], [ %34, %vector.body ]
-  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %0)
-  %3 = getelementptr inbounds float, float* %pIn, i32 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %4, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %5 = add i32 %index, %mul4
-  %6 = getelementptr inbounds float, float* %2, i32 %5
-  %7 = bitcast float* %6 to <4 x float>*
-  %wide.masked.load140 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %7, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %8 = fmul fast <4 x float> %wide.masked.load140, %wide.masked.load
-  %9 = fadd fast <4 x float> %8, %vec.phi137
-  %10 = add i32 %index, %mul5
-  %11 = getelementptr inbounds float, float* %2, i32 %10
-  %12 = bitcast float* %11 to <4 x float>*
-  %wide.masked.load141 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %12, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %13 = fmul fast <4 x float> %wide.masked.load141, %wide.masked.load
-  %14 = fadd fast <4 x float> %13, %vec.phi139
-  %15 = add i32 %index, %mul7
-  %16 = getelementptr inbounds float, float* %2, i32 %15
-  %17 = bitcast float* %16 to <4 x float>*
-  %wide.masked.load142 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %17, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %18 = fmul fast <4 x float> %wide.masked.load142, %wide.masked.load
-  %19 = fadd fast <4 x float> %18, %vec.phi138
-  %20 = add i32 %index, %mul9
-  %21 = getelementptr inbounds float, float* %2, i32 %20
-  %22 = bitcast float* %21 to <4 x float>*
-  %wide.masked.load143 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %22, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %23 = fmul fast <4 x float> %wide.masked.load143, %wide.masked.load
-  %24 = fadd fast <4 x float> %23, %vec.phi136
-  %25 = add i32 %index, %mul11
-  %26 = getelementptr inbounds float, float* %2, i32 %25
-  %27 = bitcast float* %26 to <4 x float>*
-  %wide.masked.load144 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %27, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %28 = fmul fast <4 x float> %wide.masked.load144, %wide.masked.load
-  %29 = fadd fast <4 x float> %28, %vec.phi
-  %30 = select <4 x i1> %active.lane.mask, <4 x float> %29, <4 x float> %vec.phi
-  %31 = select <4 x i1> %active.lane.mask, <4 x float> %24, <4 x float> %vec.phi136
-  %32 = select <4 x i1> %active.lane.mask, <4 x float> %9, <4 x float> %vec.phi137
-  %33 = select <4 x i1> %active.lane.mask, <4 x float> %19, <4 x float> %vec.phi138
-  %34 = select <4 x i1> %active.lane.mask, <4 x float> %14, <4 x float> %vec.phi139
+  %vec.phi = phi <4 x float> [ zeroinitializer, %for.body ], [ %i30, %vector.body ]
+  %vec.phi136 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i31, %vector.body ]
+  %vec.phi137 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i32, %vector.body ]
+  %vec.phi138 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i33, %vector.body ]
+  %vec.phi139 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i34, %vector.body ]
+  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %i)
+  %i3 = getelementptr inbounds float, float* %pIn, i32 %index
+  %i4 = bitcast float* %i3 to <4 x float>*
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i4, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i5 = add i32 %index, %mul4
+  %i6 = getelementptr inbounds float, float* %i2, i32 %i5
+  %i7 = bitcast float* %i6 to <4 x float>*
+  %wide.masked.load140 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i7, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i8 = fmul fast <4 x float> %wide.masked.load140, %wide.masked.load
+  %i9 = fadd fast <4 x float> %i8, %vec.phi137
+  %i10 = add i32 %index, %mul5
+  %i11 = getelementptr inbounds float, float* %i2, i32 %i10
+  %i12 = bitcast float* %i11 to <4 x float>*
+  %wide.masked.load141 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i12, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i13 = fmul fast <4 x float> %wide.masked.load141, %wide.masked.load
+  %i14 = fadd fast <4 x float> %i13, %vec.phi139
+  %i15 = add i32 %index, %mul7
+  %i16 = getelementptr inbounds float, float* %i2, i32 %i15
+  %i17 = bitcast float* %i16 to <4 x float>*
+  %wide.masked.load142 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i17, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i18 = fmul fast <4 x float> %wide.masked.load142, %wide.masked.load
+  %i19 = fadd fast <4 x float> %i18, %vec.phi138
+  %i20 = add i32 %index, %mul9
+  %i21 = getelementptr inbounds float, float* %i2, i32 %i20
+  %i22 = bitcast float* %i21 to <4 x float>*
+  %wide.masked.load143 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i22, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i23 = fmul fast <4 x float> %wide.masked.load143, %wide.masked.load
+  %i24 = fadd fast <4 x float> %i23, %vec.phi136
+  %i25 = add i32 %index, %mul11
+  %i26 = getelementptr inbounds float, float* %i2, i32 %i25
+  %i27 = bitcast float* %i26 to <4 x float>*
+  %wide.masked.load144 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i27, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i28 = fmul fast <4 x float> %wide.masked.load144, %wide.masked.load
+  %i29 = fadd fast <4 x float> %i28, %vec.phi
+  %i30 = select <4 x i1> %active.lane.mask, <4 x float> %i29, <4 x float> %vec.phi
+  %i31 = select <4 x i1> %active.lane.mask, <4 x float> %i24, <4 x float> %vec.phi136
+  %i32 = select <4 x i1> %active.lane.mask, <4 x float> %i9, <4 x float> %vec.phi137
+  %i33 = select <4 x i1> %active.lane.mask, <4 x float> %i19, <4 x float> %vec.phi138
+  %i34 = select <4 x i1> %active.lane.mask, <4 x float> %i14, <4 x float> %vec.phi139
   %index.next = add i32 %index, 4
-  %35 = icmp eq i32 %index.next, %n.vec
-  br i1 %35, label %middle.block, label %vector.body
+  %i35 = icmp eq i32 %index.next, %n.vec
+  br i1 %i35, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
-  %36 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %34)
-  %37 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %33)
-  %38 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %32)
-  %39 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %31)
-  %40 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %30)
+  %i36 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i34)
+  %i37 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i33)
+  %i38 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i32)
+  %i39 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i31)
+  %i40 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i30)
   %arrayidx42 = getelementptr inbounds float, float* %pOut, i32 %k2.0135
-  store float %38, float* %arrayidx42, align 4
+  store float %i38, float* %arrayidx42, align 4
   %arrayidx44 = getelementptr inbounds float, float* %pOut, i32 %add
-  store float %36, float* %arrayidx44, align 4
+  store float %i36, float* %arrayidx44, align 4
   %arrayidx46 = getelementptr inbounds float, float* %pOut, i32 %add6
-  store float %37, float* %arrayidx46, align 4
+  store float %i37, float* %arrayidx46, align 4
   %arrayidx48 = getelementptr inbounds float, float* %pOut, i32 %add8
-  store float %39, float* %arrayidx48, align 4
+  store float %i39, float* %arrayidx48, align 4
   %arrayidx50 = getelementptr inbounds float, float* %pOut, i32 %add10
-  store float %40, float* %arrayidx50, align 4
+  store float %i40, float* %arrayidx50, align 4
   %add52 = add i32 %k2.0135, 5
   %cmp3 = icmp ult i32 %add52, %sub
   br i1 %cmp3, label %for.body, label %for.cond.cleanup
@@ -930,117 +930,117 @@ define void @DCT_mve6(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float*
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
   %NumInputs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 2
-  %0 = load i32, i32* %NumInputs, align 4
+  %i = load i32, i32* %NumInputs, align 4
   %NumFilters = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 1
-  %1 = load i32, i32* %NumFilters, align 4
+  %i1 = load i32, i32* %NumFilters, align 4
   %pDCTCoefs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 0
-  %2 = load float*, float** %pDCTCoefs, align 4
-  %cmp = icmp ugt i32 %0, 1
+  %i2 = load float*, float** %pDCTCoefs, align 4
+  %cmp = icmp ugt i32 %i, 1
   tail call void @llvm.assume(i1 %cmp)
-  %sub = add i32 %1, -6
+  %sub = add i32 %i1, -6
   %cmp3155 = icmp ugt i32 %sub, 1
   br i1 %cmp3155, label %for.body.preheader, label %for.cond.cleanup
 
 for.body.preheader:                               ; preds = %entry
-  %n.rnd.up = add i32 %0, 3
+  %n.rnd.up = add i32 %i, 3
   %n.vec = and i32 %n.rnd.up, -4
   br label %for.body
 
 for.cond.cleanup:                                 ; preds = %middle.block, %entry
   ret void
 
-for.body:                                         ; preds = %for.body.preheader, %middle.block
+for.body:                                         ; preds = %middle.block, %for.body.preheader
   %k2.0156 = phi i32 [ %add61, %middle.block ], [ 1, %for.body.preheader ]
-  %mul4 = mul i32 %k2.0156, %0
+  %mul4 = mul i32 %k2.0156, %i
   %add = add nuw i32 %k2.0156, 1
-  %mul5 = mul i32 %add, %0
+  %mul5 = mul i32 %add, %i
   %add6 = add i32 %k2.0156, 2
-  %mul7 = mul i32 %add6, %0
+  %mul7 = mul i32 %add6, %i
   %add8 = add i32 %k2.0156, 3
-  %mul9 = mul i32 %add8, %0
+  %mul9 = mul i32 %add8, %i
   %add10 = add i32 %k2.0156, 4
-  %mul11 = mul i32 %add10, %0
+  %mul11 = mul i32 %add10, %i
   %add12 = add i32 %k2.0156, 5
-  %mul13 = mul i32 %add12, %0
+  %mul13 = mul i32 %add12, %i
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %for.body
   %index = phi i32 [ 0, %for.body ], [ %index.next, %vector.body ]
-  %vec.phi = phi <4 x float> [ zeroinitializer, %for.body ], [ %35, %vector.body ]
-  %vec.phi157 = phi <4 x float> [ zeroinitializer, %for.body ], [ %36, %vector.body ]
-  %vec.phi158 = phi <4 x float> [ zeroinitializer, %for.body ], [ %37, %vector.body ]
-  %vec.phi159 = phi <4 x float> [ zeroinitializer, %for.body ], [ %38, %vector.body ]
-  %vec.phi160 = phi <4 x float> [ zeroinitializer, %for.body ], [ %39, %vector.body ]
-  %vec.phi161 = phi <4 x float> [ zeroinitializer, %for.body ], [ %40, %vector.body ]
-  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %0)
-  %3 = getelementptr inbounds float, float* %pIn, i32 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %4, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %5 = add i32 %index, %mul4
-  %6 = getelementptr inbounds float, float* %2, i32 %5
-  %7 = bitcast float* %6 to <4 x float>*
-  %wide.masked.load162 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %7, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %8 = fmul fast <4 x float> %wide.masked.load162, %wide.masked.load
-  %9 = fadd fast <4 x float> %8, %vec.phi158
-  %10 = add i32 %index, %mul5
-  %11 = getelementptr inbounds float, float* %2, i32 %10
-  %12 = bitcast float* %11 to <4 x float>*
-  %wide.masked.load163 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %12, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %13 = fmul fast <4 x float> %wide.masked.load163, %wide.masked.load
-  %14 = fadd fast <4 x float> %13, %vec.phi160
-  %15 = add i32 %index, %mul7
-  %16 = getelementptr inbounds float, float* %2, i32 %15
-  %17 = bitcast float* %16 to <4 x float>*
-  %wide.masked.load164 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %17, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %18 = fmul fast <4 x float> %wide.masked.load164, %wide.masked.load
-  %19 = fadd fast <4 x float> %18, %vec.phi161
-  %20 = add i32 %index, %mul9
-  %21 = getelementptr inbounds float, float* %2, i32 %20
-  %22 = bitcast float* %21 to <4 x float>*
-  %wide.masked.load165 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %22, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %23 = fmul fast <4 x float> %wide.masked.load165, %wide.masked.load
-  %24 = fadd fast <4 x float> %23, %vec.phi159
-  %25 = add i32 %index, %mul11
-  %26 = getelementptr inbounds float, float* %2, i32 %25
-  %27 = bitcast float* %26 to <4 x float>*
-  %wide.masked.load166 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %27, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %28 = fmul fast <4 x float> %wide.masked.load166, %wide.masked.load
-  %29 = fadd fast <4 x float> %28, %vec.phi157
-  %30 = add i32 %index, %mul13
-  %31 = getelementptr inbounds float, float* %2, i32 %30
-  %32 = bitcast float* %31 to <4 x float>*
-  %wide.masked.load167 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %32, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %33 = fmul fast <4 x float> %wide.masked.load167, %wide.masked.load
-  %34 = fadd fast <4 x float> %33, %vec.phi
-  %35 = select <4 x i1> %active.lane.mask, <4 x float> %34, <4 x float> %vec.phi
-  %36 = select <4 x i1> %active.lane.mask, <4 x float> %29, <4 x float> %vec.phi157
-  %37 = select <4 x i1> %active.lane.mask, <4 x float> %9, <4 x float> %vec.phi158
-  %38 = select <4 x i1> %active.lane.mask, <4 x float> %24, <4 x float> %vec.phi159
-  %39 = select <4 x i1> %active.lane.mask, <4 x float> %14, <4 x float> %vec.phi160
-  %40 = select <4 x i1> %active.lane.mask, <4 x float> %19, <4 x float> %vec.phi161
+  %vec.phi = phi <4 x float> [ zeroinitializer, %for.body ], [ %i35, %vector.body ]
+  %vec.phi157 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i36, %vector.body ]
+  %vec.phi158 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i37, %vector.body ]
+  %vec.phi159 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i38, %vector.body ]
+  %vec.phi160 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i39, %vector.body ]
+  %vec.phi161 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i40, %vector.body ]
+  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %i)
+  %i3 = getelementptr inbounds float, float* %pIn, i32 %index
+  %i4 = bitcast float* %i3 to <4 x float>*
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i4, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i5 = add i32 %index, %mul4
+  %i6 = getelementptr inbounds float, float* %i2, i32 %i5
+  %i7 = bitcast float* %i6 to <4 x float>*
+  %wide.masked.load162 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i7, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i8 = fmul fast <4 x float> %wide.masked.load162, %wide.masked.load
+  %i9 = fadd fast <4 x float> %i8, %vec.phi158
+  %i10 = add i32 %index, %mul5
+  %i11 = getelementptr inbounds float, float* %i2, i32 %i10
+  %i12 = bitcast float* %i11 to <4 x float>*
+  %wide.masked.load163 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i12, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i13 = fmul fast <4 x float> %wide.masked.load163, %wide.masked.load
+  %i14 = fadd fast <4 x float> %i13, %vec.phi160
+  %i15 = add i32 %index, %mul7
+  %i16 = getelementptr inbounds float, float* %i2, i32 %i15
+  %i17 = bitcast float* %i16 to <4 x float>*
+  %wide.masked.load164 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i17, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i18 = fmul fast <4 x float> %wide.masked.load164, %wide.masked.load
+  %i19 = fadd fast <4 x float> %i18, %vec.phi161
+  %i20 = add i32 %index, %mul9
+  %i21 = getelementptr inbounds float, float* %i2, i32 %i20
+  %i22 = bitcast float* %i21 to <4 x float>*
+  %wide.masked.load165 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i22, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i23 = fmul fast <4 x float> %wide.masked.load165, %wide.masked.load
+  %i24 = fadd fast <4 x float> %i23, %vec.phi159
+  %i25 = add i32 %index, %mul11
+  %i26 = getelementptr inbounds float, float* %i2, i32 %i25
+  %i27 = bitcast float* %i26 to <4 x float>*
+  %wide.masked.load166 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i27, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i28 = fmul fast <4 x float> %wide.masked.load166, %wide.masked.load
+  %i29 = fadd fast <4 x float> %i28, %vec.phi157
+  %i30 = add i32 %index, %mul13
+  %i31 = getelementptr inbounds float, float* %i2, i32 %i30
+  %i32 = bitcast float* %i31 to <4 x float>*
+  %wide.masked.load167 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i32, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i33 = fmul fast <4 x float> %wide.masked.load167, %wide.masked.load
+  %i34 = fadd fast <4 x float> %i33, %vec.phi
+  %i35 = select <4 x i1> %active.lane.mask, <4 x float> %i34, <4 x float> %vec.phi
+  %i36 = select <4 x i1> %active.lane.mask, <4 x float> %i29, <4 x float> %vec.phi157
+  %i37 = select <4 x i1> %active.lane.mask, <4 x float> %i9, <4 x float> %vec.phi158
+  %i38 = select <4 x i1> %active.lane.mask, <4 x float> %i24, <4 x float> %vec.phi159
+  %i39 = select <4 x i1> %active.lane.mask, <4 x float> %i14, <4 x float> %vec.phi160
+  %i40 = select <4 x i1> %active.lane.mask, <4 x float> %i19, <4 x float> %vec.phi161
   %index.next = add i32 %index, 4
-  %41 = icmp eq i32 %index.next, %n.vec
-  br i1 %41, label %middle.block, label %vector.body
+  %i41 = icmp eq i32 %index.next, %n.vec
+  br i1 %i41, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
-  %42 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %40)
-  %43 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %39)
-  %44 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %38)
-  %45 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %37)
-  %46 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %36)
-  %47 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %35)
+  %i42 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i40)
+  %i43 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i39)
+  %i44 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i38)
+  %i45 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i37)
+  %i46 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i36)
+  %i47 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i35)
   %arrayidx49 = getelementptr inbounds float, float* %pOut, i32 %k2.0156
-  store float %45, float* %arrayidx49, align 4
+  store float %i45, float* %arrayidx49, align 4
   %arrayidx51 = getelementptr inbounds float, float* %pOut, i32 %add
-  store float %43, float* %arrayidx51, align 4
+  store float %i43, float* %arrayidx51, align 4
   %arrayidx53 = getelementptr inbounds float, float* %pOut, i32 %add6
-  store float %42, float* %arrayidx53, align 4
+  store float %i42, float* %arrayidx53, align 4
   %arrayidx55 = getelementptr inbounds float, float* %pOut, i32 %add8
-  store float %44, float* %arrayidx55, align 4
+  store float %i44, float* %arrayidx55, align 4
   %arrayidx57 = getelementptr inbounds float, float* %pOut, i32 %add10
-  store float %46, float* %arrayidx57, align 4
+  store float %i46, float* %arrayidx57, align 4
   %arrayidx59 = getelementptr inbounds float, float* %pOut, i32 %add12
-  store float %47, float* %arrayidx59, align 4
+  store float %i47, float* %arrayidx59, align 4
   %add61 = add i32 %k2.0156, 6
   %cmp3 = icmp ult i32 %add61, %sub
   br i1 %cmp3, label %for.body, label %for.cond.cleanup
@@ -1207,130 +1207,130 @@ define void @DCT_mve7(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float*
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
   %NumInputs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 2
-  %0 = load i32, i32* %NumInputs, align 4
+  %i = load i32, i32* %NumInputs, align 4
   %NumFilters = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 1
-  %1 = load i32, i32* %NumFilters, align 4
+  %i1 = load i32, i32* %NumFilters, align 4
   %pDCTCoefs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 0
-  %2 = load float*, float** %pDCTCoefs, align 4
-  %cmp = icmp ugt i32 %0, 1
+  %i2 = load float*, float** %pDCTCoefs, align 4
+  %cmp = icmp ugt i32 %i, 1
   tail call void @llvm.assume(i1 %cmp)
-  %sub = add i32 %1, -7
+  %sub = add i32 %i1, -7
   %cmp3176 = icmp ugt i32 %sub, 1
   br i1 %cmp3176, label %for.body.preheader, label %for.cond.cleanup
 
 for.body.preheader:                               ; preds = %entry
-  %n.rnd.up = add i32 %0, 3
+  %n.rnd.up = add i32 %i, 3
   %n.vec = and i32 %n.rnd.up, -4
   br label %for.body
 
 for.cond.cleanup:                                 ; preds = %middle.block, %entry
   ret void
 
-for.body:                                         ; preds = %for.body.preheader, %middle.block
+for.body:                                         ; preds = %middle.block, %for.body.preheader
   %k2.0177 = phi i32 [ %add70, %middle.block ], [ 1, %for.body.preheader ]
-  %mul4 = mul i32 %k2.0177, %0
+  %mul4 = mul i32 %k2.0177, %i
   %add = add nuw i32 %k2.0177, 1
-  %mul5 = mul i32 %add, %0
+  %mul5 = mul i32 %add, %i
   %add6 = add i32 %k2.0177, 2
-  %mul7 = mul i32 %add6, %0
+  %mul7 = mul i32 %add6, %i
   %add8 = add i32 %k2.0177, 3
-  %mul9 = mul i32 %add8, %0
+  %mul9 = mul i32 %add8, %i
   %add10 = add i32 %k2.0177, 4
-  %mul11 = mul i32 %add10, %0
+  %mul11 = mul i32 %add10, %i
   %add12 = add i32 %k2.0177, 5
-  %mul13 = mul i32 %add12, %0
+  %mul13 = mul i32 %add12, %i
   %add14 = add i32 %k2.0177, 6
-  %mul15 = mul i32 %add14, %0
+  %mul15 = mul i32 %add14, %i
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %for.body
   %index = phi i32 [ 0, %for.body ], [ %index.next, %vector.body ]
-  %vec.phi = phi <4 x float> [ zeroinitializer, %for.body ], [ %40, %vector.body ]
-  %vec.phi178 = phi <4 x float> [ zeroinitializer, %for.body ], [ %41, %vector.body ]
-  %vec.phi179 = phi <4 x float> [ zeroinitializer, %for.body ], [ %42, %vector.body ]
-  %vec.phi180 = phi <4 x float> [ zeroinitializer, %for.body ], [ %43, %vector.body ]
-  %vec.phi181 = phi <4 x float> [ zeroinitializer, %for.body ], [ %44, %vector.body ]
-  %vec.phi182 = phi <4 x float> [ zeroinitializer, %for.body ], [ %45, %vector.body ]
-  %vec.phi183 = phi <4 x float> [ zeroinitializer, %for.body ], [ %46, %vector.body ]
-  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %0)
-  %3 = getelementptr inbounds float, float* %pIn, i32 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %4, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %5 = add i32 %index, %mul4
-  %6 = getelementptr inbounds float, float* %2, i32 %5
-  %7 = bitcast float* %6 to <4 x float>*
-  %wide.masked.load184 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %7, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %8 = fmul fast <4 x float> %wide.masked.load184, %wide.masked.load
-  %9 = fadd fast <4 x float> %8, %vec.phi179
-  %10 = add i32 %index, %mul5
-  %11 = getelementptr inbounds float, float* %2, i32 %10
-  %12 = bitcast float* %11 to <4 x float>*
-  %wide.masked.load185 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %12, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %13 = fmul fast <4 x float> %wide.masked.load185, %wide.masked.load
-  %14 = fadd fast <4 x float> %13, %vec.phi181
-  %15 = add i32 %index, %mul7
-  %16 = getelementptr inbounds float, float* %2, i32 %15
-  %17 = bitcast float* %16 to <4 x float>*
-  %wide.masked.load186 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %17, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %18 = fmul fast <4 x float> %wide.masked.load186, %wide.masked.load
-  %19 = fadd fast <4 x float> %18, %vec.phi183
-  %20 = add i32 %index, %mul9
-  %21 = getelementptr inbounds float, float* %2, i32 %20
-  %22 = bitcast float* %21 to <4 x float>*
-  %wide.masked.load187 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %22, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %23 = fmul fast <4 x float> %wide.masked.load187, %wide.masked.load
-  %24 = fadd fast <4 x float> %23, %vec.phi182
-  %25 = add i32 %index, %mul11
-  %26 = getelementptr inbounds float, float* %2, i32 %25
-  %27 = bitcast float* %26 to <4 x float>*
-  %wide.masked.load188 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %27, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %28 = fmul fast <4 x float> %wide.masked.load188, %wide.masked.load
-  %29 = fadd fast <4 x float> %28, %vec.phi180
-  %30 = add i32 %index, %mul13
-  %31 = getelementptr inbounds float, float* %2, i32 %30
-  %32 = bitcast float* %31 to <4 x float>*
-  %wide.masked.load189 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %32, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %33 = fmul fast <4 x float> %wide.masked.load189, %wide.masked.load
-  %34 = fadd fast <4 x float> %33, %vec.phi178
-  %35 = add i32 %index, %mul15
-  %36 = getelementptr inbounds float, float* %2, i32 %35
-  %37 = bitcast float* %36 to <4 x float>*
-  %wide.masked.load190 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %37, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %38 = fmul fast <4 x float> %wide.masked.load190, %wide.masked.load
-  %39 = fadd fast <4 x float> %38, %vec.phi
-  %40 = select <4 x i1> %active.lane.mask, <4 x float> %39, <4 x float> %vec.phi
-  %41 = select <4 x i1> %active.lane.mask, <4 x float> %34, <4 x float> %vec.phi178
-  %42 = select <4 x i1> %active.lane.mask, <4 x float> %9, <4 x float> %vec.phi179
-  %43 = select <4 x i1> %active.lane.mask, <4 x float> %29, <4 x float> %vec.phi180
-  %44 = select <4 x i1> %active.lane.mask, <4 x float> %14, <4 x float> %vec.phi181
-  %45 = select <4 x i1> %active.lane.mask, <4 x float> %24, <4 x float> %vec.phi182
-  %46 = select <4 x i1> %active.lane.mask, <4 x float> %19, <4 x float> %vec.phi183
+  %vec.phi = phi <4 x float> [ zeroinitializer, %for.body ], [ %i40, %vector.body ]
+  %vec.phi178 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i41, %vector.body ]
+  %vec.phi179 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i42, %vector.body ]
+  %vec.phi180 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i43, %vector.body ]
+  %vec.phi181 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i44, %vector.body ]
+  %vec.phi182 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i45, %vector.body ]
+  %vec.phi183 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i46, %vector.body ]
+  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %i)
+  %i3 = getelementptr inbounds float, float* %pIn, i32 %index
+  %i4 = bitcast float* %i3 to <4 x float>*
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i4, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i5 = add i32 %index, %mul4
+  %i6 = getelementptr inbounds float, float* %i2, i32 %i5
+  %i7 = bitcast float* %i6 to <4 x float>*
+  %wide.masked.load184 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i7, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i8 = fmul fast <4 x float> %wide.masked.load184, %wide.masked.load
+  %i9 = fadd fast <4 x float> %i8, %vec.phi179
+  %i10 = add i32 %index, %mul5
+  %i11 = getelementptr inbounds float, float* %i2, i32 %i10
+  %i12 = bitcast float* %i11 to <4 x float>*
+  %wide.masked.load185 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i12, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i13 = fmul fast <4 x float> %wide.masked.load185, %wide.masked.load
+  %i14 = fadd fast <4 x float> %i13, %vec.phi181
+  %i15 = add i32 %index, %mul7
+  %i16 = getelementptr inbounds float, float* %i2, i32 %i15
+  %i17 = bitcast float* %i16 to <4 x float>*
+  %wide.masked.load186 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i17, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i18 = fmul fast <4 x float> %wide.masked.load186, %wide.masked.load
+  %i19 = fadd fast <4 x float> %i18, %vec.phi183
+  %i20 = add i32 %index, %mul9
+  %i21 = getelementptr inbounds float, float* %i2, i32 %i20
+  %i22 = bitcast float* %i21 to <4 x float>*
+  %wide.masked.load187 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i22, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i23 = fmul fast <4 x float> %wide.masked.load187, %wide.masked.load
+  %i24 = fadd fast <4 x float> %i23, %vec.phi182
+  %i25 = add i32 %index, %mul11
+  %i26 = getelementptr inbounds float, float* %i2, i32 %i25
+  %i27 = bitcast float* %i26 to <4 x float>*
+  %wide.masked.load188 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i27, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i28 = fmul fast <4 x float> %wide.masked.load188, %wide.masked.load
+  %i29 = fadd fast <4 x float> %i28, %vec.phi180
+  %i30 = add i32 %index, %mul13
+  %i31 = getelementptr inbounds float, float* %i2, i32 %i30
+  %i32 = bitcast float* %i31 to <4 x float>*
+  %wide.masked.load189 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i32, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i33 = fmul fast <4 x float> %wide.masked.load189, %wide.masked.load
+  %i34 = fadd fast <4 x float> %i33, %vec.phi178
+  %i35 = add i32 %index, %mul15
+  %i36 = getelementptr inbounds float, float* %i2, i32 %i35
+  %i37 = bitcast float* %i36 to <4 x float>*
+  %wide.masked.load190 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i37, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i38 = fmul fast <4 x float> %wide.masked.load190, %wide.masked.load
+  %i39 = fadd fast <4 x float> %i38, %vec.phi
+  %i40 = select <4 x i1> %active.lane.mask, <4 x float> %i39, <4 x float> %vec.phi
+  %i41 = select <4 x i1> %active.lane.mask, <4 x float> %i34, <4 x float> %vec.phi178
+  %i42 = select <4 x i1> %active.lane.mask, <4 x float> %i9, <4 x float> %vec.phi179
+  %i43 = select <4 x i1> %active.lane.mask, <4 x float> %i29, <4 x float> %vec.phi180
+  %i44 = select <4 x i1> %active.lane.mask, <4 x float> %i14, <4 x float> %vec.phi181
+  %i45 = select <4 x i1> %active.lane.mask, <4 x float> %i24, <4 x float> %vec.phi182
+  %i46 = select <4 x i1> %active.lane.mask, <4 x float> %i19, <4 x float> %vec.phi183
   %index.next = add i32 %index, 4
-  %47 = icmp eq i32 %index.next, %n.vec
-  br i1 %47, label %middle.block, label %vector.body
+  %i47 = icmp eq i32 %index.next, %n.vec
+  br i1 %i47, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
-  %48 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %46)
-  %49 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %45)
-  %50 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %44)
-  %51 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %43)
-  %52 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %42)
-  %53 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %41)
-  %54 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %40)
+  %i48 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i46)
+  %i49 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i45)
+  %i50 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i44)
+  %i51 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i43)
+  %i52 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i42)
+  %i53 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i41)
+  %i54 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i40)
   %arrayidx56 = getelementptr inbounds float, float* %pOut, i32 %k2.0177
-  store float %52, float* %arrayidx56, align 4
+  store float %i52, float* %arrayidx56, align 4
   %arrayidx58 = getelementptr inbounds float, float* %pOut, i32 %add
-  store float %50, float* %arrayidx58, align 4
+  store float %i50, float* %arrayidx58, align 4
   %arrayidx60 = getelementptr inbounds float, float* %pOut, i32 %add6
-  store float %48, float* %arrayidx60, align 4
+  store float %i48, float* %arrayidx60, align 4
   %arrayidx62 = getelementptr inbounds float, float* %pOut, i32 %add8
-  store float %49, float* %arrayidx62, align 4
+  store float %i49, float* %arrayidx62, align 4
   %arrayidx64 = getelementptr inbounds float, float* %pOut, i32 %add10
-  store float %51, float* %arrayidx64, align 4
+  store float %i51, float* %arrayidx64, align 4
   %arrayidx66 = getelementptr inbounds float, float* %pOut, i32 %add12
-  store float %53, float* %arrayidx66, align 4
+  store float %i53, float* %arrayidx66, align 4
   %arrayidx68 = getelementptr inbounds float, float* %pOut, i32 %add14
-  store float %54, float* %arrayidx68, align 4
+  store float %i54, float* %arrayidx68, align 4
   %add70 = add i32 %k2.0177, 7
   %cmp3 = icmp ult i32 %add70, %sub
   br i1 %cmp3, label %for.body, label %for.cond.cleanup
@@ -1510,143 +1510,143 @@ define void @DCT_mve8(%struct.DCT_InstanceTypeDef* nocapture readonly %S, float*
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
   %NumInputs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 2
-  %0 = load i32, i32* %NumInputs, align 4
+  %i = load i32, i32* %NumInputs, align 4
   %NumFilters = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 1
-  %1 = load i32, i32* %NumFilters, align 4
+  %i1 = load i32, i32* %NumFilters, align 4
   %pDCTCoefs = getelementptr inbounds %struct.DCT_InstanceTypeDef, %struct.DCT_InstanceTypeDef* %S, i32 0, i32 0
-  %2 = load float*, float** %pDCTCoefs, align 4
-  %cmp = icmp ugt i32 %0, 1
+  %i2 = load float*, float** %pDCTCoefs, align 4
+  %cmp = icmp ugt i32 %i, 1
   tail call void @llvm.assume(i1 %cmp)
-  %sub = add i32 %1, -8
+  %sub = add i32 %i1, -8
   %cmp3197 = icmp ugt i32 %sub, 1
   br i1 %cmp3197, label %for.body.preheader, label %for.cond.cleanup
 
 for.body.preheader:                               ; preds = %entry
-  %n.rnd.up = add i32 %0, 3
+  %n.rnd.up = add i32 %i, 3
   %n.vec = and i32 %n.rnd.up, -4
   br label %for.body
 
 for.cond.cleanup:                                 ; preds = %middle.block, %entry
   ret void
 
-for.body:                                         ; preds = %for.body.preheader, %middle.block
+for.body:                                         ; preds = %middle.block, %for.body.preheader
   %k2.0198 = phi i32 [ %add79, %middle.block ], [ 1, %for.body.preheader ]
-  %mul4 = mul i32 %k2.0198, %0
+  %mul4 = mul i32 %k2.0198, %i
   %add = add nuw nsw i32 %k2.0198, 1
-  %mul5 = mul i32 %add, %0
+  %mul5 = mul i32 %add, %i
   %add6 = add nuw nsw i32 %k2.0198, 2
-  %mul7 = mul i32 %add6, %0
+  %mul7 = mul i32 %add6, %i
   %add8 = add nuw nsw i32 %k2.0198, 3
-  %mul9 = mul i32 %add8, %0
+  %mul9 = mul i32 %add8, %i
   %add10 = add nuw nsw i32 %k2.0198, 4
-  %mul11 = mul i32 %add10, %0
+  %mul11 = mul i32 %add10, %i
   %add12 = add nuw nsw i32 %k2.0198, 5
-  %mul13 = mul i32 %add12, %0
+  %mul13 = mul i32 %add12, %i
   %add14 = add nuw nsw i32 %k2.0198, 6
-  %mul15 = mul i32 %add14, %0
+  %mul15 = mul i32 %add14, %i
   %add16 = add i32 %k2.0198, 7
-  %mul17 = mul i32 %add16, %0
+  %mul17 = mul i32 %add16, %i
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %for.body
   %index = phi i32 [ 0, %for.body ], [ %index.next, %vector.body ]
-  %vec.phi = phi <4 x float> [ zeroinitializer, %for.body ], [ %45, %vector.body ]
-  %vec.phi199 = phi <4 x float> [ zeroinitializer, %for.body ], [ %46, %vector.body ]
-  %vec.phi200 = phi <4 x float> [ zeroinitializer, %for.body ], [ %47, %vector.body ]
-  %vec.phi201 = phi <4 x float> [ zeroinitializer, %for.body ], [ %48, %vector.body ]
-  %vec.phi202 = phi <4 x float> [ zeroinitializer, %for.body ], [ %49, %vector.body ]
-  %vec.phi203 = phi <4 x float> [ zeroinitializer, %for.body ], [ %50, %vector.body ]
-  %vec.phi204 = phi <4 x float> [ zeroinitializer, %for.body ], [ %51, %vector.body ]
-  %vec.phi205 = phi <4 x float> [ zeroinitializer, %for.body ], [ %52, %vector.body ]
-  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %0)
-  %3 = getelementptr inbounds float, float* %pIn, i32 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %4, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %5 = add i32 %index, %mul4
-  %6 = getelementptr inbounds float, float* %2, i32 %5
-  %7 = bitcast float* %6 to <4 x float>*
-  %wide.masked.load206 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %7, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %8 = fmul fast <4 x float> %wide.masked.load206, %wide.masked.load
-  %9 = fadd fast <4 x float> %8, %vec.phi200
-  %10 = add i32 %index, %mul5
-  %11 = getelementptr inbounds float, float* %2, i32 %10
-  %12 = bitcast float* %11 to <4 x float>*
-  %wide.masked.load207 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %12, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %13 = fmul fast <4 x float> %wide.masked.load207, %wide.masked.load
-  %14 = fadd fast <4 x float> %13, %vec.phi202
-  %15 = add i32 %index, %mul7
-  %16 = getelementptr inbounds float, float* %2, i32 %15
-  %17 = bitcast float* %16 to <4 x float>*
-  %wide.masked.load208 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %17, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %18 = fmul fast <4 x float> %wide.masked.load208, %wide.masked.load
-  %19 = fadd fast <4 x float> %18, %vec.phi204
-  %20 = add i32 %index, %mul9
-  %21 = getelementptr inbounds float, float* %2, i32 %20
-  %22 = bitcast float* %21 to <4 x float>*
-  %wide.masked.load209 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %22, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %23 = fmul fast <4 x float> %wide.masked.load209, %wide.masked.load
-  %24 = fadd fast <4 x float> %23, %vec.phi205
-  %25 = add i32 %index, %mul11
-  %26 = getelementptr inbounds float, float* %2, i32 %25
-  %27 = bitcast float* %26 to <4 x float>*
-  %wide.masked.load210 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %27, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %28 = fmul fast <4 x float> %wide.masked.load210, %wide.masked.load
-  %29 = fadd fast <4 x float> %28, %vec.phi203
-  %30 = add i32 %index, %mul13
-  %31 = getelementptr inbounds float, float* %2, i32 %30
-  %32 = bitcast float* %31 to <4 x float>*
-  %wide.masked.load211 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %32, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %33 = fmul fast <4 x float> %wide.masked.load211, %wide.masked.load
-  %34 = fadd fast <4 x float> %33, %vec.phi201
-  %35 = add i32 %index, %mul15
-  %36 = getelementptr inbounds float, float* %2, i32 %35
-  %37 = bitcast float* %36 to <4 x float>*
-  %wide.masked.load212 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %37, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %38 = fmul fast <4 x float> %wide.masked.load212, %wide.masked.load
-  %39 = fadd fast <4 x float> %38, %vec.phi199
-  %40 = add i32 %index, %mul17
-  %41 = getelementptr inbounds float, float* %2, i32 %40
-  %42 = bitcast float* %41 to <4 x float>*
-  %wide.masked.load213 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %42, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %43 = fmul fast <4 x float> %wide.masked.load213, %wide.masked.load
-  %44 = fadd fast <4 x float> %43, %vec.phi
-  %45 = select <4 x i1> %active.lane.mask, <4 x float> %44, <4 x float> %vec.phi
-  %46 = select <4 x i1> %active.lane.mask, <4 x float> %39, <4 x float> %vec.phi199
-  %47 = select <4 x i1> %active.lane.mask, <4 x float> %9, <4 x float> %vec.phi200
-  %48 = select <4 x i1> %active.lane.mask, <4 x float> %34, <4 x float> %vec.phi201
-  %49 = select <4 x i1> %active.lane.mask, <4 x float> %14, <4 x float> %vec.phi202
-  %50 = select <4 x i1> %active.lane.mask, <4 x float> %29, <4 x float> %vec.phi203
-  %51 = select <4 x i1> %active.lane.mask, <4 x float> %19, <4 x float> %vec.phi204
-  %52 = select <4 x i1> %active.lane.mask, <4 x float> %24, <4 x float> %vec.phi205
+  %vec.phi = phi <4 x float> [ zeroinitializer, %for.body ], [ %i45, %vector.body ]
+  %vec.phi199 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i46, %vector.body ]
+  %vec.phi200 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i47, %vector.body ]
+  %vec.phi201 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i48, %vector.body ]
+  %vec.phi202 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i49, %vector.body ]
+  %vec.phi203 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i50, %vector.body ]
+  %vec.phi204 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i51, %vector.body ]
+  %vec.phi205 = phi <4 x float> [ zeroinitializer, %for.body ], [ %i52, %vector.body ]
+  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %i)
+  %i3 = getelementptr inbounds float, float* %pIn, i32 %index
+  %i4 = bitcast float* %i3 to <4 x float>*
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i4, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i5 = add i32 %index, %mul4
+  %i6 = getelementptr inbounds float, float* %i2, i32 %i5
+  %i7 = bitcast float* %i6 to <4 x float>*
+  %wide.masked.load206 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i7, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i8 = fmul fast <4 x float> %wide.masked.load206, %wide.masked.load
+  %i9 = fadd fast <4 x float> %i8, %vec.phi200
+  %i10 = add i32 %index, %mul5
+  %i11 = getelementptr inbounds float, float* %i2, i32 %i10
+  %i12 = bitcast float* %i11 to <4 x float>*
+  %wide.masked.load207 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i12, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i13 = fmul fast <4 x float> %wide.masked.load207, %wide.masked.load
+  %i14 = fadd fast <4 x float> %i13, %vec.phi202
+  %i15 = add i32 %index, %mul7
+  %i16 = getelementptr inbounds float, float* %i2, i32 %i15
+  %i17 = bitcast float* %i16 to <4 x float>*
+  %wide.masked.load208 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i17, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i18 = fmul fast <4 x float> %wide.masked.load208, %wide.masked.load
+  %i19 = fadd fast <4 x float> %i18, %vec.phi204
+  %i20 = add i32 %index, %mul9
+  %i21 = getelementptr inbounds float, float* %i2, i32 %i20
+  %i22 = bitcast float* %i21 to <4 x float>*
+  %wide.masked.load209 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i22, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i23 = fmul fast <4 x float> %wide.masked.load209, %wide.masked.load
+  %i24 = fadd fast <4 x float> %i23, %vec.phi205
+  %i25 = add i32 %index, %mul11
+  %i26 = getelementptr inbounds float, float* %i2, i32 %i25
+  %i27 = bitcast float* %i26 to <4 x float>*
+  %wide.masked.load210 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i27, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i28 = fmul fast <4 x float> %wide.masked.load210, %wide.masked.load
+  %i29 = fadd fast <4 x float> %i28, %vec.phi203
+  %i30 = add i32 %index, %mul13
+  %i31 = getelementptr inbounds float, float* %i2, i32 %i30
+  %i32 = bitcast float* %i31 to <4 x float>*
+  %wide.masked.load211 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i32, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i33 = fmul fast <4 x float> %wide.masked.load211, %wide.masked.load
+  %i34 = fadd fast <4 x float> %i33, %vec.phi201
+  %i35 = add i32 %index, %mul15
+  %i36 = getelementptr inbounds float, float* %i2, i32 %i35
+  %i37 = bitcast float* %i36 to <4 x float>*
+  %wide.masked.load212 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i37, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i38 = fmul fast <4 x float> %wide.masked.load212, %wide.masked.load
+  %i39 = fadd fast <4 x float> %i38, %vec.phi199
+  %i40 = add i32 %index, %mul17
+  %i41 = getelementptr inbounds float, float* %i2, i32 %i40
+  %i42 = bitcast float* %i41 to <4 x float>*
+  %wide.masked.load213 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %i42, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %i43 = fmul fast <4 x float> %wide.masked.load213, %wide.masked.load
+  %i44 = fadd fast <4 x float> %i43, %vec.phi
+  %i45 = select <4 x i1> %active.lane.mask, <4 x float> %i44, <4 x float> %vec.phi
+  %i46 = select <4 x i1> %active.lane.mask, <4 x float> %i39, <4 x float> %vec.phi199
+  %i47 = select <4 x i1> %active.lane.mask, <4 x float> %i9, <4 x float> %vec.phi200
+  %i48 = select <4 x i1> %active.lane.mask, <4 x float> %i34, <4 x float> %vec.phi201
+  %i49 = select <4 x i1> %active.lane.mask, <4 x float> %i14, <4 x float> %vec.phi202
+  %i50 = select <4 x i1> %active.lane.mask, <4 x float> %i29, <4 x float> %vec.phi203
+  %i51 = select <4 x i1> %active.lane.mask, <4 x float> %i19, <4 x float> %vec.phi204
+  %i52 = select <4 x i1> %active.lane.mask, <4 x float> %i24, <4 x float> %vec.phi205
   %index.next = add i32 %index, 4
-  %53 = icmp eq i32 %index.next, %n.vec
-  br i1 %53, label %middle.block, label %vector.body
+  %i53 = icmp eq i32 %index.next, %n.vec
+  br i1 %i53, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
-  %54 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %52)
-  %55 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %51)
-  %56 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %50)
-  %57 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %49)
-  %58 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %48)
-  %59 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %47)
-  %60 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %46)
-  %61 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %45)
+  %i54 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i52)
+  %i55 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i51)
+  %i56 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i50)
+  %i57 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i49)
+  %i58 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i48)
+  %i59 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i47)
+  %i60 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i46)
+  %i61 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> %i45)
   %arrayidx63 = getelementptr inbounds float, float* %pOut, i32 %k2.0198
-  store float %59, float* %arrayidx63, align 4
+  store float %i59, float* %arrayidx63, align 4
   %arrayidx65 = getelementptr inbounds float, float* %pOut, i32 %add
-  store float %57, float* %arrayidx65, align 4
+  store float %i57, float* %arrayidx65, align 4
   %arrayidx67 = getelementptr inbounds float, float* %pOut, i32 %add6
-  store float %55, float* %arrayidx67, align 4
+  store float %i55, float* %arrayidx67, align 4
   %arrayidx69 = getelementptr inbounds float, float* %pOut, i32 %add8
-  store float %54, float* %arrayidx69, align 4
+  store float %i54, float* %arrayidx69, align 4
   %arrayidx71 = getelementptr inbounds float, float* %pOut, i32 %add10
-  store float %56, float* %arrayidx71, align 4
+  store float %i56, float* %arrayidx71, align 4
   %arrayidx73 = getelementptr inbounds float, float* %pOut, i32 %add12
-  store float %58, float* %arrayidx73, align 4
+  store float %i58, float* %arrayidx73, align 4
   %arrayidx75 = getelementptr inbounds float, float* %pOut, i32 %add14
-  store float %60, float* %arrayidx75, align 4
+  store float %i60, float* %arrayidx75, align 4
   %arrayidx77 = getelementptr inbounds float, float* %pOut, i32 %add16
-  store float %61, float* %arrayidx77, align 4
+  store float %i61, float* %arrayidx77, align 4
   %add79 = add i32 %k2.0198, 8
   %cmp3 = icmp ult i32 %add79, %sub
   br i1 %cmp3, label %for.body, label %for.cond.cleanup


        


More information about the llvm-commits mailing list