[llvm] 25e38c3 - [ARM] Extra reduction plus tailpredication tests. NFC

David Green via llvm-commits <llvm-commits at lists.llvm.org>
Fri Aug 7 09:17:17 PDT 2020


Author: David Green
Date: 2020-08-07T17:16:56+01:00
New Revision: 25e38c3f3c2a814797f53d0f08a687b3b4a028e4

URL: https://github.com/llvm/llvm-project/commit/25e38c3f3c2a814797f53d0f08a687b3b4a028e4
DIFF: https://github.com/llvm/llvm-project/commit/25e38c3f3c2a814797f53d0f08a687b3b4a028e4.diff

LOG: [ARM] Extra reduction plus tailpredication tests. NFC
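
These tests cover vector reduction loops guarded by llvm.get.active.lane.mask,
at 4, 8 and 16 lanes and with i32/i16/i8 element types, as both plain add
reductions and vmlava-style multiply-accumulate reductions, now run with
-tail-predication=enabled. Each vector body is the vectorized form of a simple
scalar reduction loop; as a rough sketch of what add4i32 computes (this scalar
function is illustrative only, not part of the commit):

define i32 @add4i32_scalar(i32* nocapture readonly %x, i32 %n) {
entry:
  %cmp = icmp eq i32 %n, 0                        ; n == 0: nothing to sum
  br i1 %cmp, label %exit, label %loop

loop:                                             ; s += x[i] for i in 0..n-1
  %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
  %s = phi i32 [ 0, %entry ], [ %s.next, %loop ]
  %p = getelementptr inbounds i32, i32* %x, i32 %i
  %v = load i32, i32* %p, align 4
  %s.next = add i32 %v, %s
  %i.next = add i32 %i, 1
  %done = icmp eq i32 %i.next, %n
  br i1 %done, label %exit, label %loop

exit:
  %r = phi i32 [ 0, %entry ], [ %s.next, %loop ]
  ret i32 %r
}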

Added: 
    

Modified: 
    llvm/test/CodeGen/Thumb2/mve-vecreduce-loops.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-loops.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-loops.ll
index 0d22a7f3cd99..f3252ac1c4a8 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-loops.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-loops.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -tail-predication=enabled -verify-machineinstrs %s -o - | FileCheck %s
 
 define i32 @add_i32(i32* nocapture readonly %x, i32 %n) {
 ; CHECK-LABEL: add_i32:
@@ -1696,6 +1696,2864 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
   ret float %r.0.lcssa
 }
 
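+; add4i32: masked <4 x i32> add reduction. Even with -tail-predication=enabled
+; the lane mask is built manually (vdup/vcmp/vpnot) around vaddvat.u32 in a
+; dls/le loop, rather than using a dlstp tail-predicated loop.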
+define i32 @add4i32(i32* noalias nocapture readonly %x, i32 %n) {
+; CHECK-LABEL: add4i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    cbz r1, .LBB17_4
+; CHECK-NEXT:  @ %bb.1: @ %vector.ph
+; CHECK-NEXT:    adds r2, r1, #3
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    bic r2, r2, #3
+; CHECK-NEXT:    subs r2, #4
+; CHECK-NEXT:    add.w lr, r3, r2, lsr #2
+; CHECK-NEXT:    adr r3, .LCPI17_0
+; CHECK-NEXT:    subs r2, r1, #1
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    vdup.32 q1, r2
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB17_2: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vadd.i32 q2, q0, r1
+; CHECK-NEXT:    vdup.32 q3, r1
+; CHECK-NEXT:    vcmp.u32 hi, q3, q2
+; CHECK-NEXT:    adds r1, #4
+; CHECK-NEXT:    vpnot
+; CHECK-NEXT:    vpsttt
+; CHECK-NEXT:    vcmpt.u32 cs, q1, q2
+; CHECK-NEXT:    vldrwt.u32 q2, [r0], #16
+; CHECK-NEXT:    vaddvat.u32 r2, q2
+; CHECK-NEXT:    le lr, .LBB17_2
+; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:  .LBB17_4:
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.5:
+; CHECK-NEXT:  .LCPI17_0:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+entry:
+  %cmp6.not = icmp eq i32 %n, 0
+  br i1 %cmp6.not, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %n.rnd.up = add i32 %n, 3
+  %n.vec = and i32 %n.rnd.up, -4
+  %trip.count.minus.1 = add i32 %n, -1
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi i32 [ 0, %vector.ph ], [ %4, %vector.body ]
+  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %trip.count.minus.1)
+  %0 = getelementptr inbounds i32, i32* %x, i32 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %2 = select <4 x i1> %active.lane.mask, <4 x i32> %wide.masked.load, <4 x i32> zeroinitializer
+  %3 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %2)
+  %4 = add i32 %3, %vec.phi
+  %index.next = add i32 %index, 4
+  %5 = icmp eq i32 %index.next, %n.vec
+  br i1 %5, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  %s.0.lcssa = phi i32 [ 0, %entry ], [ %4, %vector.body ]
+  ret i32 %s.0.lcssa
+}
+
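+; mla4i32: as add4i32, but with two masked loads feeding a vmlavat.u32
+; multiply-accumulate reduction.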
+define i32 @mla4i32(i32* noalias nocapture readonly %x, i32* noalias nocapture readonly %y, i32 %n) {
+; CHECK-LABEL: mla4i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    cbz r2, .LBB18_4
+; CHECK-NEXT:  @ %bb.1: @ %vector.ph
+; CHECK-NEXT:    adds r3, r2, #3
+; CHECK-NEXT:    bic r3, r3, #3
+; CHECK-NEXT:    sub.w r12, r3, #4
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    add.w lr, r3, r12, lsr #2
+; CHECK-NEXT:    sub.w r12, r2, #1
+; CHECK-NEXT:    adr r2, .LCPI18_0
+; CHECK-NEXT:    movs r3, #0
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    vdup.32 q1, r12
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB18_2: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vadd.i32 q2, q0, r3
+; CHECK-NEXT:    vdup.32 q3, r3
+; CHECK-NEXT:    vcmp.u32 hi, q3, q2
+; CHECK-NEXT:    adds r3, #4
+; CHECK-NEXT:    vpnot
+; CHECK-NEXT:    vpstttt
+; CHECK-NEXT:    vcmpt.u32 cs, q1, q2
+; CHECK-NEXT:    vldrwt.u32 q2, [r0], #16
+; CHECK-NEXT:    vldrwt.u32 q3, [r1], #16
+; CHECK-NEXT:    vmlavat.u32 r2, q3, q2
+; CHECK-NEXT:    le lr, .LBB18_2
+; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:  .LBB18_4:
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.5:
+; CHECK-NEXT:  .LCPI18_0:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+entry:
+  %cmp8.not = icmp eq i32 %n, 0
+  br i1 %cmp8.not, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %n.rnd.up = add i32 %n, 3
+  %n.vec = and i32 %n.rnd.up, -4
+  %trip.count.minus.1 = add i32 %n, -1
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi i32 [ 0, %vector.ph ], [ %7, %vector.body ]
+  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %trip.count.minus.1)
+  %0 = getelementptr inbounds i32, i32* %x, i32 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %2 = getelementptr inbounds i32, i32* %y, i32 %index
+  %3 = bitcast i32* %2 to <4 x i32>*
+  %wide.masked.load13 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %3, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %4 = mul nsw <4 x i32> %wide.masked.load13, %wide.masked.load
+  %5 = select <4 x i1> %active.lane.mask, <4 x i32> %4, <4 x i32> zeroinitializer
+  %6 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %5)
+  %7 = add i32 %6, %vec.phi
+  %index.next = add i32 %index, 4
+  %8 = icmp eq i32 %index.next, %n.vec
+  br i1 %8, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  %s.0.lcssa = phi i32 [ 0, %entry ], [ %7, %vector.body ]
+  ret i32 %s.0.lcssa
+}
+
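+; add8i32: i16 data sign-extended and add-reduced to i32. The <8 x i1> lane
+; mask is built from two <4 x i32> compares and repacked through GPRs, hence
+; the vmov chains and predicate spills.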
+define i32 @add8i32(i16* noalias nocapture readonly %x, i32 %n) {
+; CHECK-LABEL: add8i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #40
+; CHECK-NEXT:    sub sp, #40
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    beq.w .LBB19_3
+; CHECK-NEXT:  @ %bb.1: @ %vector.ph
+; CHECK-NEXT:    adds r2, r1, #7
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    bic r2, r2, #7
+; CHECK-NEXT:    vmov.i8 q1, #0x0
+; CHECK-NEXT:    subs r2, #8
+; CHECK-NEXT:    vmov.i8 q2, #0xff
+; CHECK-NEXT:    add.w lr, r3, r2, lsr #3
+; CHECK-NEXT:    adr r3, .LCPI19_0
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    adr r3, .LCPI19_1
+; CHECK-NEXT:    subs r2, r1, #1
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:    vstrw.32 q0, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    vdup.32 q4, r2
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT:  .LBB19_2: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q3, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vadd.i32 q5, q0, r1
+; CHECK-NEXT:    vdup.32 q0, r1
+; CHECK-NEXT:    vcmp.u32 hi, q0, q5
+; CHECK-NEXT:    vadd.i32 q6, q3, r1
+; CHECK-NEXT:    vpsel q7, q2, q1
+; CHECK-NEXT:    vcmp.u32 hi, q0, q6
+; CHECK-NEXT:    vpsel q3, q2, q1
+; CHECK-NEXT:    adds r1, #8
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vmov.16 q0[0], r3
+; CHECK-NEXT:    vmov r3, s13
+; CHECK-NEXT:    vmov.16 q0[1], r3
+; CHECK-NEXT:    vmov r3, s14
+; CHECK-NEXT:    vmov.16 q0[2], r3
+; CHECK-NEXT:    vmov r3, s15
+; CHECK-NEXT:    vmov.16 q0[3], r3
+; CHECK-NEXT:    vmov r3, s28
+; CHECK-NEXT:    vmov.16 q0[4], r3
+; CHECK-NEXT:    vmov r3, s29
+; CHECK-NEXT:    vmov.16 q0[5], r3
+; CHECK-NEXT:    vmov r3, s30
+; CHECK-NEXT:    vmov.16 q0[6], r3
+; CHECK-NEXT:    vmov r3, s31
+; CHECK-NEXT:    vmov.16 q0[7], r3
+; CHECK-NEXT:    vcmp.i16 ne, q0, zr
+; CHECK-NEXT:    vstr p0, [sp, #36] @ 4-byte Spill
+; CHECK-NEXT:    vcmp.u32 cs, q4, q5
+; CHECK-NEXT:    vpsel q5, q2, q1
+; CHECK-NEXT:    vcmp.u32 cs, q4, q6
+; CHECK-NEXT:    vpsel q0, q2, q1
+; CHECK-NEXT:    vldr p0, [sp, #36] @ 4-byte Reload
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov.16 q6[0], r3
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    vmov.16 q6[1], r3
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov.16 q6[2], r3
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    vmov.16 q6[3], r3
+; CHECK-NEXT:    vmov r3, s20
+; CHECK-NEXT:    vmov.16 q6[4], r3
+; CHECK-NEXT:    vmov r3, s21
+; CHECK-NEXT:    vmov.16 q6[5], r3
+; CHECK-NEXT:    vmov r3, s22
+; CHECK-NEXT:    vmov.16 q6[6], r3
+; CHECK-NEXT:    vmov r3, s23
+; CHECK-NEXT:    vpnot
+; CHECK-NEXT:    vmov.16 q6[7], r3
+; CHECK-NEXT:    vpsttt
+; CHECK-NEXT:    vcmpt.i16 ne, q6, zr
+; CHECK-NEXT:    vldrht.u16 q0, [r0], #16
+; CHECK-NEXT:    vaddvat.s16 r2, q0
+; CHECK-NEXT:    le lr, .LBB19_2
+; CHECK-NEXT:    b .LBB19_4
+; CHECK-NEXT:  .LBB19_3:
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:  .LBB19_4: @ %for.cond.cleanup
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    add sp, #40
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.5:
+; CHECK-NEXT:  .LCPI19_0:
+; CHECK-NEXT:    .long 4 @ 0x4
+; CHECK-NEXT:    .long 5 @ 0x5
+; CHECK-NEXT:    .long 6 @ 0x6
+; CHECK-NEXT:    .long 7 @ 0x7
+; CHECK-NEXT:  .LCPI19_1:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+entry:
+  %cmp6.not = icmp eq i32 %n, 0
+  br i1 %cmp6.not, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %n.rnd.up = add i32 %n, 7
+  %n.vec = and i32 %n.rnd.up, -8
+  %trip.count.minus.1 = add i32 %n, -1
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi i32 [ 0, %vector.ph ], [ %5, %vector.body ]
+  %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %trip.count.minus.1)
+  %0 = getelementptr inbounds i16, i16* %x, i32 %index
+  %1 = bitcast i16* %0 to <8 x i16>*
+  %wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %1, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
+  %2 = sext <8 x i16> %wide.masked.load to <8 x i32>
+  %3 = select <8 x i1> %active.lane.mask, <8 x i32> %2, <8 x i32> zeroinitializer
+  %4 = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %3)
+  %5 = add i32 %4, %vec.phi
+  %index.next = add i32 %index, 8
+  %6 = icmp eq i32 %index.next, %n.vec
+  br i1 %6, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  %s.0.lcssa = phi i32 [ 0, %entry ], [ %5, %vector.body ]
+  ret i32 %s.0.lcssa
+}
+
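+; mla8i32: as add8i32, but reducing the product of two sign-extended i16
+; loads via vmlavat.s16.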
+define i32 @mla8i32(i16* noalias nocapture readonly %x, i16* noalias nocapture readonly %y, i32 %n) {
+; CHECK-LABEL: mla8i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #40
+; CHECK-NEXT:    sub sp, #40
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    beq.w .LBB20_3
+; CHECK-NEXT:  @ %bb.1: @ %vector.ph
+; CHECK-NEXT:    adds r3, r2, #7
+; CHECK-NEXT:    vmov.i8 q1, #0x0
+; CHECK-NEXT:    bic r3, r3, #7
+; CHECK-NEXT:    vmov.i8 q2, #0xff
+; CHECK-NEXT:    sub.w r12, r3, #8
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    add.w lr, r3, r12, lsr #3
+; CHECK-NEXT:    sub.w r12, r2, #1
+; CHECK-NEXT:    adr r2, .LCPI20_0
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    adr r2, .LCPI20_1
+; CHECK-NEXT:    vdup.32 q4, r12
+; CHECK-NEXT:    movs r3, #0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    mov.w r12, #0
+; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT:  .LBB20_2: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q3, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vadd.i32 q5, q0, r3
+; CHECK-NEXT:    vdup.32 q0, r3
+; CHECK-NEXT:    vcmp.u32 hi, q0, q5
+; CHECK-NEXT:    vadd.i32 q6, q3, r3
+; CHECK-NEXT:    vpsel q7, q2, q1
+; CHECK-NEXT:    vcmp.u32 hi, q0, q6
+; CHECK-NEXT:    vpsel q3, q2, q1
+; CHECK-NEXT:    adds r3, #8
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vmov.16 q0[0], r2
+; CHECK-NEXT:    vmov r2, s13
+; CHECK-NEXT:    vmov.16 q0[1], r2
+; CHECK-NEXT:    vmov r2, s14
+; CHECK-NEXT:    vmov.16 q0[2], r2
+; CHECK-NEXT:    vmov r2, s15
+; CHECK-NEXT:    vmov.16 q0[3], r2
+; CHECK-NEXT:    vmov r2, s28
+; CHECK-NEXT:    vmov.16 q0[4], r2
+; CHECK-NEXT:    vmov r2, s29
+; CHECK-NEXT:    vmov.16 q0[5], r2
+; CHECK-NEXT:    vmov r2, s30
+; CHECK-NEXT:    vmov.16 q0[6], r2
+; CHECK-NEXT:    vmov r2, s31
+; CHECK-NEXT:    vmov.16 q0[7], r2
+; CHECK-NEXT:    vcmp.i16 ne, q0, zr
+; CHECK-NEXT:    vstr p0, [sp, #36] @ 4-byte Spill
+; CHECK-NEXT:    vcmp.u32 cs, q4, q5
+; CHECK-NEXT:    vpsel q5, q2, q1
+; CHECK-NEXT:    vcmp.u32 cs, q4, q6
+; CHECK-NEXT:    vpsel q0, q2, q1
+; CHECK-NEXT:    vldr p0, [sp, #36] @ 4-byte Reload
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov.16 q6[0], r2
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    vmov.16 q6[1], r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov.16 q6[2], r2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    vmov.16 q6[3], r2
+; CHECK-NEXT:    vmov r2, s20
+; CHECK-NEXT:    vmov.16 q6[4], r2
+; CHECK-NEXT:    vmov r2, s21
+; CHECK-NEXT:    vmov.16 q6[5], r2
+; CHECK-NEXT:    vmov r2, s22
+; CHECK-NEXT:    vmov.16 q6[6], r2
+; CHECK-NEXT:    vmov r2, s23
+; CHECK-NEXT:    vpnot
+; CHECK-NEXT:    vmov.16 q6[7], r2
+; CHECK-NEXT:    vpstttt
+; CHECK-NEXT:    vcmpt.i16 ne, q6, zr
+; CHECK-NEXT:    vldrht.u16 q0, [r0], #16
+; CHECK-NEXT:    vldrht.u16 q3, [r1], #16
+; CHECK-NEXT:    vmlavat.s16 r12, q3, q0
+; CHECK-NEXT:    le lr, .LBB20_2
+; CHECK-NEXT:    b .LBB20_4
+; CHECK-NEXT:  .LBB20_3:
+; CHECK-NEXT:    mov.w r12, #0
+; CHECK-NEXT:  .LBB20_4: @ %for.cond.cleanup
+; CHECK-NEXT:    mov r0, r12
+; CHECK-NEXT:    add sp, #40
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.5:
+; CHECK-NEXT:  .LCPI20_0:
+; CHECK-NEXT:    .long 4 @ 0x4
+; CHECK-NEXT:    .long 5 @ 0x5
+; CHECK-NEXT:    .long 6 @ 0x6
+; CHECK-NEXT:    .long 7 @ 0x7
+; CHECK-NEXT:  .LCPI20_1:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+entry:
+  %cmp9.not = icmp eq i32 %n, 0
+  br i1 %cmp9.not, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %n.rnd.up = add i32 %n, 7
+  %n.vec = and i32 %n.rnd.up, -8
+  %trip.count.minus.1 = add i32 %n, -1
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi i32 [ 0, %vector.ph ], [ %9, %vector.body ]
+  %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %trip.count.minus.1)
+  %0 = getelementptr inbounds i16, i16* %x, i32 %index
+  %1 = bitcast i16* %0 to <8 x i16>*
+  %wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %1, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
+  %2 = sext <8 x i16> %wide.masked.load to <8 x i32>
+  %3 = getelementptr inbounds i16, i16* %y, i32 %index
+  %4 = bitcast i16* %3 to <8 x i16>*
+  %wide.masked.load14 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %4, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
+  %5 = sext <8 x i16> %wide.masked.load14 to <8 x i32>
+  %6 = mul nsw <8 x i32> %5, %2
+  %7 = select <8 x i1> %active.lane.mask, <8 x i32> %6, <8 x i32> zeroinitializer
+  %8 = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %7)
+  %9 = add i32 %8, %vec.phi
+  %index.next = add i32 %index, 8
+  %10 = icmp eq i32 %index.next, %n.vec
+  br i1 %10, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  %s.0.lcssa = phi i32 [ 0, %entry ], [ %9, %vector.body ]
+  ret i32 %s.0.lcssa
+}
+
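+; add16i32: i8 data zero-extended and add-reduced to i32. The <16 x i1> mask
+; is assembled from four <4 x i32> compares, giving the long vmov repack
+; sequences before vaddvat.u8.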
+define i32 @add16i32(i8* noalias nocapture readonly %x, i32 %n) {
+; CHECK-LABEL: add16i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #136
+; CHECK-NEXT:    sub sp, #136
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    beq.w .LBB21_3
+; CHECK-NEXT:  @ %bb.1: @ %vector.ph
+; CHECK-NEXT:    add.w r2, r1, #15
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    bic r2, r2, #15
+; CHECK-NEXT:    vmov.i8 q7, #0x0
+; CHECK-NEXT:    subs r2, #16
+; CHECK-NEXT:    vmov.i8 q2, #0xff
+; CHECK-NEXT:    add.w lr, r3, r2, lsr #4
+; CHECK-NEXT:    adr r3, .LCPI21_0
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    adr r3, .LCPI21_1
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:    subs r2, r1, #1
+; CHECK-NEXT:    vstrw.32 q0, [sp, #64] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    adr r3, .LCPI21_2
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #48] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    adr r3, .LCPI21_3
+; CHECK-NEXT:    vstrw.32 q0, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    vstrw.32 q0, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vdup.32 q0, r2
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT:  .LBB21_2: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #64] @ 16-byte Reload
+; CHECK-NEXT:    vdup.32 q6, r1
+; CHECK-NEXT:    vadd.i32 q0, q0, r1
+; CHECK-NEXT:    vcmp.u32 hi, q6, q0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #112] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT:    vpsel q3, q2, q7
+; CHECK-NEXT:    vadd.i32 q0, q0, r1
+; CHECK-NEXT:    vcmp.u32 hi, q6, q0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #96] @ 16-byte Spill
+; CHECK-NEXT:    vpsel q0, q2, q7
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov.16 q4[0], r3
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    vmov.16 q4[1], r3
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov.16 q4[2], r3
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    vmov.16 q4[3], r3
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vmov.16 q4[4], r3
+; CHECK-NEXT:    vmov r3, s13
+; CHECK-NEXT:    vmov.16 q4[5], r3
+; CHECK-NEXT:    vmov r3, s14
+; CHECK-NEXT:    vmov.16 q4[6], r3
+; CHECK-NEXT:    vmov r3, s15
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT:    vmov.16 q4[7], r3
+; CHECK-NEXT:    vcmp.i16 ne, q4, zr
+; CHECK-NEXT:    vadd.i32 q3, q0, r1
+; CHECK-NEXT:    vpsel q5, q2, q7
+; CHECK-NEXT:    vcmp.u32 hi, q6, q3
+; CHECK-NEXT:    vstr p0, [sp, #92] @ 4-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT:    vadd.i32 q4, q0, r1
+; CHECK-NEXT:    adds r1, #16
+; CHECK-NEXT:    vcmp.u32 hi, q6, q4
+; CHECK-NEXT:    vpsel q0, q2, q7
+; CHECK-NEXT:    vldr p0, [sp, #92] @ 4-byte Reload
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov.16 q6[0], r3
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    vmov.16 q6[1], r3
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov.16 q6[2], r3
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    vpsel q1, q2, q7
+; CHECK-NEXT:    vmov.16 q6[3], r3
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.16 q6[4], r3
+; CHECK-NEXT:    vmov r3, s5
+; CHECK-NEXT:    vmov.16 q6[5], r3
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    vmov.16 q6[6], r3
+; CHECK-NEXT:    vmov r3, s7
+; CHECK-NEXT:    vmov.16 q6[7], r3
+; CHECK-NEXT:    vcmp.i16 ne, q6, zr
+; CHECK-NEXT:    vpsel q0, q2, q7
+; CHECK-NEXT:    vmov.u16 r3, q0[0]
+; CHECK-NEXT:    vmov.8 q6[0], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[1]
+; CHECK-NEXT:    vmov.8 q6[1], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[2]
+; CHECK-NEXT:    vmov.8 q6[2], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[3]
+; CHECK-NEXT:    vmov.8 q6[3], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[4]
+; CHECK-NEXT:    vmov.8 q6[4], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[5]
+; CHECK-NEXT:    vmov.8 q6[5], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[6]
+; CHECK-NEXT:    vmov.8 q6[6], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[7]
+; CHECK-NEXT:    vmov.8 q6[7], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[0]
+; CHECK-NEXT:    vmov.8 q6[8], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[1]
+; CHECK-NEXT:    vmov.8 q6[9], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[2]
+; CHECK-NEXT:    vmov.8 q6[10], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[3]
+; CHECK-NEXT:    vmov.8 q6[11], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[4]
+; CHECK-NEXT:    vmov.8 q6[12], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[5]
+; CHECK-NEXT:    vmov.8 q6[13], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[6]
+; CHECK-NEXT:    vmov.8 q6[14], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[7]
+; CHECK-NEXT:    vmov.8 q6[15], r3
+; CHECK-NEXT:    vcmp.i8 ne, q6, zr
+; CHECK-NEXT:    vstr p0, [sp, #92] @ 4-byte Spill
+; CHECK-NEXT:    vldrw.u32 q6, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #112] @ 16-byte Reload
+; CHECK-NEXT:    vcmp.u32 cs, q6, q0
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #96] @ 16-byte Reload
+; CHECK-NEXT:    vpsel q5, q2, q7
+; CHECK-NEXT:    vcmp.u32 cs, q6, q0
+; CHECK-NEXT:    vpsel q1, q2, q7
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.16 q0[0], r3
+; CHECK-NEXT:    vmov r3, s5
+; CHECK-NEXT:    vmov.16 q0[1], r3
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    vmov.16 q0[2], r3
+; CHECK-NEXT:    vmov r3, s7
+; CHECK-NEXT:    vmov.16 q0[3], r3
+; CHECK-NEXT:    vmov r3, s20
+; CHECK-NEXT:    vmov.16 q0[4], r3
+; CHECK-NEXT:    vmov r3, s21
+; CHECK-NEXT:    vmov.16 q0[5], r3
+; CHECK-NEXT:    vmov r3, s22
+; CHECK-NEXT:    vmov.16 q0[6], r3
+; CHECK-NEXT:    vmov r3, s23
+; CHECK-NEXT:    vmov.16 q0[7], r3
+; CHECK-NEXT:    vcmp.i16 ne, q0, zr
+; CHECK-NEXT:    vpsel q0, q2, q7
+; CHECK-NEXT:    vcmp.u32 cs, q6, q3
+; CHECK-NEXT:    vpsel q3, q2, q7
+; CHECK-NEXT:    vcmp.u32 cs, q6, q4
+; CHECK-NEXT:    vpsel q1, q2, q7
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.16 q4[0], r3
+; CHECK-NEXT:    vmov r3, s5
+; CHECK-NEXT:    vmov.16 q4[1], r3
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    vmov.16 q4[2], r3
+; CHECK-NEXT:    vmov r3, s7
+; CHECK-NEXT:    vmov.16 q4[3], r3
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vmov.16 q4[4], r3
+; CHECK-NEXT:    vmov r3, s13
+; CHECK-NEXT:    vmov.16 q4[5], r3
+; CHECK-NEXT:    vmov r3, s14
+; CHECK-NEXT:    vmov.16 q4[6], r3
+; CHECK-NEXT:    vmov r3, s15
+; CHECK-NEXT:    vmov.16 q4[7], r3
+; CHECK-NEXT:    vcmp.i16 ne, q4, zr
+; CHECK-NEXT:    vpsel q4, q2, q7
+; CHECK-NEXT:    vldr p0, [sp, #92] @ 4-byte Reload
+; CHECK-NEXT:    vmov.u16 r3, q4[0]
+; CHECK-NEXT:    vmov.8 q3[0], r3
+; CHECK-NEXT:    vmov.u16 r3, q4[1]
+; CHECK-NEXT:    vmov.8 q3[1], r3
+; CHECK-NEXT:    vmov.u16 r3, q4[2]
+; CHECK-NEXT:    vmov.8 q3[2], r3
+; CHECK-NEXT:    vmov.u16 r3, q4[3]
+; CHECK-NEXT:    vmov.8 q3[3], r3
+; CHECK-NEXT:    vmov.u16 r3, q4[4]
+; CHECK-NEXT:    vmov.8 q3[4], r3
+; CHECK-NEXT:    vmov.u16 r3, q4[5]
+; CHECK-NEXT:    vmov.8 q3[5], r3
+; CHECK-NEXT:    vmov.u16 r3, q4[6]
+; CHECK-NEXT:    vmov.8 q3[6], r3
+; CHECK-NEXT:    vmov.u16 r3, q4[7]
+; CHECK-NEXT:    vmov.8 q3[7], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[0]
+; CHECK-NEXT:    vmov.8 q3[8], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[1]
+; CHECK-NEXT:    vmov.8 q3[9], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[2]
+; CHECK-NEXT:    vmov.8 q3[10], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[3]
+; CHECK-NEXT:    vmov.8 q3[11], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[4]
+; CHECK-NEXT:    vmov.8 q3[12], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[5]
+; CHECK-NEXT:    vmov.8 q3[13], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[6]
+; CHECK-NEXT:    vmov.8 q3[14], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[7]
+; CHECK-NEXT:    vpnot
+; CHECK-NEXT:    vmov.8 q3[15], r3
+; CHECK-NEXT:    vpsttt
+; CHECK-NEXT:    vcmpt.i8 ne, q3, zr
+; CHECK-NEXT:    vldrbt.u8 q0, [r0], #16
+; CHECK-NEXT:    vaddvat.u8 r2, q0
+; CHECK-NEXT:    le lr, .LBB21_2
+; CHECK-NEXT:    b .LBB21_4
+; CHECK-NEXT:  .LBB21_3:
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:  .LBB21_4: @ %for.cond.cleanup
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    add sp, #136
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.5:
+; CHECK-NEXT:  .LCPI21_0:
+; CHECK-NEXT:    .long 12 @ 0xc
+; CHECK-NEXT:    .long 13 @ 0xd
+; CHECK-NEXT:    .long 14 @ 0xe
+; CHECK-NEXT:    .long 15 @ 0xf
+; CHECK-NEXT:  .LCPI21_1:
+; CHECK-NEXT:    .long 8 @ 0x8
+; CHECK-NEXT:    .long 9 @ 0x9
+; CHECK-NEXT:    .long 10 @ 0xa
+; CHECK-NEXT:    .long 11 @ 0xb
+; CHECK-NEXT:  .LCPI21_2:
+; CHECK-NEXT:    .long 4 @ 0x4
+; CHECK-NEXT:    .long 5 @ 0x5
+; CHECK-NEXT:    .long 6 @ 0x6
+; CHECK-NEXT:    .long 7 @ 0x7
+; CHECK-NEXT:  .LCPI21_3:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+entry:
+  %cmp6.not = icmp eq i32 %n, 0
+  br i1 %cmp6.not, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %n.rnd.up = add i32 %n, 15
+  %n.vec = and i32 %n.rnd.up, -16
+  %trip.count.minus.1 = add i32 %n, -1
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi i32 [ 0, %vector.ph ], [ %5, %vector.body ]
+  %active.lane.mask = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 %index, i32 %trip.count.minus.1)
+  %0 = getelementptr inbounds i8, i8* %x, i32 %index
+  %1 = bitcast i8* %0 to <16 x i8>*
+  %wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %1, i32 1, <16 x i1> %active.lane.mask, <16 x i8> undef)
+  %2 = zext <16 x i8> %wide.masked.load to <16 x i32>
+  %3 = select <16 x i1> %active.lane.mask, <16 x i32> %2, <16 x i32> zeroinitializer
+  %4 = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %3)
+  %5 = add i32 %4, %vec.phi
+  %index.next = add i32 %index, 16
+  %6 = icmp eq i32 %index.next, %n.vec
+  br i1 %6, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  %s.0.lcssa = phi i32 [ 0, %entry ], [ %5, %vector.body ]
+  ret i32 %s.0.lcssa
+}
+
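+; mla16i32: as add16i32, but reducing the product of two zero-extended i8
+; loads via vmlavat.u8.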
+define i32 @mla16i32(i8* noalias nocapture readonly %x, i8* noalias nocapture readonly %y, i32 %n) {
+; CHECK-LABEL: mla16i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #136
+; CHECK-NEXT:    sub sp, #136
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    beq.w .LBB22_3
+; CHECK-NEXT:  @ %bb.1: @ %vector.ph
+; CHECK-NEXT:    add.w r3, r2, #15
+; CHECK-NEXT:    vmov.i8 q7, #0x0
+; CHECK-NEXT:    bic r3, r3, #15
+; CHECK-NEXT:    vmov.i8 q2, #0xff
+; CHECK-NEXT:    sub.w r12, r3, #16
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    add.w lr, r3, r12, lsr #4
+; CHECK-NEXT:    sub.w r12, r2, #1
+; CHECK-NEXT:    adr r2, .LCPI22_0
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    adr r2, .LCPI22_1
+; CHECK-NEXT:    movs r3, #0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #64] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    adr r2, .LCPI22_2
+; CHECK-NEXT:    vstrw.32 q0, [sp, #48] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    adr r2, .LCPI22_3
+; CHECK-NEXT:    vstrw.32 q0, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    vstrw.32 q0, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vdup.32 q0, r12
+; CHECK-NEXT:    mov.w r12, #0
+; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT:  .LBB22_2: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #64] @ 16-byte Reload
+; CHECK-NEXT:    vdup.32 q6, r3
+; CHECK-NEXT:    vadd.i32 q0, q0, r3
+; CHECK-NEXT:    vcmp.u32 hi, q6, q0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #112] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT:    vpsel q3, q2, q7
+; CHECK-NEXT:    vadd.i32 q0, q0, r3
+; CHECK-NEXT:    vcmp.u32 hi, q6, q0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #96] @ 16-byte Spill
+; CHECK-NEXT:    vpsel q0, q2, q7
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov.16 q4[0], r2
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    vmov.16 q4[1], r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov.16 q4[2], r2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    vmov.16 q4[3], r2
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vmov.16 q4[4], r2
+; CHECK-NEXT:    vmov r2, s13
+; CHECK-NEXT:    vmov.16 q4[5], r2
+; CHECK-NEXT:    vmov r2, s14
+; CHECK-NEXT:    vmov.16 q4[6], r2
+; CHECK-NEXT:    vmov r2, s15
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT:    vmov.16 q4[7], r2
+; CHECK-NEXT:    vcmp.i16 ne, q4, zr
+; CHECK-NEXT:    vadd.i32 q3, q0, r3
+; CHECK-NEXT:    vpsel q5, q2, q7
+; CHECK-NEXT:    vcmp.u32 hi, q6, q3
+; CHECK-NEXT:    vstr p0, [sp, #92] @ 4-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT:    vadd.i32 q4, q0, r3
+; CHECK-NEXT:    adds r3, #16
+; CHECK-NEXT:    vcmp.u32 hi, q6, q4
+; CHECK-NEXT:    vpsel q0, q2, q7
+; CHECK-NEXT:    vldr p0, [sp, #92] @ 4-byte Reload
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov.16 q6[0], r2
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    vmov.16 q6[1], r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov.16 q6[2], r2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    vpsel q1, q2, q7
+; CHECK-NEXT:    vmov.16 q6[3], r2
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vmov.16 q6[4], r2
+; CHECK-NEXT:    vmov r2, s5
+; CHECK-NEXT:    vmov.16 q6[5], r2
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    vmov.16 q6[6], r2
+; CHECK-NEXT:    vmov r2, s7
+; CHECK-NEXT:    vmov.16 q6[7], r2
+; CHECK-NEXT:    vcmp.i16 ne, q6, zr
+; CHECK-NEXT:    vpsel q0, q2, q7
+; CHECK-NEXT:    vmov.u16 r2, q0[0]
+; CHECK-NEXT:    vmov.8 q6[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[1]
+; CHECK-NEXT:    vmov.8 q6[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[2]
+; CHECK-NEXT:    vmov.8 q6[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[3]
+; CHECK-NEXT:    vmov.8 q6[3], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[4]
+; CHECK-NEXT:    vmov.8 q6[4], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[5]
+; CHECK-NEXT:    vmov.8 q6[5], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[6]
+; CHECK-NEXT:    vmov.8 q6[6], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[7]
+; CHECK-NEXT:    vmov.8 q6[7], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[0]
+; CHECK-NEXT:    vmov.8 q6[8], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[1]
+; CHECK-NEXT:    vmov.8 q6[9], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[2]
+; CHECK-NEXT:    vmov.8 q6[10], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[3]
+; CHECK-NEXT:    vmov.8 q6[11], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[4]
+; CHECK-NEXT:    vmov.8 q6[12], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[5]
+; CHECK-NEXT:    vmov.8 q6[13], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[6]
+; CHECK-NEXT:    vmov.8 q6[14], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[7]
+; CHECK-NEXT:    vmov.8 q6[15], r2
+; CHECK-NEXT:    vcmp.i8 ne, q6, zr
+; CHECK-NEXT:    vstr p0, [sp, #92] @ 4-byte Spill
+; CHECK-NEXT:    vldrw.u32 q6, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #112] @ 16-byte Reload
+; CHECK-NEXT:    vcmp.u32 cs, q6, q0
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #96] @ 16-byte Reload
+; CHECK-NEXT:    vpsel q5, q2, q7
+; CHECK-NEXT:    vcmp.u32 cs, q6, q0
+; CHECK-NEXT:    vpsel q1, q2, q7
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vmov.16 q0[0], r2
+; CHECK-NEXT:    vmov r2, s5
+; CHECK-NEXT:    vmov.16 q0[1], r2
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    vmov.16 q0[2], r2
+; CHECK-NEXT:    vmov r2, s7
+; CHECK-NEXT:    vmov.16 q0[3], r2
+; CHECK-NEXT:    vmov r2, s20
+; CHECK-NEXT:    vmov.16 q0[4], r2
+; CHECK-NEXT:    vmov r2, s21
+; CHECK-NEXT:    vmov.16 q0[5], r2
+; CHECK-NEXT:    vmov r2, s22
+; CHECK-NEXT:    vmov.16 q0[6], r2
+; CHECK-NEXT:    vmov r2, s23
+; CHECK-NEXT:    vmov.16 q0[7], r2
+; CHECK-NEXT:    vcmp.i16 ne, q0, zr
+; CHECK-NEXT:    vpsel q0, q2, q7
+; CHECK-NEXT:    vcmp.u32 cs, q6, q3
+; CHECK-NEXT:    vpsel q3, q2, q7
+; CHECK-NEXT:    vcmp.u32 cs, q6, q4
+; CHECK-NEXT:    vpsel q1, q2, q7
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vmov.16 q4[0], r2
+; CHECK-NEXT:    vmov r2, s5
+; CHECK-NEXT:    vmov.16 q4[1], r2
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    vmov.16 q4[2], r2
+; CHECK-NEXT:    vmov r2, s7
+; CHECK-NEXT:    vmov.16 q4[3], r2
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vmov.16 q4[4], r2
+; CHECK-NEXT:    vmov r2, s13
+; CHECK-NEXT:    vmov.16 q4[5], r2
+; CHECK-NEXT:    vmov r2, s14
+; CHECK-NEXT:    vmov.16 q4[6], r2
+; CHECK-NEXT:    vmov r2, s15
+; CHECK-NEXT:    vmov.16 q4[7], r2
+; CHECK-NEXT:    vcmp.i16 ne, q4, zr
+; CHECK-NEXT:    vpsel q4, q2, q7
+; CHECK-NEXT:    vldr p0, [sp, #92] @ 4-byte Reload
+; CHECK-NEXT:    vmov.u16 r2, q4[0]
+; CHECK-NEXT:    vmov.8 q3[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q4[1]
+; CHECK-NEXT:    vmov.8 q3[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q4[2]
+; CHECK-NEXT:    vmov.8 q3[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q4[3]
+; CHECK-NEXT:    vmov.8 q3[3], r2
+; CHECK-NEXT:    vmov.u16 r2, q4[4]
+; CHECK-NEXT:    vmov.8 q3[4], r2
+; CHECK-NEXT:    vmov.u16 r2, q4[5]
+; CHECK-NEXT:    vmov.8 q3[5], r2
+; CHECK-NEXT:    vmov.u16 r2, q4[6]
+; CHECK-NEXT:    vmov.8 q3[6], r2
+; CHECK-NEXT:    vmov.u16 r2, q4[7]
+; CHECK-NEXT:    vmov.8 q3[7], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[0]
+; CHECK-NEXT:    vmov.8 q3[8], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[1]
+; CHECK-NEXT:    vmov.8 q3[9], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[2]
+; CHECK-NEXT:    vmov.8 q3[10], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[3]
+; CHECK-NEXT:    vmov.8 q3[11], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[4]
+; CHECK-NEXT:    vmov.8 q3[12], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[5]
+; CHECK-NEXT:    vmov.8 q3[13], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[6]
+; CHECK-NEXT:    vmov.8 q3[14], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[7]
+; CHECK-NEXT:    vpnot
+; CHECK-NEXT:    vmov.8 q3[15], r2
+; CHECK-NEXT:    vpstttt
+; CHECK-NEXT:    vcmpt.i8 ne, q3, zr
+; CHECK-NEXT:    vldrbt.u8 q0, [r0], #16
+; CHECK-NEXT:    vldrbt.u8 q1, [r1], #16
+; CHECK-NEXT:    vmlavat.u8 r12, q1, q0
+; CHECK-NEXT:    le lr, .LBB22_2
+; CHECK-NEXT:    b .LBB22_4
+; CHECK-NEXT:  .LBB22_3:
+; CHECK-NEXT:    mov.w r12, #0
+; CHECK-NEXT:  .LBB22_4: @ %for.cond.cleanup
+; CHECK-NEXT:    mov r0, r12
+; CHECK-NEXT:    add sp, #136
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.5:
+; CHECK-NEXT:  .LCPI22_0:
+; CHECK-NEXT:    .long 12 @ 0xc
+; CHECK-NEXT:    .long 13 @ 0xd
+; CHECK-NEXT:    .long 14 @ 0xe
+; CHECK-NEXT:    .long 15 @ 0xf
+; CHECK-NEXT:  .LCPI22_1:
+; CHECK-NEXT:    .long 8 @ 0x8
+; CHECK-NEXT:    .long 9 @ 0x9
+; CHECK-NEXT:    .long 10 @ 0xa
+; CHECK-NEXT:    .long 11 @ 0xb
+; CHECK-NEXT:  .LCPI22_2:
+; CHECK-NEXT:    .long 4 @ 0x4
+; CHECK-NEXT:    .long 5 @ 0x5
+; CHECK-NEXT:    .long 6 @ 0x6
+; CHECK-NEXT:    .long 7 @ 0x7
+; CHECK-NEXT:  .LCPI22_3:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+entry:
+  %cmp9.not = icmp eq i32 %n, 0
+  br i1 %cmp9.not, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %n.rnd.up = add i32 %n, 15
+  %n.vec = and i32 %n.rnd.up, -16
+  %trip.count.minus.1 = add i32 %n, -1
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi i32 [ 0, %vector.ph ], [ %9, %vector.body ]
+  %active.lane.mask = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 %index, i32 %trip.count.minus.1)
+  %0 = getelementptr inbounds i8, i8* %x, i32 %index
+  %1 = bitcast i8* %0 to <16 x i8>*
+  %wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %1, i32 1, <16 x i1> %active.lane.mask, <16 x i8> undef)
+  %2 = zext <16 x i8> %wide.masked.load to <16 x i32>
+  %3 = getelementptr inbounds i8, i8* %y, i32 %index
+  %4 = bitcast i8* %3 to <16 x i8>*
+  %wide.masked.load14 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %4, i32 1, <16 x i1> %active.lane.mask, <16 x i8> undef)
+  %5 = zext <16 x i8> %wide.masked.load14 to <16 x i32>
+  %6 = mul nuw nsw <16 x i32> %5, %2
+  %7 = select <16 x i1> %active.lane.mask, <16 x i32> %6, <16 x i32> zeroinitializer
+  %8 = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %7)
+  %9 = add i32 %8, %vec.phi
+  %index.next = add i32 %index, 16
+  %10 = icmp eq i32 %index.next, %n.vec
+  br i1 %10, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  %s.0.lcssa = phi i32 [ 0, %entry ], [ %9, %vector.body ]
+  ret i32 %s.0.lcssa
+}
+
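+; add8i16: i16 add reduction kept at i16 width using vaddvat.u16, with the
+; result sign-extended on return.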
+define signext i16 @add8i16(i16* noalias nocapture readonly %x, i32 %n) {
+; CHECK-LABEL: add8i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #40
+; CHECK-NEXT:    sub sp, #40
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    beq.w .LBB23_3
+; CHECK-NEXT:  @ %bb.1: @ %vector.ph
+; CHECK-NEXT:    adds r2, r1, #7
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    bic r2, r2, #7
+; CHECK-NEXT:    vmov.i8 q1, #0x0
+; CHECK-NEXT:    subs r2, #8
+; CHECK-NEXT:    vmov.i8 q2, #0xff
+; CHECK-NEXT:    add.w lr, r3, r2, lsr #3
+; CHECK-NEXT:    adr r3, .LCPI23_0
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    adr r3, .LCPI23_1
+; CHECK-NEXT:    subs r2, r1, #1
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:    vstrw.32 q0, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    vdup.32 q4, r2
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT:  .LBB23_2: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q3, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vadd.i32 q5, q0, r1
+; CHECK-NEXT:    vdup.32 q0, r1
+; CHECK-NEXT:    vcmp.u32 hi, q0, q5
+; CHECK-NEXT:    vadd.i32 q6, q3, r1
+; CHECK-NEXT:    vpsel q7, q2, q1
+; CHECK-NEXT:    vcmp.u32 hi, q0, q6
+; CHECK-NEXT:    vpsel q3, q2, q1
+; CHECK-NEXT:    adds r1, #8
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vmov.16 q0[0], r3
+; CHECK-NEXT:    vmov r3, s13
+; CHECK-NEXT:    vmov.16 q0[1], r3
+; CHECK-NEXT:    vmov r3, s14
+; CHECK-NEXT:    vmov.16 q0[2], r3
+; CHECK-NEXT:    vmov r3, s15
+; CHECK-NEXT:    vmov.16 q0[3], r3
+; CHECK-NEXT:    vmov r3, s28
+; CHECK-NEXT:    vmov.16 q0[4], r3
+; CHECK-NEXT:    vmov r3, s29
+; CHECK-NEXT:    vmov.16 q0[5], r3
+; CHECK-NEXT:    vmov r3, s30
+; CHECK-NEXT:    vmov.16 q0[6], r3
+; CHECK-NEXT:    vmov r3, s31
+; CHECK-NEXT:    vmov.16 q0[7], r3
+; CHECK-NEXT:    vcmp.i16 ne, q0, zr
+; CHECK-NEXT:    vstr p0, [sp, #36] @ 4-byte Spill
+; CHECK-NEXT:    vcmp.u32 cs, q4, q5
+; CHECK-NEXT:    vpsel q5, q2, q1
+; CHECK-NEXT:    vcmp.u32 cs, q4, q6
+; CHECK-NEXT:    vpsel q0, q2, q1
+; CHECK-NEXT:    vldr p0, [sp, #36] @ 4-byte Reload
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov.16 q6[0], r3
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    vmov.16 q6[1], r3
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov.16 q6[2], r3
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    vmov.16 q6[3], r3
+; CHECK-NEXT:    vmov r3, s20
+; CHECK-NEXT:    vmov.16 q6[4], r3
+; CHECK-NEXT:    vmov r3, s21
+; CHECK-NEXT:    vmov.16 q6[5], r3
+; CHECK-NEXT:    vmov r3, s22
+; CHECK-NEXT:    vmov.16 q6[6], r3
+; CHECK-NEXT:    vmov r3, s23
+; CHECK-NEXT:    vpnot
+; CHECK-NEXT:    vmov.16 q6[7], r3
+; CHECK-NEXT:    vpsttt
+; CHECK-NEXT:    vcmpt.i16 ne, q6, zr
+; CHECK-NEXT:    vldrht.u16 q0, [r0], #16
+; CHECK-NEXT:    vaddvat.u16 r2, q0
+; CHECK-NEXT:    le lr, .LBB23_2
+; CHECK-NEXT:    b .LBB23_4
+; CHECK-NEXT:  .LBB23_3:
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:  .LBB23_4: @ %for.cond.cleanup
+; CHECK-NEXT:    sxth r0, r2
+; CHECK-NEXT:    add sp, #40
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.5:
+; CHECK-NEXT:  .LCPI23_0:
+; CHECK-NEXT:    .long 4 @ 0x4
+; CHECK-NEXT:    .long 5 @ 0x5
+; CHECK-NEXT:    .long 6 @ 0x6
+; CHECK-NEXT:    .long 7 @ 0x7
+; CHECK-NEXT:  .LCPI23_1:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+entry:
+  %cmp8.not = icmp eq i32 %n, 0
+  br i1 %cmp8.not, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %n.rnd.up = add i32 %n, 7
+  %n.vec = and i32 %n.rnd.up, -8
+  %trip.count.minus.1 = add i32 %n, -1
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi i16 [ 0, %vector.ph ], [ %4, %vector.body ]
+  %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %trip.count.minus.1)
+  %0 = getelementptr inbounds i16, i16* %x, i32 %index
+  %1 = bitcast i16* %0 to <8 x i16>*
+  %wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %1, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
+  %2 = select <8 x i1> %active.lane.mask, <8 x i16> %wide.masked.load, <8 x i16> zeroinitializer
+  %3 = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %2)
+  %4 = add i16 %3, %vec.phi
+  %index.next = add i32 %index, 8
+  %5 = icmp eq i32 %index.next, %n.vec
+  br i1 %5, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  %s.0.lcssa = phi i16 [ 0, %entry ], [ %4, %vector.body ]
+  ret i16 %s.0.lcssa
+}
+
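+; mla8i16: i16 multiply-accumulate reduction via vmlavat.u16, result
+; sign-extended on return.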
+define signext i16 @mla8i16(i16* noalias nocapture readonly %x, i16* noalias nocapture readonly %y, i32 %n) {
+; CHECK-LABEL: mla8i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #40
+; CHECK-NEXT:    sub sp, #40
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    beq.w .LBB24_3
+; CHECK-NEXT:  @ %bb.1: @ %vector.ph
+; CHECK-NEXT:    adds r3, r2, #7
+; CHECK-NEXT:    vmov.i8 q1, #0x0
+; CHECK-NEXT:    bic r3, r3, #7
+; CHECK-NEXT:    vmov.i8 q2, #0xff
+; CHECK-NEXT:    sub.w r12, r3, #8
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    add.w lr, r3, r12, lsr #3
+; CHECK-NEXT:    adr r3, .LCPI24_0
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    adr r3, .LCPI24_1
+; CHECK-NEXT:    sub.w r12, r2, #1
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:    vstrw.32 q0, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    vdup.32 q4, r12
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    mov.w r12, #0
+; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT:  .LBB24_2: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q3, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vadd.i32 q5, q0, r2
+; CHECK-NEXT:    vdup.32 q0, r2
+; CHECK-NEXT:    vcmp.u32 hi, q0, q5
+; CHECK-NEXT:    vadd.i32 q6, q3, r2
+; CHECK-NEXT:    vpsel q7, q2, q1
+; CHECK-NEXT:    vcmp.u32 hi, q0, q6
+; CHECK-NEXT:    vpsel q3, q2, q1
+; CHECK-NEXT:    adds r2, #8
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vmov.16 q0[0], r3
+; CHECK-NEXT:    vmov r3, s13
+; CHECK-NEXT:    vmov.16 q0[1], r3
+; CHECK-NEXT:    vmov r3, s14
+; CHECK-NEXT:    vmov.16 q0[2], r3
+; CHECK-NEXT:    vmov r3, s15
+; CHECK-NEXT:    vmov.16 q0[3], r3
+; CHECK-NEXT:    vmov r3, s28
+; CHECK-NEXT:    vmov.16 q0[4], r3
+; CHECK-NEXT:    vmov r3, s29
+; CHECK-NEXT:    vmov.16 q0[5], r3
+; CHECK-NEXT:    vmov r3, s30
+; CHECK-NEXT:    vmov.16 q0[6], r3
+; CHECK-NEXT:    vmov r3, s31
+; CHECK-NEXT:    vmov.16 q0[7], r3
+; CHECK-NEXT:    vcmp.i16 ne, q0, zr
+; CHECK-NEXT:    vstr p0, [sp, #36] @ 4-byte Spill
+; CHECK-NEXT:    vcmp.u32 cs, q4, q5
+; CHECK-NEXT:    vpsel q5, q2, q1
+; CHECK-NEXT:    vcmp.u32 cs, q4, q6
+; CHECK-NEXT:    vpsel q0, q2, q1
+; CHECK-NEXT:    vldr p0, [sp, #36] @ 4-byte Reload
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov.16 q6[0], r3
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    vmov.16 q6[1], r3
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov.16 q6[2], r3
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    vmov.16 q6[3], r3
+; CHECK-NEXT:    vmov r3, s20
+; CHECK-NEXT:    vmov.16 q6[4], r3
+; CHECK-NEXT:    vmov r3, s21
+; CHECK-NEXT:    vmov.16 q6[5], r3
+; CHECK-NEXT:    vmov r3, s22
+; CHECK-NEXT:    vmov.16 q6[6], r3
+; CHECK-NEXT:    vmov r3, s23
+; CHECK-NEXT:    vpnot
+; CHECK-NEXT:    vmov.16 q6[7], r3
+; CHECK-NEXT:    vpstttt
+; CHECK-NEXT:    vcmpt.i16 ne, q6, zr
+; CHECK-NEXT:    vldrht.u16 q0, [r0], #16
+; CHECK-NEXT:    vldrht.u16 q3, [r1], #16
+; CHECK-NEXT:    vmlavat.u16 r12, q3, q0
+; CHECK-NEXT:    le lr, .LBB24_2
+; CHECK-NEXT:    b .LBB24_4
+; CHECK-NEXT:  .LBB24_3:
+; CHECK-NEXT:    mov.w r12, #0
+; CHECK-NEXT:  .LBB24_4: @ %for.cond.cleanup
+; CHECK-NEXT:    sxth.w r0, r12
+; CHECK-NEXT:    add sp, #40
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.5:
+; CHECK-NEXT:  .LCPI24_0:
+; CHECK-NEXT:    .long 4 @ 0x4
+; CHECK-NEXT:    .long 5 @ 0x5
+; CHECK-NEXT:    .long 6 @ 0x6
+; CHECK-NEXT:    .long 7 @ 0x7
+; CHECK-NEXT:  .LCPI24_1:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+entry:
+  %cmp11.not = icmp eq i32 %n, 0
+  br i1 %cmp11.not, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %n.rnd.up = add i32 %n, 7
+  %n.vec = and i32 %n.rnd.up, -8
+  %trip.count.minus.1 = add i32 %n, -1
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi i16 [ 0, %vector.ph ], [ %7, %vector.body ]
+  %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %trip.count.minus.1)
+  %0 = getelementptr inbounds i16, i16* %x, i32 %index
+  %1 = bitcast i16* %0 to <8 x i16>*
+  %wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %1, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
+  %2 = getelementptr inbounds i16, i16* %y, i32 %index
+  %3 = bitcast i16* %2 to <8 x i16>*
+  %wide.masked.load16 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %3, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
+  %4 = mul <8 x i16> %wide.masked.load16, %wide.masked.load
+  %5 = select <8 x i1> %active.lane.mask, <8 x i16> %4, <8 x i16> zeroinitializer
+  %6 = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %5)
+  %7 = add i16 %6, %vec.phi
+  %index.next = add i32 %index, 8
+  %8 = icmp eq i32 %index.next, %n.vec
+  br i1 %8, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  %s.0.lcssa = phi i16 [ 0, %entry ], [ %7, %vector.body ]
+  ret i16 %s.0.lcssa
+}
+
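+; add16i16: i8 data zero-extended (vmovlb.u8) to i16 and add-reduced to an
+; i16 result; the widest mask/extend combination here, spilling heavily
+; (200 bytes of stack) around the repacked predicates.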
+define signext i16 @add16i16(i8* noalias nocapture readonly %x, i32 %n) {
+; CHECK-LABEL: add16i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #200
+; CHECK-NEXT:    sub sp, #200
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    beq.w .LBB25_3
+; CHECK-NEXT:  @ %bb.1: @ %vector.ph
+; CHECK-NEXT:    add.w r2, r1, #15
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    bic r2, r2, #15
+; CHECK-NEXT:    subs r2, #16
+; CHECK-NEXT:    add.w lr, r3, r2, lsr #4
+; CHECK-NEXT:    adr.w r3, .LCPI25_0
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:    adr.w r3, .LCPI25_1
+; CHECK-NEXT:    subs r2, r1, #1
+; CHECK-NEXT:    vstrw.32 q0, [sp, #112] @ 16-byte Spill
+; CHECK-NEXT:    vmov.i8 q0, #0x0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #96] @ 16-byte Spill
+; CHECK-NEXT:    vmov.i8 q0, #0xff
+; CHECK-NEXT:    vstrw.32 q0, [sp, #80] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    adr.w r3, .LCPI25_2
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #64] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    adr.w r3, .LCPI25_3
+; CHECK-NEXT:    vstrw.32 q0, [sp, #48] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    vstrw.32 q0, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT:    vdup.32 q0, r2
+; CHECK-NEXT:    vstrw.32 q0, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vmov.i32 q0, #0x0
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT:  .LBB25_2: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #112] @ 16-byte Reload
+; CHECK-NEXT:    vdup.32 q6, r1
+; CHECK-NEXT:    vldrw.u32 q3, [sp, #96] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q1, [sp, #80] @ 16-byte Reload
+; CHECK-NEXT:    vadd.i32 q0, q0, r1
+; CHECK-NEXT:    vcmp.u32 hi, q6, q0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #176] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #64] @ 16-byte Reload
+; CHECK-NEXT:    vpsel q4, q1, q3
+; CHECK-NEXT:    vadd.i32 q0, q0, r1
+; CHECK-NEXT:    vcmp.u32 hi, q6, q0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #160] @ 16-byte Spill
+; CHECK-NEXT:    vpsel q0, q1, q3
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov.16 q5[0], r3
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    vmov.16 q5[1], r3
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov.16 q5[2], r3
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    vmov.16 q5[3], r3
+; CHECK-NEXT:    vmov r3, s16
+; CHECK-NEXT:    vmov.16 q5[4], r3
+; CHECK-NEXT:    vmov r3, s17
+; CHECK-NEXT:    vmov.16 q5[5], r3
+; CHECK-NEXT:    vmov r3, s18
+; CHECK-NEXT:    vmov.16 q5[6], r3
+; CHECK-NEXT:    vmov r3, s19
+; CHECK-NEXT:    vmov.16 q5[7], r3
+; CHECK-NEXT:    vmov q4, q1
+; CHECK-NEXT:    vcmp.i16 ne, q5, zr
+; CHECK-NEXT:    vstr p0, [sp, #196] @ 4-byte Spill
+; CHECK-NEXT:    vpsel q7, q1, q3
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT:    vadd.i32 q0, q0, r1
+; CHECK-NEXT:    vcmp.u32 hi, q6, q0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #128] @ 16-byte Spill
+; CHECK-NEXT:    vstr p0, [sp, #192] @ 4-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT:    vadd.i32 q5, q0, r1
+; CHECK-NEXT:    adds r1, #16
+; CHECK-NEXT:    vcmp.u32 hi, q6, q5
+; CHECK-NEXT:    vpsel q0, q1, q3
+; CHECK-NEXT:    vldr p0, [sp, #192] @ 4-byte Reload
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov.16 q6[0], r3
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    vmov.16 q6[1], r3
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov.16 q6[2], r3
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    vpsel q1, q1, q3
+; CHECK-NEXT:    vmov.16 q6[3], r3
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.16 q6[4], r3
+; CHECK-NEXT:    vmov r3, s5
+; CHECK-NEXT:    vmov.16 q6[5], r3
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    vmov.16 q6[6], r3
+; CHECK-NEXT:    vmov r3, s7
+; CHECK-NEXT:    vmov.16 q6[7], r3
+; CHECK-NEXT:    vcmp.i16 ne, q6, zr
+; CHECK-NEXT:    vpsel q0, q4, q3
+; CHECK-NEXT:    vstr p0, [sp, #192] @ 4-byte Spill
+; CHECK-NEXT:    vmov.u16 r3, q0[0]
+; CHECK-NEXT:    vmov.8 q6[0], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[1]
+; CHECK-NEXT:    vmov.8 q6[1], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[2]
+; CHECK-NEXT:    vmov.8 q6[2], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[3]
+; CHECK-NEXT:    vmov.8 q6[3], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[4]
+; CHECK-NEXT:    vmov.8 q6[4], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[5]
+; CHECK-NEXT:    vmov.8 q6[5], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[6]
+; CHECK-NEXT:    vmov.8 q6[6], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[7]
+; CHECK-NEXT:    vmov.8 q6[7], r3
+; CHECK-NEXT:    vmov.u16 r3, q7[0]
+; CHECK-NEXT:    vmov.8 q6[8], r3
+; CHECK-NEXT:    vmov.u16 r3, q7[1]
+; CHECK-NEXT:    vmov.8 q6[9], r3
+; CHECK-NEXT:    vmov.u16 r3, q7[2]
+; CHECK-NEXT:    vmov.8 q6[10], r3
+; CHECK-NEXT:    vmov.u16 r3, q7[3]
+; CHECK-NEXT:    vmov.8 q6[11], r3
+; CHECK-NEXT:    vmov.u16 r3, q7[4]
+; CHECK-NEXT:    vmov.8 q6[12], r3
+; CHECK-NEXT:    vmov.u16 r3, q7[5]
+; CHECK-NEXT:    vmov.8 q6[13], r3
+; CHECK-NEXT:    vmov.u16 r3, q7[6]
+; CHECK-NEXT:    vmov.8 q6[14], r3
+; CHECK-NEXT:    vmov.u16 r3, q7[7]
+; CHECK-NEXT:    vmov.8 q6[15], r3
+; CHECK-NEXT:    vcmp.i8 ne, q6, zr
+; CHECK-NEXT:    vstr p0, [sp, #156] @ 4-byte Spill
+; CHECK-NEXT:    vldrw.u32 q2, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #176] @ 16-byte Reload
+; CHECK-NEXT:    vcmp.u32 cs, q2, q0
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #160] @ 16-byte Reload
+; CHECK-NEXT:    vpsel q6, q4, q3
+; CHECK-NEXT:    vcmp.u32 cs, q2, q0
+; CHECK-NEXT:    vpsel q1, q4, q3
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.16 q0[0], r3
+; CHECK-NEXT:    vmov r3, s5
+; CHECK-NEXT:    vmov.16 q0[1], r3
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    vmov.16 q0[2], r3
+; CHECK-NEXT:    vmov r3, s7
+; CHECK-NEXT:    vmov.16 q0[3], r3
+; CHECK-NEXT:    vmov r3, s24
+; CHECK-NEXT:    vmov.16 q0[4], r3
+; CHECK-NEXT:    vmov r3, s25
+; CHECK-NEXT:    vmov.16 q0[5], r3
+; CHECK-NEXT:    vmov r3, s26
+; CHECK-NEXT:    vmov.16 q0[6], r3
+; CHECK-NEXT:    vmov r3, s27
+; CHECK-NEXT:    vmov.16 q0[7], r3
+; CHECK-NEXT:    vldrw.u32 q1, [sp, #128] @ 16-byte Reload
+; CHECK-NEXT:    vcmp.i16 ne, q0, zr
+; CHECK-NEXT:    vmov q6, q4
+; CHECK-NEXT:    vpsel q7, q4, q3
+; CHECK-NEXT:    vcmp.u32 cs, q2, q1
+; CHECK-NEXT:    vpsel q4, q4, q3
+; CHECK-NEXT:    vcmp.u32 cs, q2, q5
+; CHECK-NEXT:    vpsel q1, q6, q3
+; CHECK-NEXT:    vmov q2, q3
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov q5, q6
+; CHECK-NEXT:    vmov.16 q3[0], r3
+; CHECK-NEXT:    vmov r3, s5
+; CHECK-NEXT:    vmov.16 q3[1], r3
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    vmov.16 q3[2], r3
+; CHECK-NEXT:    vmov r3, s7
+; CHECK-NEXT:    vmov.16 q3[3], r3
+; CHECK-NEXT:    vmov r3, s16
+; CHECK-NEXT:    vmov.16 q3[4], r3
+; CHECK-NEXT:    vmov r3, s17
+; CHECK-NEXT:    vmov.16 q3[5], r3
+; CHECK-NEXT:    vmov r3, s18
+; CHECK-NEXT:    vmov.16 q3[6], r3
+; CHECK-NEXT:    vmov r3, s19
+; CHECK-NEXT:    vmov.16 q3[7], r3
+; CHECK-NEXT:    vcmp.i16 ne, q3, zr
+; CHECK-NEXT:    vpsel q5, q6, q2
+; CHECK-NEXT:    vldr p0, [sp, #156] @ 4-byte Reload
+; CHECK-NEXT:    vmov.u16 r3, q5[0]
+; CHECK-NEXT:    vmov.8 q4[0], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[1]
+; CHECK-NEXT:    vmov.8 q4[1], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[2]
+; CHECK-NEXT:    vmov.8 q4[2], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[3]
+; CHECK-NEXT:    vmov.8 q4[3], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[4]
+; CHECK-NEXT:    vmov.8 q4[4], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[5]
+; CHECK-NEXT:    vmov.8 q4[5], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[6]
+; CHECK-NEXT:    vmov.8 q4[6], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[7]
+; CHECK-NEXT:    vmov.8 q4[7], r3
+; CHECK-NEXT:    vmov.u16 r3, q7[0]
+; CHECK-NEXT:    vmov.8 q4[8], r3
+; CHECK-NEXT:    vmov.u16 r3, q7[1]
+; CHECK-NEXT:    vmov.8 q4[9], r3
+; CHECK-NEXT:    vmov.u16 r3, q7[2]
+; CHECK-NEXT:    vmov.8 q4[10], r3
+; CHECK-NEXT:    vmov.u16 r3, q7[3]
+; CHECK-NEXT:    vmov.8 q4[11], r3
+; CHECK-NEXT:    vmov.u16 r3, q7[4]
+; CHECK-NEXT:    vmov.8 q4[12], r3
+; CHECK-NEXT:    vmov.u16 r3, q7[5]
+; CHECK-NEXT:    vmov.8 q4[13], r3
+; CHECK-NEXT:    vmov.u16 r3, q7[6]
+; CHECK-NEXT:    vmov.8 q4[14], r3
+; CHECK-NEXT:    vmov.u16 r3, q7[7]
+; CHECK-NEXT:    vmov.8 q4[15], r3
+; CHECK-NEXT:    vpnot
+; CHECK-NEXT:    vpstt
+; CHECK-NEXT:    vcmpt.i8 ne, q4, zr
+; CHECK-NEXT:    vldrbt.u8 q4, [r0], #16
+; CHECK-NEXT:    vmov.u8 r3, q4[0]
+; CHECK-NEXT:    vldr p0, [sp, #192] @ 4-byte Reload
+; CHECK-NEXT:    vpnot
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcmpt.i16 ne, q3, zr
+; CHECK-NEXT:    vmov.16 q3[0], r3
+; CHECK-NEXT:    vmov.u8 r3, q4[1]
+; CHECK-NEXT:    vldrw.u32 q2, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vmov.16 q3[1], r3
+; CHECK-NEXT:    vmov.u8 r3, q4[2]
+; CHECK-NEXT:    vmov.16 q3[2], r3
+; CHECK-NEXT:    vmov.u8 r3, q4[3]
+; CHECK-NEXT:    vmov.16 q3[3], r3
+; CHECK-NEXT:    vmov.u8 r3, q4[4]
+; CHECK-NEXT:    vmov.16 q3[4], r3
+; CHECK-NEXT:    vmov.u8 r3, q4[5]
+; CHECK-NEXT:    vmov.16 q3[5], r3
+; CHECK-NEXT:    vmov.u8 r3, q4[6]
+; CHECK-NEXT:    vmov.16 q3[6], r3
+; CHECK-NEXT:    vmov.u8 r3, q4[7]
+; CHECK-NEXT:    vmov.16 q3[7], r3
+; CHECK-NEXT:    vmov.u8 r3, q4[8]
+; CHECK-NEXT:    vmovlb.u8 q1, q3
+; CHECK-NEXT:    vpsel q3, q1, q2
+; CHECK-NEXT:    vldr p0, [sp, #196] @ 4-byte Reload
+; CHECK-NEXT:    vpnot
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcmpt.i16 ne, q0, zr
+; CHECK-NEXT:    vmov.16 q0[0], r3
+; CHECK-NEXT:    vmov.u8 r3, q4[9]
+; CHECK-NEXT:    vmov.16 q0[1], r3
+; CHECK-NEXT:    vmov.u8 r3, q4[10]
+; CHECK-NEXT:    vmov.16 q0[2], r3
+; CHECK-NEXT:    vmov.u8 r3, q4[11]
+; CHECK-NEXT:    vmov.16 q0[3], r3
+; CHECK-NEXT:    vmov.u8 r3, q4[12]
+; CHECK-NEXT:    vmov.16 q0[4], r3
+; CHECK-NEXT:    vmov.u8 r3, q4[13]
+; CHECK-NEXT:    vmov.16 q0[5], r3
+; CHECK-NEXT:    vmov.u8 r3, q4[14]
+; CHECK-NEXT:    vmov.16 q0[6], r3
+; CHECK-NEXT:    vmov.u8 r3, q4[15]
+; CHECK-NEXT:    vmov.16 q0[7], r3
+; CHECK-NEXT:    vmovlb.u8 q0, q0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vaddt.i16 q3, q3, q0
+; CHECK-NEXT:    vaddva.u16 r2, q3
+; CHECK-NEXT:    le lr, .LBB25_2
+; CHECK-NEXT:    b .LBB25_4
+; CHECK-NEXT:  .LBB25_3:
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:  .LBB25_4: @ %for.cond.cleanup
+; CHECK-NEXT:    sxth r0, r2
+; CHECK-NEXT:    add sp, #200
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.5:
+; CHECK-NEXT:  .LCPI25_0:
+; CHECK-NEXT:    .long 12 @ 0xc
+; CHECK-NEXT:    .long 13 @ 0xd
+; CHECK-NEXT:    .long 14 @ 0xe
+; CHECK-NEXT:    .long 15 @ 0xf
+; CHECK-NEXT:  .LCPI25_1:
+; CHECK-NEXT:    .long 8 @ 0x8
+; CHECK-NEXT:    .long 9 @ 0x9
+; CHECK-NEXT:    .long 10 @ 0xa
+; CHECK-NEXT:    .long 11 @ 0xb
+; CHECK-NEXT:  .LCPI25_2:
+; CHECK-NEXT:    .long 4 @ 0x4
+; CHECK-NEXT:    .long 5 @ 0x5
+; CHECK-NEXT:    .long 6 @ 0x6
+; CHECK-NEXT:    .long 7 @ 0x7
+; CHECK-NEXT:  .LCPI25_3:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+entry:
+  %cmp8.not = icmp eq i32 %n, 0
+  br i1 %cmp8.not, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %n.rnd.up = add i32 %n, 15
+  %n.vec = and i32 %n.rnd.up, -16
+  %trip.count.minus.1 = add i32 %n, -1
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi i16 [ 0, %vector.ph ], [ %5, %vector.body ]
+  %active.lane.mask = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 %index, i32 %trip.count.minus.1)
+  %0 = getelementptr inbounds i8, i8* %x, i32 %index
+  %1 = bitcast i8* %0 to <16 x i8>*
+  %wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %1, i32 1, <16 x i1> %active.lane.mask, <16 x i8> undef)
+  %2 = zext <16 x i8> %wide.masked.load to <16 x i16>
+  %3 = select <16 x i1> %active.lane.mask, <16 x i16> %2, <16 x i16> zeroinitializer
+  %4 = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %3)
+  %5 = add i16 %4, %vec.phi
+  %index.next = add i32 %index, 16
+  %6 = icmp eq i32 %index.next, %n.vec
+  br i1 %6, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  %s.0.lcssa = phi i16 [ 0, %entry ], [ %5, %vector.body ]
+  ret i16 %s.0.lcssa
+}
+
+define signext i16 @mla16i16(i8* noalias nocapture readonly %x, i8* noalias nocapture readonly %y, i32 %n) {
+; CHECK-LABEL: mla16i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #200
+; CHECK-NEXT:    sub sp, #200
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    beq.w .LBB26_3
+; CHECK-NEXT:  @ %bb.1: @ %vector.ph
+; CHECK-NEXT:    add.w r3, r2, #15
+; CHECK-NEXT:    bic r3, r3, #15
+; CHECK-NEXT:    sub.w r12, r3, #16
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    add.w lr, r3, r12, lsr #4
+; CHECK-NEXT:    adr.w r3, .LCPI26_0
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:    adr.w r3, .LCPI26_1
+; CHECK-NEXT:    sub.w r12, r2, #1
+; CHECK-NEXT:    vstrw.32 q0, [sp, #112] @ 16-byte Spill
+; CHECK-NEXT:    vmov.i8 q0, #0x0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #96] @ 16-byte Spill
+; CHECK-NEXT:    vmov.i8 q0, #0xff
+; CHECK-NEXT:    vstrw.32 q0, [sp, #80] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    adr.w r3, .LCPI26_2
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #64] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    adr.w r3, .LCPI26_3
+; CHECK-NEXT:    vstrw.32 q0, [sp, #48] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    vstrw.32 q0, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT:    vdup.32 q0, r12
+; CHECK-NEXT:    vstrw.32 q0, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vmov.i32 q0, #0x0
+; CHECK-NEXT:    mov.w r12, #0
+; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT:  .LBB26_2: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #112] @ 16-byte Reload
+; CHECK-NEXT:    vdup.32 q6, r2
+; CHECK-NEXT:    vldrw.u32 q3, [sp, #96] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q2, [sp, #80] @ 16-byte Reload
+; CHECK-NEXT:    vadd.i32 q0, q0, r2
+; CHECK-NEXT:    vcmp.u32 hi, q6, q0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #176] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #64] @ 16-byte Reload
+; CHECK-NEXT:    vpsel q4, q2, q3
+; CHECK-NEXT:    vadd.i32 q0, q0, r2
+; CHECK-NEXT:    vcmp.u32 hi, q6, q0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #160] @ 16-byte Spill
+; CHECK-NEXT:    vpsel q0, q2, q3
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov.16 q5[0], r3
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    vmov.16 q5[1], r3
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov.16 q5[2], r3
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    vmov.16 q5[3], r3
+; CHECK-NEXT:    vmov r3, s16
+; CHECK-NEXT:    vmov.16 q5[4], r3
+; CHECK-NEXT:    vmov r3, s17
+; CHECK-NEXT:    vmov.16 q5[5], r3
+; CHECK-NEXT:    vmov r3, s18
+; CHECK-NEXT:    vmov.16 q5[6], r3
+; CHECK-NEXT:    vmov r3, s19
+; CHECK-NEXT:    vmov.16 q5[7], r3
+; CHECK-NEXT:    vcmp.i16 ne, q5, zr
+; CHECK-NEXT:    vmov q5, q3
+; CHECK-NEXT:    vstr p0, [sp, #196] @ 4-byte Spill
+; CHECK-NEXT:    vpsel q7, q2, q3
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT:    vadd.i32 q4, q0, r2
+; CHECK-NEXT:    vcmp.u32 hi, q6, q4
+; CHECK-NEXT:    vstr p0, [sp, #192] @ 4-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT:    vadd.i32 q0, q0, r2
+; CHECK-NEXT:    adds r2, #16
+; CHECK-NEXT:    vcmp.u32 hi, q6, q0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #144] @ 16-byte Spill
+; CHECK-NEXT:    vpsel q0, q2, q3
+; CHECK-NEXT:    vldr p0, [sp, #192] @ 4-byte Reload
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov.16 q6[0], r3
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    vmov.16 q6[1], r3
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov.16 q6[2], r3
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    vpsel q1, q2, q3
+; CHECK-NEXT:    vmov.16 q6[3], r3
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.16 q6[4], r3
+; CHECK-NEXT:    vmov r3, s5
+; CHECK-NEXT:    vmov.16 q6[5], r3
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    vmov.16 q6[6], r3
+; CHECK-NEXT:    vmov r3, s7
+; CHECK-NEXT:    vmov.16 q6[7], r3
+; CHECK-NEXT:    vcmp.i16 ne, q6, zr
+; CHECK-NEXT:    vpsel q0, q2, q3
+; CHECK-NEXT:    vstr p0, [sp, #192] @ 4-byte Spill
+; CHECK-NEXT:    vmov.u16 r3, q0[0]
+; CHECK-NEXT:    vmov.8 q6[0], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[1]
+; CHECK-NEXT:    vmov.8 q6[1], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[2]
+; CHECK-NEXT:    vmov.8 q6[2], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[3]
+; CHECK-NEXT:    vmov.8 q6[3], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[4]
+; CHECK-NEXT:    vmov.8 q6[4], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[5]
+; CHECK-NEXT:    vmov.8 q6[5], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[6]
+; CHECK-NEXT:    vmov.8 q6[6], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[7]
+; CHECK-NEXT:    vmov.8 q6[7], r3
+; CHECK-NEXT:    vmov.u16 r3, q7[0]
+; CHECK-NEXT:    vmov.8 q6[8], r3
+; CHECK-NEXT:    vmov.u16 r3, q7[1]
+; CHECK-NEXT:    vmov.8 q6[9], r3
+; CHECK-NEXT:    vmov.u16 r3, q7[2]
+; CHECK-NEXT:    vmov.8 q6[10], r3
+; CHECK-NEXT:    vmov.u16 r3, q7[3]
+; CHECK-NEXT:    vmov.8 q6[11], r3
+; CHECK-NEXT:    vmov.u16 r3, q7[4]
+; CHECK-NEXT:    vmov.8 q6[12], r3
+; CHECK-NEXT:    vmov.u16 r3, q7[5]
+; CHECK-NEXT:    vmov.8 q6[13], r3
+; CHECK-NEXT:    vmov.u16 r3, q7[6]
+; CHECK-NEXT:    vmov.8 q6[14], r3
+; CHECK-NEXT:    vmov.u16 r3, q7[7]
+; CHECK-NEXT:    vmov.8 q6[15], r3
+; CHECK-NEXT:    vcmp.i8 ne, q6, zr
+; CHECK-NEXT:    vstr p0, [sp, #140] @ 4-byte Spill
+; CHECK-NEXT:    vldrw.u32 q7, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #176] @ 16-byte Reload
+; CHECK-NEXT:    vcmp.u32 cs, q7, q0
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #160] @ 16-byte Reload
+; CHECK-NEXT:    vpsel q6, q2, q3
+; CHECK-NEXT:    vcmp.u32 cs, q7, q0
+; CHECK-NEXT:    vpsel q1, q2, q3
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.16 q0[0], r3
+; CHECK-NEXT:    vmov r3, s5
+; CHECK-NEXT:    vmov.16 q0[1], r3
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    vmov.16 q0[2], r3
+; CHECK-NEXT:    vmov r3, s7
+; CHECK-NEXT:    vmov.16 q0[3], r3
+; CHECK-NEXT:    vmov r3, s24
+; CHECK-NEXT:    vmov.16 q0[4], r3
+; CHECK-NEXT:    vmov r3, s25
+; CHECK-NEXT:    vmov.16 q0[5], r3
+; CHECK-NEXT:    vmov r3, s26
+; CHECK-NEXT:    vmov.16 q0[6], r3
+; CHECK-NEXT:    vmov r3, s27
+; CHECK-NEXT:    vmov.16 q0[7], r3
+; CHECK-NEXT:    vldrw.u32 q1, [sp, #144] @ 16-byte Reload
+; CHECK-NEXT:    vcmp.i16 ne, q0, zr
+; CHECK-NEXT:    vpsel q3, q2, q3
+; CHECK-NEXT:    vcmp.u32 cs, q7, q4
+; CHECK-NEXT:    vpsel q6, q2, q5
+; CHECK-NEXT:    vcmp.u32 cs, q7, q1
+; CHECK-NEXT:    vpsel q1, q2, q5
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.16 q4[0], r3
+; CHECK-NEXT:    vmov r3, s5
+; CHECK-NEXT:    vmov.16 q4[1], r3
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    vmov.16 q4[2], r3
+; CHECK-NEXT:    vmov r3, s7
+; CHECK-NEXT:    vmov.16 q4[3], r3
+; CHECK-NEXT:    vmov r3, s24
+; CHECK-NEXT:    vmov.16 q4[4], r3
+; CHECK-NEXT:    vmov r3, s25
+; CHECK-NEXT:    vmov.16 q4[5], r3
+; CHECK-NEXT:    vmov r3, s26
+; CHECK-NEXT:    vmov.16 q4[6], r3
+; CHECK-NEXT:    vmov r3, s27
+; CHECK-NEXT:    vmov.16 q4[7], r3
+; CHECK-NEXT:    vcmp.i16 ne, q4, zr
+; CHECK-NEXT:    vpsel q6, q2, q5
+; CHECK-NEXT:    vldr p0, [sp, #140] @ 4-byte Reload
+; CHECK-NEXT:    vmov.u16 r3, q6[0]
+; CHECK-NEXT:    vmov.8 q5[0], r3
+; CHECK-NEXT:    vmov.u16 r3, q6[1]
+; CHECK-NEXT:    vmov.8 q5[1], r3
+; CHECK-NEXT:    vmov.u16 r3, q6[2]
+; CHECK-NEXT:    vmov.8 q5[2], r3
+; CHECK-NEXT:    vmov.u16 r3, q6[3]
+; CHECK-NEXT:    vmov.8 q5[3], r3
+; CHECK-NEXT:    vmov.u16 r3, q6[4]
+; CHECK-NEXT:    vmov.8 q5[4], r3
+; CHECK-NEXT:    vmov.u16 r3, q6[5]
+; CHECK-NEXT:    vmov.8 q5[5], r3
+; CHECK-NEXT:    vmov.u16 r3, q6[6]
+; CHECK-NEXT:    vmov.8 q5[6], r3
+; CHECK-NEXT:    vmov.u16 r3, q6[7]
+; CHECK-NEXT:    vmov.8 q5[7], r3
+; CHECK-NEXT:    vmov.u16 r3, q3[0]
+; CHECK-NEXT:    vmov.8 q5[8], r3
+; CHECK-NEXT:    vmov.u16 r3, q3[1]
+; CHECK-NEXT:    vmov.8 q5[9], r3
+; CHECK-NEXT:    vmov.u16 r3, q3[2]
+; CHECK-NEXT:    vmov.8 q5[10], r3
+; CHECK-NEXT:    vmov.u16 r3, q3[3]
+; CHECK-NEXT:    vmov.8 q5[11], r3
+; CHECK-NEXT:    vmov.u16 r3, q3[4]
+; CHECK-NEXT:    vmov.8 q5[12], r3
+; CHECK-NEXT:    vmov.u16 r3, q3[5]
+; CHECK-NEXT:    vmov.8 q5[13], r3
+; CHECK-NEXT:    vmov.u16 r3, q3[6]
+; CHECK-NEXT:    vmov.8 q5[14], r3
+; CHECK-NEXT:    vmov.u16 r3, q3[7]
+; CHECK-NEXT:    vmov.8 q5[15], r3
+; CHECK-NEXT:    vpnot
+; CHECK-NEXT:    vpsttt
+; CHECK-NEXT:    vcmpt.i8 ne, q5, zr
+; CHECK-NEXT:    vldrbt.u8 q5, [r0], #16
+; CHECK-NEXT:    vldrbt.u8 q3, [r1], #16
+; CHECK-NEXT:    vmov.u8 r3, q3[0]
+; CHECK-NEXT:    vldr p0, [sp, #192] @ 4-byte Reload
+; CHECK-NEXT:    vpnot
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcmpt.i16 ne, q4, zr
+; CHECK-NEXT:    vmov.16 q4[0], r3
+; CHECK-NEXT:    vmov.u8 r3, q3[1]
+; CHECK-NEXT:    vmov.16 q4[1], r3
+; CHECK-NEXT:    vmov.u8 r3, q3[2]
+; CHECK-NEXT:    vmov.16 q4[2], r3
+; CHECK-NEXT:    vmov.u8 r3, q3[3]
+; CHECK-NEXT:    vmov.16 q4[3], r3
+; CHECK-NEXT:    vmov.u8 r3, q3[4]
+; CHECK-NEXT:    vmov.16 q4[4], r3
+; CHECK-NEXT:    vmov.u8 r3, q3[5]
+; CHECK-NEXT:    vmov.16 q4[5], r3
+; CHECK-NEXT:    vmov.u8 r3, q3[6]
+; CHECK-NEXT:    vmov.16 q4[6], r3
+; CHECK-NEXT:    vmov.u8 r3, q3[7]
+; CHECK-NEXT:    vmov.16 q4[7], r3
+; CHECK-NEXT:    vmov.u8 r3, q5[0]
+; CHECK-NEXT:    vmov.16 q6[0], r3
+; CHECK-NEXT:    vmov.u8 r3, q5[1]
+; CHECK-NEXT:    vmov.16 q6[1], r3
+; CHECK-NEXT:    vmov.u8 r3, q5[2]
+; CHECK-NEXT:    vmov.16 q6[2], r3
+; CHECK-NEXT:    vmov.u8 r3, q5[3]
+; CHECK-NEXT:    vmov.16 q6[3], r3
+; CHECK-NEXT:    vmov.u8 r3, q5[4]
+; CHECK-NEXT:    vmov.16 q6[4], r3
+; CHECK-NEXT:    vmov.u8 r3, q5[5]
+; CHECK-NEXT:    vmov.16 q6[5], r3
+; CHECK-NEXT:    vmov.u8 r3, q5[6]
+; CHECK-NEXT:    vmov.16 q6[6], r3
+; CHECK-NEXT:    vmov.u8 r3, q5[7]
+; CHECK-NEXT:    vmov.16 q6[7], r3
+; CHECK-NEXT:    vmov.u8 r3, q5[8]
+; CHECK-NEXT:    vmovlb.u8 q1, q6
+; CHECK-NEXT:    vmovlb.u8 q6, q4
+; CHECK-NEXT:    vldrw.u32 q4, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vmult.i16 q4, q6, q1
+; CHECK-NEXT:    vldr p0, [sp, #196] @ 4-byte Reload
+; CHECK-NEXT:    vpnot
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcmpt.i16 ne, q0, zr
+; CHECK-NEXT:    vmov.16 q0[0], r3
+; CHECK-NEXT:    vmov.u8 r3, q5[9]
+; CHECK-NEXT:    vmov.16 q0[1], r3
+; CHECK-NEXT:    vmov.u8 r3, q5[10]
+; CHECK-NEXT:    vmov.16 q0[2], r3
+; CHECK-NEXT:    vmov.u8 r3, q5[11]
+; CHECK-NEXT:    vmov.16 q0[3], r3
+; CHECK-NEXT:    vmov.u8 r3, q5[12]
+; CHECK-NEXT:    vmov.16 q0[4], r3
+; CHECK-NEXT:    vmov.u8 r3, q5[13]
+; CHECK-NEXT:    vmov.16 q0[5], r3
+; CHECK-NEXT:    vmov.u8 r3, q5[14]
+; CHECK-NEXT:    vmov.16 q0[6], r3
+; CHECK-NEXT:    vmov.u8 r3, q5[15]
+; CHECK-NEXT:    vmov.16 q0[7], r3
+; CHECK-NEXT:    vmov.u8 r3, q3[8]
+; CHECK-NEXT:    vmov.16 q5[0], r3
+; CHECK-NEXT:    vmov.u8 r3, q3[9]
+; CHECK-NEXT:    vmov.16 q5[1], r3
+; CHECK-NEXT:    vmov.u8 r3, q3[10]
+; CHECK-NEXT:    vmov.16 q5[2], r3
+; CHECK-NEXT:    vmov.u8 r3, q3[11]
+; CHECK-NEXT:    vmov.16 q5[3], r3
+; CHECK-NEXT:    vmov.u8 r3, q3[12]
+; CHECK-NEXT:    vmov.16 q5[4], r3
+; CHECK-NEXT:    vmov.u8 r3, q3[13]
+; CHECK-NEXT:    vmov.16 q5[5], r3
+; CHECK-NEXT:    vmov.u8 r3, q3[14]
+; CHECK-NEXT:    vmov.16 q5[6], r3
+; CHECK-NEXT:    vmov.u8 r3, q3[15]
+; CHECK-NEXT:    vmov.16 q5[7], r3
+; CHECK-NEXT:    vmullb.u8 q0, q5, q0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vaddt.i16 q4, q4, q0
+; CHECK-NEXT:    vaddva.u16 r12, q4
+; CHECK-NEXT:    le lr, .LBB26_2
+; CHECK-NEXT:    b .LBB26_4
+; CHECK-NEXT:  .LBB26_3:
+; CHECK-NEXT:    mov.w r12, #0
+; CHECK-NEXT:  .LBB26_4: @ %for.cond.cleanup
+; CHECK-NEXT:    sxth.w r0, r12
+; CHECK-NEXT:    add sp, #200
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.5:
+; CHECK-NEXT:  .LCPI26_0:
+; CHECK-NEXT:    .long 12 @ 0xc
+; CHECK-NEXT:    .long 13 @ 0xd
+; CHECK-NEXT:    .long 14 @ 0xe
+; CHECK-NEXT:    .long 15 @ 0xf
+; CHECK-NEXT:  .LCPI26_1:
+; CHECK-NEXT:    .long 8 @ 0x8
+; CHECK-NEXT:    .long 9 @ 0x9
+; CHECK-NEXT:    .long 10 @ 0xa
+; CHECK-NEXT:    .long 11 @ 0xb
+; CHECK-NEXT:  .LCPI26_2:
+; CHECK-NEXT:    .long 4 @ 0x4
+; CHECK-NEXT:    .long 5 @ 0x5
+; CHECK-NEXT:    .long 6 @ 0x6
+; CHECK-NEXT:    .long 7 @ 0x7
+; CHECK-NEXT:  .LCPI26_3:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+entry:
+  %cmp13.not = icmp eq i32 %n, 0
+  br i1 %cmp13.not, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %n.rnd.up = add i32 %n, 15
+  %n.vec = and i32 %n.rnd.up, -16
+  %trip.count.minus.1 = add i32 %n, -1
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi i16 [ 0, %vector.ph ], [ %9, %vector.body ]
+  %active.lane.mask = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 %index, i32 %trip.count.minus.1)
+  %0 = getelementptr inbounds i8, i8* %x, i32 %index
+  %1 = bitcast i8* %0 to <16 x i8>*
+  %wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %1, i32 1, <16 x i1> %active.lane.mask, <16 x i8> undef)
+  %2 = zext <16 x i8> %wide.masked.load to <16 x i16>
+  %3 = getelementptr inbounds i8, i8* %y, i32 %index
+  %4 = bitcast i8* %3 to <16 x i8>*
+  %wide.masked.load18 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %4, i32 1, <16 x i1> %active.lane.mask, <16 x i8> undef)
+  %5 = zext <16 x i8> %wide.masked.load18 to <16 x i16>
+  %6 = mul nuw <16 x i16> %5, %2
+  %7 = select <16 x i1> %active.lane.mask, <16 x i16> %6, <16 x i16> zeroinitializer
+  %8 = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %7)
+  %9 = add i16 %8, %vec.phi
+  %index.next = add i32 %index, 16
+  %10 = icmp eq i32 %index.next, %n.vec
+  br i1 %10, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  %s.0.lcssa = phi i16 [ 0, %entry ], [ %9, %vector.body ]
+  ret i16 %s.0.lcssa
+}
+
+define zeroext i8 @add16i8(i8* noalias nocapture readonly %x, i32 %n) {
+; CHECK-LABEL: add16i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #136
+; CHECK-NEXT:    sub sp, #136
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    beq.w .LBB27_3
+; CHECK-NEXT:  @ %bb.1: @ %vector.ph
+; CHECK-NEXT:    add.w r2, r1, #15
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    bic r2, r2, #15
+; CHECK-NEXT:    vmov.i8 q7, #0x0
+; CHECK-NEXT:    subs r2, #16
+; CHECK-NEXT:    vmov.i8 q2, #0xff
+; CHECK-NEXT:    add.w lr, r3, r2, lsr #4
+; CHECK-NEXT:    adr r3, .LCPI27_0
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    adr r3, .LCPI27_1
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:    subs r2, r1, #1
+; CHECK-NEXT:    vstrw.32 q0, [sp, #64] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    adr r3, .LCPI27_2
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #48] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    adr r3, .LCPI27_3
+; CHECK-NEXT:    vstrw.32 q0, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    vstrw.32 q0, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vdup.32 q0, r2
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT:  .LBB27_2: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #64] @ 16-byte Reload
+; CHECK-NEXT:    vdup.32 q6, r1
+; CHECK-NEXT:    vadd.i32 q0, q0, r1
+; CHECK-NEXT:    vcmp.u32 hi, q6, q0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #112] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT:    vpsel q3, q2, q7
+; CHECK-NEXT:    vadd.i32 q0, q0, r1
+; CHECK-NEXT:    vcmp.u32 hi, q6, q0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #96] @ 16-byte Spill
+; CHECK-NEXT:    vpsel q0, q2, q7
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov.16 q4[0], r3
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    vmov.16 q4[1], r3
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov.16 q4[2], r3
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    vmov.16 q4[3], r3
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vmov.16 q4[4], r3
+; CHECK-NEXT:    vmov r3, s13
+; CHECK-NEXT:    vmov.16 q4[5], r3
+; CHECK-NEXT:    vmov r3, s14
+; CHECK-NEXT:    vmov.16 q4[6], r3
+; CHECK-NEXT:    vmov r3, s15
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT:    vmov.16 q4[7], r3
+; CHECK-NEXT:    vcmp.i16 ne, q4, zr
+; CHECK-NEXT:    vadd.i32 q3, q0, r1
+; CHECK-NEXT:    vpsel q5, q2, q7
+; CHECK-NEXT:    vcmp.u32 hi, q6, q3
+; CHECK-NEXT:    vstr p0, [sp, #92] @ 4-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT:    vadd.i32 q4, q0, r1
+; CHECK-NEXT:    adds r1, #16
+; CHECK-NEXT:    vcmp.u32 hi, q6, q4
+; CHECK-NEXT:    vpsel q0, q2, q7
+; CHECK-NEXT:    vldr p0, [sp, #92] @ 4-byte Reload
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov.16 q6[0], r3
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    vmov.16 q6[1], r3
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov.16 q6[2], r3
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    vpsel q1, q2, q7
+; CHECK-NEXT:    vmov.16 q6[3], r3
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.16 q6[4], r3
+; CHECK-NEXT:    vmov r3, s5
+; CHECK-NEXT:    vmov.16 q6[5], r3
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    vmov.16 q6[6], r3
+; CHECK-NEXT:    vmov r3, s7
+; CHECK-NEXT:    vmov.16 q6[7], r3
+; CHECK-NEXT:    vcmp.i16 ne, q6, zr
+; CHECK-NEXT:    vpsel q0, q2, q7
+; CHECK-NEXT:    vmov.u16 r3, q0[0]
+; CHECK-NEXT:    vmov.8 q6[0], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[1]
+; CHECK-NEXT:    vmov.8 q6[1], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[2]
+; CHECK-NEXT:    vmov.8 q6[2], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[3]
+; CHECK-NEXT:    vmov.8 q6[3], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[4]
+; CHECK-NEXT:    vmov.8 q6[4], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[5]
+; CHECK-NEXT:    vmov.8 q6[5], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[6]
+; CHECK-NEXT:    vmov.8 q6[6], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[7]
+; CHECK-NEXT:    vmov.8 q6[7], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[0]
+; CHECK-NEXT:    vmov.8 q6[8], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[1]
+; CHECK-NEXT:    vmov.8 q6[9], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[2]
+; CHECK-NEXT:    vmov.8 q6[10], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[3]
+; CHECK-NEXT:    vmov.8 q6[11], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[4]
+; CHECK-NEXT:    vmov.8 q6[12], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[5]
+; CHECK-NEXT:    vmov.8 q6[13], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[6]
+; CHECK-NEXT:    vmov.8 q6[14], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[7]
+; CHECK-NEXT:    vmov.8 q6[15], r3
+; CHECK-NEXT:    vcmp.i8 ne, q6, zr
+; CHECK-NEXT:    vstr p0, [sp, #92] @ 4-byte Spill
+; CHECK-NEXT:    vldrw.u32 q6, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #112] @ 16-byte Reload
+; CHECK-NEXT:    vcmp.u32 cs, q6, q0
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #96] @ 16-byte Reload
+; CHECK-NEXT:    vpsel q5, q2, q7
+; CHECK-NEXT:    vcmp.u32 cs, q6, q0
+; CHECK-NEXT:    vpsel q1, q2, q7
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.16 q0[0], r3
+; CHECK-NEXT:    vmov r3, s5
+; CHECK-NEXT:    vmov.16 q0[1], r3
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    vmov.16 q0[2], r3
+; CHECK-NEXT:    vmov r3, s7
+; CHECK-NEXT:    vmov.16 q0[3], r3
+; CHECK-NEXT:    vmov r3, s20
+; CHECK-NEXT:    vmov.16 q0[4], r3
+; CHECK-NEXT:    vmov r3, s21
+; CHECK-NEXT:    vmov.16 q0[5], r3
+; CHECK-NEXT:    vmov r3, s22
+; CHECK-NEXT:    vmov.16 q0[6], r3
+; CHECK-NEXT:    vmov r3, s23
+; CHECK-NEXT:    vmov.16 q0[7], r3
+; CHECK-NEXT:    vcmp.i16 ne, q0, zr
+; CHECK-NEXT:    vpsel q0, q2, q7
+; CHECK-NEXT:    vcmp.u32 cs, q6, q3
+; CHECK-NEXT:    vpsel q3, q2, q7
+; CHECK-NEXT:    vcmp.u32 cs, q6, q4
+; CHECK-NEXT:    vpsel q1, q2, q7
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.16 q4[0], r3
+; CHECK-NEXT:    vmov r3, s5
+; CHECK-NEXT:    vmov.16 q4[1], r3
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    vmov.16 q4[2], r3
+; CHECK-NEXT:    vmov r3, s7
+; CHECK-NEXT:    vmov.16 q4[3], r3
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vmov.16 q4[4], r3
+; CHECK-NEXT:    vmov r3, s13
+; CHECK-NEXT:    vmov.16 q4[5], r3
+; CHECK-NEXT:    vmov r3, s14
+; CHECK-NEXT:    vmov.16 q4[6], r3
+; CHECK-NEXT:    vmov r3, s15
+; CHECK-NEXT:    vmov.16 q4[7], r3
+; CHECK-NEXT:    vcmp.i16 ne, q4, zr
+; CHECK-NEXT:    vpsel q4, q2, q7
+; CHECK-NEXT:    vldr p0, [sp, #92] @ 4-byte Reload
+; CHECK-NEXT:    vmov.u16 r3, q4[0]
+; CHECK-NEXT:    vmov.8 q3[0], r3
+; CHECK-NEXT:    vmov.u16 r3, q4[1]
+; CHECK-NEXT:    vmov.8 q3[1], r3
+; CHECK-NEXT:    vmov.u16 r3, q4[2]
+; CHECK-NEXT:    vmov.8 q3[2], r3
+; CHECK-NEXT:    vmov.u16 r3, q4[3]
+; CHECK-NEXT:    vmov.8 q3[3], r3
+; CHECK-NEXT:    vmov.u16 r3, q4[4]
+; CHECK-NEXT:    vmov.8 q3[4], r3
+; CHECK-NEXT:    vmov.u16 r3, q4[5]
+; CHECK-NEXT:    vmov.8 q3[5], r3
+; CHECK-NEXT:    vmov.u16 r3, q4[6]
+; CHECK-NEXT:    vmov.8 q3[6], r3
+; CHECK-NEXT:    vmov.u16 r3, q4[7]
+; CHECK-NEXT:    vmov.8 q3[7], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[0]
+; CHECK-NEXT:    vmov.8 q3[8], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[1]
+; CHECK-NEXT:    vmov.8 q3[9], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[2]
+; CHECK-NEXT:    vmov.8 q3[10], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[3]
+; CHECK-NEXT:    vmov.8 q3[11], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[4]
+; CHECK-NEXT:    vmov.8 q3[12], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[5]
+; CHECK-NEXT:    vmov.8 q3[13], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[6]
+; CHECK-NEXT:    vmov.8 q3[14], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[7]
+; CHECK-NEXT:    vpnot
+; CHECK-NEXT:    vmov.8 q3[15], r3
+; CHECK-NEXT:    vpsttt
+; CHECK-NEXT:    vcmpt.i8 ne, q3, zr
+; CHECK-NEXT:    vldrbt.u8 q0, [r0], #16
+; CHECK-NEXT:    vaddvat.u8 r2, q0
+; CHECK-NEXT:    le lr, .LBB27_2
+; CHECK-NEXT:    b .LBB27_4
+; CHECK-NEXT:  .LBB27_3:
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:  .LBB27_4: @ %for.cond.cleanup
+; CHECK-NEXT:    uxtb r0, r2
+; CHECK-NEXT:    add sp, #136
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.5:
+; CHECK-NEXT:  .LCPI27_0:
+; CHECK-NEXT:    .long 12 @ 0xc
+; CHECK-NEXT:    .long 13 @ 0xd
+; CHECK-NEXT:    .long 14 @ 0xe
+; CHECK-NEXT:    .long 15 @ 0xf
+; CHECK-NEXT:  .LCPI27_1:
+; CHECK-NEXT:    .long 8 @ 0x8
+; CHECK-NEXT:    .long 9 @ 0x9
+; CHECK-NEXT:    .long 10 @ 0xa
+; CHECK-NEXT:    .long 11 @ 0xb
+; CHECK-NEXT:  .LCPI27_2:
+; CHECK-NEXT:    .long 4 @ 0x4
+; CHECK-NEXT:    .long 5 @ 0x5
+; CHECK-NEXT:    .long 6 @ 0x6
+; CHECK-NEXT:    .long 7 @ 0x7
+; CHECK-NEXT:  .LCPI27_3:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+entry:
+  %cmp7.not = icmp eq i32 %n, 0
+  br i1 %cmp7.not, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %n.rnd.up = add i32 %n, 15
+  %n.vec = and i32 %n.rnd.up, -16
+  %trip.count.minus.1 = add i32 %n, -1
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi i8 [ 0, %vector.ph ], [ %4, %vector.body ]
+  %active.lane.mask = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 %index, i32 %trip.count.minus.1)
+  %0 = getelementptr inbounds i8, i8* %x, i32 %index
+  %1 = bitcast i8* %0 to <16 x i8>*
+  %wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %1, i32 1, <16 x i1> %active.lane.mask, <16 x i8> undef)
+  %2 = select <16 x i1> %active.lane.mask, <16 x i8> %wide.masked.load, <16 x i8> zeroinitializer
+  %3 = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %2)
+  %4 = add i8 %3, %vec.phi
+  %index.next = add i32 %index, 16
+  %5 = icmp eq i32 %index.next, %n.vec
+  br i1 %5, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  %s.0.lcssa = phi i8 [ 0, %entry ], [ %4, %vector.body ]
+  ret i8 %s.0.lcssa
+}
+
+define zeroext i8 @mla16i8(i8* noalias nocapture readonly %x, i8* noalias nocapture readonly %y, i32 %n) {
+; CHECK-LABEL: mla16i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #136
+; CHECK-NEXT:    sub sp, #136
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    beq.w .LBB28_3
+; CHECK-NEXT:  @ %bb.1: @ %vector.ph
+; CHECK-NEXT:    add.w r3, r2, #15
+; CHECK-NEXT:    vmov.i8 q7, #0x0
+; CHECK-NEXT:    bic r3, r3, #15
+; CHECK-NEXT:    vmov.i8 q2, #0xff
+; CHECK-NEXT:    sub.w r12, r3, #16
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    add.w lr, r3, r12, lsr #4
+; CHECK-NEXT:    adr r3, .LCPI28_0
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    adr r3, .LCPI28_1
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:    sub.w r12, r2, #1
+; CHECK-NEXT:    vstrw.32 q0, [sp, #64] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    adr r3, .LCPI28_2
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #48] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    adr r3, .LCPI28_3
+; CHECK-NEXT:    vstrw.32 q0, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    vstrw.32 q0, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vdup.32 q0, r12
+; CHECK-NEXT:    mov.w r12, #0
+; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT:  .LBB28_2: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #64] @ 16-byte Reload
+; CHECK-NEXT:    vdup.32 q6, r2
+; CHECK-NEXT:    vadd.i32 q0, q0, r2
+; CHECK-NEXT:    vcmp.u32 hi, q6, q0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #112] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT:    vpsel q3, q2, q7
+; CHECK-NEXT:    vadd.i32 q0, q0, r2
+; CHECK-NEXT:    vcmp.u32 hi, q6, q0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #96] @ 16-byte Spill
+; CHECK-NEXT:    vpsel q0, q2, q7
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov.16 q4[0], r3
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    vmov.16 q4[1], r3
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov.16 q4[2], r3
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    vmov.16 q4[3], r3
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vmov.16 q4[4], r3
+; CHECK-NEXT:    vmov r3, s13
+; CHECK-NEXT:    vmov.16 q4[5], r3
+; CHECK-NEXT:    vmov r3, s14
+; CHECK-NEXT:    vmov.16 q4[6], r3
+; CHECK-NEXT:    vmov r3, s15
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT:    vmov.16 q4[7], r3
+; CHECK-NEXT:    vcmp.i16 ne, q4, zr
+; CHECK-NEXT:    vadd.i32 q3, q0, r2
+; CHECK-NEXT:    vpsel q5, q2, q7
+; CHECK-NEXT:    vcmp.u32 hi, q6, q3
+; CHECK-NEXT:    vstr p0, [sp, #92] @ 4-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT:    vadd.i32 q4, q0, r2
+; CHECK-NEXT:    adds r2, #16
+; CHECK-NEXT:    vcmp.u32 hi, q6, q4
+; CHECK-NEXT:    vpsel q0, q2, q7
+; CHECK-NEXT:    vldr p0, [sp, #92] @ 4-byte Reload
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov.16 q6[0], r3
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    vmov.16 q6[1], r3
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov.16 q6[2], r3
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    vpsel q1, q2, q7
+; CHECK-NEXT:    vmov.16 q6[3], r3
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.16 q6[4], r3
+; CHECK-NEXT:    vmov r3, s5
+; CHECK-NEXT:    vmov.16 q6[5], r3
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    vmov.16 q6[6], r3
+; CHECK-NEXT:    vmov r3, s7
+; CHECK-NEXT:    vmov.16 q6[7], r3
+; CHECK-NEXT:    vcmp.i16 ne, q6, zr
+; CHECK-NEXT:    vpsel q0, q2, q7
+; CHECK-NEXT:    vmov.u16 r3, q0[0]
+; CHECK-NEXT:    vmov.8 q6[0], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[1]
+; CHECK-NEXT:    vmov.8 q6[1], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[2]
+; CHECK-NEXT:    vmov.8 q6[2], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[3]
+; CHECK-NEXT:    vmov.8 q6[3], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[4]
+; CHECK-NEXT:    vmov.8 q6[4], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[5]
+; CHECK-NEXT:    vmov.8 q6[5], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[6]
+; CHECK-NEXT:    vmov.8 q6[6], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[7]
+; CHECK-NEXT:    vmov.8 q6[7], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[0]
+; CHECK-NEXT:    vmov.8 q6[8], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[1]
+; CHECK-NEXT:    vmov.8 q6[9], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[2]
+; CHECK-NEXT:    vmov.8 q6[10], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[3]
+; CHECK-NEXT:    vmov.8 q6[11], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[4]
+; CHECK-NEXT:    vmov.8 q6[12], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[5]
+; CHECK-NEXT:    vmov.8 q6[13], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[6]
+; CHECK-NEXT:    vmov.8 q6[14], r3
+; CHECK-NEXT:    vmov.u16 r3, q5[7]
+; CHECK-NEXT:    vmov.8 q6[15], r3
+; CHECK-NEXT:    vcmp.i8 ne, q6, zr
+; CHECK-NEXT:    vstr p0, [sp, #92] @ 4-byte Spill
+; CHECK-NEXT:    vldrw.u32 q6, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #112] @ 16-byte Reload
+; CHECK-NEXT:    vcmp.u32 cs, q6, q0
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #96] @ 16-byte Reload
+; CHECK-NEXT:    vpsel q5, q2, q7
+; CHECK-NEXT:    vcmp.u32 cs, q6, q0
+; CHECK-NEXT:    vpsel q1, q2, q7
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.16 q0[0], r3
+; CHECK-NEXT:    vmov r3, s5
+; CHECK-NEXT:    vmov.16 q0[1], r3
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    vmov.16 q0[2], r3
+; CHECK-NEXT:    vmov r3, s7
+; CHECK-NEXT:    vmov.16 q0[3], r3
+; CHECK-NEXT:    vmov r3, s20
+; CHECK-NEXT:    vmov.16 q0[4], r3
+; CHECK-NEXT:    vmov r3, s21
+; CHECK-NEXT:    vmov.16 q0[5], r3
+; CHECK-NEXT:    vmov r3, s22
+; CHECK-NEXT:    vmov.16 q0[6], r3
+; CHECK-NEXT:    vmov r3, s23
+; CHECK-NEXT:    vmov.16 q0[7], r3
+; CHECK-NEXT:    vcmp.i16 ne, q0, zr
+; CHECK-NEXT:    vpsel q0, q2, q7
+; CHECK-NEXT:    vcmp.u32 cs, q6, q3
+; CHECK-NEXT:    vpsel q3, q2, q7
+; CHECK-NEXT:    vcmp.u32 cs, q6, q4
+; CHECK-NEXT:    vpsel q1, q2, q7
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.16 q4[0], r3
+; CHECK-NEXT:    vmov r3, s5
+; CHECK-NEXT:    vmov.16 q4[1], r3
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    vmov.16 q4[2], r3
+; CHECK-NEXT:    vmov r3, s7
+; CHECK-NEXT:    vmov.16 q4[3], r3
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vmov.16 q4[4], r3
+; CHECK-NEXT:    vmov r3, s13
+; CHECK-NEXT:    vmov.16 q4[5], r3
+; CHECK-NEXT:    vmov r3, s14
+; CHECK-NEXT:    vmov.16 q4[6], r3
+; CHECK-NEXT:    vmov r3, s15
+; CHECK-NEXT:    vmov.16 q4[7], r3
+; CHECK-NEXT:    vcmp.i16 ne, q4, zr
+; CHECK-NEXT:    vpsel q4, q2, q7
+; CHECK-NEXT:    vldr p0, [sp, #92] @ 4-byte Reload
+; CHECK-NEXT:    vmov.u16 r3, q4[0]
+; CHECK-NEXT:    vmov.8 q3[0], r3
+; CHECK-NEXT:    vmov.u16 r3, q4[1]
+; CHECK-NEXT:    vmov.8 q3[1], r3
+; CHECK-NEXT:    vmov.u16 r3, q4[2]
+; CHECK-NEXT:    vmov.8 q3[2], r3
+; CHECK-NEXT:    vmov.u16 r3, q4[3]
+; CHECK-NEXT:    vmov.8 q3[3], r3
+; CHECK-NEXT:    vmov.u16 r3, q4[4]
+; CHECK-NEXT:    vmov.8 q3[4], r3
+; CHECK-NEXT:    vmov.u16 r3, q4[5]
+; CHECK-NEXT:    vmov.8 q3[5], r3
+; CHECK-NEXT:    vmov.u16 r3, q4[6]
+; CHECK-NEXT:    vmov.8 q3[6], r3
+; CHECK-NEXT:    vmov.u16 r3, q4[7]
+; CHECK-NEXT:    vmov.8 q3[7], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[0]
+; CHECK-NEXT:    vmov.8 q3[8], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[1]
+; CHECK-NEXT:    vmov.8 q3[9], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[2]
+; CHECK-NEXT:    vmov.8 q3[10], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[3]
+; CHECK-NEXT:    vmov.8 q3[11], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[4]
+; CHECK-NEXT:    vmov.8 q3[12], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[5]
+; CHECK-NEXT:    vmov.8 q3[13], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[6]
+; CHECK-NEXT:    vmov.8 q3[14], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[7]
+; CHECK-NEXT:    vpnot
+; CHECK-NEXT:    vmov.8 q3[15], r3
+; CHECK-NEXT:    vpstttt
+; CHECK-NEXT:    vcmpt.i8 ne, q3, zr
+; CHECK-NEXT:    vldrbt.u8 q0, [r0], #16
+; CHECK-NEXT:    vldrbt.u8 q1, [r1], #16
+; CHECK-NEXT:    vmlavat.u8 r12, q1, q0
+; CHECK-NEXT:    le lr, .LBB28_2
+; CHECK-NEXT:    b .LBB28_4
+; CHECK-NEXT:  .LBB28_3:
+; CHECK-NEXT:    mov.w r12, #0
+; CHECK-NEXT:  .LBB28_4: @ %for.cond.cleanup
+; CHECK-NEXT:    uxtb.w r0, r12
+; CHECK-NEXT:    add sp, #136
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.5:
+; CHECK-NEXT:  .LCPI28_0:
+; CHECK-NEXT:    .long 12 @ 0xc
+; CHECK-NEXT:    .long 13 @ 0xd
+; CHECK-NEXT:    .long 14 @ 0xe
+; CHECK-NEXT:    .long 15 @ 0xf
+; CHECK-NEXT:  .LCPI28_1:
+; CHECK-NEXT:    .long 8 @ 0x8
+; CHECK-NEXT:    .long 9 @ 0x9
+; CHECK-NEXT:    .long 10 @ 0xa
+; CHECK-NEXT:    .long 11 @ 0xb
+; CHECK-NEXT:  .LCPI28_2:
+; CHECK-NEXT:    .long 4 @ 0x4
+; CHECK-NEXT:    .long 5 @ 0x5
+; CHECK-NEXT:    .long 6 @ 0x6
+; CHECK-NEXT:    .long 7 @ 0x7
+; CHECK-NEXT:  .LCPI28_3:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+entry:
+  %cmp10.not = icmp eq i32 %n, 0
+  br i1 %cmp10.not, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %n.rnd.up = add i32 %n, 15
+  %n.vec = and i32 %n.rnd.up, -16
+  %trip.count.minus.1 = add i32 %n, -1
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi i8 [ 0, %vector.ph ], [ %7, %vector.body ]
+  %active.lane.mask = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 %index, i32 %trip.count.minus.1)
+  %0 = getelementptr inbounds i8, i8* %x, i32 %index
+  %1 = bitcast i8* %0 to <16 x i8>*
+  %wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %1, i32 1, <16 x i1> %active.lane.mask, <16 x i8> undef)
+  %2 = getelementptr inbounds i8, i8* %y, i32 %index
+  %3 = bitcast i8* %2 to <16 x i8>*
+  %wide.masked.load15 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %3, i32 1, <16 x i1> %active.lane.mask, <16 x i8> undef)
+  %4 = mul <16 x i8> %wide.masked.load15, %wide.masked.load
+  %5 = select <16 x i1> %active.lane.mask, <16 x i8> %4, <16 x i8> zeroinitializer
+  %6 = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %5)
+  %7 = add i8 %6, %vec.phi
+  %index.next = add i32 %index, 16
+  %8 = icmp eq i32 %index.next, %n.vec
+  br i1 %8, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  %s.0.lcssa = phi i8 [ 0, %entry ], [ %7, %vector.body ]
+  ret i8 %s.0.lcssa
+}
+
+define i64 @add4i64(i32* noalias nocapture readonly %x, i32 %n) {
+; CHECK-LABEL: add4i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    cbz r1, .LBB29_4
+; CHECK-NEXT:  @ %bb.1: @ %vector.ph
+; CHECK-NEXT:    adds r2, r1, #3
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    bic r2, r2, #3
+; CHECK-NEXT:    subs r1, #1
+; CHECK-NEXT:    subs r2, #4
+; CHECK-NEXT:    vdup.32 q1, r1
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    add.w lr, r3, r2, lsr #2
+; CHECK-NEXT:    adr r2, .LCPI29_0
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    movs r3, #0
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB29_2: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vadd.i32 q2, q0, r3
+; CHECK-NEXT:    vdup.32 q3, r3
+; CHECK-NEXT:    vcmp.u32 hi, q3, q2
+; CHECK-NEXT:    adds r3, #4
+; CHECK-NEXT:    vpnot
+; CHECK-NEXT:    vpsttt
+; CHECK-NEXT:    vcmpt.u32 cs, q1, q2
+; CHECK-NEXT:    vldrwt.u32 q2, [r0], #16
+; CHECK-NEXT:    vaddlvat.s32 r2, r1, q2
+; CHECK-NEXT:    le lr, .LBB29_2
+; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:  .LBB29_4:
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    mov r1, r2
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.5:
+; CHECK-NEXT:  .LCPI29_0:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+entry:
+  %cmp6.not = icmp eq i32 %n, 0
+  br i1 %cmp6.not, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %n.rnd.up = add i32 %n, 3
+  %n.vec = and i32 %n.rnd.up, -4
+  %trip.count.minus.1 = add i32 %n, -1
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi i64 [ 0, %vector.ph ], [ %5, %vector.body ]
+  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %trip.count.minus.1)
+  %0 = getelementptr inbounds i32, i32* %x, i32 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %2 = sext <4 x i32> %wide.masked.load to <4 x i64>
+  %3 = select <4 x i1> %active.lane.mask, <4 x i64> %2, <4 x i64> zeroinitializer
+  %4 = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %3)
+  %5 = add i64 %4, %vec.phi
+  %index.next = add i32 %index, 4
+  %6 = icmp eq i32 %index.next, %n.vec
+  br i1 %6, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  %s.0.lcssa = phi i64 [ 0, %entry ], [ %5, %vector.body ]
+  ret i64 %s.0.lcssa
+}
+
+define i64 @mla4i64(i32* noalias nocapture readonly %x, i32* noalias nocapture readonly %y, i32 %n) {
+; CHECK-LABEL: mla4i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    cbz r2, .LBB30_3
+; CHECK-NEXT:  @ %bb.1: @ %vector.ph
+; CHECK-NEXT:    adds r3, r2, #3
+; CHECK-NEXT:    bic r3, r3, #3
+; CHECK-NEXT:    sub.w r12, r3, #4
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    add.w lr, r3, r12, lsr #2
+; CHECK-NEXT:    adr r3, .LCPI30_0
+; CHECK-NEXT:    sub.w r12, r2, #1
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    vdup.32 q1, r12
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    mov.w r12, #0
+; CHECK-NEXT:    movs r3, #0
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB30_2: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vadd.i32 q2, q0, r2
+; CHECK-NEXT:    vdup.32 q3, r2
+; CHECK-NEXT:    vcmp.u32 hi, q3, q2
+; CHECK-NEXT:    adds r2, #4
+; CHECK-NEXT:    vpnot
+; CHECK-NEXT:    vpstttt
+; CHECK-NEXT:    vcmpt.u32 cs, q1, q2
+; CHECK-NEXT:    vldrwt.u32 q2, [r0], #16
+; CHECK-NEXT:    vldrwt.u32 q3, [r1], #16
+; CHECK-NEXT:    vmlalvat.s32 r12, r3, q3, q2
+; CHECK-NEXT:    le lr, .LBB30_2
+; CHECK-NEXT:    b .LBB30_4
+; CHECK-NEXT:  .LBB30_3:
+; CHECK-NEXT:    mov.w r12, #0
+; CHECK-NEXT:    mov r3, r12
+; CHECK-NEXT:  .LBB30_4: @ %for.cond.cleanup
+; CHECK-NEXT:    mov r0, r12
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.5:
+; CHECK-NEXT:  .LCPI30_0:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+entry:
+  %cmp9.not = icmp eq i32 %n, 0
+  br i1 %cmp9.not, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %n.rnd.up = add i32 %n, 3
+  %n.vec = and i32 %n.rnd.up, -4
+  %trip.count.minus.1 = add i32 %n, -1
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi i64 [ 0, %vector.ph ], [ %9, %vector.body ]
+  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %trip.count.minus.1)
+  %0 = getelementptr inbounds i32, i32* %x, i32 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %2 = sext <4 x i32> %wide.masked.load to <4 x i64>
+  %3 = getelementptr inbounds i32, i32* %y, i32 %index
+  %4 = bitcast i32* %3 to <4 x i32>*
+  %wide.masked.load14 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %4, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %5 = sext <4 x i32> %wide.masked.load14 to <4 x i64>
+  %6 = mul nsw <4 x i64> %5, %2
+  %7 = select <4 x i1> %active.lane.mask, <4 x i64> %6, <4 x i64> zeroinitializer
+  %8 = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %7)
+  %9 = add i64 %8, %vec.phi
+  %index.next = add i32 %index, 4
+  %10 = icmp eq i32 %index.next, %n.vec
+  br i1 %10, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  %s.0.lcssa = phi i64 [ 0, %entry ], [ %9, %vector.body ]
+  ret i64 %s.0.lcssa
+}
+
+define i64 @mla8i64(i16* noalias nocapture readonly %x, i16* noalias nocapture readonly %y, i32 %n) {
+; CHECK-LABEL: mla8i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #40
+; CHECK-NEXT:    sub sp, #40
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    beq.w .LBB31_3
+; CHECK-NEXT:  @ %bb.1: @ %vector.ph
+; CHECK-NEXT:    adds r3, r2, #7
+; CHECK-NEXT:    vmov.i8 q1, #0x0
+; CHECK-NEXT:    bic r3, r3, #7
+; CHECK-NEXT:    vmov.i8 q2, #0xff
+; CHECK-NEXT:    sub.w r12, r3, #8
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    add.w lr, r3, r12, lsr #3
+; CHECK-NEXT:    adr r3, .LCPI31_0
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    adr r3, .LCPI31_1
+; CHECK-NEXT:    sub.w r12, r2, #1
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:    vstrw.32 q0, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    vdup.32 q4, r12
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    mov.w r12, #0
+; CHECK-NEXT:    movs r3, #0
+; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT:  .LBB31_2: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q3, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vadd.i32 q5, q0, r2
+; CHECK-NEXT:    vdup.32 q0, r2
+; CHECK-NEXT:    vcmp.u32 hi, q0, q5
+; CHECK-NEXT:    vadd.i32 q6, q3, r2
+; CHECK-NEXT:    vpsel q7, q2, q1
+; CHECK-NEXT:    vcmp.u32 hi, q0, q6
+; CHECK-NEXT:    vpsel q3, q2, q1
+; CHECK-NEXT:    adds r2, #8
+; CHECK-NEXT:    vmov r4, s12
+; CHECK-NEXT:    vmov.16 q0[0], r4
+; CHECK-NEXT:    vmov r4, s13
+; CHECK-NEXT:    vmov.16 q0[1], r4
+; CHECK-NEXT:    vmov r4, s14
+; CHECK-NEXT:    vmov.16 q0[2], r4
+; CHECK-NEXT:    vmov r4, s15
+; CHECK-NEXT:    vmov.16 q0[3], r4
+; CHECK-NEXT:    vmov r4, s28
+; CHECK-NEXT:    vmov.16 q0[4], r4
+; CHECK-NEXT:    vmov r4, s29
+; CHECK-NEXT:    vmov.16 q0[5], r4
+; CHECK-NEXT:    vmov r4, s30
+; CHECK-NEXT:    vmov.16 q0[6], r4
+; CHECK-NEXT:    vmov r4, s31
+; CHECK-NEXT:    vmov.16 q0[7], r4
+; CHECK-NEXT:    vcmp.i16 ne, q0, zr
+; CHECK-NEXT:    vstr p0, [sp, #36] @ 4-byte Spill
+; CHECK-NEXT:    vcmp.u32 cs, q4, q5
+; CHECK-NEXT:    vpsel q5, q2, q1
+; CHECK-NEXT:    vcmp.u32 cs, q4, q6
+; CHECK-NEXT:    vpsel q0, q2, q1
+; CHECK-NEXT:    vldr p0, [sp, #36] @ 4-byte Reload
+; CHECK-NEXT:    vmov r4, s0
+; CHECK-NEXT:    vmov.16 q6[0], r4
+; CHECK-NEXT:    vmov r4, s1
+; CHECK-NEXT:    vmov.16 q6[1], r4
+; CHECK-NEXT:    vmov r4, s2
+; CHECK-NEXT:    vmov.16 q6[2], r4
+; CHECK-NEXT:    vmov r4, s3
+; CHECK-NEXT:    vmov.16 q6[3], r4
+; CHECK-NEXT:    vmov r4, s20
+; CHECK-NEXT:    vmov.16 q6[4], r4
+; CHECK-NEXT:    vmov r4, s21
+; CHECK-NEXT:    vmov.16 q6[5], r4
+; CHECK-NEXT:    vmov r4, s22
+; CHECK-NEXT:    vmov.16 q6[6], r4
+; CHECK-NEXT:    vmov r4, s23
+; CHECK-NEXT:    vpnot
+; CHECK-NEXT:    vmov.16 q6[7], r4
+; CHECK-NEXT:    vpstttt
+; CHECK-NEXT:    vcmpt.i16 ne, q6, zr
+; CHECK-NEXT:    vldrht.u16 q0, [r0], #16
+; CHECK-NEXT:    vldrht.u16 q3, [r1], #16
+; CHECK-NEXT:    vmlalvat.s16 r12, r3, q3, q0
+; CHECK-NEXT:    le lr, .LBB31_2
+; CHECK-NEXT:    b .LBB31_4
+; CHECK-NEXT:  .LBB31_3:
+; CHECK-NEXT:    mov.w r12, #0
+; CHECK-NEXT:    mov r3, r12
+; CHECK-NEXT:  .LBB31_4: @ %for.cond.cleanup
+; CHECK-NEXT:    mov r0, r12
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    add sp, #40
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    pop {r4, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.5:
+; CHECK-NEXT:  .LCPI31_0:
+; CHECK-NEXT:    .long 4 @ 0x4
+; CHECK-NEXT:    .long 5 @ 0x5
+; CHECK-NEXT:    .long 6 @ 0x6
+; CHECK-NEXT:    .long 7 @ 0x7
+; CHECK-NEXT:  .LCPI31_1:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+entry:
+  %cmp9.not = icmp eq i32 %n, 0
+  br i1 %cmp9.not, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %n.rnd.up = add i32 %n, 7
+  %n.vec = and i32 %n.rnd.up, -8
+  %trip.count.minus.1 = add i32 %n, -1
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi i64 [ 0, %vector.ph ], [ %9, %vector.body ]
+  %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %trip.count.minus.1)
+  %0 = getelementptr inbounds i16, i16* %x, i32 %index
+  %1 = bitcast i16* %0 to <8 x i16>*
+  %wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %1, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
+  %2 = sext <8 x i16> %wide.masked.load to <8 x i64>
+  %3 = getelementptr inbounds i16, i16* %y, i32 %index
+  %4 = bitcast i16* %3 to <8 x i16>*
+  %wide.masked.load14 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %4, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
+  %5 = sext <8 x i16> %wide.masked.load14 to <8 x i64>
+  %6 = mul nsw <8 x i64> %5, %2
+  %7 = select <8 x i1> %active.lane.mask, <8 x i64> %6, <8 x i64> zeroinitializer
+  %8 = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %7)
+  %9 = add i64 %8, %vec.phi
+  %index.next = add i32 %index, 8
+  %10 = icmp eq i32 %index.next, %n.vec
+  br i1 %10, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  %s.0.lcssa = phi i64 [ 0, %entry ], [ %9, %vector.body ]
+  ret i64 %s.0.lcssa
+}
+
+declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32) #1
+declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>) #2
+declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32) #1
+declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32 immarg, <8 x i1>, <8 x i16>) #2
+declare i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32>) #3
+declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32, i32) #1
+declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32 immarg, <16 x i1>, <16 x i8>) #2
+declare i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32>) #3
+declare i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16>) #3
+declare i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16>) #3
+declare i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8>) #3
+declare i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64>) #3
+declare i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64>) #3
+
 declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
 declare i32 @llvm.experimental.vector.reduce.mul.v4i32(<4 x i32>)
 declare i32 @llvm.experimental.vector.reduce.and.v4i32(<4 x i32>)
