[llvm] r373064 - [NFC][ARM] Add some tail-predication tests

Sam Parker via llvm-commits <llvm-commits at lists.llvm.org>
Fri Sep 27 03:33:53 PDT 2019


Author: sam_parker
Date: Fri Sep 27 03:33:53 2019
New Revision: 373064

URL: http://llvm.org/viewvc/llvm-project?rev=373064&view=rev
Log:
[NFC][ARM] Add some tail-predication tests

Use different data types (i8, i16 and i32, signed and unsigned) for some simple multiply-accumulate and multiply-add loops.
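
For reference, the loops under test are roughly of the following shape (an illustrative C sketch with invented names, not part of the commit; the committed file contains only the IR and the autogenerated CHECK lines):

  #include <stdint.h>

  /* test_acc_scalar_<type>: accumulate a * b[i] into an i32 result,
     with narrower element types widened per iteration. */
  uint32_t acc_scalar_char(uint8_t a, const uint8_t *b, uint32_t N) {
    uint32_t res = 0;
    for (uint32_t i = 0; i < N; i++)
      res += (uint32_t)a * b[i];
    return res;
  }

  /* test_vec_mul_scalar_add_<type>: res[i] = a[i] * b[i] + c. */
  void mul_scalar_add_char(const uint8_t *a, const uint8_t *b, uint8_t c,
                           uint32_t *res, uint32_t N) {
    for (uint32_t i = 0; i < N; i++)
      res[i] = (uint32_t)a[i] * b[i] + c;
  }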

Added:
    llvm/trunk/test/CodeGen/Thumb2/LowOverheadLoops/mve-tail-data-types.ll

Added: llvm/trunk/test/CodeGen/Thumb2/LowOverheadLoops/mve-tail-data-types.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb2/LowOverheadLoops/mve-tail-data-types.ll?rev=373064&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb2/LowOverheadLoops/mve-tail-data-types.ll (added)
+++ llvm/trunk/test/CodeGen/Thumb2/LowOverheadLoops/mve-tail-data-types.ll Fri Sep 27 03:33:53 2019
@@ -0,0 +1,1757 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -disable-mve-tail-predication=false -enable-arm-maskedldst=true %s -o - | FileCheck %s
+
+define arm_aapcs_vfpcc i32 @test_acc_scalar_char(i8 zeroext %a, i8* nocapture readonly %b, i32 %N) {
+; CHECK-LABEL: test_acc_scalar_char:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mov r12, r0
+; CHECK-NEXT:    movs r0, #0
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    bxeq lr
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    sub sp, #8
+; CHECK-NEXT:    adds r3, r2, #3
+; CHECK-NEXT:    subs r2, #1
+; CHECK-NEXT:    bic r3, r3, #3
+; CHECK-NEXT:    vdup.32 q0, r2
+; CHECK-NEXT:    sub.w lr, r3, #4
+; CHECK-NEXT:    adr r2, .LCPI0_0
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    vldrw.u32 q1, [r2]
+; CHECK-NEXT:    add.w lr, r3, lr, lsr #2
+; CHECK-NEXT:    vmov.i32 q4, #0x0
+; CHECK-NEXT:    vmov.i32 q2, #0xff
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB0_1: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmov q3, q4
+; CHECK-NEXT:    vadd.i32 q4, q1, r0
+; CHECK-NEXT:    vcmp.u32 cs, q0, q4
+; CHECK-NEXT:    @ implicit-def: $q4
+; CHECK-NEXT:    vmrs r3, p0
+; CHECK-NEXT:    and r2, r3, #1
+; CHECK-NEXT:    rsbs r4, r2, #0
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    bfi r2, r4, #0, #1
+; CHECK-NEXT:    ubfx r4, r3, #4, #1
+; CHECK-NEXT:    rsbs r4, r4, #0
+; CHECK-NEXT:    bfi r2, r4, #1, #1
+; CHECK-NEXT:    ubfx r4, r3, #8, #1
+; CHECK-NEXT:    ubfx r3, r3, #12, #1
+; CHECK-NEXT:    rsbs r4, r4, #0
+; CHECK-NEXT:    bfi r2, r4, #2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    bfi r2, r3, #3, #1
+; CHECK-NEXT:    lsls r3, r2, #31
+; CHECK-NEXT:    add.w r3, r1, r0
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    ldrbne r4, [r3]
+; CHECK-NEXT:    vmovne.32 q4[0], r4
+; CHECK-NEXT:    lsls r4, r2, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r4, [r3, #1]
+; CHECK-NEXT:    vmovmi.32 q4[1], r4
+; CHECK-NEXT:    lsls r4, r2, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r4, [r3, #2]
+; CHECK-NEXT:    vmovmi.32 q4[2], r4
+; CHECK-NEXT:    lsls r2, r2, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r2, [r3, #3]
+; CHECK-NEXT:    vmovmi.32 q4[3], r2
+; CHECK-NEXT:    vand q5, q4, q2
+; CHECK-NEXT:    vmov q4, q3
+; CHECK-NEXT:    adds r0, #4
+; CHECK-NEXT:    vmla.u32 q4, q5, r12
+; CHECK-NEXT:    le lr, .LBB0_1
+; CHECK-NEXT:  @ %bb.2: @ %middle.block
+; CHECK-NEXT:    vpsel q0, q4, q3
+; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    add sp, #8
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    pop.w {r4, lr}
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.3:
+; CHECK-NEXT:  .LCPI0_0:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+entry:
+  %cmp7 = icmp eq i32 %N, 0
+  br i1 %cmp7, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %conv = zext i8 %a to i32
+  %n.rnd.up = add i32 %N, 3
+  %n.vec = and i32 %n.rnd.up, -4
+  %trip.count.minus.1 = add i32 %N, -1
+  %broadcast.splatinsert10 = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
+  %broadcast.splat11 = shufflevector <4 x i32> %broadcast.splatinsert10, <4 x i32> undef, <4 x i32> zeroinitializer
+  %broadcast.splatinsert12 = insertelement <4 x i32> undef, i32 %conv, i32 0
+  %broadcast.splat13 = shufflevector <4 x i32> %broadcast.splatinsert12, <4 x i32> undef, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %5, %vector.body ]
+  %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
+  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
+  %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
+  %0 = getelementptr inbounds i8, i8* %b, i32 %index
+  %1 = icmp ule <4 x i32> %induction, %broadcast.splat11
+  %2 = bitcast i8* %0 to <4 x i8>*
+  %wide.masked.load = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %2, i32 1, <4 x i1> %1, <4 x i8> undef)
+  %3 = zext <4 x i8> %wide.masked.load to <4 x i32>
+  %4 = mul nuw nsw <4 x i32> %broadcast.splat13, %3
+  %5 = add nuw nsw <4 x i32> %4, %vec.phi
+  %index.next = add i32 %index, 4
+  %6 = icmp eq i32 %index.next, %n.vec
+  br i1 %6, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %7 = select <4 x i1> %1, <4 x i32> %5, <4 x i32> %vec.phi
+  %8 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %7)
+  br label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %middle.block, %entry
+  %res.0.lcssa = phi i32 [ 0, %entry ], [ %8, %middle.block ]
+  ret i32 %res.0.lcssa
+}
+
+define arm_aapcs_vfpcc i32 @test_acc_scalar_short(i16 signext %a, i16* nocapture readonly %b, i32 %N) {
+; CHECK-LABEL: test_acc_scalar_short:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mov r12, r0
+; CHECK-NEXT:    movs r0, #0
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    bxeq lr
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    sub sp, #8
+; CHECK-NEXT:    adds r3, r2, #3
+; CHECK-NEXT:    subs r2, #1
+; CHECK-NEXT:    bic r3, r3, #3
+; CHECK-NEXT:    vdup.32 q0, r2
+; CHECK-NEXT:    sub.w lr, r3, #4
+; CHECK-NEXT:    adr r2, .LCPI1_0
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    vldrw.u32 q1, [r2]
+; CHECK-NEXT:    add.w lr, r3, lr, lsr #2
+; CHECK-NEXT:    vmov.i32 q3, #0x0
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB1_1: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmov q2, q3
+; CHECK-NEXT:    vadd.i32 q3, q1, r0
+; CHECK-NEXT:    vcmp.u32 cs, q0, q3
+; CHECK-NEXT:    @ implicit-def: $q3
+; CHECK-NEXT:    adds r0, #4
+; CHECK-NEXT:    vmrs r3, p0
+; CHECK-NEXT:    and r2, r3, #1
+; CHECK-NEXT:    rsbs r4, r2, #0
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    bfi r2, r4, #0, #1
+; CHECK-NEXT:    ubfx r4, r3, #4, #1
+; CHECK-NEXT:    rsbs r4, r4, #0
+; CHECK-NEXT:    bfi r2, r4, #1, #1
+; CHECK-NEXT:    ubfx r4, r3, #8, #1
+; CHECK-NEXT:    ubfx r3, r3, #12, #1
+; CHECK-NEXT:    rsbs r4, r4, #0
+; CHECK-NEXT:    bfi r2, r4, #2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    bfi r2, r3, #3, #1
+; CHECK-NEXT:    lsls r3, r2, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    ldrhne r3, [r1]
+; CHECK-NEXT:    vmovne.32 q3[0], r3
+; CHECK-NEXT:    lsls r3, r2, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r3, [r1, #2]
+; CHECK-NEXT:    vmovmi.32 q3[1], r3
+; CHECK-NEXT:    lsls r3, r2, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r3, [r1, #4]
+; CHECK-NEXT:    vmovmi.32 q3[2], r3
+; CHECK-NEXT:    lsls r2, r2, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r2, [r1, #6]
+; CHECK-NEXT:    vmovmi.32 q3[3], r2
+; CHECK-NEXT:    vmovlb.s16 q4, q3
+; CHECK-NEXT:    vmov q3, q2
+; CHECK-NEXT:    adds r1, #8
+; CHECK-NEXT:    vmla.u32 q3, q4, r12
+; CHECK-NEXT:    le lr, .LBB1_1
+; CHECK-NEXT:  @ %bb.2: @ %middle.block
+; CHECK-NEXT:    vpsel q0, q3, q2
+; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    add sp, #8
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop.w {r4, lr}
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.3:
+; CHECK-NEXT:  .LCPI1_0:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+entry:
+  %cmp7 = icmp eq i32 %N, 0
+  br i1 %cmp7, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %conv = sext i16 %a to i32
+  %n.rnd.up = add i32 %N, 3
+  %n.vec = and i32 %n.rnd.up, -4
+  %trip.count.minus.1 = add i32 %N, -1
+  %broadcast.splatinsert10 = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
+  %broadcast.splat11 = shufflevector <4 x i32> %broadcast.splatinsert10, <4 x i32> undef, <4 x i32> zeroinitializer
+  %broadcast.splatinsert12 = insertelement <4 x i32> undef, i32 %conv, i32 0
+  %broadcast.splat13 = shufflevector <4 x i32> %broadcast.splatinsert12, <4 x i32> undef, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %5, %vector.body ]
+  %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
+  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
+  %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
+  %0 = getelementptr inbounds i16, i16* %b, i32 %index
+  %1 = icmp ule <4 x i32> %induction, %broadcast.splat11
+  %2 = bitcast i16* %0 to <4 x i16>*
+  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %2, i32 2, <4 x i1> %1, <4 x i16> undef)
+  %3 = sext <4 x i16> %wide.masked.load to <4 x i32>
+  %4 = mul nsw <4 x i32> %broadcast.splat13, %3
+  %5 = add nsw <4 x i32> %4, %vec.phi
+  %index.next = add i32 %index, 4
+  %6 = icmp eq i32 %index.next, %n.vec
+  br i1 %6, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %7 = select <4 x i1> %1, <4 x i32> %5, <4 x i32> %vec.phi
+  %8 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %7)
+  br label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %middle.block, %entry
+  %res.0.lcssa = phi i32 [ 0, %entry ], [ %8, %middle.block ]
+  ret i32 %res.0.lcssa
+}
+
+define arm_aapcs_vfpcc i32 @test_acc_scalar_uchar(i8 zeroext %a, i8* nocapture readonly %b, i32 %N) {
+; CHECK-LABEL: test_acc_scalar_uchar:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mov r12, r0
+; CHECK-NEXT:    movs r0, #0
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    bxeq lr
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    sub sp, #8
+; CHECK-NEXT:    adds r3, r2, #3
+; CHECK-NEXT:    subs r2, #1
+; CHECK-NEXT:    bic r3, r3, #3
+; CHECK-NEXT:    vdup.32 q0, r2
+; CHECK-NEXT:    sub.w lr, r3, #4
+; CHECK-NEXT:    adr r2, .LCPI2_0
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    vldrw.u32 q1, [r2]
+; CHECK-NEXT:    add.w lr, r3, lr, lsr #2
+; CHECK-NEXT:    vmov.i32 q4, #0x0
+; CHECK-NEXT:    vmov.i32 q2, #0xff
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB2_1: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmov q3, q4
+; CHECK-NEXT:    vadd.i32 q4, q1, r0
+; CHECK-NEXT:    vcmp.u32 cs, q0, q4
+; CHECK-NEXT:    @ implicit-def: $q4
+; CHECK-NEXT:    vmrs r3, p0
+; CHECK-NEXT:    and r2, r3, #1
+; CHECK-NEXT:    rsbs r4, r2, #0
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    bfi r2, r4, #0, #1
+; CHECK-NEXT:    ubfx r4, r3, #4, #1
+; CHECK-NEXT:    rsbs r4, r4, #0
+; CHECK-NEXT:    bfi r2, r4, #1, #1
+; CHECK-NEXT:    ubfx r4, r3, #8, #1
+; CHECK-NEXT:    ubfx r3, r3, #12, #1
+; CHECK-NEXT:    rsbs r4, r4, #0
+; CHECK-NEXT:    bfi r2, r4, #2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    bfi r2, r3, #3, #1
+; CHECK-NEXT:    lsls r3, r2, #31
+; CHECK-NEXT:    add.w r3, r1, r0
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    ldrbne r4, [r3]
+; CHECK-NEXT:    vmovne.32 q4[0], r4
+; CHECK-NEXT:    lsls r4, r2, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r4, [r3, #1]
+; CHECK-NEXT:    vmovmi.32 q4[1], r4
+; CHECK-NEXT:    lsls r4, r2, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r4, [r3, #2]
+; CHECK-NEXT:    vmovmi.32 q4[2], r4
+; CHECK-NEXT:    lsls r2, r2, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r2, [r3, #3]
+; CHECK-NEXT:    vmovmi.32 q4[3], r2
+; CHECK-NEXT:    vand q5, q4, q2
+; CHECK-NEXT:    vmov q4, q3
+; CHECK-NEXT:    adds r0, #4
+; CHECK-NEXT:    vmla.u32 q4, q5, r12
+; CHECK-NEXT:    le lr, .LBB2_1
+; CHECK-NEXT:  @ %bb.2: @ %middle.block
+; CHECK-NEXT:    vpsel q0, q4, q3
+; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    add sp, #8
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    pop.w {r4, lr}
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.3:
+; CHECK-NEXT:  .LCPI2_0:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+entry:
+  %cmp7 = icmp eq i32 %N, 0
+  br i1 %cmp7, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %conv = zext i8 %a to i32
+  %n.rnd.up = add i32 %N, 3
+  %n.vec = and i32 %n.rnd.up, -4
+  %trip.count.minus.1 = add i32 %N, -1
+  %broadcast.splatinsert10 = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
+  %broadcast.splat11 = shufflevector <4 x i32> %broadcast.splatinsert10, <4 x i32> undef, <4 x i32> zeroinitializer
+  %broadcast.splatinsert12 = insertelement <4 x i32> undef, i32 %conv, i32 0
+  %broadcast.splat13 = shufflevector <4 x i32> %broadcast.splatinsert12, <4 x i32> undef, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %5, %vector.body ]
+  %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
+  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
+  %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
+  %0 = getelementptr inbounds i8, i8* %b, i32 %index
+  %1 = icmp ule <4 x i32> %induction, %broadcast.splat11
+  %2 = bitcast i8* %0 to <4 x i8>*
+  %wide.masked.load = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %2, i32 1, <4 x i1> %1, <4 x i8> undef)
+  %3 = zext <4 x i8> %wide.masked.load to <4 x i32>
+  %4 = mul nuw nsw <4 x i32> %broadcast.splat13, %3
+  %5 = add nuw nsw <4 x i32> %4, %vec.phi
+  %index.next = add i32 %index, 4
+  %6 = icmp eq i32 %index.next, %n.vec
+  br i1 %6, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %7 = select <4 x i1> %1, <4 x i32> %5, <4 x i32> %vec.phi
+  %8 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %7)
+  br label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %middle.block, %entry
+  %res.0.lcssa = phi i32 [ 0, %entry ], [ %8, %middle.block ]
+  ret i32 %res.0.lcssa
+}
+
+define arm_aapcs_vfpcc i32 @test_acc_scalar_ushort(i16 signext %a, i16* nocapture readonly %b, i32 %N) {
+; CHECK-LABEL: test_acc_scalar_ushort:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mov r12, r0
+; CHECK-NEXT:    movs r0, #0
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    bxeq lr
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    sub sp, #8
+; CHECK-NEXT:    adds r3, r2, #3
+; CHECK-NEXT:    subs r2, #1
+; CHECK-NEXT:    bic r3, r3, #3
+; CHECK-NEXT:    vdup.32 q0, r2
+; CHECK-NEXT:    sub.w lr, r3, #4
+; CHECK-NEXT:    adr r2, .LCPI3_0
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    vldrw.u32 q1, [r2]
+; CHECK-NEXT:    add.w lr, r3, lr, lsr #2
+; CHECK-NEXT:    vmov.i32 q3, #0x0
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB3_1: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmov q2, q3
+; CHECK-NEXT:    vadd.i32 q3, q1, r0
+; CHECK-NEXT:    vcmp.u32 cs, q0, q3
+; CHECK-NEXT:    @ implicit-def: $q3
+; CHECK-NEXT:    adds r0, #4
+; CHECK-NEXT:    vmrs r3, p0
+; CHECK-NEXT:    and r2, r3, #1
+; CHECK-NEXT:    rsbs r4, r2, #0
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    bfi r2, r4, #0, #1
+; CHECK-NEXT:    ubfx r4, r3, #4, #1
+; CHECK-NEXT:    rsbs r4, r4, #0
+; CHECK-NEXT:    bfi r2, r4, #1, #1
+; CHECK-NEXT:    ubfx r4, r3, #8, #1
+; CHECK-NEXT:    ubfx r3, r3, #12, #1
+; CHECK-NEXT:    rsbs r4, r4, #0
+; CHECK-NEXT:    bfi r2, r4, #2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    bfi r2, r3, #3, #1
+; CHECK-NEXT:    lsls r3, r2, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    ldrhne r3, [r1]
+; CHECK-NEXT:    vmovne.32 q3[0], r3
+; CHECK-NEXT:    lsls r3, r2, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r3, [r1, #2]
+; CHECK-NEXT:    vmovmi.32 q3[1], r3
+; CHECK-NEXT:    lsls r3, r2, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r3, [r1, #4]
+; CHECK-NEXT:    vmovmi.32 q3[2], r3
+; CHECK-NEXT:    lsls r2, r2, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r2, [r1, #6]
+; CHECK-NEXT:    vmovmi.32 q3[3], r2
+; CHECK-NEXT:    vmovlb.u16 q4, q3
+; CHECK-NEXT:    vmov q3, q2
+; CHECK-NEXT:    adds r1, #8
+; CHECK-NEXT:    vmla.u32 q3, q4, r12
+; CHECK-NEXT:    le lr, .LBB3_1
+; CHECK-NEXT:  @ %bb.2: @ %middle.block
+; CHECK-NEXT:    vpsel q0, q3, q2
+; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    add sp, #8
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop.w {r4, lr}
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.3:
+; CHECK-NEXT:  .LCPI3_0:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+entry:
+  %cmp7 = icmp eq i32 %N, 0
+  br i1 %cmp7, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %conv = sext i16 %a to i32
+  %n.rnd.up = add i32 %N, 3
+  %n.vec = and i32 %n.rnd.up, -4
+  %trip.count.minus.1 = add i32 %N, -1
+  %broadcast.splatinsert10 = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
+  %broadcast.splat11 = shufflevector <4 x i32> %broadcast.splatinsert10, <4 x i32> undef, <4 x i32> zeroinitializer
+  %broadcast.splatinsert12 = insertelement <4 x i32> undef, i32 %conv, i32 0
+  %broadcast.splat13 = shufflevector <4 x i32> %broadcast.splatinsert12, <4 x i32> undef, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %5, %vector.body ]
+  %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
+  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
+  %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
+  %0 = getelementptr inbounds i16, i16* %b, i32 %index
+  %1 = icmp ule <4 x i32> %induction, %broadcast.splat11
+  %2 = bitcast i16* %0 to <4 x i16>*
+  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %2, i32 2, <4 x i1> %1, <4 x i16> undef)
+  %3 = zext <4 x i16> %wide.masked.load to <4 x i32>
+  %4 = mul nsw <4 x i32> %broadcast.splat13, %3
+  %5 = add nsw <4 x i32> %4, %vec.phi
+  %index.next = add i32 %index, 4
+  %6 = icmp eq i32 %index.next, %n.vec
+  br i1 %6, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %7 = select <4 x i1> %1, <4 x i32> %5, <4 x i32> %vec.phi
+  %8 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %7)
+  br label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %middle.block, %entry
+  %res.0.lcssa = phi i32 [ 0, %entry ], [ %8, %middle.block ]
+  ret i32 %res.0.lcssa
+}
+
+define arm_aapcs_vfpcc i32 @test_acc_scalar_int(i32 %a, i32* nocapture readonly %b, i32 %N) {
+; CHECK-LABEL: test_acc_scalar_int:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    itt eq
+; CHECK-NEXT:    moveq r0, #0
+; CHECK-NEXT:    bxeq lr
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    adds r3, r2, #3
+; CHECK-NEXT:    vmov.i32 q0, #0x0
+; CHECK-NEXT:    bic r3, r3, #3
+; CHECK-NEXT:    sub.w r12, r3, #4
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    add.w lr, r3, r12, lsr #2
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB4_1: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    subs r2, #4
+; CHECK-NEXT:    vmov q1, q0
+; CHECK-NEXT:    vctp.32 r2
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vldrwt.u32 q2, [r1]
+; CHECK-NEXT:    adds r1, #16
+; CHECK-NEXT:    vmla.u32 q0, q2, r0
+; CHECK-NEXT:    le lr, .LBB4_1
+; CHECK-NEXT:  @ %bb.2: @ %middle.block
+; CHECK-NEXT:    vctp.32 r2
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %cmp6 = icmp eq i32 %N, 0
+  br i1 %cmp6, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %n.rnd.up = add i32 %N, 3
+  %n.vec = and i32 %n.rnd.up, -4
+  %trip.count.minus.1 = add i32 %N, -1
+  %broadcast.splatinsert9 = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
+  %broadcast.splat10 = shufflevector <4 x i32> %broadcast.splatinsert9, <4 x i32> undef, <4 x i32> zeroinitializer
+  %broadcast.splatinsert11 = insertelement <4 x i32> undef, i32 %a, i32 0
+  %broadcast.splat12 = shufflevector <4 x i32> %broadcast.splatinsert11, <4 x i32> undef, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %4, %vector.body ]
+  %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
+  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
+  %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
+  %0 = getelementptr inbounds i32, i32* %b, i32 %index
+  %1 = icmp ule <4 x i32> %induction, %broadcast.splat10
+  %2 = bitcast i32* %0 to <4 x i32>*
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %2, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %3 = mul nsw <4 x i32> %wide.masked.load, %broadcast.splat12
+  %4 = add nsw <4 x i32> %3, %vec.phi
+  %index.next = add i32 %index, 4
+  %5 = icmp eq i32 %index.next, %n.vec
+  br i1 %5, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %6 = select <4 x i1> %1, <4 x i32> %4, <4 x i32> %vec.phi
+  %7 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %6)
+  br label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %middle.block, %entry
+  %res.0.lcssa = phi i32 [ 0, %entry ], [ %7, %middle.block ]
+  ret i32 %res.0.lcssa
+}
+
+define arm_aapcs_vfpcc void @test_vec_mul_scalar_add_char(i8* nocapture readonly %a, i8* nocapture readonly %b, i8 zeroext %c, i32* nocapture %res, i32 %N) {
+; CHECK-LABEL: test_vec_mul_scalar_add_char:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, lr}
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    sub sp, #8
+; CHECK-NEXT:    ldr.w r12, [sp, #72]
+; CHECK-NEXT:    cmp.w r12, #0
+; CHECK-NEXT:    beq.w .LBB5_12
+; CHECK-NEXT:  @ %bb.1: @ %for.body.lr.ph
+; CHECK-NEXT:    add.w r5, r3, r12, lsl #2
+; CHECK-NEXT:    add.w r6, r1, r12
+; CHECK-NEXT:    cmp r5, r1
+; CHECK-NEXT:    add.w r4, r0, r12
+; CHECK-NEXT:    cset r7, hi
+; CHECK-NEXT:    cmp r6, r3
+; CHECK-NEXT:    cset r6, hi
+; CHECK-NEXT:    cmp r5, r0
+; CHECK-NEXT:    cset r5, hi
+; CHECK-NEXT:    cmp r4, r3
+; CHECK-NEXT:    cset r4, hi
+; CHECK-NEXT:    ands r5, r4
+; CHECK-NEXT:    lsls r5, r5, #31
+; CHECK-NEXT:    itt eq
+; CHECK-NEXT:    andeq r7, r6
+; CHECK-NEXT:    lslseq.w r7, r7, #31
+; CHECK-NEXT:    beq .LBB5_4
+; CHECK-NEXT:  @ %bb.2: @ %for.body.preheader
+; CHECK-NEXT:    sub.w r4, r12, #1
+; CHECK-NEXT:    and lr, r12, #3
+; CHECK-NEXT:    cmp r4, #3
+; CHECK-NEXT:    bhs.w .LBB5_6
+; CHECK-NEXT:  @ %bb.3:
+; CHECK-NEXT:    movs r7, #0
+; CHECK-NEXT:    b .LBB5_9
+; CHECK-NEXT:  .LBB5_4: @ %vector.ph
+; CHECK-NEXT:    add.w r7, r12, #3
+; CHECK-NEXT:    adr r5, .LCPI5_0
+; CHECK-NEXT:    bic r7, r7, #3
+; CHECK-NEXT:    sub.w r4, r12, #1
+; CHECK-NEXT:    subs r7, #4
+; CHECK-NEXT:    movs r6, #1
+; CHECK-NEXT:    vldrw.u32 q1, [r5]
+; CHECK-NEXT:    vdup.32 q0, r4
+; CHECK-NEXT:    add.w lr, r6, r7, lsr #2
+; CHECK-NEXT:    movs r4, #0
+; CHECK-NEXT:    vmov.i32 q2, #0xff
+; CHECK-NEXT:    vmov.i32 q3, #0xff
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB5_5: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vadd.i32 q4, q1, r4
+; CHECK-NEXT:    @ implicit-def: $q5
+; CHECK-NEXT:    sub.w r12, r12, #4
+; CHECK-NEXT:    vcmp.u32 cs, q0, q4
+; CHECK-NEXT:    @ implicit-def: $q4
+; CHECK-NEXT:    vmrs r6, p0
+; CHECK-NEXT:    and r5, r6, #1
+; CHECK-NEXT:    rsbs r7, r5, #0
+; CHECK-NEXT:    movs r5, #0
+; CHECK-NEXT:    bfi r5, r7, #0, #1
+; CHECK-NEXT:    ubfx r7, r6, #4, #1
+; CHECK-NEXT:    rsbs r7, r7, #0
+; CHECK-NEXT:    bfi r5, r7, #1, #1
+; CHECK-NEXT:    ubfx r7, r6, #8, #1
+; CHECK-NEXT:    ubfx r6, r6, #12, #1
+; CHECK-NEXT:    rsbs r7, r7, #0
+; CHECK-NEXT:    bfi r5, r7, #2, #1
+; CHECK-NEXT:    rsbs r6, r6, #0
+; CHECK-NEXT:    bfi r5, r6, #3, #1
+; CHECK-NEXT:    lsls r6, r5, #31
+; CHECK-NEXT:    add.w r6, r0, r4
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    ldrbne r7, [r6]
+; CHECK-NEXT:    vmovne.32 q4[0], r7
+; CHECK-NEXT:    lsls r7, r5, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r7, [r6, #1]
+; CHECK-NEXT:    vmovmi.32 q4[1], r7
+; CHECK-NEXT:    lsls r7, r5, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r7, [r6, #2]
+; CHECK-NEXT:    vmovmi.32 q4[2], r7
+; CHECK-NEXT:    lsls r5, r5, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r5, [r6, #3]
+; CHECK-NEXT:    vmovmi.32 q4[3], r5
+; CHECK-NEXT:    vmrs r6, p0
+; CHECK-NEXT:    vand q4, q4, q2
+; CHECK-NEXT:    and r5, r6, #1
+; CHECK-NEXT:    rsbs r7, r5, #0
+; CHECK-NEXT:    movs r5, #0
+; CHECK-NEXT:    bfi r5, r7, #0, #1
+; CHECK-NEXT:    ubfx r7, r6, #4, #1
+; CHECK-NEXT:    rsbs r7, r7, #0
+; CHECK-NEXT:    bfi r5, r7, #1, #1
+; CHECK-NEXT:    ubfx r7, r6, #8, #1
+; CHECK-NEXT:    ubfx r6, r6, #12, #1
+; CHECK-NEXT:    rsbs r7, r7, #0
+; CHECK-NEXT:    bfi r5, r7, #2, #1
+; CHECK-NEXT:    rsbs r6, r6, #0
+; CHECK-NEXT:    bfi r5, r6, #3, #1
+; CHECK-NEXT:    lsls r6, r5, #31
+; CHECK-NEXT:    add.w r6, r1, r4
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    ldrbne r7, [r6]
+; CHECK-NEXT:    vmovne.32 q5[0], r7
+; CHECK-NEXT:    lsls r7, r5, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r7, [r6, #1]
+; CHECK-NEXT:    vmovmi.32 q5[1], r7
+; CHECK-NEXT:    lsls r7, r5, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r7, [r6, #2]
+; CHECK-NEXT:    vmovmi.32 q5[2], r7
+; CHECK-NEXT:    lsls r5, r5, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r5, [r6, #3]
+; CHECK-NEXT:    vmovmi.32 q5[3], r5
+; CHECK-NEXT:    vand q5, q5, q3
+; CHECK-NEXT:    vctp.32 r12
+; CHECK-NEXT:    vmul.i32 q4, q5, q4
+; CHECK-NEXT:    adds r4, #4
+; CHECK-NEXT:    vadd.i32 q4, q4, r2
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vstrwt.32 q4, [r3]
+; CHECK-NEXT:    adds r3, #16
+; CHECK-NEXT:    le lr, .LBB5_5
+; CHECK-NEXT:    b .LBB5_12
+; CHECK-NEXT:  .LBB5_6: @ %for.body.preheader.new
+; CHECK-NEXT:    sub.w r12, lr, r12
+; CHECK-NEXT:    subs r4, r1, #3
+; CHECK-NEXT:    subs r5, r0, #3
+; CHECK-NEXT:    sub.w r7, r3, #16
+; CHECK-NEXT:    mov.w r9, #0
+; CHECK-NEXT:  .LBB5_7: @ %for.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldrb.w r8, [r5, #3]
+; CHECK-NEXT:    sub.w r9, r9, #4
+; CHECK-NEXT:    ldrb r6, [r4, #3]
+; CHECK-NEXT:    cmp r12, r9
+; CHECK-NEXT:    smlabb r6, r6, r8, r2
+; CHECK-NEXT:    str r6, [r7, #16]!
+; CHECK-NEXT:    ldrb r8, [r5, #4]!
+; CHECK-NEXT:    ldrb r6, [r4, #4]!
+; CHECK-NEXT:    smlabb r6, r6, r8, r2
+; CHECK-NEXT:    str r6, [r7, #4]
+; CHECK-NEXT:    ldrb.w r8, [r5, #1]
+; CHECK-NEXT:    ldrb r6, [r4, #1]
+; CHECK-NEXT:    smlabb r6, r6, r8, r2
+; CHECK-NEXT:    str r6, [r7, #8]
+; CHECK-NEXT:    ldrb.w r8, [r5, #2]
+; CHECK-NEXT:    ldrb r6, [r4, #2]
+; CHECK-NEXT:    smlabb r6, r6, r8, r2
+; CHECK-NEXT:    str r6, [r7, #12]
+; CHECK-NEXT:    bne .LBB5_7
+; CHECK-NEXT:  @ %bb.8: @ %for.cond.cleanup.loopexit.unr-lcssa.loopexit
+; CHECK-NEXT:    rsb.w r7, r9, #0
+; CHECK-NEXT:  .LBB5_9: @ %for.cond.cleanup.loopexit.unr-lcssa
+; CHECK-NEXT:    wls lr, lr, .LBB5_12
+; CHECK-NEXT:  @ %bb.10: @ %for.body.epil.preheader
+; CHECK-NEXT:    subs r7, #1
+; CHECK-NEXT:    add r0, r7
+; CHECK-NEXT:    add r1, r7
+; CHECK-NEXT:    add.w r3, r3, r7, lsl #2
+; CHECK-NEXT:  .LBB5_11: @ %for.body.epil
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldrb r7, [r0, #1]!
+; CHECK-NEXT:    ldrb r6, [r1, #1]!
+; CHECK-NEXT:    smlabb r7, r6, r7, r2
+; CHECK-NEXT:    str r7, [r3, #4]!
+; CHECK-NEXT:    le lr, .LBB5_11
+; CHECK-NEXT:  .LBB5_12: @ %for.cond.cleanup
+; CHECK-NEXT:    add sp, #8
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.13:
+; CHECK-NEXT:  .LCPI5_0:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+entry:
+  %res12 = bitcast i32* %res to i8*
+  %cmp10 = icmp eq i32 %N, 0
+  br i1 %cmp10, label %for.cond.cleanup, label %for.body.lr.ph
+
+for.body.lr.ph:                                   ; preds = %entry
+  %conv3 = zext i8 %c to i32
+  %scevgep = getelementptr i32, i32* %res, i32 %N
+  %scevgep13 = bitcast i32* %scevgep to i8*
+  %scevgep14 = getelementptr i8, i8* %a, i32 %N
+  %scevgep15 = getelementptr i8, i8* %b, i32 %N
+  %bound0 = icmp ugt i8* %scevgep14, %res12
+  %bound1 = icmp ugt i8* %scevgep13, %a
+  %found.conflict = and i1 %bound0, %bound1
+  %bound016 = icmp ugt i8* %scevgep15, %res12
+  %bound117 = icmp ugt i8* %scevgep13, %b
+  %found.conflict18 = and i1 %bound016, %bound117
+  %conflict.rdx = or i1 %found.conflict, %found.conflict18
+  br i1 %conflict.rdx, label %for.body.preheader, label %vector.ph
+
+for.body.preheader:                               ; preds = %for.body.lr.ph
+  %0 = add i32 %N, -1
+  %xtraiter = and i32 %N, 3
+  %1 = icmp ult i32 %0, 3
+  br i1 %1, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body.preheader.new
+
+for.body.preheader.new:                           ; preds = %for.body.preheader
+  %unroll_iter = sub i32 %N, %xtraiter
+  br label %for.body
+
+vector.ph:                                        ; preds = %for.body.lr.ph
+  %n.rnd.up = add i32 %N, 3
+  %n.vec = and i32 %n.rnd.up, -4
+  %trip.count.minus.1 = add i32 %N, -1
+  %broadcast.splatinsert19 = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
+  %broadcast.splat20 = shufflevector <4 x i32> %broadcast.splatinsert19, <4 x i32> undef, <4 x i32> zeroinitializer
+  %broadcast.splatinsert22 = insertelement <4 x i32> undef, i32 %conv3, i32 0
+  %broadcast.splat23 = shufflevector <4 x i32> %broadcast.splatinsert22, <4 x i32> undef, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
+  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
+  %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
+  %2 = getelementptr inbounds i8, i8* %a, i32 %index
+  %3 = icmp ule <4 x i32> %induction, %broadcast.splat20
+  %4 = bitcast i8* %2 to <4 x i8>*
+  %wide.masked.load = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %4, i32 1, <4 x i1> %3, <4 x i8> undef)
+  %5 = zext <4 x i8> %wide.masked.load to <4 x i32>
+  %6 = getelementptr inbounds i8, i8* %b, i32 %index
+  %7 = bitcast i8* %6 to <4 x i8>*
+  %wide.masked.load21 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %7, i32 1, <4 x i1> %3, <4 x i8> undef)
+  %8 = zext <4 x i8> %wide.masked.load21 to <4 x i32>
+  %9 = mul nuw nsw <4 x i32> %8, %5
+  %10 = add nuw nsw <4 x i32> %9, %broadcast.splat23
+  %11 = getelementptr inbounds i32, i32* %res, i32 %index
+  %12 = bitcast i32* %11 to <4 x i32>*
+  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %10, <4 x i32>* %12, i32 4, <4 x i1> %3)
+  %index.next = add i32 %index, 4
+  %13 = icmp eq i32 %index.next, %n.vec
+  br i1 %13, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup.loopexit.unr-lcssa:              ; preds = %for.body, %for.body.preheader
+  %i.011.unr = phi i32 [ 0, %for.body.preheader ], [ %inc.3, %for.body ]
+  %lcmp.mod = icmp eq i32 %xtraiter, 0
+  br i1 %lcmp.mod, label %for.cond.cleanup, label %for.body.epil
+
+for.body.epil:                                    ; preds = %for.cond.cleanup.loopexit.unr-lcssa, %for.body.epil
+  %i.011.epil = phi i32 [ %inc.epil, %for.body.epil ], [ %i.011.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
+  %epil.iter = phi i32 [ %epil.iter.sub, %for.body.epil ], [ %xtraiter, %for.cond.cleanup.loopexit.unr-lcssa ]
+  %arrayidx.epil = getelementptr inbounds i8, i8* %a, i32 %i.011.epil
+  %14 = load i8, i8* %arrayidx.epil, align 1
+  %conv.epil = zext i8 %14 to i32
+  %arrayidx1.epil = getelementptr inbounds i8, i8* %b, i32 %i.011.epil
+  %15 = load i8, i8* %arrayidx1.epil, align 1
+  %conv2.epil = zext i8 %15 to i32
+  %mul.epil = mul nuw nsw i32 %conv2.epil, %conv.epil
+  %add.epil = add nuw nsw i32 %mul.epil, %conv3
+  %arrayidx4.epil = getelementptr inbounds i32, i32* %res, i32 %i.011.epil
+  store i32 %add.epil, i32* %arrayidx4.epil, align 4
+  %inc.epil = add nuw i32 %i.011.epil, 1
+  %epil.iter.sub = add i32 %epil.iter, -1
+  %epil.iter.cmp = icmp eq i32 %epil.iter.sub, 0
+  br i1 %epil.iter.cmp, label %for.cond.cleanup, label %for.body.epil
+
+for.cond.cleanup:                                 ; preds = %vector.body, %for.cond.cleanup.loopexit.unr-lcssa, %for.body.epil, %entry
+  ret void
+
+for.body:                                         ; preds = %for.body, %for.body.preheader.new
+  %i.011 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
+  %niter = phi i32 [ %unroll_iter, %for.body.preheader.new ], [ %niter.nsub.3, %for.body ]
+  %arrayidx = getelementptr inbounds i8, i8* %a, i32 %i.011
+  %16 = load i8, i8* %arrayidx, align 1
+  %conv = zext i8 %16 to i32
+  %arrayidx1 = getelementptr inbounds i8, i8* %b, i32 %i.011
+  %17 = load i8, i8* %arrayidx1, align 1
+  %conv2 = zext i8 %17 to i32
+  %mul = mul nuw nsw i32 %conv2, %conv
+  %add = add nuw nsw i32 %mul, %conv3
+  %arrayidx4 = getelementptr inbounds i32, i32* %res, i32 %i.011
+  store i32 %add, i32* %arrayidx4, align 4
+  %inc = or i32 %i.011, 1
+  %arrayidx.1 = getelementptr inbounds i8, i8* %a, i32 %inc
+  %18 = load i8, i8* %arrayidx.1, align 1
+  %conv.1 = zext i8 %18 to i32
+  %arrayidx1.1 = getelementptr inbounds i8, i8* %b, i32 %inc
+  %19 = load i8, i8* %arrayidx1.1, align 1
+  %conv2.1 = zext i8 %19 to i32
+  %mul.1 = mul nuw nsw i32 %conv2.1, %conv.1
+  %add.1 = add nuw nsw i32 %mul.1, %conv3
+  %arrayidx4.1 = getelementptr inbounds i32, i32* %res, i32 %inc
+  store i32 %add.1, i32* %arrayidx4.1, align 4
+  %inc.1 = or i32 %i.011, 2
+  %arrayidx.2 = getelementptr inbounds i8, i8* %a, i32 %inc.1
+  %20 = load i8, i8* %arrayidx.2, align 1
+  %conv.2 = zext i8 %20 to i32
+  %arrayidx1.2 = getelementptr inbounds i8, i8* %b, i32 %inc.1
+  %21 = load i8, i8* %arrayidx1.2, align 1
+  %conv2.2 = zext i8 %21 to i32
+  %mul.2 = mul nuw nsw i32 %conv2.2, %conv.2
+  %add.2 = add nuw nsw i32 %mul.2, %conv3
+  %arrayidx4.2 = getelementptr inbounds i32, i32* %res, i32 %inc.1
+  store i32 %add.2, i32* %arrayidx4.2, align 4
+  %inc.2 = or i32 %i.011, 3
+  %arrayidx.3 = getelementptr inbounds i8, i8* %a, i32 %inc.2
+  %22 = load i8, i8* %arrayidx.3, align 1
+  %conv.3 = zext i8 %22 to i32
+  %arrayidx1.3 = getelementptr inbounds i8, i8* %b, i32 %inc.2
+  %23 = load i8, i8* %arrayidx1.3, align 1
+  %conv2.3 = zext i8 %23 to i32
+  %mul.3 = mul nuw nsw i32 %conv2.3, %conv.3
+  %add.3 = add nuw nsw i32 %mul.3, %conv3
+  %arrayidx4.3 = getelementptr inbounds i32, i32* %res, i32 %inc.2
+  store i32 %add.3, i32* %arrayidx4.3, align 4
+  %inc.3 = add nuw i32 %i.011, 4
+  %niter.nsub.3 = add i32 %niter, -4
+  %niter.ncmp.3 = icmp eq i32 %niter.nsub.3, 0
+  br i1 %niter.ncmp.3, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body
+}
+
+define arm_aapcs_vfpcc void @test_vec_mul_scalar_add_short(i16* nocapture readonly %a, i16* nocapture readonly %b, i16 signext %c, i32* nocapture %res, i32 %N) {
+; CHECK-LABEL: test_vec_mul_scalar_add_short:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    push {r4, r5, r6, r7, lr}
+; CHECK-NEXT:    sub sp, #8
+; CHECK-NEXT:    ldr.w r12, [sp, #28]
+; CHECK-NEXT:    cmp.w r12, #0
+; CHECK-NEXT:    beq.w .LBB6_3
+; CHECK-NEXT:  @ %bb.1: @ %vector.ph
+; CHECK-NEXT:    add.w r5, r12, #3
+; CHECK-NEXT:    movs r4, #1
+; CHECK-NEXT:    bic r5, r5, #3
+; CHECK-NEXT:    subs r5, #4
+; CHECK-NEXT:    add.w lr, r4, r5, lsr #2
+; CHECK-NEXT:    adr r5, .LCPI6_0
+; CHECK-NEXT:    sub.w r4, r12, #1
+; CHECK-NEXT:    vldrw.u32 q1, [r5]
+; CHECK-NEXT:    vdup.32 q0, r4
+; CHECK-NEXT:    movs r4, #0
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB6_2: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vadd.i32 q2, q1, r4
+; CHECK-NEXT:    @ implicit-def: $q3
+; CHECK-NEXT:    sub.w r12, r12, #4
+; CHECK-NEXT:    vcmp.u32 cs, q0, q2
+; CHECK-NEXT:    @ implicit-def: $q2
+; CHECK-NEXT:    adds r4, #4
+; CHECK-NEXT:    vmrs r6, p0
+; CHECK-NEXT:    and r5, r6, #1
+; CHECK-NEXT:    rsbs r7, r5, #0
+; CHECK-NEXT:    movs r5, #0
+; CHECK-NEXT:    bfi r5, r7, #0, #1
+; CHECK-NEXT:    ubfx r7, r6, #4, #1
+; CHECK-NEXT:    rsbs r7, r7, #0
+; CHECK-NEXT:    bfi r5, r7, #1, #1
+; CHECK-NEXT:    ubfx r7, r6, #8, #1
+; CHECK-NEXT:    ubfx r6, r6, #12, #1
+; CHECK-NEXT:    rsbs r7, r7, #0
+; CHECK-NEXT:    bfi r5, r7, #2, #1
+; CHECK-NEXT:    rsbs r6, r6, #0
+; CHECK-NEXT:    bfi r5, r6, #3, #1
+; CHECK-NEXT:    lsls r6, r5, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    ldrhne r6, [r0]
+; CHECK-NEXT:    vmovne.32 q2[0], r6
+; CHECK-NEXT:    lsls r6, r5, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r6, [r0, #2]
+; CHECK-NEXT:    vmovmi.32 q2[1], r6
+; CHECK-NEXT:    lsls r6, r5, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r6, [r0, #4]
+; CHECK-NEXT:    vmovmi.32 q2[2], r6
+; CHECK-NEXT:    lsls r5, r5, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r5, [r0, #6]
+; CHECK-NEXT:    vmovmi.32 q2[3], r5
+; CHECK-NEXT:    vmrs r6, p0
+; CHECK-NEXT:    vmovlb.s16 q2, q2
+; CHECK-NEXT:    adds r0, #8
+; CHECK-NEXT:    and r5, r6, #1
+; CHECK-NEXT:    rsbs r7, r5, #0
+; CHECK-NEXT:    movs r5, #0
+; CHECK-NEXT:    bfi r5, r7, #0, #1
+; CHECK-NEXT:    ubfx r7, r6, #4, #1
+; CHECK-NEXT:    rsbs r7, r7, #0
+; CHECK-NEXT:    bfi r5, r7, #1, #1
+; CHECK-NEXT:    ubfx r7, r6, #8, #1
+; CHECK-NEXT:    ubfx r6, r6, #12, #1
+; CHECK-NEXT:    rsbs r7, r7, #0
+; CHECK-NEXT:    bfi r5, r7, #2, #1
+; CHECK-NEXT:    rsbs r6, r6, #0
+; CHECK-NEXT:    bfi r5, r6, #3, #1
+; CHECK-NEXT:    lsls r6, r5, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    ldrhne r6, [r1]
+; CHECK-NEXT:    vmovne.32 q3[0], r6
+; CHECK-NEXT:    lsls r6, r5, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r6, [r1, #2]
+; CHECK-NEXT:    vmovmi.32 q3[1], r6
+; CHECK-NEXT:    lsls r6, r5, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r6, [r1, #4]
+; CHECK-NEXT:    vmovmi.32 q3[2], r6
+; CHECK-NEXT:    lsls r5, r5, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r5, [r1, #6]
+; CHECK-NEXT:    vmovmi.32 q3[3], r5
+; CHECK-NEXT:    vmovlb.s16 q3, q3
+; CHECK-NEXT:    vctp.32 r12
+; CHECK-NEXT:    vmul.i32 q2, q3, q2
+; CHECK-NEXT:    adds r1, #8
+; CHECK-NEXT:    vadd.i32 q2, q2, r2
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vstrwt.32 q2, [r3]
+; CHECK-NEXT:    adds r3, #16
+; CHECK-NEXT:    le lr, .LBB6_2
+; CHECK-NEXT:  .LBB6_3: @ %for.cond.cleanup
+; CHECK-NEXT:    add sp, #8
+; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.4:
+; CHECK-NEXT:  .LCPI6_0:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+entry:
+  %cmp10 = icmp eq i32 %N, 0
+  br i1 %cmp10, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %conv3 = sext i16 %c to i32
+  %n.rnd.up = add i32 %N, 3
+  %n.vec = and i32 %n.rnd.up, -4
+  %trip.count.minus.1 = add i32 %N, -1
+  %broadcast.splatinsert12 = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
+  %broadcast.splat13 = shufflevector <4 x i32> %broadcast.splatinsert12, <4 x i32> undef, <4 x i32> zeroinitializer
+  %broadcast.splatinsert15 = insertelement <4 x i32> undef, i32 %conv3, i32 0
+  %broadcast.splat16 = shufflevector <4 x i32> %broadcast.splatinsert15, <4 x i32> undef, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
+  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
+  %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
+  %0 = getelementptr inbounds i16, i16* %a, i32 %index
+  %1 = icmp ule <4 x i32> %induction, %broadcast.splat13
+  %2 = bitcast i16* %0 to <4 x i16>*
+  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %2, i32 2, <4 x i1> %1, <4 x i16> undef)
+  %3 = sext <4 x i16> %wide.masked.load to <4 x i32>
+  %4 = getelementptr inbounds i16, i16* %b, i32 %index
+  %5 = bitcast i16* %4 to <4 x i16>*
+  %wide.masked.load14 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %5, i32 2, <4 x i1> %1, <4 x i16> undef)
+  %6 = sext <4 x i16> %wide.masked.load14 to <4 x i32>
+  %7 = mul nsw <4 x i32> %6, %3
+  %8 = add nsw <4 x i32> %7, %broadcast.splat16
+  %9 = getelementptr inbounds i32, i32* %res, i32 %index
+  %10 = bitcast i32* %9 to <4 x i32>*
+  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %8, <4 x i32>* %10, i32 4, <4 x i1> %1)
+  %index.next = add i32 %index, 4
+  %11 = icmp eq i32 %index.next, %n.vec
+  br i1 %11, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  ret void
+}
+
+define arm_aapcs_vfpcc void @test_vec_mul_scalar_add_uchar(i8* nocapture readonly %a, i8* nocapture readonly %b, i8 zeroext %c, i32* nocapture %res, i32 %N) {
+; CHECK-LABEL: test_vec_mul_scalar_add_uchar:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, lr}
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    sub sp, #8
+; CHECK-NEXT:    ldr.w r12, [sp, #72]
+; CHECK-NEXT:    cmp.w r12, #0
+; CHECK-NEXT:    beq.w .LBB7_12
+; CHECK-NEXT:  @ %bb.1: @ %for.body.lr.ph
+; CHECK-NEXT:    add.w r5, r3, r12, lsl #2
+; CHECK-NEXT:    add.w r6, r1, r12
+; CHECK-NEXT:    cmp r5, r1
+; CHECK-NEXT:    add.w r4, r0, r12
+; CHECK-NEXT:    cset r7, hi
+; CHECK-NEXT:    cmp r6, r3
+; CHECK-NEXT:    cset r6, hi
+; CHECK-NEXT:    cmp r5, r0
+; CHECK-NEXT:    cset r5, hi
+; CHECK-NEXT:    cmp r4, r3
+; CHECK-NEXT:    cset r4, hi
+; CHECK-NEXT:    ands r5, r4
+; CHECK-NEXT:    lsls r5, r5, #31
+; CHECK-NEXT:    itt eq
+; CHECK-NEXT:    andeq r7, r6
+; CHECK-NEXT:    lslseq.w r7, r7, #31
+; CHECK-NEXT:    beq .LBB7_4
+; CHECK-NEXT:  @ %bb.2: @ %for.body.preheader
+; CHECK-NEXT:    sub.w r4, r12, #1
+; CHECK-NEXT:    and lr, r12, #3
+; CHECK-NEXT:    cmp r4, #3
+; CHECK-NEXT:    bhs.w .LBB7_6
+; CHECK-NEXT:  @ %bb.3:
+; CHECK-NEXT:    movs r7, #0
+; CHECK-NEXT:    b .LBB7_9
+; CHECK-NEXT:  .LBB7_4: @ %vector.ph
+; CHECK-NEXT:    add.w r7, r12, #3
+; CHECK-NEXT:    adr r5, .LCPI7_0
+; CHECK-NEXT:    bic r7, r7, #3
+; CHECK-NEXT:    sub.w r4, r12, #1
+; CHECK-NEXT:    subs r7, #4
+; CHECK-NEXT:    movs r6, #1
+; CHECK-NEXT:    vldrw.u32 q1, [r5]
+; CHECK-NEXT:    vdup.32 q0, r4
+; CHECK-NEXT:    add.w lr, r6, r7, lsr #2
+; CHECK-NEXT:    movs r4, #0
+; CHECK-NEXT:    vmov.i32 q2, #0xff
+; CHECK-NEXT:    vmov.i32 q3, #0xff
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB7_5: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vadd.i32 q4, q1, r4
+; CHECK-NEXT:    @ implicit-def: $q5
+; CHECK-NEXT:    sub.w r12, r12, #4
+; CHECK-NEXT:    vcmp.u32 cs, q0, q4
+; CHECK-NEXT:    @ implicit-def: $q4
+; CHECK-NEXT:    vmrs r6, p0
+; CHECK-NEXT:    and r5, r6, #1
+; CHECK-NEXT:    rsbs r7, r5, #0
+; CHECK-NEXT:    movs r5, #0
+; CHECK-NEXT:    bfi r5, r7, #0, #1
+; CHECK-NEXT:    ubfx r7, r6, #4, #1
+; CHECK-NEXT:    rsbs r7, r7, #0
+; CHECK-NEXT:    bfi r5, r7, #1, #1
+; CHECK-NEXT:    ubfx r7, r6, #8, #1
+; CHECK-NEXT:    ubfx r6, r6, #12, #1
+; CHECK-NEXT:    rsbs r7, r7, #0
+; CHECK-NEXT:    bfi r5, r7, #2, #1
+; CHECK-NEXT:    rsbs r6, r6, #0
+; CHECK-NEXT:    bfi r5, r6, #3, #1
+; CHECK-NEXT:    lsls r6, r5, #31
+; CHECK-NEXT:    add.w r6, r0, r4
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    ldrbne r7, [r6]
+; CHECK-NEXT:    vmovne.32 q4[0], r7
+; CHECK-NEXT:    lsls r7, r5, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r7, [r6, #1]
+; CHECK-NEXT:    vmovmi.32 q4[1], r7
+; CHECK-NEXT:    lsls r7, r5, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r7, [r6, #2]
+; CHECK-NEXT:    vmovmi.32 q4[2], r7
+; CHECK-NEXT:    lsls r5, r5, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r5, [r6, #3]
+; CHECK-NEXT:    vmovmi.32 q4[3], r5
+; CHECK-NEXT:    vmrs r6, p0
+; CHECK-NEXT:    vand q4, q4, q2
+; CHECK-NEXT:    and r5, r6, #1
+; CHECK-NEXT:    rsbs r7, r5, #0
+; CHECK-NEXT:    movs r5, #0
+; CHECK-NEXT:    bfi r5, r7, #0, #1
+; CHECK-NEXT:    ubfx r7, r6, #4, #1
+; CHECK-NEXT:    rsbs r7, r7, #0
+; CHECK-NEXT:    bfi r5, r7, #1, #1
+; CHECK-NEXT:    ubfx r7, r6, #8, #1
+; CHECK-NEXT:    ubfx r6, r6, #12, #1
+; CHECK-NEXT:    rsbs r7, r7, #0
+; CHECK-NEXT:    bfi r5, r7, #2, #1
+; CHECK-NEXT:    rsbs r6, r6, #0
+; CHECK-NEXT:    bfi r5, r6, #3, #1
+; CHECK-NEXT:    lsls r6, r5, #31
+; CHECK-NEXT:    add.w r6, r1, r4
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    ldrbne r7, [r6]
+; CHECK-NEXT:    vmovne.32 q5[0], r7
+; CHECK-NEXT:    lsls r7, r5, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r7, [r6, #1]
+; CHECK-NEXT:    vmovmi.32 q5[1], r7
+; CHECK-NEXT:    lsls r7, r5, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r7, [r6, #2]
+; CHECK-NEXT:    vmovmi.32 q5[2], r7
+; CHECK-NEXT:    lsls r5, r5, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r5, [r6, #3]
+; CHECK-NEXT:    vmovmi.32 q5[3], r5
+; CHECK-NEXT:    vand q5, q5, q3
+; CHECK-NEXT:    vctp.32 r12
+; CHECK-NEXT:    vmul.i32 q4, q5, q4
+; CHECK-NEXT:    adds r4, #4
+; CHECK-NEXT:    vadd.i32 q4, q4, r2
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vstrwt.32 q4, [r3]
+; CHECK-NEXT:    adds r3, #16
+; CHECK-NEXT:    le lr, .LBB7_5
+; CHECK-NEXT:    b .LBB7_12
+; CHECK-NEXT:  .LBB7_6: @ %for.body.preheader.new
+; CHECK-NEXT:    sub.w r12, lr, r12
+; CHECK-NEXT:    subs r4, r1, #3
+; CHECK-NEXT:    subs r5, r0, #3
+; CHECK-NEXT:    sub.w r7, r3, #16
+; CHECK-NEXT:    mov.w r9, #0
+; CHECK-NEXT:  .LBB7_7: @ %for.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldrb.w r8, [r5, #3]
+; CHECK-NEXT:    sub.w r9, r9, #4
+; CHECK-NEXT:    ldrb r6, [r4, #3]
+; CHECK-NEXT:    cmp r12, r9
+; CHECK-NEXT:    smlabb r6, r6, r8, r2
+; CHECK-NEXT:    str r6, [r7, #16]!
+; CHECK-NEXT:    ldrb r8, [r5, #4]!
+; CHECK-NEXT:    ldrb r6, [r4, #4]!
+; CHECK-NEXT:    smlabb r6, r6, r8, r2
+; CHECK-NEXT:    str r6, [r7, #4]
+; CHECK-NEXT:    ldrb.w r8, [r5, #1]
+; CHECK-NEXT:    ldrb r6, [r4, #1]
+; CHECK-NEXT:    smlabb r6, r6, r8, r2
+; CHECK-NEXT:    str r6, [r7, #8]
+; CHECK-NEXT:    ldrb.w r8, [r5, #2]
+; CHECK-NEXT:    ldrb r6, [r4, #2]
+; CHECK-NEXT:    smlabb r6, r6, r8, r2
+; CHECK-NEXT:    str r6, [r7, #12]
+; CHECK-NEXT:    bne .LBB7_7
+; CHECK-NEXT:  @ %bb.8: @ %for.cond.cleanup.loopexit.unr-lcssa.loopexit
+; CHECK-NEXT:    rsb.w r7, r9, #0
+; CHECK-NEXT:  .LBB7_9: @ %for.cond.cleanup.loopexit.unr-lcssa
+; CHECK-NEXT:    wls lr, lr, .LBB7_12
+; CHECK-NEXT:  @ %bb.10: @ %for.body.epil.preheader
+; CHECK-NEXT:    subs r7, #1
+; CHECK-NEXT:    add r0, r7
+; CHECK-NEXT:    add r1, r7
+; CHECK-NEXT:    add.w r3, r3, r7, lsl #2
+; CHECK-NEXT:  .LBB7_11: @ %for.body.epil
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldrb r7, [r0, #1]!
+; CHECK-NEXT:    ldrb r6, [r1, #1]!
+; CHECK-NEXT:    smlabb r7, r6, r7, r2
+; CHECK-NEXT:    str r7, [r3, #4]!
+; CHECK-NEXT:    le lr, .LBB7_11
+; CHECK-NEXT:  .LBB7_12: @ %for.cond.cleanup
+; CHECK-NEXT:    add sp, #8
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.13:
+; CHECK-NEXT:  .LCPI7_0:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+entry:
+  %res12 = bitcast i32* %res to i8*
+  %cmp10 = icmp eq i32 %N, 0
+  br i1 %cmp10, label %for.cond.cleanup, label %for.body.lr.ph
+
+for.body.lr.ph:                                   ; preds = %entry
+  %conv3 = zext i8 %c to i32
+  %scevgep = getelementptr i32, i32* %res, i32 %N
+  %scevgep13 = bitcast i32* %scevgep to i8*
+  %scevgep14 = getelementptr i8, i8* %a, i32 %N
+  %scevgep15 = getelementptr i8, i8* %b, i32 %N
+  %bound0 = icmp ugt i8* %scevgep14, %res12
+  %bound1 = icmp ugt i8* %scevgep13, %a
+  %found.conflict = and i1 %bound0, %bound1
+  %bound016 = icmp ugt i8* %scevgep15, %res12
+  %bound117 = icmp ugt i8* %scevgep13, %b
+  %found.conflict18 = and i1 %bound016, %bound117
+  %conflict.rdx = or i1 %found.conflict, %found.conflict18
+  br i1 %conflict.rdx, label %for.body.preheader, label %vector.ph
+
+for.body.preheader:                               ; preds = %for.body.lr.ph
+  %0 = add i32 %N, -1
+  %xtraiter = and i32 %N, 3
+  %1 = icmp ult i32 %0, 3
+  br i1 %1, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body.preheader.new
+
+for.body.preheader.new:                           ; preds = %for.body.preheader
+  %unroll_iter = sub i32 %N, %xtraiter
+  br label %for.body
+
+vector.ph:                                        ; preds = %for.body.lr.ph
+  %n.rnd.up = add i32 %N, 3
+  %n.vec = and i32 %n.rnd.up, -4
+  %trip.count.minus.1 = add i32 %N, -1
+  %broadcast.splatinsert19 = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
+  %broadcast.splat20 = shufflevector <4 x i32> %broadcast.splatinsert19, <4 x i32> undef, <4 x i32> zeroinitializer
+  %broadcast.splatinsert22 = insertelement <4 x i32> undef, i32 %conv3, i32 0
+  %broadcast.splat23 = shufflevector <4 x i32> %broadcast.splatinsert22, <4 x i32> undef, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
+  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
+  %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
+  %2 = getelementptr inbounds i8, i8* %a, i32 %index
+  %3 = icmp ule <4 x i32> %induction, %broadcast.splat20
+  %4 = bitcast i8* %2 to <4 x i8>*
+  %wide.masked.load = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %4, i32 1, <4 x i1> %3, <4 x i8> undef)
+  %5 = zext <4 x i8> %wide.masked.load to <4 x i32>
+  %6 = getelementptr inbounds i8, i8* %b, i32 %index
+  %7 = bitcast i8* %6 to <4 x i8>*
+  %wide.masked.load21 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %7, i32 1, <4 x i1> %3, <4 x i8> undef)
+  %8 = zext <4 x i8> %wide.masked.load21 to <4 x i32>
+  %9 = mul nuw nsw <4 x i32> %8, %5
+  %10 = add nuw nsw <4 x i32> %9, %broadcast.splat23
+  %11 = getelementptr inbounds i32, i32* %res, i32 %index
+  %12 = bitcast i32* %11 to <4 x i32>*
+  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %10, <4 x i32>* %12, i32 4, <4 x i1> %3)
+  %index.next = add i32 %index, 4
+  %13 = icmp eq i32 %index.next, %n.vec
+  br i1 %13, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup.loopexit.unr-lcssa:              ; preds = %for.body, %for.body.preheader
+  %i.011.unr = phi i32 [ 0, %for.body.preheader ], [ %inc.3, %for.body ]
+  %lcmp.mod = icmp eq i32 %xtraiter, 0
+  br i1 %lcmp.mod, label %for.cond.cleanup, label %for.body.epil
+
+for.body.epil:                                    ; preds = %for.cond.cleanup.loopexit.unr-lcssa, %for.body.epil
+  %i.011.epil = phi i32 [ %inc.epil, %for.body.epil ], [ %i.011.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
+  %epil.iter = phi i32 [ %epil.iter.sub, %for.body.epil ], [ %xtraiter, %for.cond.cleanup.loopexit.unr-lcssa ]
+  %arrayidx.epil = getelementptr inbounds i8, i8* %a, i32 %i.011.epil
+  %14 = load i8, i8* %arrayidx.epil, align 1
+  %conv.epil = zext i8 %14 to i32
+  %arrayidx1.epil = getelementptr inbounds i8, i8* %b, i32 %i.011.epil
+  %15 = load i8, i8* %arrayidx1.epil, align 1
+  %conv2.epil = zext i8 %15 to i32
+  %mul.epil = mul nuw nsw i32 %conv2.epil, %conv.epil
+  %add.epil = add nuw nsw i32 %mul.epil, %conv3
+  %arrayidx4.epil = getelementptr inbounds i32, i32* %res, i32 %i.011.epil
+  store i32 %add.epil, i32* %arrayidx4.epil, align 4
+  %inc.epil = add nuw i32 %i.011.epil, 1
+  %epil.iter.sub = add i32 %epil.iter, -1
+  %epil.iter.cmp = icmp eq i32 %epil.iter.sub, 0
+  br i1 %epil.iter.cmp, label %for.cond.cleanup, label %for.body.epil
+
+for.cond.cleanup:                                 ; preds = %vector.body, %for.cond.cleanup.loopexit.unr-lcssa, %for.body.epil, %entry
+  ret void
+
+for.body:                                         ; preds = %for.body, %for.body.preheader.new
+  %i.011 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
+  %niter = phi i32 [ %unroll_iter, %for.body.preheader.new ], [ %niter.nsub.3, %for.body ]
+  %arrayidx = getelementptr inbounds i8, i8* %a, i32 %i.011
+  %16 = load i8, i8* %arrayidx, align 1
+  %conv = zext i8 %16 to i32
+  %arrayidx1 = getelementptr inbounds i8, i8* %b, i32 %i.011
+  %17 = load i8, i8* %arrayidx1, align 1
+  %conv2 = zext i8 %17 to i32
+  %mul = mul nuw nsw i32 %conv2, %conv
+  %add = add nuw nsw i32 %mul, %conv3
+  %arrayidx4 = getelementptr inbounds i32, i32* %res, i32 %i.011
+  store i32 %add, i32* %arrayidx4, align 4
+  %inc = or i32 %i.011, 1
+  %arrayidx.1 = getelementptr inbounds i8, i8* %a, i32 %inc
+  %18 = load i8, i8* %arrayidx.1, align 1
+  %conv.1 = zext i8 %18 to i32
+  %arrayidx1.1 = getelementptr inbounds i8, i8* %b, i32 %inc
+  %19 = load i8, i8* %arrayidx1.1, align 1
+  %conv2.1 = zext i8 %19 to i32
+  %mul.1 = mul nuw nsw i32 %conv2.1, %conv.1
+  %add.1 = add nuw nsw i32 %mul.1, %conv3
+  %arrayidx4.1 = getelementptr inbounds i32, i32* %res, i32 %inc
+  store i32 %add.1, i32* %arrayidx4.1, align 4
+  %inc.1 = or i32 %i.011, 2
+  %arrayidx.2 = getelementptr inbounds i8, i8* %a, i32 %inc.1
+  %20 = load i8, i8* %arrayidx.2, align 1
+  %conv.2 = zext i8 %20 to i32
+  %arrayidx1.2 = getelementptr inbounds i8, i8* %b, i32 %inc.1
+  %21 = load i8, i8* %arrayidx1.2, align 1
+  %conv2.2 = zext i8 %21 to i32
+  %mul.2 = mul nuw nsw i32 %conv2.2, %conv.2
+  %add.2 = add nuw nsw i32 %mul.2, %conv3
+  %arrayidx4.2 = getelementptr inbounds i32, i32* %res, i32 %inc.1
+  store i32 %add.2, i32* %arrayidx4.2, align 4
+  %inc.2 = or i32 %i.011, 3
+  %arrayidx.3 = getelementptr inbounds i8, i8* %a, i32 %inc.2
+  %22 = load i8, i8* %arrayidx.3, align 1
+  %conv.3 = zext i8 %22 to i32
+  %arrayidx1.3 = getelementptr inbounds i8, i8* %b, i32 %inc.2
+  %23 = load i8, i8* %arrayidx1.3, align 1
+  %conv2.3 = zext i8 %23 to i32
+  %mul.3 = mul nuw nsw i32 %conv2.3, %conv.3
+  %add.3 = add nuw nsw i32 %mul.3, %conv3
+  %arrayidx4.3 = getelementptr inbounds i32, i32* %res, i32 %inc.2
+  store i32 %add.3, i32* %arrayidx4.3, align 4
+  %inc.3 = add nuw i32 %i.011, 4
+  %niter.nsub.3 = add i32 %niter, -4
+  %niter.ncmp.3 = icmp eq i32 %niter.nsub.3, 0
+  br i1 %niter.ncmp.3, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body
+}
+
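+; A plausible C equivalent of the next test (reconstructed; the commit does
+; not include the C source):
+;   void test_vec_mul_scalar_add_ushort(unsigned short *a, unsigned short *b,
+;                                       short c, int *res, unsigned N) {
+;     for (unsigned i = 0; i < N; i++)
+;       res[i] = a[i] * b[i] + c;
+;   }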
+define arm_aapcs_vfpcc void @test_vec_mul_scalar_add_ushort(i16* nocapture readonly %a, i16* nocapture readonly %b, i16 signext %c, i32* nocapture %res, i32 %N) {
+; CHECK-LABEL: test_vec_mul_scalar_add_ushort:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    push {r4, r5, r6, r7, lr}
+; CHECK-NEXT:    sub sp, #8
+; CHECK-NEXT:    ldr.w r12, [sp, #28]
+; CHECK-NEXT:    cmp.w r12, #0
+; CHECK-NEXT:    beq.w .LBB8_3
+; CHECK-NEXT:  @ %bb.1: @ %vector.ph
+; CHECK-NEXT:    add.w r5, r12, #3
+; CHECK-NEXT:    movs r4, #1
+; CHECK-NEXT:    bic r5, r5, #3
+; CHECK-NEXT:    subs r5, #4
+; CHECK-NEXT:    add.w lr, r4, r5, lsr #2
+; CHECK-NEXT:    adr r5, .LCPI8_0
+; CHECK-NEXT:    sub.w r4, r12, #1
+; CHECK-NEXT:    vldrw.u32 q1, [r5]
+; CHECK-NEXT:    vdup.32 q0, r4
+; CHECK-NEXT:    movs r4, #0
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB8_2: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vadd.i32 q2, q1, r4
+; CHECK-NEXT:    @ implicit-def: $q3
+; CHECK-NEXT:    sub.w r12, r12, #4
+; CHECK-NEXT:    vcmp.u32 cs, q0, q2
+; CHECK-NEXT:    @ implicit-def: $q2
+; CHECK-NEXT:    adds r4, #4
+; CHECK-NEXT:    vmrs r6, p0
+; CHECK-NEXT:    and r5, r6, #1
+; CHECK-NEXT:    rsbs r7, r5, #0
+; CHECK-NEXT:    movs r5, #0
+; CHECK-NEXT:    bfi r5, r7, #0, #1
+; CHECK-NEXT:    ubfx r7, r6, #4, #1
+; CHECK-NEXT:    rsbs r7, r7, #0
+; CHECK-NEXT:    bfi r5, r7, #1, #1
+; CHECK-NEXT:    ubfx r7, r6, #8, #1
+; CHECK-NEXT:    ubfx r6, r6, #12, #1
+; CHECK-NEXT:    rsbs r7, r7, #0
+; CHECK-NEXT:    bfi r5, r7, #2, #1
+; CHECK-NEXT:    rsbs r6, r6, #0
+; CHECK-NEXT:    bfi r5, r6, #3, #1
+; CHECK-NEXT:    lsls r6, r5, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    ldrhne r6, [r0]
+; CHECK-NEXT:    vmovne.32 q2[0], r6
+; CHECK-NEXT:    lsls r6, r5, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r6, [r0, #2]
+; CHECK-NEXT:    vmovmi.32 q2[1], r6
+; CHECK-NEXT:    lsls r6, r5, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r6, [r0, #4]
+; CHECK-NEXT:    vmovmi.32 q2[2], r6
+; CHECK-NEXT:    lsls r5, r5, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r5, [r0, #6]
+; CHECK-NEXT:    vmovmi.32 q2[3], r5
+; CHECK-NEXT:    vmrs r6, p0
+; CHECK-NEXT:    vmovlb.u16 q2, q2
+; CHECK-NEXT:    adds r0, #8
+; CHECK-NEXT:    and r5, r6, #1
+; CHECK-NEXT:    rsbs r7, r5, #0
+; CHECK-NEXT:    movs r5, #0
+; CHECK-NEXT:    bfi r5, r7, #0, #1
+; CHECK-NEXT:    ubfx r7, r6, #4, #1
+; CHECK-NEXT:    rsbs r7, r7, #0
+; CHECK-NEXT:    bfi r5, r7, #1, #1
+; CHECK-NEXT:    ubfx r7, r6, #8, #1
+; CHECK-NEXT:    ubfx r6, r6, #12, #1
+; CHECK-NEXT:    rsbs r7, r7, #0
+; CHECK-NEXT:    bfi r5, r7, #2, #1
+; CHECK-NEXT:    rsbs r6, r6, #0
+; CHECK-NEXT:    bfi r5, r6, #3, #1
+; CHECK-NEXT:    lsls r6, r5, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    ldrhne r6, [r1]
+; CHECK-NEXT:    vmovne.32 q3[0], r6
+; CHECK-NEXT:    lsls r6, r5, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r6, [r1, #2]
+; CHECK-NEXT:    vmovmi.32 q3[1], r6
+; CHECK-NEXT:    lsls r6, r5, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r6, [r1, #4]
+; CHECK-NEXT:    vmovmi.32 q3[2], r6
+; CHECK-NEXT:    lsls r5, r5, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r5, [r1, #6]
+; CHECK-NEXT:    vmovmi.32 q3[3], r5
+; CHECK-NEXT:    vmovlb.u16 q3, q3
+; CHECK-NEXT:    vctp.32 r12
+; CHECK-NEXT:    vmul.i32 q2, q3, q2
+; CHECK-NEXT:    adds r1, #8
+; CHECK-NEXT:    vadd.i32 q2, q2, r2
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vstrwt.32 q2, [r3]
+; CHECK-NEXT:    adds r3, #16
+; CHECK-NEXT:    le lr, .LBB8_2
+; CHECK-NEXT:  .LBB8_3: @ %for.cond.cleanup
+; CHECK-NEXT:    add sp, #8
+; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.4:
+; CHECK-NEXT:  .LCPI8_0:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+entry:
+  %cmp10 = icmp eq i32 %N, 0
+  br i1 %cmp10, label %for.cond.cleanup, label %vector.ph
+
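+; vector.ph rounds the trip count up to a multiple of 4 (n.vec); the lanes
+; beyond N are masked off by the predicate computed in vector.body.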
+vector.ph:                                        ; preds = %entry
+  %conv3 = sext i16 %c to i32
+  %n.rnd.up = add i32 %N, 3
+  %n.vec = and i32 %n.rnd.up, -4
+  %trip.count.minus.1 = add i32 %N, -1
+  %broadcast.splatinsert12 = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
+  %broadcast.splat13 = shufflevector <4 x i32> %broadcast.splatinsert12, <4 x i32> undef, <4 x i32> zeroinitializer
+  %broadcast.splatinsert15 = insertelement <4 x i32> undef, i32 %conv3, i32 0
+  %broadcast.splat16 = shufflevector <4 x i32> %broadcast.splatinsert15, <4 x i32> undef, <4 x i32> zeroinitializer
+  br label %vector.body
+
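+; The lane mask is icmp ule (splat(index) + <0,1,2,3>), splat(N-1): the
+; canonical tail-folding predicate that the MVE backend can realise with a
+; VCTP instruction.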
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
+  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
+  %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
+  %0 = getelementptr inbounds i16, i16* %a, i32 %index
+  %1 = icmp ule <4 x i32> %induction, %broadcast.splat13
+  %2 = bitcast i16* %0 to <4 x i16>*
+  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %2, i32 2, <4 x i1> %1, <4 x i16> undef)
+  %3 = zext <4 x i16> %wide.masked.load to <4 x i32>
+  %4 = getelementptr inbounds i16, i16* %b, i32 %index
+  %5 = bitcast i16* %4 to <4 x i16>*
+  %wide.masked.load14 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %5, i32 2, <4 x i1> %1, <4 x i16> undef)
+  %6 = zext <4 x i16> %wide.masked.load14 to <4 x i32>
+  %7 = mul nuw nsw <4 x i32> %6, %3
+  %8 = add nsw <4 x i32> %7, %broadcast.splat16
+  %9 = getelementptr inbounds i32, i32* %res, i32 %index
+  %10 = bitcast i32* %9 to <4 x i32>*
+  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %8, <4 x i32>* %10, i32 4, <4 x i1> %1)
+  %index.next = add i32 %index, 4
+  %11 = icmp eq i32 %index.next, %n.vec
+  br i1 %11, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  ret void
+}
+
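+; Same computation with i32 data (roughly res[i] = a[i] * b[i] + c, an
+; assumed C equivalent). Unlike the previous test, the vectorised path here
+; is guarded by a runtime alias check (vector.memcheck), with an unrolled
+; scalar loop as the fallback.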
+define arm_aapcs_vfpcc void @test_vec_mul_scalar_add_int(i32* nocapture readonly %a, i32* nocapture readonly %b, i32 %c, i32* nocapture %res, i32 %N) {
+; CHECK-LABEL: test_vec_mul_scalar_add_int:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, lr}
+; CHECK-NEXT:    ldr.w r12, [sp, #32]
+; CHECK-NEXT:    cmp.w r12, #0
+; CHECK-NEXT:    beq.w .LBB9_11
+; CHECK-NEXT:  @ %bb.1: @ %vector.memcheck
+; CHECK-NEXT:    add.w r4, r3, r12, lsl #2
+; CHECK-NEXT:    add.w r5, r1, r12, lsl #2
+; CHECK-NEXT:    cmp r4, r1
+; CHECK-NEXT:    add.w r6, r0, r12, lsl #2
+; CHECK-NEXT:    cset r7, hi
+; CHECK-NEXT:    cmp r5, r3
+; CHECK-NEXT:    cset r5, hi
+; CHECK-NEXT:    cmp r4, r0
+; CHECK-NEXT:    cset r4, hi
+; CHECK-NEXT:    cmp r6, r3
+; CHECK-NEXT:    cset r6, hi
+; CHECK-NEXT:    mov.w lr, #1
+; CHECK-NEXT:    ands r6, r4
+; CHECK-NEXT:    lsls r6, r6, #31
+; CHECK-NEXT:    itt eq
+; CHECK-NEXT:    andeq.w r4, r5, r7
+; CHECK-NEXT:    lslseq.w r4, r4, #31
+; CHECK-NEXT:    beq .LBB9_4
+; CHECK-NEXT:  @ %bb.2: @ %for.body.preheader
+; CHECK-NEXT:    sub.w r4, r12, #1
+; CHECK-NEXT:    and r5, r12, #3
+; CHECK-NEXT:    cmp r4, #3
+; CHECK-NEXT:    bhs .LBB9_6
+; CHECK-NEXT:  @ %bb.3:
+; CHECK-NEXT:    mov r10, r5
+; CHECK-NEXT:    mov.w r12, #0
+; CHECK-NEXT:    b .LBB9_8
+; CHECK-NEXT:  .LBB9_4: @ %vector.ph
+; CHECK-NEXT:    add.w r4, r12, #3
+; CHECK-NEXT:    bic r4, r4, #3
+; CHECK-NEXT:    subs r4, #4
+; CHECK-NEXT:    add.w lr, lr, r4, lsr #2
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB9_5: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    sub.w r12, r12, #4
+; CHECK-NEXT:    vctp.32 r12
+; CHECK-NEXT:    vpstt
+; CHECK-NEXT:    vldrwt.u32 q0, [r0]
+; CHECK-NEXT:    vldrwt.u32 q1, [r1]
+; CHECK-NEXT:    adds r0, #16
+; CHECK-NEXT:    vmul.i32 q0, q1, q0
+; CHECK-NEXT:    adds r1, #16
+; CHECK-NEXT:    vadd.i32 q0, q0, r2
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vstrwt.32 q0, [r3]
+; CHECK-NEXT:    adds r3, #16
+; CHECK-NEXT:    le lr, .LBB9_5
+; CHECK-NEXT:    b .LBB9_11
+; CHECK-NEXT:  .LBB9_6: @ %for.body.preheader.new
+; CHECK-NEXT:    sub.w r7, r12, r5
+; CHECK-NEXT:    mov r10, r5
+; CHECK-NEXT:    subs r7, #4
+; CHECK-NEXT:    movs r4, #0
+; CHECK-NEXT:    mov.w r12, #0
+; CHECK-NEXT:    add.w lr, lr, r7, lsr #2
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB9_7: @ %for.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldr r5, [r0, r4]
+; CHECK-NEXT:    add.w r9, r0, r4
+; CHECK-NEXT:    ldr r6, [r1, r4]
+; CHECK-NEXT:    adds r7, r1, r4
+; CHECK-NEXT:    add.w r12, r12, #4
+; CHECK-NEXT:    mla r5, r6, r5, r2
+; CHECK-NEXT:    str r5, [r3, r4]
+; CHECK-NEXT:    ldr.w r8, [r9, #4]
+; CHECK-NEXT:    ldr r6, [r7, #4]
+; CHECK-NEXT:    mla r8, r6, r8, r2
+; CHECK-NEXT:    adds r6, r3, r4
+; CHECK-NEXT:    adds r4, #16
+; CHECK-NEXT:    str.w r8, [r6, #4]
+; CHECK-NEXT:    ldr.w r8, [r9, #8]
+; CHECK-NEXT:    ldr r5, [r7, #8]
+; CHECK-NEXT:    mla r5, r5, r8, r2
+; CHECK-NEXT:    str r5, [r6, #8]
+; CHECK-NEXT:    ldr.w r5, [r9, #12]
+; CHECK-NEXT:    ldr r7, [r7, #12]
+; CHECK-NEXT:    mla r5, r7, r5, r2
+; CHECK-NEXT:    str r5, [r6, #12]
+; CHECK-NEXT:    le lr, .LBB9_7
+; CHECK-NEXT:  .LBB9_8: @ %for.cond.cleanup.loopexit.unr-lcssa
+; CHECK-NEXT:    wls lr, r10, .LBB9_11
+; CHECK-NEXT:  @ %bb.9: @ %for.body.epil.preheader
+; CHECK-NEXT:    mvn r7, #3
+; CHECK-NEXT:    mov lr, r10
+; CHECK-NEXT:    add.w r7, r7, r12, lsl #2
+; CHECK-NEXT:    add r0, r7
+; CHECK-NEXT:    add r1, r7
+; CHECK-NEXT:    add r3, r7
+; CHECK-NEXT:  .LBB9_10: @ %for.body.epil
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldr r7, [r0, #4]!
+; CHECK-NEXT:    ldr r6, [r1, #4]!
+; CHECK-NEXT:    mla r7, r6, r7, r2
+; CHECK-NEXT:    str r7, [r3, #4]!
+; CHECK-NEXT:    le lr, .LBB9_10
+; CHECK-NEXT:  .LBB9_11: @ %for.cond.cleanup
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, pc}
+entry:
+  %cmp8 = icmp eq i32 %N, 0
+  br i1 %cmp8, label %for.cond.cleanup, label %vector.memcheck
+
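+; vector.memcheck tests [res, res+N) for overlap with [a, a+N) and with
+; [b, b+N); on any conflict the scalar loop is taken instead of vector.body.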
+vector.memcheck:                                  ; preds = %entry
+  %scevgep = getelementptr i32, i32* %res, i32 %N
+  %scevgep13 = getelementptr i32, i32* %a, i32 %N
+  %scevgep16 = getelementptr i32, i32* %b, i32 %N
+  %bound0 = icmp ugt i32* %scevgep13, %res
+  %bound1 = icmp ugt i32* %scevgep, %a
+  %found.conflict = and i1 %bound0, %bound1
+  %bound018 = icmp ugt i32* %scevgep16, %res
+  %bound119 = icmp ugt i32* %scevgep, %b
+  %found.conflict20 = and i1 %bound018, %bound119
+  %conflict.rdx = or i1 %found.conflict, %found.conflict20
+  br i1 %conflict.rdx, label %for.body.preheader, label %vector.ph
+
+for.body.preheader:                               ; preds = %vector.memcheck
+  %0 = add i32 %N, -1
+  %xtraiter = and i32 %N, 3
+  %1 = icmp ult i32 %0, 3
+  br i1 %1, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body.preheader.new
+
+for.body.preheader.new:                           ; preds = %for.body.preheader
+  %unroll_iter = sub i32 %N, %xtraiter
+  br label %for.body
+
+vector.ph:                                        ; preds = %vector.memcheck
+  %n.rnd.up = add i32 %N, 3
+  %n.vec = and i32 %n.rnd.up, -4
+  %trip.count.minus.1 = add i32 %N, -1
+  %broadcast.splatinsert21 = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
+  %broadcast.splat22 = shufflevector <4 x i32> %broadcast.splatinsert21, <4 x i32> undef, <4 x i32> zeroinitializer
+  %broadcast.splatinsert24 = insertelement <4 x i32> undef, i32 %c, i32 0
+  %broadcast.splat25 = shufflevector <4 x i32> %broadcast.splatinsert24, <4 x i32> undef, <4 x i32> zeroinitializer
+  br label %vector.body
+
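+; With i32 elements the masked loads and stores lower directly to the
+; predicated vldrwt/vstrwt instructions checked above, giving the compact
+; vctp/vpst loop body.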
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
+  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
+  %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
+  %2 = getelementptr inbounds i32, i32* %a, i32 %index
+  %3 = icmp ule <4 x i32> %induction, %broadcast.splat22
+  %4 = bitcast i32* %2 to <4 x i32>*
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %4, i32 4, <4 x i1> %3, <4 x i32> undef)
+  %5 = getelementptr inbounds i32, i32* %b, i32 %index
+  %6 = bitcast i32* %5 to <4 x i32>*
+  %wide.masked.load23 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %6, i32 4, <4 x i1> %3, <4 x i32> undef)
+  %7 = mul nsw <4 x i32> %wide.masked.load23, %wide.masked.load
+  %8 = add nsw <4 x i32> %7, %broadcast.splat25
+  %9 = getelementptr inbounds i32, i32* %res, i32 %index
+  %10 = bitcast i32* %9 to <4 x i32>*
+  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %8, <4 x i32>* %10, i32 4, <4 x i1> %3)
+  %index.next = add i32 %index, 4
+  %11 = icmp eq i32 %index.next, %n.vec
+  br i1 %11, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup.loopexit.unr-lcssa:              ; preds = %for.body, %for.body.preheader
+  %i.09.unr = phi i32 [ 0, %for.body.preheader ], [ %inc.3, %for.body ]
+  %lcmp.mod = icmp eq i32 %xtraiter, 0
+  br i1 %lcmp.mod, label %for.cond.cleanup, label %for.body.epil
+
+for.body.epil:                                    ; preds = %for.cond.cleanup.loopexit.unr-lcssa, %for.body.epil
+  %i.09.epil = phi i32 [ %inc.epil, %for.body.epil ], [ %i.09.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
+  %epil.iter = phi i32 [ %epil.iter.sub, %for.body.epil ], [ %xtraiter, %for.cond.cleanup.loopexit.unr-lcssa ]
+  %arrayidx.epil = getelementptr inbounds i32, i32* %a, i32 %i.09.epil
+  %12 = load i32, i32* %arrayidx.epil, align 4
+  %arrayidx1.epil = getelementptr inbounds i32, i32* %b, i32 %i.09.epil
+  %13 = load i32, i32* %arrayidx1.epil, align 4
+  %mul.epil = mul nsw i32 %13, %12
+  %add.epil = add nsw i32 %mul.epil, %c
+  %arrayidx2.epil = getelementptr inbounds i32, i32* %res, i32 %i.09.epil
+  store i32 %add.epil, i32* %arrayidx2.epil, align 4
+  %inc.epil = add nuw i32 %i.09.epil, 1
+  %epil.iter.sub = add i32 %epil.iter, -1
+  %epil.iter.cmp = icmp eq i32 %epil.iter.sub, 0
+  br i1 %epil.iter.cmp, label %for.cond.cleanup, label %for.body.epil
+
+for.cond.cleanup:                                 ; preds = %vector.body, %for.cond.cleanup.loopexit.unr-lcssa, %for.body.epil, %entry
+  ret void
+
+for.body:                                         ; preds = %for.body, %for.body.preheader.new
+  %i.09 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
+  %niter = phi i32 [ %unroll_iter, %for.body.preheader.new ], [ %niter.nsub.3, %for.body ]
+  %arrayidx = getelementptr inbounds i32, i32* %a, i32 %i.09
+  %14 = load i32, i32* %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds i32, i32* %b, i32 %i.09
+  %15 = load i32, i32* %arrayidx1, align 4
+  %mul = mul nsw i32 %15, %14
+  %add = add nsw i32 %mul, %c
+  %arrayidx2 = getelementptr inbounds i32, i32* %res, i32 %i.09
+  store i32 %add, i32* %arrayidx2, align 4
+  %inc = or i32 %i.09, 1
+  %arrayidx.1 = getelementptr inbounds i32, i32* %a, i32 %inc
+  %16 = load i32, i32* %arrayidx.1, align 4
+  %arrayidx1.1 = getelementptr inbounds i32, i32* %b, i32 %inc
+  %17 = load i32, i32* %arrayidx1.1, align 4
+  %mul.1 = mul nsw i32 %17, %16
+  %add.1 = add nsw i32 %mul.1, %c
+  %arrayidx2.1 = getelementptr inbounds i32, i32* %res, i32 %inc
+  store i32 %add.1, i32* %arrayidx2.1, align 4
+  %inc.1 = or i32 %i.09, 2
+  %arrayidx.2 = getelementptr inbounds i32, i32* %a, i32 %inc.1
+  %18 = load i32, i32* %arrayidx.2, align 4
+  %arrayidx1.2 = getelementptr inbounds i32, i32* %b, i32 %inc.1
+  %19 = load i32, i32* %arrayidx1.2, align 4
+  %mul.2 = mul nsw i32 %19, %18
+  %add.2 = add nsw i32 %mul.2, %c
+  %arrayidx2.2 = getelementptr inbounds i32, i32* %res, i32 %inc.1
+  store i32 %add.2, i32* %arrayidx2.2, align 4
+  %inc.2 = or i32 %i.09, 3
+  %arrayidx.3 = getelementptr inbounds i32, i32* %a, i32 %inc.2
+  %20 = load i32, i32* %arrayidx.3, align 4
+  %arrayidx1.3 = getelementptr inbounds i32, i32* %b, i32 %inc.2
+  %21 = load i32, i32* %arrayidx1.3, align 4
+  %mul.3 = mul nsw i32 %21, %20
+  %add.3 = add nsw i32 %mul.3, %c
+  %arrayidx2.3 = getelementptr inbounds i32, i32* %res, i32 %inc.2
+  store i32 %add.3, i32* %arrayidx2.3, align 4
+  %inc.3 = add nuw i32 %i.09, 4
+  %niter.nsub.3 = add i32 %niter, -4
+  %niter.ncmp.3 = icmp eq i32 %niter.nsub.3, 0
+  br i1 %niter.ncmp.3, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body
+}
+
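+; Declarations of the masked load/store and reduction intrinsics used by the
+; tests above.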
+; Function Attrs: argmemonly nounwind readonly willreturn
+declare <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>*, i32 immarg, <4 x i1>, <4 x i8>) #2
+
+; Function Attrs: nounwind readnone willreturn
+declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>) #3
+
+; Function Attrs: argmemonly nounwind readonly willreturn
+declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>) #2
+
+; Function Attrs: argmemonly nounwind readonly willreturn
+declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>) #2
+
+; Function Attrs: argmemonly nounwind willreturn
+declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>) #4
+