[llvm] 3dff4f5 - [ARM] Add extra vabd, vhadd and vmulh tests. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Sun Feb 6 06:12:33 PST 2022


Author: David Green
Date: 2022-02-06T14:12:28Z
New Revision: 3dff4f5cfb461cd25e2c95f2a8e9511f266e5dee

URL: https://github.com/llvm/llvm-project/commit/3dff4f5cfb461cd25e2c95f2a8e9511f266e5dee
DIFF: https://github.com/llvm/llvm-project/commit/3dff4f5cfb461cd25e2c95f2a8e9511f266e5dee.diff

LOG: [ARM] Add extra vabd, vhadd and vmulh tests. NFC

This adds some extra testing for vabd, vhadd and vmulh. Some of the tests
have also been reordered.
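
For context, these tests all exercise the same widen/operate/narrow shape, and
the interesting question for codegen is whether the backend folds the whole
sequence into a single MVE instruction. Below is a minimal sketch of each
idiom, distilled from the tests in the diff; the @*_sketch function names are
mine and not part of the commit.

; Absolute difference: widen, subtract, select the non-negative value, narrow.
; Matched to a single vabd.s8, as the first test in the diff shows.
define arm_aapcs_vfpcc <16 x i8> @vabds_sketch(<16 x i8> %a, <16 x i8> %b) {
  %ea = sext <16 x i8> %a to <16 x i16>
  %eb = sext <16 x i8> %b to <16 x i16>
  %d = sub <16 x i16> %ea, %eb
  %n = sub <16 x i16> zeroinitializer, %d
  %c = icmp sge <16 x i16> %d, zeroinitializer
  %s = select <16 x i1> %c, <16 x i16> %d, <16 x i16> %n
  %r = trunc <16 x i16> %s to <16 x i8>
  ret <16 x i8> %r
}

; Halving add: widen, add, shift right by one, narrow. Adding 1 before the
; shift gives the rounding variant tested as vrhadd.
define arm_aapcs_vfpcc <8 x i16> @vhaddu_sketch(<8 x i16> %a, <8 x i16> %b) {
  %ea = zext <8 x i16> %a to <8 x i32>
  %eb = zext <8 x i16> %b to <8 x i32>
  %m = add <8 x i32> %ea, %eb
  %s = lshr <8 x i32> %m, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %r = trunc <8 x i32> %s to <8 x i16>
  ret <8 x i16> %r
}

; High half of a widening multiply: widen, multiply, shift right by the source
; element width, narrow.
define arm_aapcs_vfpcc <8 x i16> @vmulhs_sketch(<8 x i16> %a, <8 x i16> %b) {
  %ea = sext <8 x i16> %a to <8 x i32>
  %eb = sext <8 x i16> %b to <8 x i32>
  %m = mul <8 x i32> %ea, %eb
  %s = ashr <8 x i32> %m, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  %r = trunc <8 x i32> %s to <8 x i16>
  ret <8 x i16> %r
}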

Added: 
    

Modified: 
    llvm/test/CodeGen/Thumb2/mve-vabdus.ll
    llvm/test/CodeGen/Thumb2/mve-vhadd.ll
    llvm/test/CodeGen/Thumb2/mve-vmulh.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/Thumb2/mve-vabdus.ll b/llvm/test/CodeGen/Thumb2/mve-vabdus.ll
index 9a7c138569a0..aa37c70718ac 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vabdus.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vabdus.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve %s -o - | FileCheck %s
 
-define arm_aapcs_vfpcc <16 x i8> @vabd_s8(<16 x i8> %src1, <16 x i8> %src2) {
-; CHECK-LABEL: vabd_s8:
+define arm_aapcs_vfpcc <16 x i8> @vabd_v16s8(<16 x i8> %src1, <16 x i8> %src2) {
+; CHECK-LABEL: vabd_v16s8:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vabd.s8 q0, q0, q1
 ; CHECK-NEXT:    bx lr
@@ -16,8 +16,46 @@ define arm_aapcs_vfpcc <16 x i8> @vabd_s8(<16 x i8> %src1, <16 x i8> %src2) {
   ret <16 x i8> %result
 }
 
-define arm_aapcs_vfpcc <8 x i16> @vabd_s16(<8 x i16> %src1, <8 x i16> %src2) {
-; CHECK-LABEL: vabd_s16:
+define arm_aapcs_vfpcc <8 x i8> @vabd_v8s8(<8 x i8> %src1, <8 x i8> %src2) {
+; CHECK-LABEL: vabd_v8s8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vsub.i16 q0, q0, q1
+; CHECK-NEXT:    vabs.s16 q0, q0
+; CHECK-NEXT:    bx lr
+  %sextsrc1 = sext <8 x i8> %src1 to <8 x i16>
+  %sextsrc2 = sext <8 x i8> %src2 to <8 x i16>
+  %add1 = sub <8 x i16> %sextsrc1, %sextsrc2
+  %add2 = sub <8 x i16> zeroinitializer, %add1
+  %c = icmp sge <8 x i16> %add1, zeroinitializer
+  %s = select <8 x i1> %c, <8 x i16> %add1, <8 x i16> %add2
+  %result = trunc <8 x i16> %s to <8 x i8>
+  ret <8 x i8> %result
+}
+
+define arm_aapcs_vfpcc <4 x i8> @vabd_v4s8(<4 x i8> %src1, <4 x i8> %src2) {
+; CHECK-LABEL: vabd_v4s8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vsub.i32 q0, q0, q1
+; CHECK-NEXT:    vabs.s32 q0, q0
+; CHECK-NEXT:    bx lr
+  %sextsrc1 = sext <4 x i8> %src1 to <4 x i16>
+  %sextsrc2 = sext <4 x i8> %src2 to <4 x i16>
+  %add1 = sub <4 x i16> %sextsrc1, %sextsrc2
+  %add2 = sub <4 x i16> zeroinitializer, %add1
+  %c = icmp sge <4 x i16> %add1, zeroinitializer
+  %s = select <4 x i1> %c, <4 x i16> %add1, <4 x i16> %add2
+  %result = trunc <4 x i16> %s to <4 x i8>
+  ret <4 x i8> %result
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vabd_v8s16(<8 x i16> %src1, <8 x i16> %src2) {
+; CHECK-LABEL: vabd_v8s16:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vabd.s16 q0, q0, q1
 ; CHECK-NEXT:    bx lr
@@ -31,8 +69,26 @@ define arm_aapcs_vfpcc <8 x i16> @vabd_s16(<8 x i16> %src1, <8 x i16> %src2) {
   ret <8 x i16> %result
 }
 
-define arm_aapcs_vfpcc <4 x i32> @vabd_s32(<4 x i32> %src1, <4 x i32> %src2) {
-; CHECK-LABEL: vabd_s32:
+define arm_aapcs_vfpcc <4 x i16> @vabd_v4s16(<4 x i16> %src1, <4 x i16> %src2) {
+; CHECK-LABEL: vabd_v4s16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vsub.i32 q0, q0, q1
+; CHECK-NEXT:    vabs.s32 q0, q0
+; CHECK-NEXT:    bx lr
+  %sextsrc1 = sext <4 x i16> %src1 to <4 x i32>
+  %sextsrc2 = sext <4 x i16> %src2 to <4 x i32>
+  %add1 = sub <4 x i32> %sextsrc1, %sextsrc2
+  %add2 = sub <4 x i32> zeroinitializer, %add1
+  %c = icmp sge <4 x i32> %add1, zeroinitializer
+  %s = select <4 x i1> %c, <4 x i32> %add1, <4 x i32> %add2
+  %result = trunc <4 x i32> %s to <4 x i16>
+  ret <4 x i16> %result
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vabd_v4s32(<4 x i32> %src1, <4 x i32> %src2) {
+; CHECK-LABEL: vabd_v4s32:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vabd.s32 q0, q0, q1
 ; CHECK-NEXT:    bx lr
@@ -46,8 +102,44 @@ define arm_aapcs_vfpcc <4 x i32> @vabd_s32(<4 x i32> %src1, <4 x i32> %src2) {
   ret <4 x i32> %result
 }
 
-define arm_aapcs_vfpcc <16 x i8> @vabd_u8(<16 x i8> %src1, <16 x i8> %src2) {
-; CHECK-LABEL: vabd_u8:
+define arm_aapcs_vfpcc <2 x i32> @vabd_v2s32(<2 x i32> %src1, <2 x i32> %src2) {
+; CHECK-LABEL: vabd_v2s32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    asrs r1, r0, #31
+; CHECK-NEXT:    subs r0, r0, r2
+; CHECK-NEXT:    sbc.w r1, r1, r2, asr #31
+; CHECK-NEXT:    asrs r2, r3, #31
+; CHECK-NEXT:    adds.w r0, r0, r1, asr #31
+; CHECK-NEXT:    eor.w lr, r0, r1, asr #31
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    adc.w r12, r1, r1, asr #31
+; CHECK-NEXT:    eor.w r1, r12, r1, asr #31
+; CHECK-NEXT:    subs r3, r3, r0
+; CHECK-NEXT:    sbc.w r0, r2, r0, asr #31
+; CHECK-NEXT:    adds.w r2, r3, r0, asr #31
+; CHECK-NEXT:    eor.w r2, r2, r0, asr #31
+; CHECK-NEXT:    vmov q0[2], q0[0], r2, lr
+; CHECK-NEXT:    adc.w r2, r0, r0, asr #31
+; CHECK-NEXT:    eor.w r0, r2, r0, asr #31
+; CHECK-NEXT:    vmov q0[3], q0[1], r0, r1
+; CHECK-NEXT:    pop {r7, pc}
+  %sextsrc1 = sext <2 x i32> %src1 to <2 x i64>
+  %sextsrc2 = sext <2 x i32> %src2 to <2 x i64>
+  %add1 = sub <2 x i64> %sextsrc1, %sextsrc2
+  %add2 = sub <2 x i64> zeroinitializer, %add1
+  %c = icmp sge <2 x i64> %add1, zeroinitializer
+  %s = select <2 x i1> %c, <2 x i64> %add1, <2 x i64> %add2
+  %result = trunc <2 x i64> %s to <2 x i32>
+  ret <2 x i32> %result
+}
+
+define arm_aapcs_vfpcc <16 x i8> @vabd_v16u8(<16 x i8> %src1, <16 x i8> %src2) {
+; CHECK-LABEL: vabd_v16u8:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vabd.u8 q0, q0, q1
 ; CHECK-NEXT:    bx lr
@@ -61,8 +153,45 @@ define arm_aapcs_vfpcc <16 x i8> @vabd_u8(<16 x i8> %src1, <16 x i8> %src2) {
   ret <16 x i8> %result
 }
 
-define arm_aapcs_vfpcc <8 x i16> @vabd_u16(<8 x i16> %src1, <8 x i16> %src2) {
-; CHECK-LABEL: vabd_u16:
+define arm_aapcs_vfpcc <8 x i8> @vabd_v8u8(<8 x i8> %src1, <8 x i8> %src2) {
+; CHECK-LABEL: vabd_v8u8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmovlb.u8 q1, q1
+; CHECK-NEXT:    vmovlb.u8 q0, q0
+; CHECK-NEXT:    vsub.i16 q0, q0, q1
+; CHECK-NEXT:    vabs.s16 q0, q0
+; CHECK-NEXT:    bx lr
+  %zextsrc1 = zext <8 x i8> %src1 to <8 x i16>
+  %zextsrc2 = zext <8 x i8> %src2 to <8 x i16>
+  %add1 = sub <8 x i16> %zextsrc1, %zextsrc2
+  %add2 = sub <8 x i16> zeroinitializer, %add1
+  %c = icmp sge <8 x i16> %add1, zeroinitializer
+  %s = select <8 x i1> %c, <8 x i16> %add1, <8 x i16> %add2
+  %result = trunc <8 x i16> %s to <8 x i8>
+  ret <8 x i8> %result
+}
+
+define arm_aapcs_vfpcc <4 x i8> @vabd_v4u8(<4 x i8> %src1, <4 x i8> %src2) {
+; CHECK-LABEL: vabd_v4u8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.i32 q2, #0xff
+; CHECK-NEXT:    vand q1, q1, q2
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vsub.i32 q0, q0, q1
+; CHECK-NEXT:    vabs.s32 q0, q0
+; CHECK-NEXT:    bx lr
+  %zextsrc1 = zext <4 x i8> %src1 to <4 x i16>
+  %zextsrc2 = zext <4 x i8> %src2 to <4 x i16>
+  %add1 = sub <4 x i16> %zextsrc1, %zextsrc2
+  %add2 = sub <4 x i16> zeroinitializer, %add1
+  %c = icmp sge <4 x i16> %add1, zeroinitializer
+  %s = select <4 x i1> %c, <4 x i16> %add1, <4 x i16> %add2
+  %result = trunc <4 x i16> %s to <4 x i8>
+  ret <4 x i8> %result
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vabd_v8u16(<8 x i16> %src1, <8 x i16> %src2) {
+; CHECK-LABEL: vabd_v8u16:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vabd.u16 q0, q0, q1
 ; CHECK-NEXT:    bx lr
@@ -76,6 +205,24 @@ define arm_aapcs_vfpcc <8 x i16> @vabd_u16(<8 x i16> %src1, <8 x i16> %src2) {
   ret <8 x i16> %result
 }
 
+define arm_aapcs_vfpcc <4 x i16> @vabd_v4u16(<4 x i16> %src1, <4 x i16> %src2) {
+; CHECK-LABEL: vabd_v4u16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmovlb.u16 q1, q1
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    vsub.i32 q0, q0, q1
+; CHECK-NEXT:    vabs.s32 q0, q0
+; CHECK-NEXT:    bx lr
+  %zextsrc1 = zext <4 x i16> %src1 to <4 x i32>
+  %zextsrc2 = zext <4 x i16> %src2 to <4 x i32>
+  %add1 = sub <4 x i32> %zextsrc1, %zextsrc2
+  %add2 = sub <4 x i32> zeroinitializer, %add1
+  %c = icmp sge <4 x i32> %add1, zeroinitializer
+  %s = select <4 x i1> %c, <4 x i32> %add1, <4 x i32> %add2
+  %result = trunc <4 x i32> %s to <4 x i16>
+  ret <4 x i16> %result
+}
+
 define arm_aapcs_vfpcc <4 x i32> @vabd_u32(<4 x i32> %src1, <4 x i32> %src2) {
 ; CHECK-LABEL: vabd_u32:
 ; CHECK:       @ %bb.0:
@@ -91,19 +238,71 @@ define arm_aapcs_vfpcc <4 x i32> @vabd_u32(<4 x i32> %src1, <4 x i32> %src2) {
   ret <4 x i32> %result
 }
 
+define arm_aapcs_vfpcc <4 x i32> @vabd_v4u32(<4 x i32> %src1, <4 x i32> %src2) {
+; CHECK-LABEL: vabd_v4u32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vabd.u32 q0, q0, q1
+; CHECK-NEXT:    bx lr
+  %zextsrc1 = zext <4 x i32> %src1 to <4 x i64>
+  %zextsrc2 = zext <4 x i32> %src2 to <4 x i64>
+  %add1 = sub <4 x i64> %zextsrc1, %zextsrc2
+  %add2 = sub <4 x i64> zeroinitializer, %add1
+  %c = icmp sge <4 x i64> %add1, zeroinitializer
+  %s = select <4 x i1> %c, <4 x i64> %add1, <4 x i64> %add2
+  %result = trunc <4 x i64> %s to <4 x i32>
+  ret <4 x i32> %result
+}
+
+define arm_aapcs_vfpcc <2 x i32> @vabd_v2u32(<2 x i32> %src1, <2 x i32> %src2) {
+; CHECK-LABEL: vabd_v2u32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    vmov.i64 q2, #0xffffffff
+; CHECK-NEXT:    vand q1, q1, q2
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vmov r0, r1, d3
+; CHECK-NEXT:    vmov r2, r3, d1
+; CHECK-NEXT:    subs r0, r2, r0
+; CHECK-NEXT:    sbc.w r1, r3, r1
+; CHECK-NEXT:    vmov r3, r2, d2
+; CHECK-NEXT:    adds.w r0, r0, r1, asr #31
+; CHECK-NEXT:    eor.w lr, r0, r1, asr #31
+; CHECK-NEXT:    vmov r0, r4, d0
+; CHECK-NEXT:    adc.w r12, r1, r1, asr #31
+; CHECK-NEXT:    subs r0, r0, r3
+; CHECK-NEXT:    sbc.w r2, r4, r2
+; CHECK-NEXT:    adds.w r0, r0, r2, asr #31
+; CHECK-NEXT:    eor.w r0, r0, r2, asr #31
+; CHECK-NEXT:    vmov q0[2], q0[0], r0, lr
+; CHECK-NEXT:    eor.w r0, r12, r1, asr #31
+; CHECK-NEXT:    adc.w r1, r2, r2, asr #31
+; CHECK-NEXT:    eor.w r1, r1, r2, asr #31
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r0
+; CHECK-NEXT:    pop {r4, pc}
+  %zextsrc1 = zext <2 x i32> %src1 to <2 x i64>
+  %zextsrc2 = zext <2 x i32> %src2 to <2 x i64>
+  %add1 = sub <2 x i64> %zextsrc1, %zextsrc2
+  %add2 = sub <2 x i64> zeroinitializer, %add1
+  %c = icmp sge <2 x i64> %add1, zeroinitializer
+  %s = select <2 x i1> %c, <2 x i64> %add1, <2 x i64> %add2
+  %result = trunc <2 x i64> %s to <2 x i32>
+  ret <2 x i32> %result
+}
+
 define void @vabd_loop_s8(i8* nocapture readonly %x, i8* nocapture readonly %y, i8* noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vabd_loop_s8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
 ; CHECK-NEXT:    push {r7, lr}
 ; CHECK-NEXT:    mov.w lr, #64
-; CHECK-NEXT:  .LBB6_1: @ %vector.body
+; CHECK-NEXT:  .LBB15_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrb.u8 q0, [r1], #16
 ; CHECK-NEXT:    vldrb.u8 q1, [r0], #16
 ; CHECK-NEXT:    vabd.s8 q0, q1, q0
 ; CHECK-NEXT:    vstrb.8 q0, [r2], #16
-; CHECK-NEXT:    le lr, .LBB6_1
+; CHECK-NEXT:    le lr, .LBB15_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -141,13 +340,13 @@ define void @vabd_loop_s16(i16* nocapture readonly %x, i16* nocapture readonly %
 ; CHECK-NEXT:    .save {r7, lr}
 ; CHECK-NEXT:    push {r7, lr}
 ; CHECK-NEXT:    mov.w lr, #128
-; CHECK-NEXT:  .LBB7_1: @ %vector.body
+; CHECK-NEXT:  .LBB16_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrh.u16 q0, [r1], #16
 ; CHECK-NEXT:    vldrh.u16 q1, [r0], #16
 ; CHECK-NEXT:    vabd.s16 q0, q1, q0
 ; CHECK-NEXT:    vstrb.8 q0, [r2], #16
-; CHECK-NEXT:    le lr, .LBB7_1
+; CHECK-NEXT:    le lr, .LBB16_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -186,7 +385,7 @@ define void @vabd_loop_s32(i32* nocapture readonly %x, i32* nocapture readonly %
 ; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, lr}
 ; CHECK-NEXT:    mov.w lr, #256
 ; CHECK-NEXT:    vmov.i32 q0, #0x0
-; CHECK-NEXT:  .LBB8_1: @ %vector.body
+; CHECK-NEXT:  .LBB17_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q1, [r0], #16
 ; CHECK-NEXT:    vmov.f32 s8, s6
@@ -231,7 +430,7 @@ define void @vabd_loop_s32(i32* nocapture readonly %x, i32* nocapture readonly %
 ; CHECK-NEXT:    vpst
 ; CHECK-NEXT:    vsubt.i32 q2, q0, q2
 ; CHECK-NEXT:    vstrb.8 q2, [r2], #16
-; CHECK-NEXT:    le lr, .LBB8_1
+; CHECK-NEXT:    le lr, .LBB17_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, pc}
 entry:
@@ -269,13 +468,13 @@ define void @vabd_loop_u8(i8* nocapture readonly %x, i8* nocapture readonly %y,
 ; CHECK-NEXT:    .save {r7, lr}
 ; CHECK-NEXT:    push {r7, lr}
 ; CHECK-NEXT:    mov.w lr, #64
-; CHECK-NEXT:  .LBB9_1: @ %vector.body
+; CHECK-NEXT:  .LBB18_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrb.u8 q0, [r1], #16
 ; CHECK-NEXT:    vldrb.u8 q1, [r0], #16
 ; CHECK-NEXT:    vabd.u8 q0, q1, q0
 ; CHECK-NEXT:    vstrb.8 q0, [r2], #16
-; CHECK-NEXT:    le lr, .LBB9_1
+; CHECK-NEXT:    le lr, .LBB18_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -313,13 +512,13 @@ define void @vabd_loop_u16(i16* nocapture readonly %x, i16* nocapture readonly %
 ; CHECK-NEXT:    .save {r7, lr}
 ; CHECK-NEXT:    push {r7, lr}
 ; CHECK-NEXT:    mov.w lr, #128
-; CHECK-NEXT:  .LBB10_1: @ %vector.body
+; CHECK-NEXT:  .LBB19_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrh.u16 q0, [r1], #16
 ; CHECK-NEXT:    vldrh.u16 q1, [r0], #16
 ; CHECK-NEXT:    vabd.u16 q0, q1, q0
 ; CHECK-NEXT:    vstrb.8 q0, [r2], #16
-; CHECK-NEXT:    le lr, .LBB10_1
+; CHECK-NEXT:    le lr, .LBB19_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -361,7 +560,7 @@ define void @vabd_loop_u32(i32* nocapture readonly %x, i32* nocapture readonly %
 ; CHECK-NEXT:    mov.w lr, #256
 ; CHECK-NEXT:    vmov.i64 q0, #0xffffffff
 ; CHECK-NEXT:    vmov.i32 q1, #0x0
-; CHECK-NEXT:  .LBB11_1: @ %vector.body
+; CHECK-NEXT:  .LBB20_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q4, [r1], #16
 ; CHECK-NEXT:    vldrw.u32 q5, [r0], #16
@@ -406,7 +605,7 @@ define void @vabd_loop_u32(i32* nocapture readonly %x, i32* nocapture readonly %
 ; CHECK-NEXT:    vpst
 ; CHECK-NEXT:    vsubt.i32 q4, q1, q4
 ; CHECK-NEXT:    vstrb.8 q4, [r2], #16
-; CHECK-NEXT:    le lr, .LBB11_1
+; CHECK-NEXT:    le lr, .LBB20_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, pc}

diff --git a/llvm/test/CodeGen/Thumb2/mve-vhadd.ll b/llvm/test/CodeGen/Thumb2/mve-vhadd.ll
index f8fa6c806d84..e67afc494369 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vhadd.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vhadd.ll
@@ -1,59 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
 
-define arm_aapcs_vfpcc <16 x i8> @vrhadd_s8(<16 x i8> %src1, <16 x i8> %src2) {
-; CHECK-LABEL: vrhadd_s8:
-; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmovlt.s8 q2, q1
-; CHECK-NEXT:    vmovlt.s8 q3, q0
-; CHECK-NEXT:    vmovlb.s8 q1, q1
-; CHECK-NEXT:    vmovlb.s8 q0, q0
-; CHECK-NEXT:    vadd.i16 q2, q3, q2
-; CHECK-NEXT:    movs r0, #1
-; CHECK-NEXT:    vadd.i16 q0, q0, q1
-; CHECK-NEXT:    vadd.i16 q2, q2, r0
-; CHECK-NEXT:    vadd.i16 q0, q0, r0
-; CHECK-NEXT:    vshr.u16 q2, q2, #1
-; CHECK-NEXT:    vshr.u16 q0, q0, #1
-; CHECK-NEXT:    vmovnt.i16 q0, q2
-; CHECK-NEXT:    bx lr
-  %sextsrc1 = sext <16 x i8> %src1 to <16 x i16>
-  %sextsrc2 = sext <16 x i8> %src2 to <16 x i16>
-  %add1 = add <16 x i16> %sextsrc1, %sextsrc2
-  %add2 = add <16 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-  %resulti16 = lshr <16 x i16> %add2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-  %result = trunc <16 x i16> %resulti16 to <16 x i8>
-  ret <16 x i8> %result
-}
-
-define arm_aapcs_vfpcc <8 x i16> @vrhadd_s16(<8 x i16> %src1, <8 x i16> %src2) {
-; CHECK-LABEL: vrhadd_s16:
-; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmovlt.s16 q2, q1
-; CHECK-NEXT:    vmovlt.s16 q3, q0
-; CHECK-NEXT:    vmovlb.s16 q1, q1
-; CHECK-NEXT:    vmovlb.s16 q0, q0
-; CHECK-NEXT:    vadd.i32 q2, q3, q2
-; CHECK-NEXT:    movs r0, #1
-; CHECK-NEXT:    vadd.i32 q0, q0, q1
-; CHECK-NEXT:    vadd.i32 q2, q2, r0
-; CHECK-NEXT:    vadd.i32 q0, q0, r0
-; CHECK-NEXT:    vshr.u32 q2, q2, #1
-; CHECK-NEXT:    vshr.u32 q0, q0, #1
-; CHECK-NEXT:    vmovnt.i32 q0, q2
-; CHECK-NEXT:    bx lr
-  %sextsrc1 = sext <8 x i16> %src1 to <8 x i32>
-  %sextsrc2 = sext <8 x i16> %src2 to <8 x i32>
-  %add1 = add <8 x i32> %sextsrc1, %sextsrc2
-  %add2 = add <8 x i32> %add1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
-  %resulti16 = lshr <8 x i32> %add2, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
-  %result = trunc <8 x i32> %resulti16 to <8 x i16>
-  ret <8 x i16> %result
-}
-
-define arm_aapcs_vfpcc <4 x i32> @vrhadd_s32(<4 x i32> %src1, <4 x i32> %src2) {
-; CHECK-LABEL: vrhadd_s32:
-; CHECK:       @ %bb.0:
+define arm_aapcs_vfpcc <4 x i32> @vhadds_v4i32(<4 x i32> %s0, <4 x i32> %s1) {
+; CHECK-LABEL: vhadds_v4i32:
+; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d9}
 ; CHECK-NEXT:    vpush {d9}
 ; CHECK-NEXT:    vmov.f32 s8, s2
@@ -68,69 +18,125 @@ define arm_aapcs_vfpcc <4 x i32> @vrhadd_s32(<4 x i32> %src1, <4 x i32> %src2) {
 ; CHECK-NEXT:    asrs r1, r0, #31
 ; CHECK-NEXT:    adds r0, r0, r2
 ; CHECK-NEXT:    adc.w r1, r1, r2, asr #31
-; CHECK-NEXT:    adds r0, #1
-; CHECK-NEXT:    adc r1, r1, #0
 ; CHECK-NEXT:    lsrl r0, r1, #1
 ; CHECK-NEXT:    vmov r1, s0
-; CHECK-NEXT:    asrs r2, r1, #31
-; CHECK-NEXT:    adds r1, r1, r3
-; CHECK-NEXT:    adc.w r3, r2, r3, asr #31
-; CHECK-NEXT:    adds r2, r1, #1
-; CHECK-NEXT:    adc r1, r3, #0
-; CHECK-NEXT:    vmov r3, s18
+; CHECK-NEXT:    adds r2, r1, r3
+; CHECK-NEXT:    asr.w r12, r1, #31
+; CHECK-NEXT:    adc.w r1, r12, r3, asr #31
 ; CHECK-NEXT:    lsrl r2, r1, #1
+; CHECK-NEXT:    vmov r3, s18
 ; CHECK-NEXT:    vmov q0[2], q0[0], r2, r0
 ; CHECK-NEXT:    vmov r0, s10
 ; CHECK-NEXT:    vmov r2, s6
 ; CHECK-NEXT:    asrs r1, r0, #31
 ; CHECK-NEXT:    adds r0, r0, r2
 ; CHECK-NEXT:    adc.w r1, r1, r2, asr #31
-; CHECK-NEXT:    adds r0, #1
-; CHECK-NEXT:    adc r1, r1, #0
 ; CHECK-NEXT:    lsrl r0, r1, #1
 ; CHECK-NEXT:    vmov r1, s14
-; CHECK-NEXT:    asrs r2, r1, #31
-; CHECK-NEXT:    adds r1, r1, r3
-; CHECK-NEXT:    adc.w r3, r2, r3, asr #31
-; CHECK-NEXT:    adds r2, r1, #1
-; CHECK-NEXT:    adc r1, r3, #0
+; CHECK-NEXT:    adds r2, r1, r3
+; CHECK-NEXT:    asr.w r12, r1, #31
+; CHECK-NEXT:    adc.w r1, r12, r3, asr #31
 ; CHECK-NEXT:    lsrl r2, r1, #1
 ; CHECK-NEXT:    vmov q0[3], q0[1], r2, r0
 ; CHECK-NEXT:    vpop {d9}
 ; CHECK-NEXT:    bx lr
-  %sextsrc1 = sext <4 x i32> %src1 to <4 x i64>
-  %sextsrc2 = sext <4 x i32> %src2 to <4 x i64>
-  %add1 = add <4 x i64> %sextsrc1, %sextsrc2
-  %add2 = add <4 x i64> %add1, <i64 1, i64 1, i64 1, i64 1>
-  %resulti16 = lshr <4 x i64> %add2, <i64 1, i64 1, i64 1, i64 1>
-  %result = trunc <4 x i64> %resulti16 to <4 x i32>
-  ret <4 x i32> %result
+entry:
+  %s0s = sext <4 x i32> %s0 to <4 x i64>
+  %s1s = sext <4 x i32> %s1 to <4 x i64>
+  %m = add <4 x i64> %s0s, %s1s
+  %s = lshr <4 x i64> %m, <i64 1, i64 1, i64 1, i64 1>
+  %s2 = trunc <4 x i64> %s to <4 x i32>
+  ret <4 x i32> %s2
 }
 
-define arm_aapcs_vfpcc <16 x i8> @vhadd_s8(<16 x i8> %src1, <16 x i8> %src2) {
-; CHECK-LABEL: vhadd_s8:
-; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmovlt.s8 q2, q1
-; CHECK-NEXT:    vmovlt.s8 q3, q0
-; CHECK-NEXT:    vmovlb.s8 q1, q1
-; CHECK-NEXT:    vmovlb.s8 q0, q0
-; CHECK-NEXT:    vadd.i16 q2, q3, q2
-; CHECK-NEXT:    vadd.i16 q0, q0, q1
-; CHECK-NEXT:    vshr.u16 q2, q2, #1
-; CHECK-NEXT:    vshr.u16 q0, q0, #1
-; CHECK-NEXT:    vmovnt.i16 q0, q2
+define arm_aapcs_vfpcc <4 x i32> @vhaddu_v4i32(<4 x i32> %s0, <4 x i32> %s1) {
+; CHECK-LABEL: vhaddu_v4i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov.f32 s8, s6
+; CHECK-NEXT:    vmov.i64 q4, #0xffffffff
+; CHECK-NEXT:    vmov.f32 s10, s7
+; CHECK-NEXT:    vmov.f32 s12, s2
+; CHECK-NEXT:    vand q2, q2, q4
+; CHECK-NEXT:    vmov.f32 s14, s3
+; CHECK-NEXT:    vand q3, q3, q4
+; CHECK-NEXT:    vmov r0, r1, d4
+; CHECK-NEXT:    vmov r2, r3, d6
+; CHECK-NEXT:    vmov.f32 s6, s5
+; CHECK-NEXT:    vmov.f32 s2, s1
+; CHECK-NEXT:    vand q1, q1, q4
+; CHECK-NEXT:    vand q4, q0, q4
+; CHECK-NEXT:    adds r0, r0, r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    vmov r3, r2, d8
+; CHECK-NEXT:    lsrl r0, r1, #1
+; CHECK-NEXT:    vmov r1, r12, d2
+; CHECK-NEXT:    adds r4, r3, r1
+; CHECK-NEXT:    adc.w r1, r2, r12
+; CHECK-NEXT:    vmov r2, r3, d7
+; CHECK-NEXT:    lsrl r4, r1, #1
+; CHECK-NEXT:    vmov q0[2], q0[0], r4, r0
+; CHECK-NEXT:    vmov r0, r1, d5
+; CHECK-NEXT:    adds r0, r0, r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    vmov r3, r4, d9
+; CHECK-NEXT:    lsrl r0, r1, #1
+; CHECK-NEXT:    vmov r1, r12, d3
+; CHECK-NEXT:    adds r2, r3, r1
+; CHECK-NEXT:    adc.w r1, r4, r12
+; CHECK-NEXT:    lsrl r2, r1, #1
+; CHECK-NEXT:    vmov q0[3], q0[1], r2, r0
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r4, pc}
+entry:
+  %s0s = zext <4 x i32> %s0 to <4 x i64>
+  %s1s = zext <4 x i32> %s1 to <4 x i64>
+  %m = add <4 x i64> %s0s, %s1s
+  %s = lshr <4 x i64> %m, <i64 1, i64 1, i64 1, i64 1>
+  %s2 = trunc <4 x i64> %s to <4 x i32>
+  ret <4 x i32> %s2
+}
+
+define arm_aapcs_vfpcc <4 x i16> @vhadds_v4i16(<4 x i16> %s0, <4 x i16> %s1) {
+; CHECK-LABEL: vhadds_v4i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vadd.i32 q0, q0, q1
+; CHECK-NEXT:    vshr.u32 q0, q0, #1
 ; CHECK-NEXT:    bx lr
-  %sextsrc1 = sext <16 x i8> %src1 to <16 x i16>
-  %sextsrc2 = sext <16 x i8> %src2 to <16 x i16>
-  %add = add <16 x i16> %sextsrc1, %sextsrc2
-  %resulti16 = lshr <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-  %result = trunc <16 x i16> %resulti16 to <16 x i8>
-  ret <16 x i8> %result
+entry:
+  %s0s = sext <4 x i16> %s0 to <4 x i32>
+  %s1s = sext <4 x i16> %s1 to <4 x i32>
+  %m = add <4 x i32> %s0s, %s1s
+  %s = lshr <4 x i32> %m, <i32 1, i32 1, i32 1, i32 1>
+  %s2 = trunc <4 x i32> %s to <4 x i16>
+  ret <4 x i16> %s2
+}
+
+define arm_aapcs_vfpcc <4 x i16> @vhaddu_v4i16(<4 x i16> %s0, <4 x i16> %s1) {
+; CHECK-LABEL: vhaddu_v4i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.u16 q1, q1
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    vadd.i32 q0, q0, q1
+; CHECK-NEXT:    vshr.u32 q0, q0, #1
+; CHECK-NEXT:    bx lr
+entry:
+  %s0s = zext <4 x i16> %s0 to <4 x i32>
+  %s1s = zext <4 x i16> %s1 to <4 x i32>
+  %m = add <4 x i32> %s0s, %s1s
+  %s = lshr <4 x i32> %m, <i32 1, i32 1, i32 1, i32 1>
+  %s2 = trunc <4 x i32> %s to <4 x i16>
+  ret <4 x i16> %s2
 }
 
-define arm_aapcs_vfpcc <8 x i16> @vhadd_s16(<8 x i16> %src1, <8 x i16> %src2) {
-; CHECK-LABEL: vhadd_s16:
-; CHECK:       @ %bb.0:
+define arm_aapcs_vfpcc <8 x i16> @vhadds_v8i16(<8 x i16> %s0, <8 x i16> %s1) {
+; CHECK-LABEL: vhadds_v8i16:
+; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovlt.s16 q2, q1
 ; CHECK-NEXT:    vmovlt.s16 q3, q0
 ; CHECK-NEXT:    vmovlb.s16 q1, q1
@@ -141,17 +147,156 @@ define arm_aapcs_vfpcc <8 x i16> @vhadd_s16(<8 x i16> %src1, <8 x i16> %src2) {
 ; CHECK-NEXT:    vshr.u32 q0, q0, #1
 ; CHECK-NEXT:    vmovnt.i32 q0, q2
 ; CHECK-NEXT:    bx lr
-  %sextsrc1 = sext <8 x i16> %src1 to <8 x i32>
-  %sextsrc2 = sext <8 x i16> %src2 to <8 x i32>
-  %add = add <8 x i32> %sextsrc1, %sextsrc2
-  %resulti16 = lshr <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
-  %result = trunc <8 x i32> %resulti16 to <8 x i16>
-  ret <8 x i16> %result
+entry:
+  %s0s = sext <8 x i16> %s0 to <8 x i32>
+  %s1s = sext <8 x i16> %s1 to <8 x i32>
+  %m = add <8 x i32> %s0s, %s1s
+  %s = lshr <8 x i32> %m, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %s2 = trunc <8 x i32> %s to <8 x i16>
+  ret <8 x i16> %s2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vhaddu_v8i16(<8 x i16> %s0, <8 x i16> %s1) {
+; CHECK-LABEL: vhaddu_v8i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlt.u16 q2, q1
+; CHECK-NEXT:    vmovlt.u16 q3, q0
+; CHECK-NEXT:    vmovlb.u16 q1, q1
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    vadd.i32 q2, q3, q2
+; CHECK-NEXT:    vadd.i32 q0, q0, q1
+; CHECK-NEXT:    vshr.u32 q2, q2, #1
+; CHECK-NEXT:    vshr.u32 q0, q0, #1
+; CHECK-NEXT:    vmovnt.i32 q0, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %s0s = zext <8 x i16> %s0 to <8 x i32>
+  %s1s = zext <8 x i16> %s1 to <8 x i32>
+  %m = add <8 x i32> %s0s, %s1s
+  %s = lshr <8 x i32> %m, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %s2 = trunc <8 x i32> %s to <8 x i16>
+  ret <8 x i16> %s2
+}
+
+define arm_aapcs_vfpcc <4 x i8> @vhadds_v4i8(<4 x i8> %s0, <4 x i8> %s1) {
+; CHECK-LABEL: vhadds_v4i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vadd.i32 q0, q0, q1
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    vshr.u32 q0, q0, #1
+; CHECK-NEXT:    bx lr
+entry:
+  %s0s = sext <4 x i8> %s0 to <4 x i16>
+  %s1s = sext <4 x i8> %s1 to <4 x i16>
+  %m = add <4 x i16> %s0s, %s1s
+  %s = lshr <4 x i16> %m, <i16 1, i16 1, i16 1, i16 1>
+  %s2 = trunc <4 x i16> %s to <4 x i8>
+  ret <4 x i8> %s2
+}
+
+define arm_aapcs_vfpcc <4 x i8> @vhaddu_v4i8(<4 x i8> %s0, <4 x i8> %s1) {
+; CHECK-LABEL: vhaddu_v4i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q2, #0xff
+; CHECK-NEXT:    vand q1, q1, q2
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vadd.i32 q0, q0, q1
+; CHECK-NEXT:    vshr.u32 q0, q0, #1
+; CHECK-NEXT:    bx lr
+entry:
+  %s0s = zext <4 x i8> %s0 to <4 x i16>
+  %s1s = zext <4 x i8> %s1 to <4 x i16>
+  %m = add <4 x i16> %s0s, %s1s
+  %s = lshr <4 x i16> %m, <i16 1, i16 1, i16 1, i16 1>
+  %s2 = trunc <4 x i16> %s to <4 x i8>
+  ret <4 x i8> %s2
+}
+
+define arm_aapcs_vfpcc <8 x i8> @vhadds_v8i8(<8 x i8> %s0, <8 x i8> %s1) {
+; CHECK-LABEL: vhadds_v8i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vadd.i16 q0, q0, q1
+; CHECK-NEXT:    vshr.u16 q0, q0, #1
+; CHECK-NEXT:    bx lr
+entry:
+  %s0s = sext <8 x i8> %s0 to <8 x i16>
+  %s1s = sext <8 x i8> %s1 to <8 x i16>
+  %m = add <8 x i16> %s0s, %s1s
+  %s = lshr <8 x i16> %m, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %s2 = trunc <8 x i16> %s to <8 x i8>
+  ret <8 x i8> %s2
+}
+
+define arm_aapcs_vfpcc <8 x i8> @vhaddu_v8i8(<8 x i8> %s0, <8 x i8> %s1) {
+; CHECK-LABEL: vhaddu_v8i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.u8 q1, q1
+; CHECK-NEXT:    vmovlb.u8 q0, q0
+; CHECK-NEXT:    vadd.i16 q0, q0, q1
+; CHECK-NEXT:    vshr.u16 q0, q0, #1
+; CHECK-NEXT:    bx lr
+entry:
+  %s0s = zext <8 x i8> %s0 to <8 x i16>
+  %s1s = zext <8 x i8> %s1 to <8 x i16>
+  %m = add <8 x i16> %s0s, %s1s
+  %s = lshr <8 x i16> %m, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %s2 = trunc <8 x i16> %s to <8 x i8>
+  ret <8 x i8> %s2
+}
+
+define arm_aapcs_vfpcc <16 x i8> @vhadds_v16i8(<16 x i8> %s0, <16 x i8> %s1) {
+; CHECK-LABEL: vhadds_v16i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlt.s8 q2, q1
+; CHECK-NEXT:    vmovlt.s8 q3, q0
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vadd.i16 q2, q3, q2
+; CHECK-NEXT:    vadd.i16 q0, q0, q1
+; CHECK-NEXT:    vshr.u16 q2, q2, #1
+; CHECK-NEXT:    vshr.u16 q0, q0, #1
+; CHECK-NEXT:    vmovnt.i16 q0, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %s0s = sext <16 x i8> %s0 to <16 x i16>
+  %s1s = sext <16 x i8> %s1 to <16 x i16>
+  %m = add <16 x i16> %s0s, %s1s
+  %s = lshr <16 x i16> %m, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %s2 = trunc <16 x i16> %s to <16 x i8>
+  ret <16 x i8> %s2
 }
 
-define arm_aapcs_vfpcc <4 x i32> @vhadd_s32(<4 x i32> %src1, <4 x i32> %src2) {
-; CHECK-LABEL: vhadd_s32:
-; CHECK:       @ %bb.0:
+define arm_aapcs_vfpcc <16 x i8> @vhaddu_v16i8(<16 x i8> %s0, <16 x i8> %s1) {
+; CHECK-LABEL: vhaddu_v16i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlt.u8 q2, q1
+; CHECK-NEXT:    vmovlt.u8 q3, q0
+; CHECK-NEXT:    vmovlb.u8 q1, q1
+; CHECK-NEXT:    vmovlb.u8 q0, q0
+; CHECK-NEXT:    vadd.i16 q2, q3, q2
+; CHECK-NEXT:    vadd.i16 q0, q0, q1
+; CHECK-NEXT:    vshr.u16 q2, q2, #1
+; CHECK-NEXT:    vshr.u16 q0, q0, #1
+; CHECK-NEXT:    vmovnt.i16 q0, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %s0s = zext <16 x i8> %s0 to <16 x i16>
+  %s1s = zext <16 x i8> %s1 to <16 x i16>
+  %m = add <16 x i16> %s0s, %s1s
+  %s = lshr <16 x i16> %m, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %s2 = trunc <16 x i16> %s to <16 x i8>
+  ret <16 x i8> %s2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vrhadds_v4i32(<4 x i32> %s0, <4 x i32> %s1) {
+; CHECK-LABEL: vrhadds_v4i32:
+; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d9}
 ; CHECK-NEXT:    vpush {d9}
 ; CHECK-NEXT:    vmov.f32 s8, s2
@@ -166,89 +311,49 @@ define arm_aapcs_vfpcc <4 x i32> @vhadd_s32(<4 x i32> %src1, <4 x i32> %src2) {
 ; CHECK-NEXT:    asrs r1, r0, #31
 ; CHECK-NEXT:    adds r0, r0, r2
 ; CHECK-NEXT:    adc.w r1, r1, r2, asr #31
+; CHECK-NEXT:    adds r0, #1
+; CHECK-NEXT:    adc r1, r1, #0
 ; CHECK-NEXT:    lsrl r0, r1, #1
 ; CHECK-NEXT:    vmov r1, s0
-; CHECK-NEXT:    adds r2, r1, r3
-; CHECK-NEXT:    asr.w r12, r1, #31
-; CHECK-NEXT:    adc.w r1, r12, r3, asr #31
-; CHECK-NEXT:    lsrl r2, r1, #1
+; CHECK-NEXT:    asrs r2, r1, #31
+; CHECK-NEXT:    adds r1, r1, r3
+; CHECK-NEXT:    adc.w r3, r2, r3, asr #31
+; CHECK-NEXT:    adds r2, r1, #1
+; CHECK-NEXT:    adc r1, r3, #0
 ; CHECK-NEXT:    vmov r3, s18
+; CHECK-NEXT:    lsrl r2, r1, #1
 ; CHECK-NEXT:    vmov q0[2], q0[0], r2, r0
 ; CHECK-NEXT:    vmov r0, s10
 ; CHECK-NEXT:    vmov r2, s6
 ; CHECK-NEXT:    asrs r1, r0, #31
 ; CHECK-NEXT:    adds r0, r0, r2
 ; CHECK-NEXT:    adc.w r1, r1, r2, asr #31
+; CHECK-NEXT:    adds r0, #1
+; CHECK-NEXT:    adc r1, r1, #0
 ; CHECK-NEXT:    lsrl r0, r1, #1
 ; CHECK-NEXT:    vmov r1, s14
-; CHECK-NEXT:    adds r2, r1, r3
-; CHECK-NEXT:    asr.w r12, r1, #31
-; CHECK-NEXT:    adc.w r1, r12, r3, asr #31
+; CHECK-NEXT:    asrs r2, r1, #31
+; CHECK-NEXT:    adds r1, r1, r3
+; CHECK-NEXT:    adc.w r3, r2, r3, asr #31
+; CHECK-NEXT:    adds r2, r1, #1
+; CHECK-NEXT:    adc r1, r3, #0
 ; CHECK-NEXT:    lsrl r2, r1, #1
 ; CHECK-NEXT:    vmov q0[3], q0[1], r2, r0
 ; CHECK-NEXT:    vpop {d9}
 ; CHECK-NEXT:    bx lr
-  %sextsrc1 = sext <4 x i32> %src1 to <4 x i64>
-  %sextsrc2 = sext <4 x i32> %src2 to <4 x i64>
-  %add = add <4 x i64> %sextsrc1, %sextsrc2
-  %resulti16 = lshr <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
-  %result = trunc <4 x i64> %resulti16 to <4 x i32>
+entry:
+  %s0s = sext <4 x i32> %s0 to <4 x i64>
+  %s1s = sext <4 x i32> %s1 to <4 x i64>
+  %add = add <4 x i64> %s0s, %s1s
+  %add2 = add <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
+  %s = lshr <4 x i64> %add2, <i64 1, i64 1, i64 1, i64 1>
+  %result = trunc <4 x i64> %s to <4 x i32>
   ret <4 x i32> %result
 }
 
-define arm_aapcs_vfpcc <16 x i8> @vrhadd_u8(<16 x i8> %src1, <16 x i8> %src2) {
-; CHECK-LABEL: vrhadd_u8:
-; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmovlt.u8 q2, q1
-; CHECK-NEXT:    vmovlt.u8 q3, q0
-; CHECK-NEXT:    vmovlb.u8 q1, q1
-; CHECK-NEXT:    vmovlb.u8 q0, q0
-; CHECK-NEXT:    vadd.i16 q2, q3, q2
-; CHECK-NEXT:    movs r0, #1
-; CHECK-NEXT:    vadd.i16 q0, q0, q1
-; CHECK-NEXT:    vadd.i16 q2, q2, r0
-; CHECK-NEXT:    vadd.i16 q0, q0, r0
-; CHECK-NEXT:    vshr.u16 q2, q2, #1
-; CHECK-NEXT:    vshr.u16 q0, q0, #1
-; CHECK-NEXT:    vmovnt.i16 q0, q2
-; CHECK-NEXT:    bx lr
-  %zextsrc1 = zext <16 x i8> %src1 to <16 x i16>
-  %zextsrc2 = zext <16 x i8> %src2 to <16 x i16>
-  %add1 = add <16 x i16> %zextsrc1, %zextsrc2
-  %add2 = add <16 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-  %resulti16 = lshr <16 x i16> %add2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-  %result = trunc <16 x i16> %resulti16 to <16 x i8>
-  ret <16 x i8> %result
-}
-
-define arm_aapcs_vfpcc <8 x i16> @vrhadd_u16(<8 x i16> %src1, <8 x i16> %src2) {
-; CHECK-LABEL: vrhadd_u16:
-; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmovlt.u16 q2, q1
-; CHECK-NEXT:    vmovlt.u16 q3, q0
-; CHECK-NEXT:    vmovlb.u16 q1, q1
-; CHECK-NEXT:    vmovlb.u16 q0, q0
-; CHECK-NEXT:    vadd.i32 q2, q3, q2
-; CHECK-NEXT:    movs r0, #1
-; CHECK-NEXT:    vadd.i32 q0, q0, q1
-; CHECK-NEXT:    vadd.i32 q2, q2, r0
-; CHECK-NEXT:    vadd.i32 q0, q0, r0
-; CHECK-NEXT:    vshr.u32 q2, q2, #1
-; CHECK-NEXT:    vshr.u32 q0, q0, #1
-; CHECK-NEXT:    vmovnt.i32 q0, q2
-; CHECK-NEXT:    bx lr
-  %zextsrc1 = zext <8 x i16> %src1 to <8 x i32>
-  %zextsrc2 = zext <8 x i16> %src2 to <8 x i32>
-  %add1 = add <8 x i32> %zextsrc1, %zextsrc2
-  %add2 = add <8 x i32> %add1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
-  %resulti16 = lshr <8 x i32> %add2, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
-  %result = trunc <8 x i32> %resulti16 to <8 x i16>
-  ret <8 x i16> %result
-}
-
-define arm_aapcs_vfpcc <4 x i32> @vrhadd_u32(<4 x i32> %src1, <4 x i32> %src2) {
-; CHECK-LABEL: vrhadd_u32:
-; CHECK:       @ %bb.0:
+define arm_aapcs_vfpcc <4 x i32> @vrhaddu_v4i32(<4 x i32> %s0, <4 x i32> %s1) {
+; CHECK-LABEL: vrhaddu_v4i32:
+; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9}
 ; CHECK-NEXT:    vpush {d8, d9}
 ; CHECK-NEXT:    vmov.f32 s8, s6
@@ -294,114 +399,254 @@ define arm_aapcs_vfpcc <4 x i32> @vrhadd_u32(<4 x i32> %src1, <4 x i32> %src2) {
 ; CHECK-NEXT:    vmov q0[3], q0[1], r2, r0
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
-  %zextsrc1 = zext <4 x i32> %src1 to <4 x i64>
-  %zextsrc2 = zext <4 x i32> %src2 to <4 x i64>
-  %add1 = add <4 x i64> %zextsrc1, %zextsrc2
-  %add2 = add <4 x i64> %add1, <i64 1, i64 1, i64 1, i64 1>
-  %resulti16 = lshr <4 x i64> %add2, <i64 1, i64 1, i64 1, i64 1>
-  %result = trunc <4 x i64> %resulti16 to <4 x i32>
+entry:
+  %s0s = zext <4 x i32> %s0 to <4 x i64>
+  %s1s = zext <4 x i32> %s1 to <4 x i64>
+  %add = add <4 x i64> %s0s, %s1s
+  %add2 = add <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
+  %s = lshr <4 x i64> %add2, <i64 1, i64 1, i64 1, i64 1>
+  %result = trunc <4 x i64> %s to <4 x i32>
   ret <4 x i32> %result
 }
 
-define arm_aapcs_vfpcc <16 x i8> @vhadd_u8(<16 x i8> %src1, <16 x i8> %src2) {
-; CHECK-LABEL: vhadd_u8:
-; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmovlt.u8 q2, q1
-; CHECK-NEXT:    vmovlt.u8 q3, q0
-; CHECK-NEXT:    vmovlb.u8 q1, q1
-; CHECK-NEXT:    vmovlb.u8 q0, q0
-; CHECK-NEXT:    vadd.i16 q2, q3, q2
-; CHECK-NEXT:    vadd.i16 q0, q0, q1
-; CHECK-NEXT:    vshr.u16 q2, q2, #1
-; CHECK-NEXT:    vshr.u16 q0, q0, #1
-; CHECK-NEXT:    vmovnt.i16 q0, q2
+define arm_aapcs_vfpcc <4 x i16> @vrhadds_v4i16(<4 x i16> %s0, <4 x i16> %s1) {
+; CHECK-LABEL: vrhadds_v4i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vadd.i32 q0, q0, q1
+; CHECK-NEXT:    movs r0, #1
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vshr.u32 q0, q0, #1
 ; CHECK-NEXT:    bx lr
-  %zextsrc1 = zext <16 x i8> %src1 to <16 x i16>
-  %zextsrc2 = zext <16 x i8> %src2 to <16 x i16>
-  %add = add <16 x i16> %zextsrc1, %zextsrc2
-  %resulti16 = lshr <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-  %result = trunc <16 x i16> %resulti16 to <16 x i8>
-  ret <16 x i8> %result
+entry:
+  %s0s = sext <4 x i16> %s0 to <4 x i32>
+  %s1s = sext <4 x i16> %s1 to <4 x i32>
+  %add = add <4 x i32> %s0s, %s1s
+  %add2 = add <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
+  %s = lshr <4 x i32> %add2, <i32 1, i32 1, i32 1, i32 1>
+  %result = trunc <4 x i32> %s to <4 x i16>
+  ret <4 x i16> %result
+}
+
+define arm_aapcs_vfpcc <4 x i16> @vrhaddu_v4i16(<4 x i16> %s0, <4 x i16> %s1) {
+; CHECK-LABEL: vrhaddu_v4i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.u16 q1, q1
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    vadd.i32 q0, q0, q1
+; CHECK-NEXT:    movs r0, #1
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vshr.u32 q0, q0, #1
+; CHECK-NEXT:    bx lr
+entry:
+  %s0s = zext <4 x i16> %s0 to <4 x i32>
+  %s1s = zext <4 x i16> %s1 to <4 x i32>
+  %add = add <4 x i32> %s0s, %s1s
+  %add2 = add <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
+  %s = lshr <4 x i32> %add2, <i32 1, i32 1, i32 1, i32 1>
+  %result = trunc <4 x i32> %s to <4 x i16>
+  ret <4 x i16> %result
 }
 
-define arm_aapcs_vfpcc <8 x i16> @vhadd_u16(<8 x i16> %src1, <8 x i16> %src2) {
-; CHECK-LABEL: vhadd_u16:
-; CHECK:       @ %bb.0:
+define arm_aapcs_vfpcc <8 x i16> @vrhadds_v8i16(<8 x i16> %s0, <8 x i16> %s1) {
+; CHECK-LABEL: vrhadds_v8i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlt.s16 q2, q1
+; CHECK-NEXT:    vmovlt.s16 q3, q0
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vadd.i32 q2, q3, q2
+; CHECK-NEXT:    movs r0, #1
+; CHECK-NEXT:    vadd.i32 q0, q0, q1
+; CHECK-NEXT:    vadd.i32 q2, q2, r0
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vshr.u32 q2, q2, #1
+; CHECK-NEXT:    vshr.u32 q0, q0, #1
+; CHECK-NEXT:    vmovnt.i32 q0, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %s0s = sext <8 x i16> %s0 to <8 x i32>
+  %s1s = sext <8 x i16> %s1 to <8 x i32>
+  %add = add <8 x i32> %s0s, %s1s
+  %add2 = add <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %s = lshr <8 x i32> %add2, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %result = trunc <8 x i32> %s to <8 x i16>
+  ret <8 x i16> %result
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vrhaddu_v8i16(<8 x i16> %s0, <8 x i16> %s1) {
+; CHECK-LABEL: vrhaddu_v8i16:
+; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovlt.u16 q2, q1
 ; CHECK-NEXT:    vmovlt.u16 q3, q0
 ; CHECK-NEXT:    vmovlb.u16 q1, q1
 ; CHECK-NEXT:    vmovlb.u16 q0, q0
 ; CHECK-NEXT:    vadd.i32 q2, q3, q2
+; CHECK-NEXT:    movs r0, #1
 ; CHECK-NEXT:    vadd.i32 q0, q0, q1
+; CHECK-NEXT:    vadd.i32 q2, q2, r0
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
 ; CHECK-NEXT:    vshr.u32 q2, q2, #1
 ; CHECK-NEXT:    vshr.u32 q0, q0, #1
 ; CHECK-NEXT:    vmovnt.i32 q0, q2
 ; CHECK-NEXT:    bx lr
-  %zextsrc1 = zext <8 x i16> %src1 to <8 x i32>
-  %zextsrc2 = zext <8 x i16> %src2 to <8 x i32>
-  %add = add <8 x i32> %zextsrc1, %zextsrc2
-  %resulti16 = lshr <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
-  %result = trunc <8 x i32> %resulti16 to <8 x i16>
+entry:
+  %s0s = zext <8 x i16> %s0 to <8 x i32>
+  %s1s = zext <8 x i16> %s1 to <8 x i32>
+  %add = add <8 x i32> %s0s, %s1s
+  %add2 = add <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %s = lshr <8 x i32> %add2, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %result = trunc <8 x i32> %s to <8 x i16>
   ret <8 x i16> %result
 }
 
-define arm_aapcs_vfpcc <4 x i32> @vhadd_u32(<4 x i32> %src1, <4 x i32> %src2) {
-; CHECK-LABEL: vhadd_u32:
-; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, lr}
-; CHECK-NEXT:    push {r4, lr}
-; CHECK-NEXT:    .vsave {d8, d9}
-; CHECK-NEXT:    vpush {d8, d9}
-; CHECK-NEXT:    vmov.f32 s8, s6
-; CHECK-NEXT:    vmov.i64 q4, #0xffffffff
-; CHECK-NEXT:    vmov.f32 s10, s7
-; CHECK-NEXT:    vmov.f32 s12, s2
-; CHECK-NEXT:    vand q2, q2, q4
-; CHECK-NEXT:    vmov.f32 s14, s3
-; CHECK-NEXT:    vand q3, q3, q4
-; CHECK-NEXT:    vmov r0, r1, d4
-; CHECK-NEXT:    vmov r2, r3, d6
-; CHECK-NEXT:    vmov.f32 s6, s5
-; CHECK-NEXT:    vmov.f32 s2, s1
-; CHECK-NEXT:    vand q1, q1, q4
-; CHECK-NEXT:    vand q4, q0, q4
-; CHECK-NEXT:    adds r0, r0, r2
-; CHECK-NEXT:    adcs r1, r3
-; CHECK-NEXT:    vmov r3, r2, d8
-; CHECK-NEXT:    lsrl r0, r1, #1
-; CHECK-NEXT:    vmov r1, r12, d2
-; CHECK-NEXT:    adds r4, r3, r1
-; CHECK-NEXT:    adc.w r1, r2, r12
-; CHECK-NEXT:    vmov r2, r3, d7
-; CHECK-NEXT:    lsrl r4, r1, #1
-; CHECK-NEXT:    vmov q0[2], q0[0], r4, r0
-; CHECK-NEXT:    vmov r0, r1, d5
-; CHECK-NEXT:    adds r0, r0, r2
-; CHECK-NEXT:    adcs r1, r3
-; CHECK-NEXT:    vmov r3, r4, d9
-; CHECK-NEXT:    lsrl r0, r1, #1
-; CHECK-NEXT:    vmov r1, r12, d3
-; CHECK-NEXT:    adds r2, r3, r1
-; CHECK-NEXT:    adc.w r1, r4, r12
-; CHECK-NEXT:    lsrl r2, r1, #1
-; CHECK-NEXT:    vmov q0[3], q0[1], r2, r0
-; CHECK-NEXT:    vpop {d8, d9}
-; CHECK-NEXT:    pop {r4, pc}
-  %zextsrc1 = zext <4 x i32> %src1 to <4 x i64>
-  %zextsrc2 = zext <4 x i32> %src2 to <4 x i64>
-  %add = add <4 x i64> %zextsrc1, %zextsrc2
-  %resulti16 = lshr <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
-  %result = trunc <4 x i64> %resulti16 to <4 x i32>
-  ret <4 x i32> %result
+define arm_aapcs_vfpcc <4 x i8> @vrhadds_v4i8(<4 x i8> %s0, <4 x i8> %s1) {
+; CHECK-LABEL: vrhadds_v4i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vadd.i32 q0, q0, q1
+; CHECK-NEXT:    movs r0, #1
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    vshr.u32 q0, q0, #1
+; CHECK-NEXT:    bx lr
+entry:
+  %s0s = sext <4 x i8> %s0 to <4 x i16>
+  %s1s = sext <4 x i8> %s1 to <4 x i16>
+  %add = add <4 x i16> %s0s, %s1s
+  %add2 = add <4 x i16> %add, <i16 1, i16 1, i16 1, i16 1>
+  %s = lshr <4 x i16> %add2, <i16 1, i16 1, i16 1, i16 1>
+  %result = trunc <4 x i16> %s to <4 x i8>
+  ret <4 x i8> %result
 }
 
+define arm_aapcs_vfpcc <4 x i8> @vrhaddu_v4i8(<4 x i8> %s0, <4 x i8> %s1) {
+; CHECK-LABEL: vrhaddu_v4i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q2, #0xff
+; CHECK-NEXT:    movs r0, #1
+; CHECK-NEXT:    vand q1, q1, q2
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vadd.i32 q0, q0, q1
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vshr.u32 q0, q0, #1
+; CHECK-NEXT:    bx lr
+entry:
+  %s0s = zext <4 x i8> %s0 to <4 x i16>
+  %s1s = zext <4 x i8> %s1 to <4 x i16>
+  %add = add <4 x i16> %s0s, %s1s
+  %add2 = add <4 x i16> %add, <i16 1, i16 1, i16 1, i16 1>
+  %s = lshr <4 x i16> %add2, <i16 1, i16 1, i16 1, i16 1>
+  %result = trunc <4 x i16> %s to <4 x i8>
+  ret <4 x i8> %result
+}
+
+define arm_aapcs_vfpcc <8 x i8> @vrhadds_v8i8(<8 x i8> %s0, <8 x i8> %s1) {
+; CHECK-LABEL: vrhadds_v8i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vadd.i16 q0, q0, q1
+; CHECK-NEXT:    movs r0, #1
+; CHECK-NEXT:    vadd.i16 q0, q0, r0
+; CHECK-NEXT:    vshr.u16 q0, q0, #1
+; CHECK-NEXT:    bx lr
+entry:
+  %s0s = sext <8 x i8> %s0 to <8 x i16>
+  %s1s = sext <8 x i8> %s1 to <8 x i16>
+  %add = add <8 x i16> %s0s, %s1s
+  %add2 = add <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %s = lshr <8 x i16> %add2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %result = trunc <8 x i16> %s to <8 x i8>
+  ret <8 x i8> %result
+}
+
+define arm_aapcs_vfpcc <8 x i8> @vrhaddu_v8i8(<8 x i8> %s0, <8 x i8> %s1) {
+; CHECK-LABEL: vrhaddu_v8i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.u8 q1, q1
+; CHECK-NEXT:    vmovlb.u8 q0, q0
+; CHECK-NEXT:    vadd.i16 q0, q0, q1
+; CHECK-NEXT:    movs r0, #1
+; CHECK-NEXT:    vadd.i16 q0, q0, r0
+; CHECK-NEXT:    vshr.u16 q0, q0, #1
+; CHECK-NEXT:    bx lr
+entry:
+  %s0s = zext <8 x i8> %s0 to <8 x i16>
+  %s1s = zext <8 x i8> %s1 to <8 x i16>
+  %add = add <8 x i16> %s0s, %s1s
+  %add2 = add <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %s = lshr <8 x i16> %add2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %result = trunc <8 x i16> %s to <8 x i8>
+  ret <8 x i8> %result
+}
+
+define arm_aapcs_vfpcc <16 x i8> @vrhadds_v16i8(<16 x i8> %s0, <16 x i8> %s1) {
+; CHECK-LABEL: vrhadds_v16i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlt.s8 q2, q1
+; CHECK-NEXT:    vmovlt.s8 q3, q0
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vadd.i16 q2, q3, q2
+; CHECK-NEXT:    movs r0, #1
+; CHECK-NEXT:    vadd.i16 q0, q0, q1
+; CHECK-NEXT:    vadd.i16 q2, q2, r0
+; CHECK-NEXT:    vadd.i16 q0, q0, r0
+; CHECK-NEXT:    vshr.u16 q2, q2, #1
+; CHECK-NEXT:    vshr.u16 q0, q0, #1
+; CHECK-NEXT:    vmovnt.i16 q0, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %s0s = sext <16 x i8> %s0 to <16 x i16>
+  %s1s = sext <16 x i8> %s1 to <16 x i16>
+  %add = add <16 x i16> %s0s, %s1s
+  %add2 = add <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %s = lshr <16 x i16> %add2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %result = trunc <16 x i16> %s to <16 x i8>
+  ret <16 x i8> %result
+}
+
+define arm_aapcs_vfpcc <16 x i8> @vrhaddu_v16i8(<16 x i8> %s0, <16 x i8> %s1) {
+; CHECK-LABEL: vrhaddu_v16i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlt.u8 q2, q1
+; CHECK-NEXT:    vmovlt.u8 q3, q0
+; CHECK-NEXT:    vmovlb.u8 q1, q1
+; CHECK-NEXT:    vmovlb.u8 q0, q0
+; CHECK-NEXT:    vadd.i16 q2, q3, q2
+; CHECK-NEXT:    movs r0, #1
+; CHECK-NEXT:    vadd.i16 q0, q0, q1
+; CHECK-NEXT:    vadd.i16 q2, q2, r0
+; CHECK-NEXT:    vadd.i16 q0, q0, r0
+; CHECK-NEXT:    vshr.u16 q2, q2, #1
+; CHECK-NEXT:    vshr.u16 q0, q0, #1
+; CHECK-NEXT:    vmovnt.i16 q0, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %s0s = zext <16 x i8> %s0 to <16 x i16>
+  %s1s = zext <16 x i8> %s1 to <16 x i16>
+  %add = add <16 x i16> %s0s, %s1s
+  %add2 = add <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %s = lshr <16 x i16> %add2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %result = trunc <16 x i16> %s to <16 x i8>
+  ret <16 x i8> %result
+}
+
+
+
+
 define void @vhadd_loop_s8(i8* nocapture readonly %x, i8* nocapture readonly %y, i8* noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vhadd_loop_s8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
 ; CHECK-NEXT:    push {r7, lr}
 ; CHECK-NEXT:    mov.w lr, #64
-; CHECK-NEXT:  .LBB12_1: @ %vector.body
+; CHECK-NEXT:  .LBB24_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrb.s16 q0, [r0, #8]
 ; CHECK-NEXT:    vldrb.s16 q1, [r1, #8]
@@ -413,7 +658,7 @@ define void @vhadd_loop_s8(i8* nocapture readonly %x, i8* nocapture readonly %y,
 ; CHECK-NEXT:    vadd.i16 q0, q1, q0
 ; CHECK-NEXT:    vshr.u16 q0, q0, #1
 ; CHECK-NEXT:    vstrb.16 q0, [r2], #16
-; CHECK-NEXT:    le lr, .LBB12_1
+; CHECK-NEXT:    le lr, .LBB24_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -449,7 +694,7 @@ define void @vhadd_loop_s16(i16* nocapture readonly %x, i16* nocapture readonly
 ; CHECK-NEXT:    .save {r7, lr}
 ; CHECK-NEXT:    push {r7, lr}
 ; CHECK-NEXT:    mov.w lr, #128
-; CHECK-NEXT:  .LBB13_1: @ %vector.body
+; CHECK-NEXT:  .LBB25_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrh.s32 q0, [r0, #8]
 ; CHECK-NEXT:    vldrh.s32 q1, [r1, #8]
@@ -461,7 +706,7 @@ define void @vhadd_loop_s16(i16* nocapture readonly %x, i16* nocapture readonly
 ; CHECK-NEXT:    vadd.i32 q0, q1, q0
 ; CHECK-NEXT:    vshr.u32 q0, q0, #1
 ; CHECK-NEXT:    vstrh.32 q0, [r2], #16
-; CHECK-NEXT:    le lr, .LBB13_1
+; CHECK-NEXT:    le lr, .LBB25_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -499,7 +744,7 @@ define void @vhadd_loop_s32(i32* nocapture readonly %x, i32* nocapture readonly
 ; CHECK-NEXT:    .vsave {d9}
 ; CHECK-NEXT:    vpush {d9}
 ; CHECK-NEXT:    mov.w lr, #256
-; CHECK-NEXT:  .LBB14_1: @ %vector.body
+; CHECK-NEXT:  .LBB26_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r1], #16
 ; CHECK-NEXT:    vmov.f32 s4, s2
@@ -536,7 +781,7 @@ define void @vhadd_loop_s32(i32* nocapture readonly %x, i32* nocapture readonly
 ; CHECK-NEXT:    lsrl r6, r3, #1
 ; CHECK-NEXT:    vmov q3[3], q3[1], r6, r4
 ; CHECK-NEXT:    vstrb.8 q3, [r2], #16
-; CHECK-NEXT:    le lr, .LBB14_1
+; CHECK-NEXT:    le lr, .LBB26_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    vpop {d9}
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}
@@ -573,7 +818,7 @@ define void @vhadd_loop_u8(i8* nocapture readonly %x, i8* nocapture readonly %y,
 ; CHECK-NEXT:    .save {r7, lr}
 ; CHECK-NEXT:    push {r7, lr}
 ; CHECK-NEXT:    mov.w lr, #64
-; CHECK-NEXT:  .LBB15_1: @ %vector.body
+; CHECK-NEXT:  .LBB27_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrb.u16 q0, [r0, #8]
 ; CHECK-NEXT:    vldrb.u16 q1, [r1, #8]
@@ -583,7 +828,7 @@ define void @vhadd_loop_u8(i8* nocapture readonly %x, i8* nocapture readonly %y,
 ; CHECK-NEXT:    vldrb.u16 q0, [r0], #16
 ; CHECK-NEXT:    vhadd.u16 q0, q1, q0
 ; CHECK-NEXT:    vstrb.16 q0, [r2], #16
-; CHECK-NEXT:    le lr, .LBB15_1
+; CHECK-NEXT:    le lr, .LBB27_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -619,7 +864,7 @@ define void @vhadd_loop_u16(i16* nocapture readonly %x, i16* nocapture readonly
 ; CHECK-NEXT:    .save {r7, lr}
 ; CHECK-NEXT:    push {r7, lr}
 ; CHECK-NEXT:    mov.w lr, #128
-; CHECK-NEXT:  .LBB16_1: @ %vector.body
+; CHECK-NEXT:  .LBB28_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrh.u32 q0, [r0, #8]
 ; CHECK-NEXT:    vldrh.u32 q1, [r1, #8]
@@ -629,7 +874,7 @@ define void @vhadd_loop_u16(i16* nocapture readonly %x, i16* nocapture readonly
 ; CHECK-NEXT:    vldrh.u32 q0, [r0], #16
 ; CHECK-NEXT:    vhadd.u32 q0, q1, q0
 ; CHECK-NEXT:    vstrh.32 q0, [r2], #16
-; CHECK-NEXT:    le lr, .LBB16_1
+; CHECK-NEXT:    le lr, .LBB28_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -670,7 +915,7 @@ define void @vhadd_loop_u32(i32* nocapture readonly %x, i32* nocapture readonly
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11}
 ; CHECK-NEXT:    mov.w lr, #256
 ; CHECK-NEXT:    vmov.i64 q0, #0xffffffff
-; CHECK-NEXT:  .LBB17_1: @ %vector.body
+; CHECK-NEXT:  .LBB29_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q3, [r0], #16
 ; CHECK-NEXT:    vldrw.u32 q4, [r1], #16
@@ -707,7 +952,7 @@ define void @vhadd_loop_u32(i32* nocapture readonly %x, i32* nocapture readonly
 ; CHECK-NEXT:    lsrl r4, r3, #1
 ; CHECK-NEXT:    vmov q4[3], q4[1], r4, r6
 ; CHECK-NEXT:    vstrb.8 q4, [r2], #16
-; CHECK-NEXT:    le lr, .LBB17_1
+; CHECK-NEXT:    le lr, .LBB29_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
 ; CHECK-NEXT:    add sp, #4
@@ -746,7 +991,7 @@ define void @vrhadd_loop_s8(i8* nocapture readonly %x, i8* nocapture readonly %y
 ; CHECK-NEXT:    push {r7, lr}
 ; CHECK-NEXT:    mov.w lr, #64
 ; CHECK-NEXT:    movs r3, #1
-; CHECK-NEXT:  .LBB18_1: @ %vector.body
+; CHECK-NEXT:  .LBB30_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrb.u16 q0, [r1, #8]
 ; CHECK-NEXT:    vldrb.u16 q1, [r0, #8]
@@ -760,7 +1005,7 @@ define void @vrhadd_loop_s8(i8* nocapture readonly %x, i8* nocapture readonly %y
 ; CHECK-NEXT:    vadd.i16 q0, q0, r3
 ; CHECK-NEXT:    vshr.u16 q0, q0, #1
 ; CHECK-NEXT:    vstrb.16 q0, [r2], #16
-; CHECK-NEXT:    le lr, .LBB18_1
+; CHECK-NEXT:    le lr, .LBB30_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -798,7 +1043,7 @@ define void @vrhadd_loop_s16(i16* nocapture readonly %x, i16* nocapture readonly
 ; CHECK-NEXT:    push {r7, lr}
 ; CHECK-NEXT:    mov.w lr, #128
 ; CHECK-NEXT:    movs r3, #1
-; CHECK-NEXT:  .LBB19_1: @ %vector.body
+; CHECK-NEXT:  .LBB31_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrh.u32 q0, [r1, #8]
 ; CHECK-NEXT:    vldrh.u32 q1, [r0, #8]
@@ -812,7 +1057,7 @@ define void @vrhadd_loop_s16(i16* nocapture readonly %x, i16* nocapture readonly
 ; CHECK-NEXT:    vadd.i32 q0, q0, r3
 ; CHECK-NEXT:    vshr.u32 q0, q0, #1
 ; CHECK-NEXT:    vstrh.32 q0, [r2], #16
-; CHECK-NEXT:    le lr, .LBB19_1
+; CHECK-NEXT:    le lr, .LBB31_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -852,7 +1097,7 @@ define void @vrhadd_loop_s32(i32* nocapture readonly %x, i32* nocapture readonly
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11}
 ; CHECK-NEXT:    mov.w lr, #256
 ; CHECK-NEXT:    vmov.i64 q0, #0xffffffff
-; CHECK-NEXT:  .LBB20_1: @ %vector.body
+; CHECK-NEXT:  .LBB32_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q3, [r1], #16
 ; CHECK-NEXT:    vldrw.u32 q4, [r0], #16
@@ -897,7 +1142,7 @@ define void @vrhadd_loop_s32(i32* nocapture readonly %x, i32* nocapture readonly
 ; CHECK-NEXT:    lsrl r6, r3, #1
 ; CHECK-NEXT:    vmov q4[3], q4[1], r6, r12
 ; CHECK-NEXT:    vstrb.8 q4, [r2], #16
-; CHECK-NEXT:    le lr, .LBB20_1
+; CHECK-NEXT:    le lr, .LBB32_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}
@@ -936,7 +1181,7 @@ define void @vrhadd_loop_u8(i8* nocapture readonly %x, i8* nocapture readonly %y
 ; CHECK-NEXT:    push {r7, lr}
 ; CHECK-NEXT:    mov.w lr, #64
 ; CHECK-NEXT:    movs r3, #1
-; CHECK-NEXT:  .LBB21_1: @ %vector.body
+; CHECK-NEXT:  .LBB33_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrb.u16 q0, [r1, #8]
 ; CHECK-NEXT:    vldrb.u16 q1, [r0, #8]
@@ -950,7 +1195,7 @@ define void @vrhadd_loop_u8(i8* nocapture readonly %x, i8* nocapture readonly %y
 ; CHECK-NEXT:    vadd.i16 q0, q0, r3
 ; CHECK-NEXT:    vshr.u16 q0, q0, #1
 ; CHECK-NEXT:    vstrb.16 q0, [r2], #16
-; CHECK-NEXT:    le lr, .LBB21_1
+; CHECK-NEXT:    le lr, .LBB33_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -988,7 +1233,7 @@ define void @vrhadd_loop_u16(i16* nocapture readonly %x, i16* nocapture readonly
 ; CHECK-NEXT:    push {r7, lr}
 ; CHECK-NEXT:    mov.w lr, #128
 ; CHECK-NEXT:    movs r3, #1
-; CHECK-NEXT:  .LBB22_1: @ %vector.body
+; CHECK-NEXT:  .LBB34_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrh.u32 q0, [r1, #8]
 ; CHECK-NEXT:    vldrh.u32 q1, [r0, #8]
@@ -1002,7 +1247,7 @@ define void @vrhadd_loop_u16(i16* nocapture readonly %x, i16* nocapture readonly
 ; CHECK-NEXT:    vadd.i32 q0, q0, r3
 ; CHECK-NEXT:    vshr.u32 q0, q0, #1
 ; CHECK-NEXT:    vstrh.32 q0, [r2], #16
-; CHECK-NEXT:    le lr, .LBB22_1
+; CHECK-NEXT:    le lr, .LBB34_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -1042,7 +1287,7 @@ define void @vrhadd_loop_u32(i32* nocapture readonly %x, i32* nocapture readonly
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11}
 ; CHECK-NEXT:    mov.w lr, #256
 ; CHECK-NEXT:    vmov.i64 q0, #0xffffffff
-; CHECK-NEXT:  .LBB23_1: @ %vector.body
+; CHECK-NEXT:  .LBB35_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q3, [r1], #16
 ; CHECK-NEXT:    vldrw.u32 q4, [r0], #16
@@ -1087,7 +1332,7 @@ define void @vrhadd_loop_u32(i32* nocapture readonly %x, i32* nocapture readonly
 ; CHECK-NEXT:    lsrl r6, r3, #1
 ; CHECK-NEXT:    vmov q4[3], q4[1], r6, r12
 ; CHECK-NEXT:    vstrb.8 q4, [r2], #16
-; CHECK-NEXT:    le lr, .LBB23_1
+; CHECK-NEXT:    le lr, .LBB35_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}

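(Aside, not part of the patch: the vrhadd loop tests above all exercise the
same rounding-halving-add idiom, (x + y + 1) >> 1 evaluated in a wider type
so the intermediate add cannot overflow. A minimal standalone sketch in LLVM
IR, where the function name @vrhadd_example is illustrative and not taken
from the test file:

define arm_aapcs_vfpcc <8 x i8> @vrhadd_example(<8 x i8> %x, <8 x i8> %y) {
entry:
  ; widen so x + y + 1 cannot wrap in i8
  %xw = zext <8 x i8> %x to <8 x i16>
  %yw = zext <8 x i8> %y to <8 x i16>
  %s = add <8 x i16> %xw, %yw
  ; the +1 before the shift gives the rounding behaviour of vrhadd
  %r = add <8 x i16> %s, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %h = lshr <8 x i16> %r, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t = trunc <8 x i16> %h to <8 x i8>
  ret <8 x i8> %t
}

The widened loop bodies in the CHECK lines above, vadd/vadd-one/vshr, show the
code currently generated for this shape.)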
diff --git a/llvm/test/CodeGen/Thumb2/mve-vmulh.ll b/llvm/test/CodeGen/Thumb2/mve-vmulh.ll
index c54d64ba6b45..46480d47a283 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vmulh.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vmulh.ll
@@ -129,6 +129,43 @@ entry:
   ret <8 x i16> %s2
 }
 
+define arm_aapcs_vfpcc <4 x i8> @vmulhs_v4i8(<4 x i8> %s0, <4 x i8> %s1) {
+; CHECK-LABEL: vmulhs_v4i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vmul.i32 q0, q0, q1
+; CHECK-NEXT:    vshr.s32 q0, q0, #8
+; CHECK-NEXT:    bx lr
+entry:
+  %s0s = sext <4 x i8> %s0 to <4 x i16>
+  %s1s = sext <4 x i8> %s1 to <4 x i16>
+  %m = mul <4 x i16> %s0s, %s1s
+  %s = ashr <4 x i16> %m, <i16 8, i16 8, i16 8, i16 8>
+  %s2 = trunc <4 x i16> %s to <4 x i8>
+  ret <4 x i8> %s2
+}
+
+define arm_aapcs_vfpcc <4 x i8> @vmulhu_v4i8(<4 x i8> %s0, <4 x i8> %s1) {
+; CHECK-LABEL: vmulhu_v4i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q2, #0xff
+; CHECK-NEXT:    vand q1, q1, q2
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vmul.i32 q0, q0, q1
+; CHECK-NEXT:    vshr.u32 q0, q0, #8
+; CHECK-NEXT:    bx lr
+entry:
+  %s0s = zext <4 x i8> %s0 to <4 x i16>
+  %s1s = zext <4 x i8> %s1 to <4 x i16>
+  %m = mul <4 x i16> %s0s, %s1s
+  %s = lshr <4 x i16> %m, <i16 8, i16 8, i16 8, i16 8>
+  %s2 = trunc <4 x i16> %s to <4 x i8>
+  ret <4 x i8> %s2
+}
+
 define arm_aapcs_vfpcc <8 x i8> @vmulhs_v8i8(<8 x i8> %s0, <8 x i8> %s1) {
 ; CHECK-LABEL: vmulhs_v8i8:
 ; CHECK:       @ %bb.0: @ %entry
@@ -193,13 +230,13 @@ define void @vmulh_s8(i8* nocapture readonly %x, i8* nocapture readonly %y, i8*
 ; CHECK-NEXT:    .save {r7, lr}
 ; CHECK-NEXT:    push {r7, lr}
 ; CHECK-NEXT:    mov.w lr, #64
-; CHECK-NEXT:  .LBB12_1: @ %vector.body
+; CHECK-NEXT:  .LBB14_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrb.u8 q0, [r0], #16
 ; CHECK-NEXT:    vldrb.u8 q1, [r1], #16
 ; CHECK-NEXT:    vmulh.s8 q0, q1, q0
 ; CHECK-NEXT:    vstrb.8 q0, [r2], #16
-; CHECK-NEXT:    le lr, .LBB12_1
+; CHECK-NEXT:    le lr, .LBB14_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -235,13 +272,13 @@ define void @vmulh_s16(i16* nocapture readonly %x, i16* nocapture readonly %y, i
 ; CHECK-NEXT:    .save {r7, lr}
 ; CHECK-NEXT:    push {r7, lr}
 ; CHECK-NEXT:    mov.w lr, #128
-; CHECK-NEXT:  .LBB13_1: @ %vector.body
+; CHECK-NEXT:  .LBB15_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrh.u16 q0, [r0], #16
 ; CHECK-NEXT:    vldrh.u16 q1, [r1], #16
 ; CHECK-NEXT:    vmulh.s16 q0, q1, q0
 ; CHECK-NEXT:    vstrb.8 q0, [r2], #16
-; CHECK-NEXT:    le lr, .LBB13_1
+; CHECK-NEXT:    le lr, .LBB15_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -277,13 +314,13 @@ define void @vmulh_s32(i32* nocapture readonly %x, i32* nocapture readonly %y, i
 ; CHECK-NEXT:    .save {r7, lr}
 ; CHECK-NEXT:    push {r7, lr}
 ; CHECK-NEXT:    mov.w lr, #256
-; CHECK-NEXT:  .LBB14_1: @ %vector.body
+; CHECK-NEXT:  .LBB16_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r0], #16
 ; CHECK-NEXT:    vldrw.u32 q1, [r1], #16
 ; CHECK-NEXT:    vmulh.s32 q0, q1, q0
 ; CHECK-NEXT:    vstrb.8 q0, [r2], #16
-; CHECK-NEXT:    le lr, .LBB14_1
+; CHECK-NEXT:    le lr, .LBB16_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -319,13 +356,13 @@ define void @vmulh_u8(i8* nocapture readonly %x, i8* nocapture readonly %y, i8*
 ; CHECK-NEXT:    .save {r7, lr}
 ; CHECK-NEXT:    push {r7, lr}
 ; CHECK-NEXT:    mov.w lr, #64
-; CHECK-NEXT:  .LBB15_1: @ %vector.body
+; CHECK-NEXT:  .LBB17_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrb.u8 q0, [r0], #16
 ; CHECK-NEXT:    vldrb.u8 q1, [r1], #16
 ; CHECK-NEXT:    vmulh.u8 q0, q1, q0
 ; CHECK-NEXT:    vstrb.8 q0, [r2], #16
-; CHECK-NEXT:    le lr, .LBB15_1
+; CHECK-NEXT:    le lr, .LBB17_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -361,13 +398,13 @@ define void @vmulh_u16(i16* nocapture readonly %x, i16* nocapture readonly %y, i
 ; CHECK-NEXT:    .save {r7, lr}
 ; CHECK-NEXT:    push {r7, lr}
 ; CHECK-NEXT:    mov.w lr, #128
-; CHECK-NEXT:  .LBB16_1: @ %vector.body
+; CHECK-NEXT:  .LBB18_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrh.u16 q0, [r0], #16
 ; CHECK-NEXT:    vldrh.u16 q1, [r1], #16
 ; CHECK-NEXT:    vmulh.u16 q0, q1, q0
 ; CHECK-NEXT:    vstrb.8 q0, [r2], #16
-; CHECK-NEXT:    le lr, .LBB16_1
+; CHECK-NEXT:    le lr, .LBB18_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -403,13 +440,13 @@ define void @vmulh_u32(i32* nocapture readonly %x, i32* nocapture readonly %y, i
 ; CHECK-NEXT:    .save {r7, lr}
 ; CHECK-NEXT:    push {r7, lr}
 ; CHECK-NEXT:    mov.w lr, #256
-; CHECK-NEXT:  .LBB17_1: @ %vector.body
+; CHECK-NEXT:  .LBB19_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r0], #16
 ; CHECK-NEXT:    vldrw.u32 q1, [r1], #16
 ; CHECK-NEXT:    vmulh.u32 q0, q1, q0
 ; CHECK-NEXT:    vstrb.8 q0, [r2], #16
-; CHECK-NEXT:    le lr, .LBB17_1
+; CHECK-NEXT:    le lr, .LBB19_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -448,15 +485,15 @@ define void @vmulh_s32_pred(i32* noalias nocapture %d, i32* noalias nocapture re
 ; CHECK-NEXT:    cmp r3, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    poplt {r7, pc}
-; CHECK-NEXT:  .LBB18_1: @ %vector.ph
+; CHECK-NEXT:  .LBB20_1: @ %vector.ph
 ; CHECK-NEXT:    dlstp.32 lr, r3
-; CHECK-NEXT:  .LBB18_2: @ %vector.body
+; CHECK-NEXT:  .LBB20_2: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r1], #16
 ; CHECK-NEXT:    vldrw.u32 q1, [r2], #16
 ; CHECK-NEXT:    vmulh.s32 q0, q1, q0
 ; CHECK-NEXT:    vstrw.32 q0, [r0], #16
-; CHECK-NEXT:    letp lr, .LBB18_2
+; CHECK-NEXT:    letp lr, .LBB20_2
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -501,15 +538,15 @@ define void @vmulh_u32_pred(i32* noalias nocapture %d, i32* noalias nocapture re
 ; CHECK-NEXT:    cmp r3, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    poplt {r7, pc}
-; CHECK-NEXT:  .LBB19_1: @ %vector.ph
+; CHECK-NEXT:  .LBB21_1: @ %vector.ph
 ; CHECK-NEXT:    dlstp.32 lr, r3
-; CHECK-NEXT:  .LBB19_2: @ %vector.body
+; CHECK-NEXT:  .LBB21_2: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r1], #16
 ; CHECK-NEXT:    vldrw.u32 q1, [r2], #16
 ; CHECK-NEXT:    vmulh.u32 q0, q1, q0
 ; CHECK-NEXT:    vstrw.32 q0, [r0], #16
-; CHECK-NEXT:    letp lr, .LBB19_2
+; CHECK-NEXT:    letp lr, .LBB21_2
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -554,15 +591,15 @@ define void @vmulh_s16_pred(i16* noalias nocapture %d, i16* noalias nocapture re
 ; CHECK-NEXT:    cmp r3, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    poplt {r7, pc}
-; CHECK-NEXT:  .LBB20_1: @ %vector.ph
+; CHECK-NEXT:  .LBB22_1: @ %vector.ph
 ; CHECK-NEXT:    dlstp.16 lr, r3
-; CHECK-NEXT:  .LBB20_2: @ %vector.body
+; CHECK-NEXT:  .LBB22_2: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrh.u16 q0, [r1], #16
 ; CHECK-NEXT:    vldrh.u16 q1, [r2], #16
 ; CHECK-NEXT:    vmulh.s16 q0, q1, q0
 ; CHECK-NEXT:    vstrh.16 q0, [r0], #16
-; CHECK-NEXT:    letp lr, .LBB20_2
+; CHECK-NEXT:    letp lr, .LBB22_2
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -607,15 +644,15 @@ define void @vmulh_u16_pred(i16* noalias nocapture %d, i16* noalias nocapture re
 ; CHECK-NEXT:    cmp r3, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    poplt {r7, pc}
-; CHECK-NEXT:  .LBB21_1: @ %vector.ph
+; CHECK-NEXT:  .LBB23_1: @ %vector.ph
 ; CHECK-NEXT:    dlstp.16 lr, r3
-; CHECK-NEXT:  .LBB21_2: @ %vector.body
+; CHECK-NEXT:  .LBB23_2: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrh.u16 q0, [r1], #16
 ; CHECK-NEXT:    vldrh.u16 q1, [r2], #16
 ; CHECK-NEXT:    vmulh.u16 q0, q1, q0
 ; CHECK-NEXT:    vstrh.16 q0, [r0], #16
-; CHECK-NEXT:    letp lr, .LBB21_2
+; CHECK-NEXT:    letp lr, .LBB23_2
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -660,15 +697,15 @@ define void @vmulh_s8_pred(i8* noalias nocapture %d, i8* noalias nocapture reado
 ; CHECK-NEXT:    cmp r3, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    poplt {r7, pc}
-; CHECK-NEXT:  .LBB22_1: @ %vector.ph
+; CHECK-NEXT:  .LBB24_1: @ %vector.ph
 ; CHECK-NEXT:    dlstp.8 lr, r3
-; CHECK-NEXT:  .LBB22_2: @ %vector.body
+; CHECK-NEXT:  .LBB24_2: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrb.u8 q0, [r1], #16
 ; CHECK-NEXT:    vldrb.u8 q1, [r2], #16
 ; CHECK-NEXT:    vmulh.s8 q0, q1, q0
 ; CHECK-NEXT:    vstrb.8 q0, [r0], #16
-; CHECK-NEXT:    letp lr, .LBB22_2
+; CHECK-NEXT:    letp lr, .LBB24_2
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -713,15 +750,15 @@ define void @vmulh_u8_pred(i8* noalias nocapture %d, i8* noalias nocapture reado
 ; CHECK-NEXT:    cmp r3, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    poplt {r7, pc}
-; CHECK-NEXT:  .LBB23_1: @ %vector.ph
+; CHECK-NEXT:  .LBB25_1: @ %vector.ph
 ; CHECK-NEXT:    dlstp.8 lr, r3
-; CHECK-NEXT:  .LBB23_2: @ %vector.body
+; CHECK-NEXT:  .LBB25_2: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrb.u8 q0, [r1], #16
 ; CHECK-NEXT:    vldrb.u8 q1, [r2], #16
 ; CHECK-NEXT:    vmulh.u8 q0, q1, q0
 ; CHECK-NEXT:    vstrb.8 q0, [r0], #16
-; CHECK-NEXT:    letp lr, .LBB23_2
+; CHECK-NEXT:    letp lr, .LBB25_2
 ; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:

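(Aside, not part of the patch: the vmulh tests in this file all follow one
pattern, a widening multiply whose high half is taken by an arithmetic or
logical shift and then truncated back. A minimal standalone sketch in LLVM
IR, with the illustrative name @vmulh_example not taken from the test file:

define arm_aapcs_vfpcc <8 x i16> @vmulh_example(<8 x i16> %a, <8 x i16> %b) {
entry:
  ; widen so the full 32-bit product is available
  %aw = sext <8 x i16> %a to <8 x i32>
  %bw = sext <8 x i16> %b to <8 x i32>
  %m = mul <8 x i32> %aw, %bw
  ; keep only the high 16 bits of each product
  %h = ashr <8 x i32> %m, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  %t = trunc <8 x i32> %h to <8 x i16>
  ret <8 x i16> %t
}

For full-width vectors this is the shape the vmulh.s16 instructions in the
CHECK lines select; the _pred variants wrap the same pattern in a
tail-predicated loop (dlstp/letp) so the trip count need not be a multiple of
the vector length.)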
More information about the llvm-commits mailing list