[llvm] af6f136 - [ARM] Expand types in VQDMULH tests. NFC

David Green via llvm-commits <llvm-commits at lists.llvm.org>
Mon Jul 12 09:56:20 PDT 2021


Author: David Green
Date: 2021-07-12T17:56:11+01:00
New Revision: af6f136a8c6a842acd20ca1f9f76076fcb212a94

URL: https://github.com/llvm/llvm-project/commit/af6f136a8c6a842acd20ca1f9f76076fcb212a94
DIFF: https://github.com/llvm/llvm-project/commit/af6f136a8c6a842acd20ca1f9f76076fcb212a94.diff

LOG: [ARM] Expand types in VQDMULH tests. NFC

Added: 
    

Modified: 
    llvm/test/CodeGen/Thumb2/mve-vqdmulh.ll

Removed: 
    


################################################################################
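For context: every function in this file builds the saturating doubling-multiply-high pattern in plain IR (sign-extend both operands, multiply, arithmetic shift right by width-1, clamp at the signed maximum, truncate back), and the CHECK lines record whether the backend folds it into a single MVE VQDMULH. A minimal sketch of the pattern for <4 x i32>, reconstructed from the <2 x i32> and <8 x i32> variants below (the body of vqdmulh_v4i32_b itself is elided by the hunk context):

define arm_aapcs_vfpcc <4 x i32> @vqdmulh_v4i32_b(<4 x i32> %s0, <4 x i32> %s1) {
entry:
  ; Widen so the full 64-bit product is representable.
  %l2 = sext <4 x i32> %s0 to <4 x i64>
  %l5 = sext <4 x i32> %s1 to <4 x i64>
  %l6 = mul nsw <4 x i64> %l5, %l2
  ; (a*b) >> 31 equals the doubling multiply-high (2*a*b) >> 32.
  %l7 = ashr <4 x i64> %l6, <i64 31, i64 31, i64 31, i64 31>
  ; Clamp at INT32_MAX: the saturating half of VQDMULH (only
  ; INT_MIN * INT_MIN can exceed it).
  %l8 = icmp slt <4 x i64> %l7, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
  %l9 = select <4 x i1> %l8, <4 x i64> %l7, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
  %l10 = trunc <4 x i64> %l9 to <4 x i32>
  ret <4 x i32> %l10
}

The tests added in this commit exercise the same pattern at non-native widths (v4i8, v8i8, v32i8, v4i16, v16i16, v2i32, v8i32, v16i32), where the CHECK lines capture the expansion the backend currently produces instead of a single vqdmulh.
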
diff --git a/llvm/test/CodeGen/Thumb2/mve-vqdmulh.ll b/llvm/test/CodeGen/Thumb2/mve-vqdmulh.ll
index 5789d6b6fa330..f11d8a9c7abaa 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vqdmulh.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vqdmulh.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s
 
-define arm_aapcs_vfpcc i32 @vqdmulh_i8(<16 x i8> %s0, <16 x i8> %s1) {
-; CHECK-LABEL: vqdmulh_i8:
+define arm_aapcs_vfpcc i32 @vqdmulh_v16i8(<16 x i8> %s0, <16 x i8> %s1) {
+; CHECK-LABEL: vqdmulh_v16i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vqdmulh.s8 q0, q1, q0
 ; CHECK-NEXT:    vaddv.s8 r0, q0
@@ -18,8 +18,8 @@ entry:
   ret i32 %l10
 }
 
-define arm_aapcs_vfpcc <16 x i8> @vqdmulh_i8_b(<16 x i8> %s0, <16 x i8> %s1) {
-; CHECK-LABEL: vqdmulh_i8_b:
+define arm_aapcs_vfpcc <16 x i8> @vqdmulh_v16i8_b(<16 x i8> %s0, <16 x i8> %s1) {
+; CHECK-LABEL: vqdmulh_v16i8_b:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vqdmulh.s8 q0, q1, q0
 ; CHECK-NEXT:    bx lr
@@ -34,8 +34,278 @@ entry:
   ret <16 x i8> %l10
 }
 
-define arm_aapcs_vfpcc i32 @vqdmulh_i16(<8 x i16> %s0, <8 x i16> %s1) {
-; CHECK-LABEL: vqdmulh_i16:
+define arm_aapcs_vfpcc <8 x i8> @vqdmulh_v8i8_b(<8 x i8> %s0, <8 x i8> %s1) {
+; CHECK-LABEL: vqdmulh_v8i8_b:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, #16
+; CHECK-NEXT:    vmov.u16 r0, q0[6]
+; CHECK-NEXT:    vmov.u16 r1, q0[4]
+; CHECK-NEXT:    vmov q2[2], q2[0], r1, r0
+; CHECK-NEXT:    vmov.u16 r0, q0[7]
+; CHECK-NEXT:    vmov.u16 r1, q0[5]
+; CHECK-NEXT:    vmov.u16 r2, q0[0]
+; CHECK-NEXT:    vmov q2[3], q2[1], r1, r0
+; CHECK-NEXT:    vmov.u16 r0, q1[6]
+; CHECK-NEXT:    vmov.u16 r1, q1[4]
+; CHECK-NEXT:    vmovlb.s8 q2, q2
+; CHECK-NEXT:    vmov q3[2], q3[0], r1, r0
+; CHECK-NEXT:    vmov.u16 r0, q1[7]
+; CHECK-NEXT:    vmov.u16 r1, q1[5]
+; CHECK-NEXT:    vmovlb.s16 q2, q2
+; CHECK-NEXT:    vmov q3[3], q3[1], r1, r0
+; CHECK-NEXT:    mov r0, sp
+; CHECK-NEXT:    vmovlb.s8 q3, q3
+; CHECK-NEXT:    vmov.u16 r1, q0[2]
+; CHECK-NEXT:    vmovlb.s16 q3, q3
+; CHECK-NEXT:    vmul.i32 q2, q3, q2
+; CHECK-NEXT:    vshr.s32 q3, q2, #7
+; CHECK-NEXT:    vmov.i32 q2, #0x7f
+; CHECK-NEXT:    vmin.s32 q3, q3, q2
+; CHECK-NEXT:    vstrh.32 q3, [r0, #8]
+; CHECK-NEXT:    vmov q3[2], q3[0], r2, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[3]
+; CHECK-NEXT:    vmov.u16 r2, q0[1]
+; CHECK-NEXT:    vmov q3[3], q3[1], r2, r1
+; CHECK-NEXT:    vmov.u16 r1, q1[2]
+; CHECK-NEXT:    vmov.u16 r2, q1[0]
+; CHECK-NEXT:    vmovlb.s8 q0, q3
+; CHECK-NEXT:    vmov q3[2], q3[0], r2, r1
+; CHECK-NEXT:    vmov.u16 r1, q1[3]
+; CHECK-NEXT:    vmov.u16 r2, q1[1]
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vmov q3[3], q3[1], r2, r1
+; CHECK-NEXT:    vmovlb.s8 q1, q3
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    vmul.i32 q0, q1, q0
+; CHECK-NEXT:    vshr.s32 q0, q0, #7
+; CHECK-NEXT:    vmin.s32 q0, q0, q2
+; CHECK-NEXT:    vstrh.32 q0, [r0]
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    add sp, #16
+; CHECK-NEXT:    bx lr
+entry:
+  %l2 = sext <8 x i8> %s0 to <8 x i32>
+  %l5 = sext <8 x i8> %s1 to <8 x i32>
+  %l6 = mul nsw <8 x i32> %l5, %l2
+  %l7 = ashr <8 x i32> %l6, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+  %l8 = icmp slt <8 x i32> %l7, <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
+  %l9 = select <8 x i1> %l8, <8 x i32> %l7, <8 x i32> <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
+  %l10 = trunc <8 x i32> %l9 to <8 x i8>
+  ret <8 x i8> %l10
+}
+
+define arm_aapcs_vfpcc <4 x i8> @vqdmulh_v4i8_b(<4 x i8> %s0, <4 x i8> %s1) {
+; CHECK-LABEL: vqdmulh_v4i8_b:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    vmul.i32 q0, q1, q0
+; CHECK-NEXT:    vmov.i32 q1, #0x7f
+; CHECK-NEXT:    vshr.s32 q0, q0, #7
+; CHECK-NEXT:    vmin.s32 q0, q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %l2 = sext <4 x i8> %s0 to <4 x i32>
+  %l5 = sext <4 x i8> %s1 to <4 x i32>
+  %l6 = mul nsw <4 x i32> %l5, %l2
+  %l7 = ashr <4 x i32> %l6, <i32 7, i32 7, i32 7, i32 7>
+  %l8 = icmp slt <4 x i32> %l7, <i32 127, i32 127, i32 127, i32 127>
+  %l9 = select <4 x i1> %l8, <4 x i32> %l7, <4 x i32> <i32 127, i32 127, i32 127, i32 127>
+  %l10 = trunc <4 x i32> %l9 to <4 x i8>
+  ret <4 x i8> %l10
+}
+
+define arm_aapcs_vfpcc <32 x i8> @vqdmulh_v32i8_b(<32 x i8> %s0, <32 x i8> %s1) {
+; CHECK-LABEL: vqdmulh_v32i8_b:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    vmov.u8 r0, q0[14]
+; CHECK-NEXT:    vmov.u8 r1, q0[12]
+; CHECK-NEXT:    vmov q4[2], q4[0], r1, r0
+; CHECK-NEXT:    vmov.u8 r0, q0[15]
+; CHECK-NEXT:    vmov.u8 r1, q0[13]
+; CHECK-NEXT:    vmov.u8 r2, q0[8]
+; CHECK-NEXT:    vmov q4[3], q4[1], r1, r0
+; CHECK-NEXT:    vmov.u8 r0, q2[14]
+; CHECK-NEXT:    vmov.u8 r1, q2[12]
+; CHECK-NEXT:    vmovlb.s8 q4, q4
+; CHECK-NEXT:    vmov q5[2], q5[0], r1, r0
+; CHECK-NEXT:    vmov.u8 r0, q2[15]
+; CHECK-NEXT:    vmov.u8 r1, q2[13]
+; CHECK-NEXT:    vmovlb.s16 q4, q4
+; CHECK-NEXT:    vmov q5[3], q5[1], r1, r0
+; CHECK-NEXT:    mov r0, sp
+; CHECK-NEXT:    vmovlb.s8 q5, q5
+; CHECK-NEXT:    vmov.u8 r1, q0[10]
+; CHECK-NEXT:    vmovlb.s16 q5, q5
+; CHECK-NEXT:    vmov.u8 r3, q1[8]
+; CHECK-NEXT:    vmul.i32 q4, q5, q4
+; CHECK-NEXT:    vshr.s32 q5, q4, #7
+; CHECK-NEXT:    vmov.i32 q4, #0x7f
+; CHECK-NEXT:    vmin.s32 q5, q5, q4
+; CHECK-NEXT:    vstrb.32 q5, [r0, #12]
+; CHECK-NEXT:    vmov q5[2], q5[0], r2, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[11]
+; CHECK-NEXT:    vmov.u8 r2, q0[9]
+; CHECK-NEXT:    vmov q5[3], q5[1], r2, r1
+; CHECK-NEXT:    vmov.u8 r1, q2[10]
+; CHECK-NEXT:    vmov.u8 r2, q2[8]
+; CHECK-NEXT:    vmovlb.s8 q5, q5
+; CHECK-NEXT:    vmov q6[2], q6[0], r2, r1
+; CHECK-NEXT:    vmov.u8 r1, q2[11]
+; CHECK-NEXT:    vmov.u8 r2, q2[9]
+; CHECK-NEXT:    vmovlb.s16 q5, q5
+; CHECK-NEXT:    vmov q6[3], q6[1], r2, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[6]
+; CHECK-NEXT:    vmovlb.s8 q6, q6
+; CHECK-NEXT:    vmov.u8 r2, q0[4]
+; CHECK-NEXT:    vmovlb.s16 q6, q6
+; CHECK-NEXT:    vmul.i32 q5, q6, q5
+; CHECK-NEXT:    vshr.s32 q5, q5, #7
+; CHECK-NEXT:    vmin.s32 q5, q5, q4
+; CHECK-NEXT:    vstrb.32 q5, [r0, #8]
+; CHECK-NEXT:    vmov q5[2], q5[0], r2, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[7]
+; CHECK-NEXT:    vmov.u8 r2, q0[5]
+; CHECK-NEXT:    vmov q5[3], q5[1], r2, r1
+; CHECK-NEXT:    vmov.u8 r1, q2[6]
+; CHECK-NEXT:    vmov.u8 r2, q2[4]
+; CHECK-NEXT:    vmovlb.s8 q5, q5
+; CHECK-NEXT:    vmov q6[2], q6[0], r2, r1
+; CHECK-NEXT:    vmov.u8 r1, q2[7]
+; CHECK-NEXT:    vmov.u8 r2, q2[5]
+; CHECK-NEXT:    vmovlb.s16 q5, q5
+; CHECK-NEXT:    vmov q6[3], q6[1], r2, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[2]
+; CHECK-NEXT:    vmovlb.s8 q6, q6
+; CHECK-NEXT:    vmov.u8 r2, q0[0]
+; CHECK-NEXT:    vmovlb.s16 q6, q6
+; CHECK-NEXT:    vmul.i32 q5, q6, q5
+; CHECK-NEXT:    vshr.s32 q5, q5, #7
+; CHECK-NEXT:    vmin.s32 q5, q5, q4
+; CHECK-NEXT:    vstrb.32 q5, [r0, #4]
+; CHECK-NEXT:    vmov q5[2], q5[0], r2, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[3]
+; CHECK-NEXT:    vmov.u8 r2, q0[1]
+; CHECK-NEXT:    vmov q5[3], q5[1], r2, r1
+; CHECK-NEXT:    vmov.u8 r1, q2[2]
+; CHECK-NEXT:    vmov.u8 r2, q2[0]
+; CHECK-NEXT:    vmovlb.s8 q0, q5
+; CHECK-NEXT:    vmov q5[2], q5[0], r2, r1
+; CHECK-NEXT:    vmov.u8 r1, q2[3]
+; CHECK-NEXT:    vmov.u8 r2, q2[1]
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vmov q5[3], q5[1], r2, r1
+; CHECK-NEXT:    vmov.u8 r1, q1[14]
+; CHECK-NEXT:    vmovlb.s8 q2, q5
+; CHECK-NEXT:    vmov.u8 r2, q1[12]
+; CHECK-NEXT:    vmovlb.s16 q2, q2
+; CHECK-NEXT:    vmul.i32 q0, q2, q0
+; CHECK-NEXT:    vshr.s32 q0, q0, #7
+; CHECK-NEXT:    vmin.s32 q0, q0, q4
+; CHECK-NEXT:    vstrb.32 q0, [r0]
+; CHECK-NEXT:    vmov q0[2], q0[0], r2, r1
+; CHECK-NEXT:    vmov.u8 r1, q1[15]
+; CHECK-NEXT:    vmov.u8 r2, q1[13]
+; CHECK-NEXT:    vmov q0[3], q0[1], r2, r1
+; CHECK-NEXT:    vmov.u8 r1, q3[14]
+; CHECK-NEXT:    vmov.u8 r2, q3[12]
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmov q2[2], q2[0], r2, r1
+; CHECK-NEXT:    vmov.u8 r1, q3[15]
+; CHECK-NEXT:    vmov.u8 r2, q3[13]
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vmov q2[3], q2[1], r2, r1
+; CHECK-NEXT:    add r1, sp, #16
+; CHECK-NEXT:    vmovlb.s8 q2, q2
+; CHECK-NEXT:    vmov.u8 r2, q1[10]
+; CHECK-NEXT:    vmovlb.s16 q2, q2
+; CHECK-NEXT:    vmul.i32 q0, q2, q0
+; CHECK-NEXT:    vshr.s32 q0, q0, #7
+; CHECK-NEXT:    vmin.s32 q0, q0, q4
+; CHECK-NEXT:    vstrb.32 q0, [r1, #12]
+; CHECK-NEXT:    vmov q0[2], q0[0], r3, r2
+; CHECK-NEXT:    vmov.u8 r2, q1[11]
+; CHECK-NEXT:    vmov.u8 r3, q1[9]
+; CHECK-NEXT:    vmov q0[3], q0[1], r3, r2
+; CHECK-NEXT:    vmov.u8 r2, q3[10]
+; CHECK-NEXT:    vmov.u8 r3, q3[8]
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmov q2[2], q2[0], r3, r2
+; CHECK-NEXT:    vmov.u8 r2, q3[11]
+; CHECK-NEXT:    vmov.u8 r3, q3[9]
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vmov q2[3], q2[1], r3, r2
+; CHECK-NEXT:    vmov.u8 r2, q1[6]
+; CHECK-NEXT:    vmovlb.s8 q2, q2
+; CHECK-NEXT:    vmov.u8 r3, q1[4]
+; CHECK-NEXT:    vmovlb.s16 q2, q2
+; CHECK-NEXT:    vmul.i32 q0, q2, q0
+; CHECK-NEXT:    vshr.s32 q0, q0, #7
+; CHECK-NEXT:    vmin.s32 q0, q0, q4
+; CHECK-NEXT:    vstrb.32 q0, [r1, #8]
+; CHECK-NEXT:    vmov q0[2], q0[0], r3, r2
+; CHECK-NEXT:    vmov.u8 r2, q1[7]
+; CHECK-NEXT:    vmov.u8 r3, q1[5]
+; CHECK-NEXT:    vmov q0[3], q0[1], r3, r2
+; CHECK-NEXT:    vmov.u8 r2, q3[6]
+; CHECK-NEXT:    vmov.u8 r3, q3[4]
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmov q2[2], q2[0], r3, r2
+; CHECK-NEXT:    vmov.u8 r2, q3[7]
+; CHECK-NEXT:    vmov.u8 r3, q3[5]
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vmov q2[3], q2[1], r3, r2
+; CHECK-NEXT:    vmov.u8 r2, q1[2]
+; CHECK-NEXT:    vmovlb.s8 q2, q2
+; CHECK-NEXT:    vmov.u8 r3, q1[0]
+; CHECK-NEXT:    vmovlb.s16 q2, q2
+; CHECK-NEXT:    vmul.i32 q0, q2, q0
+; CHECK-NEXT:    vshr.s32 q0, q0, #7
+; CHECK-NEXT:    vmin.s32 q0, q0, q4
+; CHECK-NEXT:    vstrb.32 q0, [r1, #4]
+; CHECK-NEXT:    vmov q0[2], q0[0], r3, r2
+; CHECK-NEXT:    vmov.u8 r2, q1[3]
+; CHECK-NEXT:    vmov.u8 r3, q1[1]
+; CHECK-NEXT:    vmov q0[3], q0[1], r3, r2
+; CHECK-NEXT:    vmov.u8 r2, q3[2]
+; CHECK-NEXT:    vmov.u8 r3, q3[0]
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmov q1[2], q1[0], r3, r2
+; CHECK-NEXT:    vmov.u8 r2, q3[3]
+; CHECK-NEXT:    vmov.u8 r3, q3[1]
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vmov q1[3], q1[1], r3, r2
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    vmul.i32 q0, q1, q0
+; CHECK-NEXT:    vshr.s32 q0, q0, #7
+; CHECK-NEXT:    vmin.s32 q0, q0, q4
+; CHECK-NEXT:    vstrb.32 q0, [r1]
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    vldrw.u32 q1, [r1]
+; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT:    bx lr
+entry:
+  %l2 = sext <32 x i8> %s0 to <32 x i32>
+  %l5 = sext <32 x i8> %s1 to <32 x i32>
+  %l6 = mul nsw <32 x i32> %l5, %l2
+  %l7 = ashr <32 x i32> %l6, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+  %l8 = icmp slt <32 x i32> %l7, <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
+  %l9 = select <32 x i1> %l8, <32 x i32> %l7, <32 x i32> <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
+  %l10 = trunc <32 x i32> %l9 to <32 x i8>
+  ret <32 x i8> %l10
+}
+
+define arm_aapcs_vfpcc i32 @vqdmulh_v8i16(<8 x i16> %s0, <8 x i16> %s1) {
+; CHECK-LABEL: vqdmulh_v8i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vqdmulh.s16 q0, q1, q0
 ; CHECK-NEXT:    vaddv.s16 r0, q0
@@ -51,8 +321,8 @@ entry:
   ret i32 %l10
 }
 
-define arm_aapcs_vfpcc <8 x i16> @vqdmulh_i16_b(<8 x i16> %s0, <8 x i16> %s1) {
-; CHECK-LABEL: vqdmulh_i16_b:
+define arm_aapcs_vfpcc <8 x i16> @vqdmulh_v8i16_b(<8 x i16> %s0, <8 x i16> %s1) {
+; CHECK-LABEL: vqdmulh_v8i16_b:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vqdmulh.s16 q0, q1, q0
 ; CHECK-NEXT:    bx lr
@@ -67,8 +337,60 @@ entry:
   ret <8 x i16> %l10
 }
 
-define arm_aapcs_vfpcc <8 x i16> @vqdmulh_i16_c(<8 x i16> %s0, <8 x i16> %s1) {
-; CHECK-LABEL: vqdmulh_i16_c:
+define arm_aapcs_vfpcc <4 x i16> @vqdmulh_v4i16_b(<4 x i16> %s0, <4 x i16> %s1) {
+; CHECK-LABEL: vqdmulh_v4i16_b:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmullb.s16 q0, q1, q0
+; CHECK-NEXT:    vmov.i32 q1, #0x7fff
+; CHECK-NEXT:    vshr.s32 q0, q0, #15
+; CHECK-NEXT:    vmin.s32 q0, q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %l2 = sext <4 x i16> %s0 to <4 x i32>
+  %l5 = sext <4 x i16> %s1 to <4 x i32>
+  %l6 = mul nsw <4 x i32> %l5, %l2
+  %l7 = ashr <4 x i32> %l6, <i32 15, i32 15, i32 15, i32 15>
+  %l4 = icmp slt <4 x i32> %l7, <i32 32767, i32 32767, i32 32767, i32 32767>
+  %l9 = select <4 x i1> %l4, <4 x i32> %l7, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
+  %l10 = trunc <4 x i32> %l9 to <4 x i16>
+  ret <4 x i16> %l10
+}
+
+define arm_aapcs_vfpcc <16 x i16> @vqdmulh_v16i16_b(<16 x i16> %s0, <16 x i16> %s1) {
+; CHECK-LABEL: vqdmulh_v16i16_b:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    vmullt.s16 q4, q2, q0
+; CHECK-NEXT:    vmullb.s16 q0, q2, q0
+; CHECK-NEXT:    vmullt.s16 q2, q3, q1
+; CHECK-NEXT:    vmullb.s16 q1, q3, q1
+; CHECK-NEXT:    vshr.s32 q4, q4, #15
+; CHECK-NEXT:    vmov.i32 q5, #0x7fff
+; CHECK-NEXT:    vshr.s32 q0, q0, #15
+; CHECK-NEXT:    vshr.s32 q2, q2, #15
+; CHECK-NEXT:    vshr.s32 q1, q1, #15
+; CHECK-NEXT:    vmin.s32 q4, q4, q5
+; CHECK-NEXT:    vmin.s32 q0, q0, q5
+; CHECK-NEXT:    vmin.s32 q2, q2, q5
+; CHECK-NEXT:    vmin.s32 q1, q1, q5
+; CHECK-NEXT:    vmovnt.i32 q0, q4
+; CHECK-NEXT:    vmovnt.i32 q1, q2
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    bx lr
+entry:
+  %l2 = sext <16 x i16> %s0 to <16 x i32>
+  %l5 = sext <16 x i16> %s1 to <16 x i32>
+  %l6 = mul nsw <16 x i32> %l5, %l2
+  %l7 = ashr <16 x i32> %l6, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
+  %l16 = icmp slt <16 x i32> %l7, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
+  %l9 = select <16 x i1> %l16, <16 x i32> %l7, <16 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
+  %l10 = trunc <16 x i32> %l9 to <16 x i16>
+  ret <16 x i16> %l10
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vqdmulh_v8i16_c(<8 x i16> %s0, <8 x i16> %s1) {
+; CHECK-LABEL: vqdmulh_v8i16_c:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmov q2, q0
 ; CHECK-NEXT:    vmov.u16 r0, q0[2]
@@ -127,8 +449,8 @@ entry:
   ret <8 x i16> %l10
 }
 
-define arm_aapcs_vfpcc <8 x i16> @vqdmulh_i16_interleaved(<8 x i16> %s0, <8 x i16> %s1) {
-; CHECK-LABEL: vqdmulh_i16_interleaved:
+define arm_aapcs_vfpcc <8 x i16> @vqdmulh_v8i16_interleaved(<8 x i16> %s0, <8 x i16> %s1) {
+; CHECK-LABEL: vqdmulh_v8i16_interleaved:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vqdmulh.s16 q0, q1, q0
 ; CHECK-NEXT:    bx lr
@@ -148,8 +470,42 @@ entry:
   ret <8 x i16> %4
 }
 
-define arm_aapcs_vfpcc i64 @vqdmulh_i32(<4 x i32> %s0, <4 x i32> %s1) {
-; CHECK-LABEL: vqdmulh_i32:
+define arm_aapcs_vfpcc <8 x i16> @vqdmulh_v8i16_interleaved2(<4 x i32> %s0a, <8 x i16> %s1) {
+; CHECK-LABEL: vqdmulh_v8i16_interleaved2:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmullb.s16 q2, q1, q0
+; CHECK-NEXT:    vrev32.16 q1, q1
+; CHECK-NEXT:    vmullb.s16 q0, q1, q0
+; CHECK-NEXT:    vshr.s32 q2, q2, #15
+; CHECK-NEXT:    vmov.i32 q3, #0x7fff
+; CHECK-NEXT:    vshr.s32 q0, q0, #15
+; CHECK-NEXT:    vmin.s32 q2, q2, q3
+; CHECK-NEXT:    vmin.s32 q0, q0, q3
+; CHECK-NEXT:    vmovnt.i32 q2, q0
+; CHECK-NEXT:    vmov q0, q2
+; CHECK-NEXT:    bx lr
+  %s0 = trunc <4 x i32> %s0a to <4 x i16>
+  %strided.vec = shufflevector <8 x i16> %s1, <8 x i16> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %strided.vec44 = shufflevector <8 x i16> %s1, <8 x i16> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %l7 = sext <4 x i16> %strided.vec to <4 x i32>
+  %l8 = sext <4 x i16> %s0 to <4 x i32>
+  %l9 = mul nsw <4 x i32> %l7, %l8
+  %l10 = ashr <4 x i32> %l9, <i32 15, i32 15, i32 15, i32 15>
+  %l11 = icmp slt <4 x i32> %l10, <i32 32767, i32 32767, i32 32767, i32 32767>
+  %l12 = select <4 x i1> %l11, <4 x i32> %l10, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
+  %l13 = trunc <4 x i32> %l12 to <4 x i16>
+  %l14 = sext <4 x i16> %strided.vec44 to <4 x i32>
+  %l15 = mul nsw <4 x i32> %l14, %l8
+  %l16 = ashr <4 x i32> %l15, <i32 15, i32 15, i32 15, i32 15>
+  %l17 = icmp slt <4 x i32> %l16, <i32 32767, i32 32767, i32 32767, i32 32767>
+  %l18 = select <4 x i1> %l17, <4 x i32> %l16, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
+  %l19 = trunc <4 x i32> %l18 to <4 x i16>
+  %interleaved.vec = shufflevector <4 x i16> %l13, <4 x i16> %l19, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+  ret <8 x i16> %interleaved.vec
+}
+
+define arm_aapcs_vfpcc i64 @vqdmulh_v4i32(<4 x i32> %s0, <4 x i32> %s1) {
+; CHECK-LABEL: vqdmulh_v4i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vqdmulh.s32 q0, q1, q0
 ; CHECK-NEXT:    vaddlv.s32 r0, r1, q0
@@ -165,8 +521,8 @@ entry:
   ret i64 %l10
 }
 
-define arm_aapcs_vfpcc <4 x i32> @vqdmulh_i32_b(<4 x i32> %s0, <4 x i32> %s1) {
-; CHECK-LABEL: vqdmulh_i32_b:
+define arm_aapcs_vfpcc <4 x i32> @vqdmulh_v4i32_b(<4 x i32> %s0, <4 x i32> %s1) {
+; CHECK-LABEL: vqdmulh_v4i32_b:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vqdmulh.s32 q0, q1, q0
 ; CHECK-NEXT:    bx lr
@@ -181,6 +537,505 @@ entry:
   ret <4 x i32> %l10
 }
 
+define arm_aapcs_vfpcc <2 x i32> @vqdmulh_v2i32_b(<2 x i32> %s0, <2 x i32> %s1) {
+; CHECK-LABEL: vqdmulh_v2i32_b:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    vmullb.s32 q2, q1, q0
+; CHECK-NEXT:    mvn r12, #-2147483648
+; CHECK-NEXT:    vmov r0, r5, d5
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    asrl r0, r5, #31
+; CHECK-NEXT:    subs.w r3, r0, r12
+; CHECK-NEXT:    sbcs r3, r5, #0
+; CHECK-NEXT:    mov.w r3, #0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #1
+; CHECK-NEXT:    cmp r3, #0
+; CHECK-NEXT:    vmov r4, r3, d4
+; CHECK-NEXT:    csetm lr, ne
+; CHECK-NEXT:    asrl r4, r3, #31
+; CHECK-NEXT:    subs.w r1, r4, r12
+; CHECK-NEXT:    vmov q2[2], q2[0], r4, r0
+; CHECK-NEXT:    sbcs r1, r3, #0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #1
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    vmov q2[3], q2[1], r3, r5
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, lr
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, lr
+; CHECK-NEXT:    adr r1, .LCPI14_0
+; CHECK-NEXT:    vldrw.u32 q1, [r1]
+; CHECK-NEXT:    vbic q1, q1, q0
+; CHECK-NEXT:    vand q0, q2, q0
+; CHECK-NEXT:    vorr q0, q0, q1
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI14_0:
+; CHECK-NEXT:    .long 2147483647 @ 0x7fffffff
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 2147483647 @ 0x7fffffff
+; CHECK-NEXT:    .long 0 @ 0x0
+entry:
+  %l2 = sext <2 x i32> %s0 to <2 x i64>
+  %l5 = sext <2 x i32> %s1 to <2 x i64>
+  %l6 = mul nsw <2 x i64> %l5, %l2
+  %l7 = ashr <2 x i64> %l6, <i64 31, i64 31>
+  %l8 = icmp slt <2 x i64> %l7, <i64 2147483647, i64 2147483647>
+  %l9 = select <2 x i1> %l8, <2 x i64> %l7, <2 x i64> <i64 2147483647, i64 2147483647>
+  %l10 = trunc <2 x i64> %l9 to <2 x i32>
+  ret <2 x i32> %l10
+}
+
+define arm_aapcs_vfpcc <8 x i32> @vqdmulh_v8i32_b(<8 x i32> %s0, <8 x i32> %s1) {
+; CHECK-LABEL: vqdmulh_v8i32_b:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vmov.f32 s16, s2
+; CHECK-NEXT:    mvn lr, #-2147483648
+; CHECK-NEXT:    vmov.f32 s20, s10
+; CHECK-NEXT:    mov.w r12, #0
+; CHECK-NEXT:    vmov.f32 s18, s3
+; CHECK-NEXT:    vmov.f32 s22, s11
+; CHECK-NEXT:    vmullb.s32 q6, q5, q4
+; CHECK-NEXT:    vmov.f32 s2, s1
+; CHECK-NEXT:    vmov r2, r3, d12
+; CHECK-NEXT:    asrl r2, r3, #31
+; CHECK-NEXT:    vmov.f32 s10, s9
+; CHECK-NEXT:    subs.w r0, r2, lr
+; CHECK-NEXT:    sbcs r0, r3, #0
+; CHECK-NEXT:    vmov r4, r3, d13
+; CHECK-NEXT:    mov.w r0, #0
+; CHECK-NEXT:    asrl r4, r3, #31
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    subs.w r1, r4, lr
+; CHECK-NEXT:    sbcs r1, r3, #0
+; CHECK-NEXT:    vmov.32 q5[1], r0
+; CHECK-NEXT:    mov.w r1, #0
+; CHECK-NEXT:    vmov q7[2], q7[0], r2, r4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vmov r3, s10
+; CHECK-NEXT:    vmov q5[2], q5[0], r0, r1
+; CHECK-NEXT:    adr r0, .LCPI15_0
+; CHECK-NEXT:    vldrw.u32 q4, [r0]
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vbic q6, q4, q5
+; CHECK-NEXT:    vand q5, q7, q5
+; CHECK-NEXT:    vorr q5, q5, q6
+; CHECK-NEXT:    smull r2, r1, r1, r0
+; CHECK-NEXT:    asrl r2, r1, #31
+; CHECK-NEXT:    subs.w r0, r2, lr
+; CHECK-NEXT:    sbcs r0, r1, #0
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    mov.w r0, #0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q6[1], r0
+; CHECK-NEXT:    smull r4, r1, r3, r1
+; CHECK-NEXT:    asrl r4, r1, #31
+; CHECK-NEXT:    subs.w r3, r4, lr
+; CHECK-NEXT:    vmov q2[2], q2[0], r2, r4
+; CHECK-NEXT:    sbcs r1, r1, #0
+; CHECK-NEXT:    mov.w r1, #0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vmov q6[2], q6[0], r0, r1
+; CHECK-NEXT:    vbic q0, q4, q6
+; CHECK-NEXT:    vand q2, q2, q6
+; CHECK-NEXT:    vorr q0, q2, q0
+; CHECK-NEXT:    vmov.f32 s8, s6
+; CHECK-NEXT:    vmov.f32 s1, s2
+; CHECK-NEXT:    vmov.f32 s2, s20
+; CHECK-NEXT:    vmov.f32 s3, s22
+; CHECK-NEXT:    vmov.f32 s20, s14
+; CHECK-NEXT:    vmov.f32 s10, s7
+; CHECK-NEXT:    vmov.f32 s22, s15
+; CHECK-NEXT:    vmullb.s32 q6, q5, q2
+; CHECK-NEXT:    vmov.f32 s6, s5
+; CHECK-NEXT:    vmov r2, r1, d12
+; CHECK-NEXT:    asrl r2, r1, #31
+; CHECK-NEXT:    vmov.f32 s14, s13
+; CHECK-NEXT:    subs.w r0, r2, lr
+; CHECK-NEXT:    sbcs r0, r1, #0
+; CHECK-NEXT:    vmov r4, r1, d13
+; CHECK-NEXT:    mov.w r0, #0
+; CHECK-NEXT:    asrl r4, r1, #31
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    subs.w r3, r4, lr
+; CHECK-NEXT:    sbcs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    mov.w r1, #0
+; CHECK-NEXT:    vmov q6[2], q6[0], r2, r4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vmov r3, s14
+; CHECK-NEXT:    vmov q2[2], q2[0], r0, r1
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov r1, s12
+; CHECK-NEXT:    vbic q5, q4, q2
+; CHECK-NEXT:    vand q2, q6, q2
+; CHECK-NEXT:    vorr q2, q2, q5
+; CHECK-NEXT:    smull r2, r1, r1, r0
+; CHECK-NEXT:    asrl r2, r1, #31
+; CHECK-NEXT:    subs.w r0, r2, lr
+; CHECK-NEXT:    sbcs r0, r1, #0
+; CHECK-NEXT:    vmov r1, s6
+; CHECK-NEXT:    mov.w r0, #0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q5[1], r0
+; CHECK-NEXT:    smull r4, r1, r3, r1
+; CHECK-NEXT:    asrl r4, r1, #31
+; CHECK-NEXT:    subs.w r3, r4, lr
+; CHECK-NEXT:    vmov q3[2], q3[0], r2, r4
+; CHECK-NEXT:    sbcs r1, r1, #0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r12, #1
+; CHECK-NEXT:    cmp.w r12, #0
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vmov q5[2], q5[0], r0, r1
+; CHECK-NEXT:    vbic q1, q4, q5
+; CHECK-NEXT:    vand q3, q3, q5
+; CHECK-NEXT:    vorr q1, q3, q1
+; CHECK-NEXT:    vmov.f32 s5, s6
+; CHECK-NEXT:    vmov.f32 s6, s8
+; CHECK-NEXT:    vmov.f32 s7, s10
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    pop {r4, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI15_0:
+; CHECK-NEXT:    .long 2147483647 @ 0x7fffffff
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 2147483647 @ 0x7fffffff
+; CHECK-NEXT:    .long 0 @ 0x0
+entry:
+  %l2 = sext <8 x i32> %s0 to <8 x i64>
+  %l5 = sext <8 x i32> %s1 to <8 x i64>
+  %l6 = mul nsw <8 x i64> %l5, %l2
+  %l7 = ashr <8 x i64> %l6, <i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31>
+  %l8 = icmp slt <8 x i64> %l7, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
+  %l9 = select <8 x i1> %l8, <8 x i64> %l7, <8 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
+  %l10 = trunc <8 x i64> %l9 to <8 x i32>
+  ret <8 x i32> %l10
+}
+
+define arm_aapcs_vfpcc <16 x i32> @vqdmulh_v16i32_b(<16 x i32> %s0, <16 x i32> %s1) {
+; CHECK-LABEL: vqdmulh_v16i32_b:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    add r0, sp, #104
+; CHECK-NEXT:    vmov.f32 s16, s2
+; CHECK-NEXT:    vldrw.u32 q5, [r0]
+; CHECK-NEXT:    vmov.f32 s18, s3
+; CHECK-NEXT:    mvn lr, #-2147483648
+; CHECK-NEXT:    vstrw.32 q3, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f32 s24, s22
+; CHECK-NEXT:    mov.w r12, #0
+; CHECK-NEXT:    vmov.f32 s26, s23
+; CHECK-NEXT:    vmullb.s32 q7, q6, q4
+; CHECK-NEXT:    vmov.f32 s2, s1
+; CHECK-NEXT:    vmov r2, r3, d14
+; CHECK-NEXT:    asrl r2, r3, #31
+; CHECK-NEXT:    vmov.f32 s22, s21
+; CHECK-NEXT:    subs.w r0, r2, lr
+; CHECK-NEXT:    sbcs r0, r3, #0
+; CHECK-NEXT:    vmov r4, r3, d15
+; CHECK-NEXT:    mov.w r0, #0
+; CHECK-NEXT:    asrl r4, r3, #31
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    subs.w r1, r4, lr
+; CHECK-NEXT:    sbcs r1, r3, #0
+; CHECK-NEXT:    vmov.32 q6[1], r0
+; CHECK-NEXT:    mov.w r1, #0
+; CHECK-NEXT:    vmov q4[2], q4[0], r2, r4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vmov r3, s22
+; CHECK-NEXT:    vmov q6[2], q6[0], r0, r1
+; CHECK-NEXT:    adr r0, .LCPI16_0
+; CHECK-NEXT:    vldrw.u32 q3, [r0]
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov r1, s20
+; CHECK-NEXT:    vand q4, q4, q6
+; CHECK-NEXT:    vbic q7, q3, q6
+; CHECK-NEXT:    vorr q6, q4, q7
+; CHECK-NEXT:    smull r2, r1, r1, r0
+; CHECK-NEXT:    asrl r2, r1, #31
+; CHECK-NEXT:    subs.w r0, r2, lr
+; CHECK-NEXT:    sbcs r0, r1, #0
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    mov.w r0, #0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q7[1], r0
+; CHECK-NEXT:    smull r4, r1, r3, r1
+; CHECK-NEXT:    asrl r4, r1, #31
+; CHECK-NEXT:    subs.w r3, r4, lr
+; CHECK-NEXT:    vmov q4[2], q4[0], r2, r4
+; CHECK-NEXT:    sbcs r1, r1, #0
+; CHECK-NEXT:    mov.w r1, #0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vmov q7[2], q7[0], r0, r1
+; CHECK-NEXT:    add r0, sp, #120
+; CHECK-NEXT:    vbic q0, q3, q7
+; CHECK-NEXT:    vand q4, q4, q7
+; CHECK-NEXT:    vorr q0, q4, q0
+; CHECK-NEXT:    vldrw.u32 q5, [r0]
+; CHECK-NEXT:    vmov.f32 s1, s2
+; CHECK-NEXT:    vmov.f32 s2, s24
+; CHECK-NEXT:    vmov.f32 s3, s26
+; CHECK-NEXT:    vmov.f32 s16, s6
+; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f32 s24, s22
+; CHECK-NEXT:    vmov.f32 s18, s7
+; CHECK-NEXT:    vmov.f32 s26, s23
+; CHECK-NEXT:    vmullb.s32 q7, q6, q4
+; CHECK-NEXT:    vmov.f32 s6, s5
+; CHECK-NEXT:    vmov r2, r1, d14
+; CHECK-NEXT:    asrl r2, r1, #31
+; CHECK-NEXT:    vmov.f32 s22, s21
+; CHECK-NEXT:    subs.w r0, r2, lr
+; CHECK-NEXT:    sbcs r0, r1, #0
+; CHECK-NEXT:    vmov r4, r1, d15
+; CHECK-NEXT:    mov.w r0, #0
+; CHECK-NEXT:    asrl r4, r1, #31
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    subs.w r3, r4, lr
+; CHECK-NEXT:    sbcs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q4[1], r0
+; CHECK-NEXT:    mov.w r1, #0
+; CHECK-NEXT:    vmov q7[2], q7[0], r2, r4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vmov r3, s22
+; CHECK-NEXT:    vmov q4[2], q4[0], r0, r1
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov r1, s20
+; CHECK-NEXT:    vbic q6, q3, q4
+; CHECK-NEXT:    vand q4, q7, q4
+; CHECK-NEXT:    vorr q6, q4, q6
+; CHECK-NEXT:    smull r2, r1, r1, r0
+; CHECK-NEXT:    asrl r2, r1, #31
+; CHECK-NEXT:    subs.w r0, r2, lr
+; CHECK-NEXT:    sbcs r0, r1, #0
+; CHECK-NEXT:    vmov r1, s6
+; CHECK-NEXT:    mov.w r0, #0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q7[1], r0
+; CHECK-NEXT:    smull r4, r1, r3, r1
+; CHECK-NEXT:    asrl r4, r1, #31
+; CHECK-NEXT:    subs.w r3, r4, lr
+; CHECK-NEXT:    vmov q4[2], q4[0], r2, r4
+; CHECK-NEXT:    sbcs r1, r1, #0
+; CHECK-NEXT:    mov.w r1, #0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vmov q7[2], q7[0], r0, r1
+; CHECK-NEXT:    add r0, sp, #136
+; CHECK-NEXT:    vbic q1, q3, q7
+; CHECK-NEXT:    vand q4, q4, q7
+; CHECK-NEXT:    vorr q1, q4, q1
+; CHECK-NEXT:    vldrw.u32 q5, [r0]
+; CHECK-NEXT:    vmov.f32 s5, s6
+; CHECK-NEXT:    vmov.f32 s6, s24
+; CHECK-NEXT:    vmov.f32 s16, s10
+; CHECK-NEXT:    vmov.f32 s7, s26
+; CHECK-NEXT:    vmov.f32 s24, s22
+; CHECK-NEXT:    vmov.f32 s18, s11
+; CHECK-NEXT:    vmov.f32 s26, s23
+; CHECK-NEXT:    vmullb.s32 q7, q6, q4
+; CHECK-NEXT:    vmov.f32 s10, s9
+; CHECK-NEXT:    vmov r2, r1, d14
+; CHECK-NEXT:    asrl r2, r1, #31
+; CHECK-NEXT:    vmov.f32 s22, s21
+; CHECK-NEXT:    subs.w r0, r2, lr
+; CHECK-NEXT:    sbcs r0, r1, #0
+; CHECK-NEXT:    vmov r4, r1, d15
+; CHECK-NEXT:    mov.w r0, #0
+; CHECK-NEXT:    asrl r4, r1, #31
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    subs.w r3, r4, lr
+; CHECK-NEXT:    sbcs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q4[1], r0
+; CHECK-NEXT:    mov.w r1, #0
+; CHECK-NEXT:    vmov q7[2], q7[0], r2, r4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vmov r3, s22
+; CHECK-NEXT:    vmov q4[2], q4[0], r0, r1
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov r1, s20
+; CHECK-NEXT:    vbic q6, q3, q4
+; CHECK-NEXT:    vand q4, q7, q4
+; CHECK-NEXT:    vorr q6, q4, q6
+; CHECK-NEXT:    smull r2, r1, r1, r0
+; CHECK-NEXT:    asrl r2, r1, #31
+; CHECK-NEXT:    subs.w r0, r2, lr
+; CHECK-NEXT:    sbcs r0, r1, #0
+; CHECK-NEXT:    vmov r1, s10
+; CHECK-NEXT:    mov.w r0, #0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q7[1], r0
+; CHECK-NEXT:    smull r4, r1, r3, r1
+; CHECK-NEXT:    asrl r4, r1, #31
+; CHECK-NEXT:    subs.w r3, r4, lr
+; CHECK-NEXT:    vmov q4[2], q4[0], r2, r4
+; CHECK-NEXT:    sbcs r1, r1, #0
+; CHECK-NEXT:    mov.w r1, #0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT:    vmov q7[2], q7[0], r0, r1
+; CHECK-NEXT:    add r0, sp, #152
+; CHECK-NEXT:    vbic q2, q3, q7
+; CHECK-NEXT:    vand q4, q4, q7
+; CHECK-NEXT:    vorr q2, q4, q2
+; CHECK-NEXT:    vldrw.u32 q5, [r0]
+; CHECK-NEXT:    vmov.f32 s9, s10
+; CHECK-NEXT:    vmov.f32 s10, s24
+; CHECK-NEXT:    vmov.f32 s16, s2
+; CHECK-NEXT:    vmov.f32 s11, s26
+; CHECK-NEXT:    vmov.f32 s24, s22
+; CHECK-NEXT:    vmov.f32 s18, s3
+; CHECK-NEXT:    vmov.f32 s26, s23
+; CHECK-NEXT:    vmullb.s32 q7, q6, q4
+; CHECK-NEXT:    vmov.f32 s2, s1
+; CHECK-NEXT:    vmov r2, r1, d14
+; CHECK-NEXT:    asrl r2, r1, #31
+; CHECK-NEXT:    vmov.f32 s22, s21
+; CHECK-NEXT:    subs.w r0, r2, lr
+; CHECK-NEXT:    sbcs r0, r1, #0
+; CHECK-NEXT:    vmov r4, r1, d15
+; CHECK-NEXT:    mov.w r0, #0
+; CHECK-NEXT:    asrl r4, r1, #31
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    subs.w r3, r4, lr
+; CHECK-NEXT:    sbcs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q4[1], r0
+; CHECK-NEXT:    mov.w r1, #0
+; CHECK-NEXT:    vmov q7[2], q7[0], r2, r4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vmov r3, s22
+; CHECK-NEXT:    vmov q4[2], q4[0], r0, r1
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov r1, s20
+; CHECK-NEXT:    vbic q6, q3, q4
+; CHECK-NEXT:    vand q4, q7, q4
+; CHECK-NEXT:    vorr q6, q4, q6
+; CHECK-NEXT:    smull r2, r1, r1, r0
+; CHECK-NEXT:    asrl r2, r1, #31
+; CHECK-NEXT:    subs.w r0, r2, lr
+; CHECK-NEXT:    sbcs r0, r1, #0
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    mov.w r0, #0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q7[1], r0
+; CHECK-NEXT:    smull r4, r1, r3, r1
+; CHECK-NEXT:    asrl r4, r1, #31
+; CHECK-NEXT:    subs.w r3, r4, lr
+; CHECK-NEXT:    vmov q4[2], q4[0], r2, r4
+; CHECK-NEXT:    sbcs r1, r1, #0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r12, #1
+; CHECK-NEXT:    cmp.w r12, #0
+; CHECK-NEXT:    vldrw.u32 q0, [sp] @ 16-byte Reload
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vmov q7[2], q7[0], r0, r1
+; CHECK-NEXT:    vbic q3, q3, q7
+; CHECK-NEXT:    vand q4, q4, q7
+; CHECK-NEXT:    vorr q3, q4, q3
+; CHECK-NEXT:    vmov.f32 s13, s14
+; CHECK-NEXT:    vmov.f32 s14, s24
+; CHECK-NEXT:    vmov.f32 s15, s26
+; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    pop {r4, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI16_0:
+; CHECK-NEXT:    .long 2147483647 @ 0x7fffffff
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 2147483647 @ 0x7fffffff
+; CHECK-NEXT:    .long 0 @ 0x0
+entry:
+  %l2 = sext <16 x i32> %s0 to <16 x i64>
+  %l5 = sext <16 x i32> %s1 to <16 x i64>
+  %l6 = mul nsw <16 x i64> %l5, %l2
+  %l7 = ashr <16 x i64> %l6, <i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31>
+  %l8 = icmp slt <16 x i64> %l7, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
+  %l9 = select <16 x i1> %l8, <16 x i64> %l7, <16 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
+  %l10 = trunc <16 x i64> %l9 to <16 x i32>
+  ret <16 x i32> %l10
+}
 
 
 
@@ -190,13 +1045,13 @@ define void @vqdmulh_loop_i8(i8* nocapture readonly %x, i8* nocapture readonly %
 ; CHECK-NEXT:    .save {r7, lr}
 ; CHECK-NEXT:    push {r7, lr}
 ; CHECK-NEXT:    mov.w lr, #64
-; CHECK-NEXT:  .LBB8_1: @ %vector.body
+; CHECK-NEXT:  .LBB17_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrb.u8 q0, [r0], #16
 ; CHECK-NEXT:    vldrb.u8 q1, [r1], #16
 ; CHECK-NEXT:    vqdmulh.s8 q0, q1, q0
 ; CHECK-NEXT:    vstrb.8 q0, [r2], #16
-; CHECK-NEXT:    le lr, .LBB8_1
+; CHECK-NEXT:    le lr, .LBB17_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -234,13 +1089,13 @@ define void @vqdmulh_loop_i16(i16* nocapture readonly %x, i16* nocapture readonl
 ; CHECK-NEXT:    .save {r7, lr}
 ; CHECK-NEXT:    push {r7, lr}
 ; CHECK-NEXT:    mov.w lr, #128
-; CHECK-NEXT:  .LBB9_1: @ %vector.body
+; CHECK-NEXT:  .LBB18_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrh.u16 q0, [r0], #16
 ; CHECK-NEXT:    vldrh.u16 q1, [r1], #16
 ; CHECK-NEXT:    vqdmulh.s16 q0, q1, q0
 ; CHECK-NEXT:    vstrb.8 q0, [r2], #16
-; CHECK-NEXT:    le lr, .LBB9_1
+; CHECK-NEXT:    le lr, .LBB18_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -278,13 +1133,13 @@ define void @vqdmulh_loop_i32(i32* nocapture readonly %x, i32* nocapture readonl
 ; CHECK-NEXT:    .save {r7, lr}
 ; CHECK-NEXT:    push {r7, lr}
 ; CHECK-NEXT:    mov.w lr, #256
-; CHECK-NEXT:  .LBB10_1: @ %vector.body
+; CHECK-NEXT:  .LBB19_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r0], #16
 ; CHECK-NEXT:    vldrw.u32 q1, [r1], #16
 ; CHECK-NEXT:    vqdmulh.s32 q0, q1, q0
 ; CHECK-NEXT:    vstrb.8 q0, [r2], #16
-; CHECK-NEXT:    le lr, .LBB10_1
+; CHECK-NEXT:    le lr, .LBB19_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
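
The CHECK lines throughout this file are autogenerated (see the NOTE at the top of the diff). Assuming a freshly built llc is on PATH, they can be regenerated with something like:

  llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/Thumb2/mve-vqdmulh.ll

which reruns the RUN line and rewrites every CHECK block in place; that is also why the loop-test labels above shifted mechanically from .LBB8_1/.LBB9_1/.LBB10_1 to .LBB17_1/.LBB18_1/.LBB19_1 once the new functions were inserted ahead of them.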


        

