[llvm] 605489d - [ARM] Fix VQDMULH fold for scalar smin

David Green via llvm-commits llvm-commits at lists.llvm.org
Sat Aug 21 08:33:26 PDT 2021


Author: David Green
Date: 2021-08-21T16:33:18+01:00
New Revision: 605489d59309bec871cd49bdd482cff33cbf8ae1

URL: https://github.com/llvm/llvm-project/commit/605489d59309bec871cd49bdd482cff33cbf8ae1
DIFF: https://github.com/llvm/llvm-project/commit/605489d59309bec871cd49bdd482cff33cbf8ae1.diff

LOG: [ARM] Fix VQDMULH fold for scalar smin

Add a variant of the mve-vqdmulh tests that uses the min/max intrinsics
directly, including a scalar test that shows the combine misbehaving for
the scalar smin intrinsic, and fix the combine to bail out for
non-vector types.
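
For context, the pattern the combine targets is a sign-extending
multiply, an arithmetic shift right by the element width minus one,
and a clamp to the signed maximum via smin. A reduced vector instance,
taken from the vqdmulh_v4i32_b test below, which folds to a single
vqdmulh.s32:

  %l2 = sext <4 x i32> %s0 to <4 x i64>
  %l5 = sext <4 x i32> %s1 to <4 x i64>
  %l6 = mul nsw <4 x i64> %l5, %l2
  %l7 = ashr <4 x i64> %l6, <i64 31, i64 31, i64 31, i64 31>
  %l9 = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %l7, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>)
  %l10 = trunc <4 x i64> %l9 to <4 x i32>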

Added: 
    llvm/test/CodeGen/Thumb2/mve-vqdmulh-minmax.ll

Modified: 
    llvm/lib/Target/ARM/ARMISelLowering.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index f914263179392..5366a64bcf899 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -12890,6 +12890,9 @@ static SDValue PerformVQDMULHCombine(SDNode *N, SelectionDAG &DAG) {
   SDValue Shft;
   ConstantSDNode *Clamp;
 
+  if (!VT.isVector())
+    return SDValue();
+
   if (N->getOpcode() == ISD::SMIN) {
     Shft = N->getOperand(0);
     Clamp = isConstOrConstSplat(N->getOperand(1));

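The new isVector() guard is needed because the llvm.smin/llvm.smax
intrinsics lower to the same ISD::SMIN/ISD::SMAX opcodes the combine
matches, so the fold can now be reached with scalar types, for which
no MVE VQDMULH exists. The scalar shape the combine must now reject,
reduced from the new scalar test below:

  %e = sext i16 %a to i32
  %d = mul nsw i32 %e, %e
  %b = ashr i32 %d, 7
  %c = call i32 @llvm.smin.i32(i32 %b, i32 127)
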
diff --git a/llvm/test/CodeGen/Thumb2/mve-vqdmulh-minmax.ll b/llvm/test/CodeGen/Thumb2/mve-vqdmulh-minmax.ll
new file mode 100644
index 0000000000000..926a177671413
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-vqdmulh-minmax.ll
@@ -0,0 +1,513 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s
+
+define arm_aapcs_vfpcc i32 @vqdmulh_v16i8(<16 x i8> %s0, <16 x i8> %s1) {
+; CHECK-LABEL: vqdmulh_v16i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmulh.s8 q0, q1, q0
+; CHECK-NEXT:    vaddv.s8 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %l2 = sext <16 x i8> %s0 to <16 x i32>
+  %l5 = sext <16 x i8> %s1 to <16 x i32>
+  %l6 = mul nsw <16 x i32> %l5, %l2
+  %l7 = ashr <16 x i32> %l6, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+  %l9 = call <16 x i32> @llvm.smin.v16i32(<16 x i32> %l7, <16 x i32> <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>)
+  %l10 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %l9)
+  ret i32 %l10
+}
+
+define arm_aapcs_vfpcc <16 x i8> @vqdmulh_v16i8_b(<16 x i8> %s0, <16 x i8> %s1) {
+; CHECK-LABEL: vqdmulh_v16i8_b:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmulh.s8 q0, q1, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %l2 = sext <16 x i8> %s0 to <16 x i32>
+  %l5 = sext <16 x i8> %s1 to <16 x i32>
+  %l6 = mul nsw <16 x i32> %l5, %l2
+  %l7 = ashr <16 x i32> %l6, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+  %l9 = call <16 x i32> @llvm.smin.v16i32(<16 x i32> %l7, <16 x i32> <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>)
+  %l10 = trunc <16 x i32> %l9 to <16 x i8>
+  ret <16 x i8> %l10
+}
+
+define arm_aapcs_vfpcc <8 x i8> @vqdmulh_v8i8_b(<8 x i8> %s0, <8 x i8> %s1) {
+; CHECK-LABEL: vqdmulh_v8i8_b:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmulh.s8 q0, q1, q0
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %l2 = sext <8 x i8> %s0 to <8 x i32>
+  %l5 = sext <8 x i8> %s1 to <8 x i32>
+  %l6 = mul nsw <8 x i32> %l5, %l2
+  %l7 = ashr <8 x i32> %l6, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+  %l9 = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %l7, <8 x i32> <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>)
+  %l10 = trunc <8 x i32> %l9 to <8 x i8>
+  ret <8 x i8> %l10
+}
+
+define arm_aapcs_vfpcc <4 x i8> @vqdmulh_v4i8_b(<4 x i8> %s0, <4 x i8> %s1) {
+; CHECK-LABEL: vqdmulh_v4i8_b:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmulh.s8 q0, q1, q0
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %l2 = sext <4 x i8> %s0 to <4 x i32>
+  %l5 = sext <4 x i8> %s1 to <4 x i32>
+  %l6 = mul nsw <4 x i32> %l5, %l2
+  %l7 = ashr <4 x i32> %l6, <i32 7, i32 7, i32 7, i32 7>
+  %l9 = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %l7, <4 x i32> <i32 127, i32 127, i32 127, i32 127>)
+  %l10 = trunc <4 x i32> %l9 to <4 x i8>
+  ret <4 x i8> %l10
+}
+
+define arm_aapcs_vfpcc <32 x i8> @vqdmulh_v32i8_b(<32 x i8> %s0, <32 x i8> %s1) {
+; CHECK-LABEL: vqdmulh_v32i8_b:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmulh.s8 q0, q2, q0
+; CHECK-NEXT:    vqdmulh.s8 q1, q3, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %l2 = sext <32 x i8> %s0 to <32 x i32>
+  %l5 = sext <32 x i8> %s1 to <32 x i32>
+  %l6 = mul nsw <32 x i32> %l5, %l2
+  %l7 = ashr <32 x i32> %l6, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+  %l9 = call <32 x i32> @llvm.smin.v32i32(<32 x i32> %l7, <32 x i32> <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>)
+  %l10 = trunc <32 x i32> %l9 to <32 x i8>
+  ret <32 x i8> %l10
+}
+
+define arm_aapcs_vfpcc i32 @vqdmulh_v8i16(<8 x i16> %s0, <8 x i16> %s1) {
+; CHECK-LABEL: vqdmulh_v8i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmulh.s16 q0, q1, q0
+; CHECK-NEXT:    vaddv.s16 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %l2 = sext <8 x i16> %s0 to <8 x i32>
+  %l5 = sext <8 x i16> %s1 to <8 x i32>
+  %l6 = mul nsw <8 x i32> %l5, %l2
+  %l7 = ashr <8 x i32> %l6, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
+  %l9 = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %l7, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>)
+  %l10 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %l9)
+  ret i32 %l10
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vqdmulh_v8i16_b(<8 x i16> %s0, <8 x i16> %s1) {
+; CHECK-LABEL: vqdmulh_v8i16_b:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmulh.s16 q0, q1, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %l2 = sext <8 x i16> %s0 to <8 x i32>
+  %l5 = sext <8 x i16> %s1 to <8 x i32>
+  %l6 = mul nsw <8 x i32> %l5, %l2
+  %l7 = ashr <8 x i32> %l6, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
+  %l9 = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %l7, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>)
+  %l10 = trunc <8 x i32> %l9 to <8 x i16>
+  ret <8 x i16> %l10
+}
+
+define arm_aapcs_vfpcc <4 x i16> @vqdmulh_v4i16_b(<4 x i16> %s0, <4 x i16> %s1) {
+; CHECK-LABEL: vqdmulh_v4i16_b:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmulh.s16 q0, q1, q0
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %l2 = sext <4 x i16> %s0 to <4 x i32>
+  %l5 = sext <4 x i16> %s1 to <4 x i32>
+  %l6 = mul nsw <4 x i32> %l5, %l2
+  %l7 = ashr <4 x i32> %l6, <i32 15, i32 15, i32 15, i32 15>
+  %l9 = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %l7, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>)
+  %l10 = trunc <4 x i32> %l9 to <4 x i16>
+  ret <4 x i16> %l10
+}
+
+define arm_aapcs_vfpcc <16 x i16> @vqdmulh_v16i16_b(<16 x i16> %s0, <16 x i16> %s1) {
+; CHECK-LABEL: vqdmulh_v16i16_b:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmulh.s16 q0, q2, q0
+; CHECK-NEXT:    vqdmulh.s16 q1, q3, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %l2 = sext <16 x i16> %s0 to <16 x i32>
+  %l5 = sext <16 x i16> %s1 to <16 x i32>
+  %l6 = mul nsw <16 x i32> %l5, %l2
+  %l7 = ashr <16 x i32> %l6, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
+  %l9 = call <16 x i32> @llvm.smin.v16i32(<16 x i32> %l7, <16 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>)
+  %l10 = trunc <16 x i32> %l9 to <16 x i16>
+  ret <16 x i16> %l10
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vqdmulh_v8i16_c(<8 x i16> %s0, <8 x i16> %s1) {
+; CHECK-LABEL: vqdmulh_v8i16_c:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov q2, q0
+; CHECK-NEXT:    vmov.u16 r0, q0[2]
+; CHECK-NEXT:    vmov.u16 r1, q0[0]
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    vmov.u16 r0, q2[3]
+; CHECK-NEXT:    vmov.u16 r1, q2[1]
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r0
+; CHECK-NEXT:    vmov.u16 r0, q1[2]
+; CHECK-NEXT:    vmov.u16 r1, q1[0]
+; CHECK-NEXT:    vmov q3[2], q3[0], r1, r0
+; CHECK-NEXT:    vmov.u16 r0, q1[3]
+; CHECK-NEXT:    vmov.u16 r1, q1[1]
+; CHECK-NEXT:    vmov q3[3], q3[1], r1, r0
+; CHECK-NEXT:    vmullb.s16 q0, q3, q0
+; CHECK-NEXT:    vshl.i32 q0, q0, #10
+; CHECK-NEXT:    vshr.s32 q0, q0, #10
+; CHECK-NEXT:    vshr.s32 q3, q0, #15
+; CHECK-NEXT:    vmov r0, r1, d6
+; CHECK-NEXT:    vmov.16 q0[0], r0
+; CHECK-NEXT:    vmov.16 q0[1], r1
+; CHECK-NEXT:    vmov r0, r1, d7
+; CHECK-NEXT:    vmov.16 q0[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[6]
+; CHECK-NEXT:    vmov.16 q0[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[4]
+; CHECK-NEXT:    vmov q3[2], q3[0], r1, r0
+; CHECK-NEXT:    vmov.u16 r0, q2[7]
+; CHECK-NEXT:    vmov.u16 r1, q2[5]
+; CHECK-NEXT:    vmov q3[3], q3[1], r1, r0
+; CHECK-NEXT:    vmov.u16 r0, q1[6]
+; CHECK-NEXT:    vmov.u16 r1, q1[4]
+; CHECK-NEXT:    vmov q2[2], q2[0], r1, r0
+; CHECK-NEXT:    vmov.u16 r0, q1[7]
+; CHECK-NEXT:    vmov.u16 r1, q1[5]
+; CHECK-NEXT:    vmov q2[3], q2[1], r1, r0
+; CHECK-NEXT:    vmullb.s16 q1, q2, q3
+; CHECK-NEXT:    vshl.i32 q1, q1, #10
+; CHECK-NEXT:    vshr.s32 q1, q1, #10
+; CHECK-NEXT:    vshr.s32 q1, q1, #15
+; CHECK-NEXT:    vmov r0, r1, d2
+; CHECK-NEXT:    vmov.16 q0[4], r0
+; CHECK-NEXT:    vmov.16 q0[5], r1
+; CHECK-NEXT:    vmov r0, r1, d3
+; CHECK-NEXT:    vmov.16 q0[6], r0
+; CHECK-NEXT:    vmov.16 q0[7], r1
+; CHECK-NEXT:    bx lr
+entry:
+  %l2 = sext <8 x i16> %s0 to <8 x i22>
+  %l5 = sext <8 x i16> %s1 to <8 x i22>
+  %l6 = mul nsw <8 x i22> %l5, %l2
+  %l7 = ashr <8 x i22> %l6, <i22 15, i22 15, i22 15, i22 15, i22 15, i22 15, i22 15, i22 15>
+  %l9 = call <8 x i22> @llvm.smin.v8i22(<8 x i22> %l7, <8 x i22> <i22 32767, i22 32767, i22 32767, i22 32767, i22 32767, i22 32767, i22 32767, i22 32767>)
+  %l10 = trunc <8 x i22> %l9 to <8 x i16>
+  ret <8 x i16> %l10
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vqdmulh_v8i16_interleaved(<8 x i16> %s0, <8 x i16> %s1) {
+; CHECK-LABEL: vqdmulh_v8i16_interleaved:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmulh.s16 q0, q1, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = shufflevector <8 x i16> %s0, <8 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7>
+  %1 = sext <8 x i16> %0 to <8 x i32>
+  %l2 = sext <8 x i16> %s0 to <8 x i32>
+  %2 = shufflevector <8 x i16> %s1, <8 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7>
+  %3 = sext <8 x i16> %2 to <8 x i32>
+  %l5 = sext <8 x i16> %s1 to <8 x i32>
+  %l6 = mul nsw <8 x i32> %3, %1
+  %l7 = ashr <8 x i32> %l6, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
+  %l9 = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %l7, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>)
+  %l10 = trunc <8 x i32> %l9 to <8 x i16>
+  %4 = shufflevector <8 x i16> %l10, <8 x i16> undef, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+  ret <8 x i16> %4
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vqdmulh_v8i16_interleaved2(<4 x i32> %s0a, <8 x i16> %s1) {
+; CHECK-LABEL: vqdmulh_v8i16_interleaved2:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vqdmulh.s16 q2, q1, q0
+; CHECK-NEXT:    vrev32.16 q1, q1
+; CHECK-NEXT:    vqdmulh.s16 q0, q1, q0
+; CHECK-NEXT:    vmovnt.i32 q2, q0
+; CHECK-NEXT:    vmov q0, q2
+; CHECK-NEXT:    bx lr
+  %s0 = trunc <4 x i32> %s0a to <4 x i16>
+  %strided.vec = shufflevector <8 x i16> %s1, <8 x i16> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %strided.vec44 = shufflevector <8 x i16> %s1, <8 x i16> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %l7 = sext <4 x i16> %strided.vec to <4 x i32>
+  %l8 = sext <4 x i16> %s0 to <4 x i32>
+  %l9 = mul nsw <4 x i32> %l7, %l8
+  %l10 = ashr <4 x i32> %l9, <i32 15, i32 15, i32 15, i32 15>
+  %l12 = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %l10, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>)
+  %l13 = trunc <4 x i32> %l12 to <4 x i16>
+  %l14 = sext <4 x i16> %strided.vec44 to <4 x i32>
+  %l15 = mul nsw <4 x i32> %l14, %l8
+  %l16 = ashr <4 x i32> %l15, <i32 15, i32 15, i32 15, i32 15>
+  %l18 = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %l16, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>)
+  %l19 = trunc <4 x i32> %l18 to <4 x i16>
+  %interleaved.vec = shufflevector <4 x i16> %l13, <4 x i16> %l19, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+  ret <8 x i16> %interleaved.vec
+}
+
+define arm_aapcs_vfpcc i64 @vqdmulh_v4i32(<4 x i32> %s0, <4 x i32> %s1) {
+; CHECK-LABEL: vqdmulh_v4i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmulh.s32 q0, q1, q0
+; CHECK-NEXT:    vaddlv.s32 r0, r1, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %l2 = sext <4 x i32> %s0 to <4 x i64>
+  %l5 = sext <4 x i32> %s1 to <4 x i64>
+  %l6 = mul nsw <4 x i64> %l5, %l2
+  %l7 = ashr <4 x i64> %l6, <i64 31, i64 31, i64 31, i64 31>
+  %l9 = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %l7, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>)
+  %l10 = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %l9)
+  ret i64 %l10
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vqdmulh_v4i32_b(<4 x i32> %s0, <4 x i32> %s1) {
+; CHECK-LABEL: vqdmulh_v4i32_b:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmulh.s32 q0, q1, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %l2 = sext <4 x i32> %s0 to <4 x i64>
+  %l5 = sext <4 x i32> %s1 to <4 x i64>
+  %l6 = mul nsw <4 x i64> %l5, %l2
+  %l7 = ashr <4 x i64> %l6, <i64 31, i64 31, i64 31, i64 31>
+  %l9 = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %l7, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>)
+  %l10 = trunc <4 x i64> %l9 to <4 x i32>
+  ret <4 x i32> %l10
+}
+
+define arm_aapcs_vfpcc <2 x i32> @vqdmulh_v2i32_b(<2 x i32> %s0, <2 x i32> %s1) {
+; CHECK-LABEL: vqdmulh_v2i32_b:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmulh.s32 q0, q1, q0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    asrs r0, r0, #31
+; CHECK-NEXT:    asrs r1, r1, #31
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %l2 = sext <2 x i32> %s0 to <2 x i64>
+  %l5 = sext <2 x i32> %s1 to <2 x i64>
+  %l6 = mul nsw <2 x i64> %l5, %l2
+  %l7 = ashr <2 x i64> %l6, <i64 31, i64 31>
+  %l9 = call <2 x i64> @llvm.smin.v2i64(<2 x i64> %l7, <2 x i64> <i64 2147483647, i64 2147483647>)
+  %l10 = trunc <2 x i64> %l9 to <2 x i32>
+  ret <2 x i32> %l10
+}
+
+define arm_aapcs_vfpcc <8 x i32> @vqdmulh_v8i32_b(<8 x i32> %s0, <8 x i32> %s1) {
+; CHECK-LABEL: vqdmulh_v8i32_b:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmulh.s32 q0, q2, q0
+; CHECK-NEXT:    vqdmulh.s32 q1, q3, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %l2 = sext <8 x i32> %s0 to <8 x i64>
+  %l5 = sext <8 x i32> %s1 to <8 x i64>
+  %l6 = mul nsw <8 x i64> %l5, %l2
+  %l7 = ashr <8 x i64> %l6, <i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31>
+  %l9 = call <8 x i64> @llvm.smin.v8i64(<8 x i64> %l7, <8 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>)
+  %l10 = trunc <8 x i64> %l9 to <8 x i32>
+  ret <8 x i32> %l10
+}
+
+define arm_aapcs_vfpcc <16 x i32> @vqdmulh_v16i32_b(<16 x i32> %s0, <16 x i32> %s1) {
+; CHECK-LABEL: vqdmulh_v16i32_b:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    add r0, sp, #16
+; CHECK-NEXT:    vldrw.u32 q4, [r0]
+; CHECK-NEXT:    add r0, sp, #32
+; CHECK-NEXT:    vqdmulh.s32 q0, q4, q0
+; CHECK-NEXT:    vldrw.u32 q4, [r0]
+; CHECK-NEXT:    add r0, sp, #48
+; CHECK-NEXT:    vqdmulh.s32 q1, q4, q1
+; CHECK-NEXT:    vldrw.u32 q4, [r0]
+; CHECK-NEXT:    add r0, sp, #64
+; CHECK-NEXT:    vqdmulh.s32 q2, q4, q2
+; CHECK-NEXT:    vldrw.u32 q4, [r0]
+; CHECK-NEXT:    vqdmulh.s32 q3, q4, q3
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    bx lr
+entry:
+  %l2 = sext <16 x i32> %s0 to <16 x i64>
+  %l5 = sext <16 x i32> %s1 to <16 x i64>
+  %l6 = mul nsw <16 x i64> %l5, %l2
+  %l7 = ashr <16 x i64> %l6, <i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31>
+  %l9 = call <16 x i64> @llvm.smin.v16i64(<16 x i64> %l7, <16 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>)
+  %l10 = trunc <16 x i64> %l9 to <16 x i32>
+  ret <16 x i32> %l10
+}
+
+
+
+define void @vqdmulh_loop_i8(i8* nocapture readonly %x, i8* nocapture readonly %y, i8* noalias nocapture %z, i32 %n) local_unnamed_addr #0 {
+; CHECK-LABEL: vqdmulh_loop_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    mov.w lr, #64
+; CHECK-NEXT:  .LBB17_1: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrb.u8 q0, [r0], #16
+; CHECK-NEXT:    vldrb.u8 q1, [r1], #16
+; CHECK-NEXT:    vqdmulh.s8 q0, q1, q0
+; CHECK-NEXT:    vstrb.8 q0, [r2], #16
+; CHECK-NEXT:    le lr, .LBB17_1
+; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds i8, i8* %x, i32 %index
+  %1 = bitcast i8* %0 to <16 x i8>*
+  %wide.load = load <16 x i8>, <16 x i8>* %1, align 1
+  %2 = sext <16 x i8> %wide.load to <16 x i32>
+  %3 = getelementptr inbounds i8, i8* %y, i32 %index
+  %4 = bitcast i8* %3 to <16 x i8>*
+  %wide.load26 = load <16 x i8>, <16 x i8>* %4, align 1
+  %5 = sext <16 x i8> %wide.load26 to <16 x i32>
+  %6 = mul nsw <16 x i32> %5, %2
+  %7 = ashr <16 x i32> %6, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+  %8 = icmp slt <16 x i32> %7, <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
+  %9 = call <16 x i32> @llvm.smin.v16i32(<16 x i32> %7, <16 x i32> <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>)
+  %10 = trunc <16 x i32> %9 to <16 x i8>
+  %11 = getelementptr inbounds i8, i8* %z, i32 %index
+  %12 = bitcast i8* %11 to <16 x i8>*
+  store <16 x i8> %10, <16 x i8>* %12, align 1
+  %index.next = add i32 %index, 16
+  %13 = icmp eq i32 %index.next, 1024
+  br i1 %13, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}
+
+define void @vqdmulh_loop_i16(i16* nocapture readonly %x, i16* nocapture readonly %y, i16* noalias nocapture %z, i32 %n) {
+; CHECK-LABEL: vqdmulh_loop_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    mov.w lr, #128
+; CHECK-NEXT:  .LBB18_1: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrh.u16 q0, [r0], #16
+; CHECK-NEXT:    vldrh.u16 q1, [r1], #16
+; CHECK-NEXT:    vqdmulh.s16 q0, q1, q0
+; CHECK-NEXT:    vstrb.8 q0, [r2], #16
+; CHECK-NEXT:    le lr, .LBB18_1
+; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds i16, i16* %x, i32 %index
+  %1 = bitcast i16* %0 to <8 x i16>*
+  %wide.load = load <8 x i16>, <8 x i16>* %1, align 2
+  %2 = sext <8 x i16> %wide.load to <8 x i32>
+  %3 = getelementptr inbounds i16, i16* %y, i32 %index
+  %4 = bitcast i16* %3 to <8 x i16>*
+  %wide.load30 = load <8 x i16>, <8 x i16>* %4, align 2
+  %5 = sext <8 x i16> %wide.load30 to <8 x i32>
+  %6 = mul nsw <8 x i32> %5, %2
+  %7 = ashr <8 x i32> %6, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
+  %8 = icmp slt <8 x i32> %7, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
+  %9 = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %7, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>)
+  %10 = trunc <8 x i32> %9 to <8 x i16>
+  %11 = getelementptr inbounds i16, i16* %z, i32 %index
+  %12 = bitcast i16* %11 to <8 x i16>*
+  store <8 x i16> %10, <8 x i16>* %12, align 2
+  %index.next = add i32 %index, 8
+  %13 = icmp eq i32 %index.next, 1024
+  br i1 %13, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}
+
+define void @vqdmulh_loop_i32(i32* nocapture readonly %x, i32* nocapture readonly %y, i32* noalias nocapture %z, i32 %n) {
+; CHECK-LABEL: vqdmulh_loop_i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    mov.w lr, #256
+; CHECK-NEXT:  .LBB19_1: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q0, [r0], #16
+; CHECK-NEXT:    vldrw.u32 q1, [r1], #16
+; CHECK-NEXT:    vqdmulh.s32 q0, q1, q0
+; CHECK-NEXT:    vstrb.8 q0, [r2], #16
+; CHECK-NEXT:    le lr, .LBB19_1
+; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %x, i32 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %2 = sext <4 x i32> %wide.load to <4 x i64>
+  %3 = getelementptr inbounds i32, i32* %y, i32 %index
+  %4 = bitcast i32* %3 to <4 x i32>*
+  %wide.load30 = load <4 x i32>, <4 x i32>* %4, align 4
+  %5 = sext <4 x i32> %wide.load30 to <4 x i64>
+  %6 = mul nsw <4 x i64> %5, %2
+  %7 = ashr <4 x i64> %6, <i64 31, i64 31, i64 31, i64 31>
+  %8 = icmp slt <4 x i64> %7, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
+  %9 = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %7, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>)
+  %10 = trunc <4 x i64> %9 to <4 x i32>
+  %11 = getelementptr inbounds i32, i32* %z, i32 %index
+  %12 = bitcast i32* %11 to <4 x i32>*
+  store <4 x i32> %10, <4 x i32>* %12, align 4
+  %index.next = add i32 %index, 4
+  %13 = icmp eq i32 %index.next, 1024
+  br i1 %13, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}
+
+define i32 @scalar(i16 %a) {
+; CHECK-LABEL: scalar:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    smulbb r1, r0, r0
+; CHECK-NEXT:    movs r0, #127
+; CHECK-NEXT:    asrs r2, r1, #7
+; CHECK-NEXT:    cmp r2, #127
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    asrlt r0, r1, #7
+; CHECK-NEXT:    bx lr
+  %e = sext i16 %a to i32
+  %d = mul nsw i32 %e, %e
+  %b = ashr i32 %d, 7
+  %c = call i32 @llvm.smin.i32(i32 %b, i32 127)
+  ret i32 %c
+}
+
+declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>)
+declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
+declare i32 @llvm.smin.i32(i32 %a, i32 %b)
+declare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.smin.v4i64(<4 x i64>, <4 x i64>)
+declare <8 x i64> @llvm.smin.v8i64(<8 x i64>, <8 x i64>)
+declare <16 x i64> @llvm.smin.v16i64(<16 x i64>, <16 x i64>)
+declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>)
+declare <8 x i32> @llvm.smin.v8i32(<8 x i32>, <8 x i32>)
+declare <16 x i32> @llvm.smin.v16i32(<16 x i32>, <16 x i32>)
+declare <32 x i32> @llvm.smin.v32i32(<32 x i32>, <32 x i32>)
+declare <8 x i22> @llvm.smin.v8i22(<8 x i22>, <8 x i22>)
