[llvm] 24d7641 - [ARM] Transform a floating-point to fixed-point conversion to a VCVT_fix

Sam Tebbs via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 1 07:11:52 PDT 2021


Author: Sam Tebbs
Date: 2021-07-01T15:10:40+01:00
New Revision: 24d76419d6b7a3191ec1f4bfc33a640e716f11c8

URL: https://github.com/llvm/llvm-project/commit/24d76419d6b7a3191ec1f4bfc33a640e716f11c8
DIFF: https://github.com/llvm/llvm-project/commit/24d76419d6b7a3191ec1f4bfc33a640e716f11c8.diff

LOG: [ARM] Transform a floating-point to fixed-point conversion to a VCVT_fix

Much like fixed-point to floating-point conversion, the converse can
also be transformed into a fixed-point VCVT. This patch transforms
multiplications of floating point numbers by 2^n into a VCVT_fix. The
exception is that a float to fixed conversion with 1 fractional bit
ends up being an FADD (FADD(x, x) emulates FMUL(x, 2)) rather than an FMUL so there is a special case for that. This patch also moves the code from https://reviews.llvm.org/D103903 into a separate function as fixed to float and float to fixed are very similar.

Differential Revision: https://reviews.llvm.org/D104793

Added: 
    llvm/test/CodeGen/Thumb2/mve-vcvt-fixed-to-float.ll
    llvm/test/CodeGen/Thumb2/mve-vcvt-float-to-fixed.ll

Modified: 
    llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp

Removed: 
    llvm/test/CodeGen/Thumb2/mve-vcvt-fixed.ll


################################################################################
diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index bb6a0c95a114b..9c7055deaaf8c 100644
--- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -197,6 +197,10 @@ class ARMDAGToDAGISel : public SelectionDAGISel {
   bool tryT2IndexedLoad(SDNode *N);
   bool tryMVEIndexedLoad(SDNode *N);
   bool tryFMULFixed(SDNode *N, SDLoc dl);
+  bool tryFP_TO_INT(SDNode *N, SDLoc dl);
+  bool transformFixedFloatingPointConversion(SDNode *N, SDNode *FMul,
+                                             bool IsUnsigned,
+                                             bool FixedToFloat);
 
   /// SelectVLD - Select NEON load intrinsics.  NumVecs should be
   /// 1, 2, 3 or 4.  The opcode arrays specify the instructions used for
@@ -3150,47 +3154,47 @@ bool ARMDAGToDAGISel::tryInsertVectorElt(SDNode *N) {
   return false;
 }
 
-bool ARMDAGToDAGISel::tryFMULFixed(SDNode *N, SDLoc dl) {
-  // Transform a fixed-point to floating-point conversion to a VCVT
-  if (!Subtarget->hasMVEFloatOps())
-    return false;
+bool ARMDAGToDAGISel::transformFixedFloatingPointConversion(SDNode *N,
+                                                            SDNode *FMul,
+                                                            bool IsUnsigned,
+                                                            bool FixedToFloat) {
   auto Type = N->getValueType(0);
-  if (!Type.isVector())
+  unsigned ScalarBits = Type.getScalarSizeInBits();
+  if (ScalarBits > 32)
     return false;
 
-  auto ScalarType = Type.getVectorElementType();
-  unsigned ScalarBits = ScalarType.getSizeInBits();
-  auto LHS = N->getOperand(0);
-  auto RHS = N->getOperand(1);
+  SDNodeFlags FMulFlags = FMul->getFlags();
+  // The fixed-point vcvt and vcvt+vmul are not always equivalent if inf is
+  // allowed in 16 bit unsigned floats
+  if (ScalarBits == 16 && !FMulFlags.hasNoInfs() && IsUnsigned)
+    return false;
 
-  if (ScalarBits > 32)
+  SDValue ImmNode = FMul->getOperand(1);
+  SDValue VecVal = FMul->getOperand(0);
+  if (VecVal->getOpcode() == ISD::UINT_TO_FP ||
+      VecVal->getOpcode() == ISD::SINT_TO_FP)
+    VecVal = VecVal->getOperand(0);
+
+  if (VecVal.getValueType().getScalarSizeInBits() != ScalarBits)
     return false;
 
-  if (RHS.getOpcode() == ISD::BITCAST) {
-    if (RHS.getValueType().getVectorElementType().getSizeInBits() != ScalarBits)
+  if (ImmNode.getOpcode() == ISD::BITCAST) {
+    if (ImmNode.getValueType().getScalarSizeInBits() != ScalarBits)
       return false;
-    RHS = RHS.getOperand(0);
+    ImmNode = ImmNode.getOperand(0);
   }
-  if (RHS.getValueType().getVectorElementType().getSizeInBits() != ScalarBits)
-    return false;
-  if (LHS.getOpcode() != ISD::SINT_TO_FP && LHS.getOpcode() != ISD::UINT_TO_FP)
-    return false;
 
-  bool IsUnsigned = LHS.getOpcode() == ISD::UINT_TO_FP;
-  SDNodeFlags FMulFlags = N->getFlags();
-  // The fixed-point vcvt and vcvt+vmul are not always equivalent if inf is
-  // allowed in 16 bit unsigned floats
-  if (ScalarBits == 16 && !FMulFlags.hasNoInfs() && IsUnsigned)
+  if (ImmNode.getValueType().getScalarSizeInBits() != ScalarBits)
     return false;
 
   APFloat ImmAPF(0.0f);
-  switch (RHS.getOpcode()) {
+  switch (ImmNode.getOpcode()) {
   case ARMISD::VMOVIMM:
   case ARMISD::VDUP: {
-    if (!isa<ConstantSDNode>(RHS.getOperand(0)))
+    if (!isa<ConstantSDNode>(ImmNode.getOperand(0)))
       return false;
-    unsigned Imm = RHS.getConstantOperandVal(0);
-    if (RHS.getOpcode() == ARMISD::VMOVIMM)
+    unsigned Imm = ImmNode.getConstantOperandVal(0);
+    if (ImmNode.getOpcode() == ARMISD::VMOVIMM)
       Imm = ARM_AM::decodeVMOVModImm(Imm, ScalarBits);
     ImmAPF =
         APFloat(ScalarBits == 32 ? APFloat::IEEEsingle() : APFloat::IEEEhalf(),
@@ -3198,24 +3202,26 @@ bool ARMDAGToDAGISel::tryFMULFixed(SDNode *N, SDLoc dl) {
     break;
   }
   case ARMISD::VMOVFPIMM: {
-    ImmAPF = APFloat(ARM_AM::getFPImmFloat(RHS.getConstantOperandVal(0)));
+    ImmAPF = APFloat(ARM_AM::getFPImmFloat(ImmNode.getConstantOperandVal(0)));
     break;
   }
   default:
     return false;
   }
 
-  // Multiplying by a factor of 2^(-n) will convert from fixed point to
-  // floating point, where n is the number of fractional bits in the fixed
-  // point number. Taking the inverse and log2 of the factor will give n
-  APFloat Inverse(0.0f);
-  if (!ImmAPF.getExactInverse(&Inverse))
-    return false;
-
+  // Where n is the number of fractional bits, multiplying by 2^n will convert
+  // from float to fixed and multiplying by 2^-n will convert from fixed to
+  // float. Taking log2 of the factor (after taking the inverse in the case of
+  // float to fixed) will give n.
+  APFloat ToConvert = ImmAPF;
+  if (FixedToFloat) {
+    if (!ImmAPF.getExactInverse(&ToConvert))
+      return false;
+  }
   APSInt Converted(64, 0);
   bool IsExact;
-  Inverse.convertToInteger(Converted, llvm::RoundingMode::NearestTiesToEven,
-                           &IsExact);
+  ToConvert.convertToInteger(Converted, llvm::RoundingMode::NearestTiesToEven,
+                             &IsExact);
   if (!IsExact || !Converted.isPowerOf2())
     return false;
 
@@ -3223,28 +3229,95 @@ bool ARMDAGToDAGISel::tryFMULFixed(SDNode *N, SDLoc dl) {
   if (FracBits > ScalarBits)
     return false;
 
-  auto SintToFpOperand = LHS.getOperand(0);
-  SmallVector<SDValue, 3> Ops{SintToFpOperand,
-                              CurDAG->getConstant(FracBits, dl, MVT::i32)};
-  AddEmptyMVEPredicateToOps(Ops, dl, Type);
+  SmallVector<SDValue, 3> Ops{
+      VecVal, CurDAG->getConstant(FracBits, SDLoc(N), MVT::i32)};
+  AddEmptyMVEPredicateToOps(Ops, SDLoc(N), Type);
 
   unsigned int Opcode;
   switch (ScalarBits) {
   case 16:
-    Opcode = IsUnsigned ? ARM::MVE_VCVTf16u16_fix : ARM::MVE_VCVTf16s16_fix;
+    if (FixedToFloat)
+      Opcode = IsUnsigned ? ARM::MVE_VCVTf16u16_fix : ARM::MVE_VCVTf16s16_fix;
+    else
+      Opcode = IsUnsigned ? ARM::MVE_VCVTu16f16_fix : ARM::MVE_VCVTs16f16_fix;
     break;
   case 32:
-    Opcode = IsUnsigned ? ARM::MVE_VCVTf32u32_fix : ARM::MVE_VCVTf32s32_fix;
+    if (FixedToFloat)
+      Opcode = IsUnsigned ? ARM::MVE_VCVTf32u32_fix : ARM::MVE_VCVTf32s32_fix;
+    else
+      Opcode = IsUnsigned ? ARM::MVE_VCVTu32f32_fix : ARM::MVE_VCVTs32f32_fix;
     break;
   default:
     llvm_unreachable("unexpected number of scalar bits");
     break;
   }
 
-  ReplaceNode(N, CurDAG->getMachineNode(Opcode, dl, Type, Ops));
+  ReplaceNode(N, CurDAG->getMachineNode(Opcode, SDLoc(N), Type, Ops));
   return true;
 }
 
+bool ARMDAGToDAGISel::tryFP_TO_INT(SDNode *N, SDLoc dl) {
+  // Transform a floating-point to fixed-point conversion to a VCVT
+  if (!Subtarget->hasMVEFloatOps())
+    return false;
+  EVT Type = N->getValueType(0);
+  if (!Type.isVector())
+    return false;
+  unsigned int ScalarBits = Type.getScalarSizeInBits();
+
+  bool IsUnsigned = N->getOpcode() == ISD::FP_TO_UINT;
+  SDNode *Node = N->getOperand(0).getNode();
+
+  // floating-point to fixed-point with one fractional bit gets turned into an
+  // FP_TO_[U|S]INT(FADD (x, x)) rather than an FP_TO_[U|S]INT(FMUL (x, y))
+  if (Node->getOpcode() == ISD::FADD) {
+    if (Node->getOperand(0) != Node->getOperand(1))
+      return false;
+    SDNodeFlags Flags = Node->getFlags();
+    // The fixed-point vcvt and vcvt+vmul are not always equivalent if inf is
+    // allowed in 16 bit unsigned floats
+    if (ScalarBits == 16 && !Flags.hasNoInfs() && IsUnsigned)
+      return false;
+
+    unsigned Opcode;
+    switch (ScalarBits) {
+    case 16:
+      Opcode = IsUnsigned ? ARM::MVE_VCVTu16f16_fix : ARM::MVE_VCVTs16f16_fix;
+      break;
+    case 32:
+      Opcode = IsUnsigned ? ARM::MVE_VCVTu32f32_fix : ARM::MVE_VCVTs32f32_fix;
+      break;
+    }
+    SmallVector<SDValue, 3> Ops{Node->getOperand(0),
+                                CurDAG->getConstant(1, dl, MVT::i32)};
+    AddEmptyMVEPredicateToOps(Ops, dl, Type);
+
+    ReplaceNode(N, CurDAG->getMachineNode(Opcode, dl, Type, Ops));
+    return true;
+  }
+
+  if (Node->getOpcode() != ISD::FMUL)
+    return false;
+
+  return transformFixedFloatingPointConversion(N, Node, IsUnsigned, false);
+}
+
+bool ARMDAGToDAGISel::tryFMULFixed(SDNode *N, SDLoc dl) {
+  // Transform a fixed-point to floating-point conversion to a VCVT
+  if (!Subtarget->hasMVEFloatOps())
+    return false;
+  auto Type = N->getValueType(0);
+  if (!Type.isVector())
+    return false;
+
+  auto LHS = N->getOperand(0);
+  if (LHS.getOpcode() != ISD::SINT_TO_FP && LHS.getOpcode() != ISD::UINT_TO_FP)
+    return false;
+
+  return transformFixedFloatingPointConversion(
+      N, N, LHS.getOpcode() == ISD::UINT_TO_FP, true);
+}
+
 bool ARMDAGToDAGISel::tryV6T2BitfieldExtractOp(SDNode *N, bool isSigned) {
   if (!Subtarget->hasV6T2Ops())
     return false;
@@ -3680,6 +3753,11 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
     if (tryV6T2BitfieldExtractOp(N, true))
       return;
     break;
+  case ISD::FP_TO_UINT:
+  case ISD::FP_TO_SINT:
+    if (tryFP_TO_INT(N, dl))
+      return;
+    break;
   case ISD::FMUL:
     if (tryFMULFixed(N, dl))
       return;

diff --git a/llvm/test/CodeGen/Thumb2/mve-vcvt-fixed.ll b/llvm/test/CodeGen/Thumb2/mve-vcvt-fixed-to-float.ll
similarity index 100%
rename from llvm/test/CodeGen/Thumb2/mve-vcvt-fixed.ll
rename to llvm/test/CodeGen/Thumb2/mve-vcvt-fixed-to-float.ll

diff --git a/llvm/test/CodeGen/Thumb2/mve-vcvt-float-to-fixed.ll b/llvm/test/CodeGen/Thumb2/mve-vcvt-float-to-fixed.ll
new file mode 100644
index 0000000000000..cab409891ca8b
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-vcvt-float-to-fixed.ll
@@ -0,0 +1,1026 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi %s -o - -mattr=+mve.fp | FileCheck %s
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_1(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_1:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #1
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_2(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_2:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #2
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 4.000000e+00, float 4.000000e+00, float 4.000000e+00, float 4.000000e+00>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_3(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_3:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #3
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 8.000000e+00, float 8.000000e+00, float 8.000000e+00, float 8.000000e+00>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_4(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_4:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #4
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 1.600000e+01, float 1.600000e+01, float 1.600000e+01, float 1.600000e+01>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_5(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_5:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #5
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 3.200000e+01, float 3.200000e+01, float 3.200000e+01, float 3.200000e+01>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_6(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_6:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #6
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 6.400000e+01, float 6.400000e+01, float 6.400000e+01, float 6.400000e+01>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_7(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_7:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #7
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 1.280000e+02, float 1.280000e+02, float 1.280000e+02, float 1.280000e+02>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_8(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #8
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 2.560000e+02, float 2.560000e+02, float 2.560000e+02, float 2.560000e+02>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_9(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_9:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #9
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 5.120000e+02, float 5.120000e+02, float 5.120000e+02, float 5.120000e+02>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_10(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_10:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #10
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 1.024000e+03, float 1.024000e+03, float 1.024000e+03, float 1.024000e+03>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_11(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_11:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #11
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 2.048000e+03, float 2.048000e+03, float 2.048000e+03, float 2.048000e+03>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_12(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_12:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #12
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 4.096000e+03, float 4.096000e+03, float 4.096000e+03, float 4.096000e+03>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_13(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_13:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #13
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 8.192000e+03, float 8.192000e+03, float 8.192000e+03, float 8.192000e+03>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_14(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_14:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #14
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 1.638400e+04, float 1.638400e+04, float 1.638400e+04, float 1.638400e+04>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_15(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_15:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #15
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 3.276800e+04, float 3.276800e+04, float 3.276800e+04, float 3.276800e+04>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_16(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #16
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 6.553600e+04, float 6.553600e+04, float 6.553600e+04, float 6.553600e+04>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_17(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_17:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #17
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 1.310720e+05, float 1.310720e+05, float 1.310720e+05, float 1.310720e+05>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_18(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_18:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #18
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 2.621440e+05, float 2.621440e+05, float 2.621440e+05, float 2.621440e+05>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_19(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_19:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #19
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 5.242880e+05, float 5.242880e+05, float 5.242880e+05, float 5.242880e+05>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_20(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_20:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #20
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 0x4130000000000000, float 0x4130000000000000, float 0x4130000000000000, float 0x4130000000000000>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_21(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_21:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #21
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 0x4140000000000000, float 0x4140000000000000, float 0x4140000000000000, float 0x4140000000000000>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_22(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_22:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #22
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 0x4150000000000000, float 0x4150000000000000, float 0x4150000000000000, float 0x4150000000000000>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_23(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_23:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #23
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 0x4160000000000000, float 0x4160000000000000, float 0x4160000000000000, float 0x4160000000000000>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_24(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_24:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #24
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 0x4170000000000000, float 0x4170000000000000, float 0x4170000000000000, float 0x4170000000000000>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_25(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_25:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #25
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 0x4180000000000000, float 0x4180000000000000, float 0x4180000000000000, float 0x4180000000000000>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_26(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_26:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #26
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 0x4190000000000000, float 0x4190000000000000, float 0x4190000000000000, float 0x4190000000000000>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_27(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_27:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #27
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 0x41A0000000000000, float 0x41A0000000000000, float 0x41A0000000000000, float 0x41A0000000000000>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_28(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_28:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #28
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 0x41B0000000000000, float 0x41B0000000000000, float 0x41B0000000000000, float 0x41B0000000000000>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_29(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_29:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #29
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 0x41C0000000000000, float 0x41C0000000000000, float 0x41C0000000000000, float 0x41C0000000000000>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_30(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_30:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #30
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 0x41D0000000000000, float 0x41D0000000000000, float 0x41D0000000000000, float 0x41D0000000000000>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_31(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_31:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #31
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 0x41E0000000000000, float 0x41E0000000000000, float 0x41E0000000000000, float 0x41E0000000000000>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_32(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0, #32
+; CHECK-NEXT:    bx lr
+  %2 = fmul <4 x float> %0, <float 0x41F0000000000000, float 0x41F0000000000000, float 0x41F0000000000000, float 0x41F0000000000000>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_i32_33(<4 x float> %0) {
+; CHECK-LABEL: vcvt_i32_33:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.i32 q1, #0x50000000
+; CHECK-NEXT:    vmul.f32 q0, q0, q1
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0
+; CHECK-NEXT:    bx lr
+  %2 = fmul <4 x float> %0, <float 0x4200000000000000, float 0x4200000000000000, float 0x4200000000000000, float 0x4200000000000000>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_i16_1(<8 x half> %0) {
+; CHECK-LABEL: vcvt_i16_1:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s16.f16 q0, q0, #1
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH4000, half 0xH4000, half 0xH4000, half 0xH4000, half 0xH4000, half 0xH4000, half 0xH4000, half 0xH4000>
+  %3 = fptosi <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_i16_2(<8 x half> %0) {
+; CHECK-LABEL: vcvt_i16_2:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s16.f16 q0, q0, #2
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH4400, half 0xH4400, half 0xH4400, half 0xH4400, half 0xH4400, half 0xH4400, half 0xH4400, half 0xH4400>
+  %3 = fptosi <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_i16_3(<8 x half> %0) {
+; CHECK-LABEL: vcvt_i16_3:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s16.f16 q0, q0, #3
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH4800, half 0xH4800, half 0xH4800, half 0xH4800, half 0xH4800, half 0xH4800, half 0xH4800, half 0xH4800>
+  %3 = fptosi <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_i16_4(<8 x half> %0) {
+; CHECK-LABEL: vcvt_i16_4:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s16.f16 q0, q0, #4
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH4C00, half 0xH4C00, half 0xH4C00, half 0xH4C00, half 0xH4C00, half 0xH4C00, half 0xH4C00, half 0xH4C00>
+  %3 = fptosi <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_i16_5(<8 x half> %0) {
+; CHECK-LABEL: vcvt_i16_5:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s16.f16 q0, q0, #5
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH5000, half 0xH5000, half 0xH5000, half 0xH5000, half 0xH5000, half 0xH5000, half 0xH5000, half 0xH5000>
+  %3 = fptosi <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_i16_6(<8 x half> %0) {
+; CHECK-LABEL: vcvt_i16_6:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s16.f16 q0, q0, #6
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH5400, half 0xH5400, half 0xH5400, half 0xH5400, half 0xH5400, half 0xH5400, half 0xH5400, half 0xH5400>
+  %3 = fptosi <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_i16_7(<8 x half> %0) {
+; CHECK-LABEL: vcvt_i16_7:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s16.f16 q0, q0, #7
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH5800, half 0xH5800, half 0xH5800, half 0xH5800, half 0xH5800, half 0xH5800, half 0xH5800, half 0xH5800>
+  %3 = fptosi <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_i16_8(<8 x half> %0) {
+; CHECK-LABEL: vcvt_i16_8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s16.f16 q0, q0, #8
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH5C00, half 0xH5C00, half 0xH5C00, half 0xH5C00, half 0xH5C00, half 0xH5C00, half 0xH5C00, half 0xH5C00>
+  %3 = fptosi <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_i16_9(<8 x half> %0) {
+; CHECK-LABEL: vcvt_i16_9:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s16.f16 q0, q0, #9
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH6000, half 0xH6000, half 0xH6000, half 0xH6000, half 0xH6000, half 0xH6000, half 0xH6000, half 0xH6000>
+  %3 = fptosi <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_i16_10(<8 x half> %0) {
+; CHECK-LABEL: vcvt_i16_10:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s16.f16 q0, q0, #10
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH6400, half 0xH6400, half 0xH6400, half 0xH6400, half 0xH6400, half 0xH6400, half 0xH6400, half 0xH6400>
+  %3 = fptosi <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_i16_11(<8 x half> %0) {
+; CHECK-LABEL: vcvt_i16_11:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s16.f16 q0, q0, #11
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH6800, half 0xH6800, half 0xH6800, half 0xH6800, half 0xH6800, half 0xH6800, half 0xH6800, half 0xH6800>
+  %3 = fptosi <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_i16_12(<8 x half> %0) {
+; CHECK-LABEL: vcvt_i16_12:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s16.f16 q0, q0, #12
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH6C00, half 0xH6C00, half 0xH6C00, half 0xH6C00, half 0xH6C00, half 0xH6C00, half 0xH6C00, half 0xH6C00>
+  %3 = fptosi <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_i16_13(<8 x half> %0) {
+; CHECK-LABEL: vcvt_i16_13:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s16.f16 q0, q0, #13
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH7000, half 0xH7000, half 0xH7000, half 0xH7000, half 0xH7000, half 0xH7000, half 0xH7000, half 0xH7000>
+  %3 = fptosi <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_i16_14(<8 x half> %0) {
+; CHECK-LABEL: vcvt_i16_14:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s16.f16 q0, q0, #14
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH7400, half 0xH7400, half 0xH7400, half 0xH7400, half 0xH7400, half 0xH7400, half 0xH7400, half 0xH7400>
+  %3 = fptosi <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_i16_15(<8 x half> %0) {
+; CHECK-LABEL: vcvt_i16_15:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s16.f16 q0, q0, #15
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH7800, half 0xH7800, half 0xH7800, half 0xH7800, half 0xH7800, half 0xH7800, half 0xH7800, half 0xH7800>
+  %3 = fptosi <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_1(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_1:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #1
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_2(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_2:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #2
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 4.000000e+00, float 4.000000e+00, float 4.000000e+00, float 4.000000e+00>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_3(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_3:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #3
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 8.000000e+00, float 8.000000e+00, float 8.000000e+00, float 8.000000e+00>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_4(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_4:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #4
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 1.600000e+01, float 1.600000e+01, float 1.600000e+01, float 1.600000e+01>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_5(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_5:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #5
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 3.200000e+01, float 3.200000e+01, float 3.200000e+01, float 3.200000e+01>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_6(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_6:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #6
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 6.400000e+01, float 6.400000e+01, float 6.400000e+01, float 6.400000e+01>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_7(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_7:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #7
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 1.280000e+02, float 1.280000e+02, float 1.280000e+02, float 1.280000e+02>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_8(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #8
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 2.560000e+02, float 2.560000e+02, float 2.560000e+02, float 2.560000e+02>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_9(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_9:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #9
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 5.120000e+02, float 5.120000e+02, float 5.120000e+02, float 5.120000e+02>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_10(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_10:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #10
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 1.024000e+03, float 1.024000e+03, float 1.024000e+03, float 1.024000e+03>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_11(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_11:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #11
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 2.048000e+03, float 2.048000e+03, float 2.048000e+03, float 2.048000e+03>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_12(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_12:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #12
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 4.096000e+03, float 4.096000e+03, float 4.096000e+03, float 4.096000e+03>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_13(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_13:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #13
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 8.192000e+03, float 8.192000e+03, float 8.192000e+03, float 8.192000e+03>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_14(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_14:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #14
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 1.638400e+04, float 1.638400e+04, float 1.638400e+04, float 1.638400e+04>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_15(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_15:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #15
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 3.276800e+04, float 3.276800e+04, float 3.276800e+04, float 3.276800e+04>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_16(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #16
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 6.553600e+04, float 6.553600e+04, float 6.553600e+04, float 6.553600e+04>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_17(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_17:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #17
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 1.310720e+05, float 1.310720e+05, float 1.310720e+05, float 1.310720e+05>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_18(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_18:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #18
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 2.621440e+05, float 2.621440e+05, float 2.621440e+05, float 2.621440e+05>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_19(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_19:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #19
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 5.242880e+05, float 5.242880e+05, float 5.242880e+05, float 5.242880e+05>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_20(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_20:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #20
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 0x4130000000000000, float 0x4130000000000000, float 0x4130000000000000, float 0x4130000000000000>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_21(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_21:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #21
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 0x4140000000000000, float 0x4140000000000000, float 0x4140000000000000, float 0x4140000000000000>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_22(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_22:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #22
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 0x4150000000000000, float 0x4150000000000000, float 0x4150000000000000, float 0x4150000000000000>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_23(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_23:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #23
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 0x4160000000000000, float 0x4160000000000000, float 0x4160000000000000, float 0x4160000000000000>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_24(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_24:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #24
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 0x4170000000000000, float 0x4170000000000000, float 0x4170000000000000, float 0x4170000000000000>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_25(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_25:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #25
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 0x4180000000000000, float 0x4180000000000000, float 0x4180000000000000, float 0x4180000000000000>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_26(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_26:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #26
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 0x4190000000000000, float 0x4190000000000000, float 0x4190000000000000, float 0x4190000000000000>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_27(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_27:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #27
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 0x41A0000000000000, float 0x41A0000000000000, float 0x41A0000000000000, float 0x41A0000000000000>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_28(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_28:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #28
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 0x41B0000000000000, float 0x41B0000000000000, float 0x41B0000000000000, float 0x41B0000000000000>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_29(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_29:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #29
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 0x41C0000000000000, float 0x41C0000000000000, float 0x41C0000000000000, float 0x41C0000000000000>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_30(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_30:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #30
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 0x41D0000000000000, float 0x41D0000000000000, float 0x41D0000000000000, float 0x41D0000000000000>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_31(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_31:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #31
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <4 x float> %0, <float 0x41E0000000000000, float 0x41E0000000000000, float 0x41E0000000000000, float 0x41E0000000000000>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_32(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0, #32
+; CHECK-NEXT:    bx lr
+  %2 = fmul <4 x float> %0, <float 0x41F0000000000000, float 0x41F0000000000000, float 0x41F0000000000000, float 0x41F0000000000000>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_u32_33(<4 x float> %0) {
+; CHECK-LABEL: vcvt_u32_33:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.i32 q1, #0x50000000
+; CHECK-NEXT:    vmul.f32 q0, q0, q1
+; CHECK-NEXT:    vcvt.u32.f32 q0, q0
+; CHECK-NEXT:    bx lr
+  %2 = fmul <4 x float> %0, <float 0x4200000000000000, float 0x4200000000000000, float 0x4200000000000000, float 0x4200000000000000>
+  %3 = fptoui <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_u16_1(<8 x half> %0) {
+; CHECK-LABEL: vcvt_u16_1:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u16.f16 q0, q0, #1
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH4000, half 0xH4000, half 0xH4000, half 0xH4000, half 0xH4000, half 0xH4000, half 0xH4000, half 0xH4000>
+  %3 = fptoui <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_u16_2(<8 x half> %0) {
+; CHECK-LABEL: vcvt_u16_2:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u16.f16 q0, q0, #2
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH4400, half 0xH4400, half 0xH4400, half 0xH4400, half 0xH4400, half 0xH4400, half 0xH4400, half 0xH4400>
+  %3 = fptoui <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_u16_3(<8 x half> %0) {
+; CHECK-LABEL: vcvt_u16_3:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u16.f16 q0, q0, #3
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH4800, half 0xH4800, half 0xH4800, half 0xH4800, half 0xH4800, half 0xH4800, half 0xH4800, half 0xH4800>
+  %3 = fptoui <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_u16_4(<8 x half> %0) {
+; CHECK-LABEL: vcvt_u16_4:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u16.f16 q0, q0, #4
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH4C00, half 0xH4C00, half 0xH4C00, half 0xH4C00, half 0xH4C00, half 0xH4C00, half 0xH4C00, half 0xH4C00>
+  %3 = fptoui <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_u16_5(<8 x half> %0) {
+; CHECK-LABEL: vcvt_u16_5:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u16.f16 q0, q0, #5
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH5000, half 0xH5000, half 0xH5000, half 0xH5000, half 0xH5000, half 0xH5000, half 0xH5000, half 0xH5000>
+  %3 = fptoui <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_u16_6(<8 x half> %0) {
+; CHECK-LABEL: vcvt_u16_6:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u16.f16 q0, q0, #6
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH5400, half 0xH5400, half 0xH5400, half 0xH5400, half 0xH5400, half 0xH5400, half 0xH5400, half 0xH5400>
+  %3 = fptoui <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_u16_7(<8 x half> %0) {
+; CHECK-LABEL: vcvt_u16_7:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u16.f16 q0, q0, #7
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH5800, half 0xH5800, half 0xH5800, half 0xH5800, half 0xH5800, half 0xH5800, half 0xH5800, half 0xH5800>
+  %3 = fptoui <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_u16_8(<8 x half> %0) {
+; CHECK-LABEL: vcvt_u16_8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u16.f16 q0, q0, #8
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH5C00, half 0xH5C00, half 0xH5C00, half 0xH5C00, half 0xH5C00, half 0xH5C00, half 0xH5C00, half 0xH5C00>
+  %3 = fptoui <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_u16_9(<8 x half> %0) {
+; CHECK-LABEL: vcvt_u16_9:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u16.f16 q0, q0, #9
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH6000, half 0xH6000, half 0xH6000, half 0xH6000, half 0xH6000, half 0xH6000, half 0xH6000, half 0xH6000>
+  %3 = fptoui <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_u16_10(<8 x half> %0) {
+; CHECK-LABEL: vcvt_u16_10:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u16.f16 q0, q0, #10
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH6400, half 0xH6400, half 0xH6400, half 0xH6400, half 0xH6400, half 0xH6400, half 0xH6400, half 0xH6400>
+  %3 = fptoui <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_u16_11(<8 x half> %0) {
+; CHECK-LABEL: vcvt_u16_11:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u16.f16 q0, q0, #11
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH6800, half 0xH6800, half 0xH6800, half 0xH6800, half 0xH6800, half 0xH6800, half 0xH6800, half 0xH6800>
+  %3 = fptoui <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_u16_12(<8 x half> %0) {
+; CHECK-LABEL: vcvt_u16_12:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u16.f16 q0, q0, #12
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH6C00, half 0xH6C00, half 0xH6C00, half 0xH6C00, half 0xH6C00, half 0xH6C00, half 0xH6C00, half 0xH6C00>
+  %3 = fptoui <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_u16_13(<8 x half> %0) {
+; CHECK-LABEL: vcvt_u16_13:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u16.f16 q0, q0, #13
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH7000, half 0xH7000, half 0xH7000, half 0xH7000, half 0xH7000, half 0xH7000, half 0xH7000, half 0xH7000>
+  %3 = fptoui <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_u16_14(<8 x half> %0) {
+; CHECK-LABEL: vcvt_u16_14:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u16.f16 q0, q0, #14
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH7400, half 0xH7400, half 0xH7400, half 0xH7400, half 0xH7400, half 0xH7400, half 0xH7400, half 0xH7400>
+  %3 = fptoui <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_u16_15(<8 x half> %0) {
+; CHECK-LABEL: vcvt_u16_15:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u16.f16 q0, q0, #15
+; CHECK-NEXT:    bx lr
+  %2 = fmul fast <8 x half> %0, <half 0xH7800, half 0xH7800, half 0xH7800, half 0xH7800, half 0xH7800, half 0xH7800, half 0xH7800, half 0xH7800>
+  %3 = fptoui <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_u16_inf(<8 x half> %0) {
+; CHECK-LABEL: vcvt_u16_inf:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.i16 q1, #0x7800
+; CHECK-NEXT:    vmul.f16 q0, q0, q1
+; CHECK-NEXT:    vcvt.u16.f16 q0, q0
+; CHECK-NEXT:    bx lr
+  %2 = fmul <8 x half> %0, <half 0xH7800, half 0xH7800, half 0xH7800, half 0xH7800, half 0xH7800, half 0xH7800, half 0xH7800, half 0xH7800>
+  %3 = fptoui <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+define arm_aapcs_vfpcc <8 x i16> @vcvt_s16_inf(<8 x half> %0) {
+; CHECK-LABEL: vcvt_s16_inf:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s16.f16 q0, q0, #15
+; CHECK-NEXT:    bx lr
+  %2 = fmul <8 x half> %0, <half 0xH7800, half 0xH7800, half 0xH7800, half 0xH7800, half 0xH7800, half 0xH7800, half 0xH7800, half 0xH7800>
+  %3 = fptosi <8 x half> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_bad_imm(<4 x float> %0) {
+; CHECK-LABEL: vcvt_bad_imm:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movw r0, #2048
+; CHECK-NEXT:    movt r0, #15104
+; CHECK-NEXT:    vmul.f32 q0, q0, r0
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0
+; CHECK-NEXT:    bx lr
+  %2 = fmul <4 x float> %0, <float 0x3F60010000000000, float 0x3F60010000000000, float 0x3F60010000000000, float 0x3F60010000000000>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_negative(<4 x float> %0) {
+; CHECK-LABEL: vcvt_negative:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.i32 q1, #0xb8000000
+; CHECK-NEXT:    vmul.f32 q0, q0, q1
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0
+; CHECK-NEXT:    bx lr
+  %2 = fmul <4 x float> %0, <float 0xBF00000000000000, float 0xBF00000000000000, float 0xBF00000000000000, float 0xBF00000000000000>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcvt_negative2(<4 x float> %0) {
+; CHECK-LABEL: vcvt_negative2:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.i32 q1, #0xb0000000
+; CHECK-NEXT:    vmul.f32 q0, q0, q1
+; CHECK-NEXT:    vcvt.s32.f32 q0, q0
+; CHECK-NEXT:    bx lr
+  %2 = fmul <4 x float> %0, <float 0xBE00000000000000, float 0xBE00000000000000, float 0xBE00000000000000, float 0xBE00000000000000>
+  %3 = fptosi <4 x float> %2 to <4 x i32>
+  ret <4 x i32> %3
+}


        


More information about the llvm-commits mailing list