[llvm] f37e132 - [ARM] Add VFP lowering for fptosi.sat
David Green via llvm-commits
llvm-commits at lists.llvm.org
Fri Sep 3 10:11:18 PDT 2021
Author: David Green
Date: 2021-09-03T18:11:08+01:00
New Revision: f37e132263e427860cc89fc167419e8b8a8a17fd
URL: https://github.com/llvm/llvm-project/commit/f37e132263e427860cc89fc167419e8b8a8a17fd
DIFF: https://github.com/llvm/llvm-project/commit/f37e132263e427860cc89fc167419e8b8a8a17fd.diff
LOG: [ARM] Add VFP lowering for fptosi.sat
This extends D107865 to the VFP instructions, lowering llvm.fptosi.sat
and llvm.fptoui.sat to VCVT instructions that inherently perform the
saturation.
Differential Revision: https://reviews.llvm.org/D107866
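As a quick illustration of the effect (taken directly from the updated
fptosi-sat-scalar.ll test below), a scalar saturating conversion such as

    declare i32 @llvm.fptosi.sat.i32.f32(float)

    define i32 @test_signed_i32_f32(float %f) nounwind {
      %x = call i32 @llvm.fptosi.sat.i32.f32(float %f)
      ret i32 %x
    }

now selects with VFP to a single saturating VCVT plus a register move:

    vmov s0, r0
    vcvt.s32.f32 s0, s0
    vmov r0, s0
    bx lr

replacing the previous sequence of VCVT followed by constant-pool loads,
VCMP/VMRS compares and predicated moves to clamp the result.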
Added:
Modified:
llvm/lib/Target/ARM/ARMISelLowering.cpp
llvm/lib/Target/ARM/ARMInstrVFP.td
llvm/test/CodeGen/ARM/fptoi-sat-store.ll
llvm/test/CodeGen/ARM/fptosi-sat-scalar.ll
llvm/test/CodeGen/ARM/fptoui-sat-scalar.ll
llvm/test/CodeGen/Thumb2/mve-fptosi-sat-vector.ll
llvm/test/CodeGen/Thumb2/mve-fptoui-sat-vector.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index efdae2c7117df..08994019da796 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -747,6 +747,12 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
Subtarget->hasFPRegs()) {
addRegisterClass(MVT::f32, &ARM::SPRRegClass);
addRegisterClass(MVT::f64, &ARM::DPRRegClass);
+
+ setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i32, Custom);
+ setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i32, Custom);
+ setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom);
+ setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom);
+
if (!Subtarget->hasVFP2Base())
setAllExpand(MVT::f32);
if (!Subtarget->hasFP64())
@@ -5824,14 +5830,25 @@ SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
return Op;
}
-static SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) {
+static SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
+ const ARMSubtarget *Subtarget) {
EVT VT = Op.getValueType();
EVT ToVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
EVT FromVT = Op.getOperand(0).getValueType();
- if (VT == MVT::v4i32 && ToVT == MVT::i32 && FromVT == MVT::v4f32)
+ if (VT == MVT::i32 && ToVT == MVT::i32 && FromVT == MVT::f32)
+ return Op;
+ if (VT == MVT::i32 && ToVT == MVT::i32 && FromVT == MVT::f64 &&
+ Subtarget->hasFP64())
return Op;
- if (VT == MVT::v8i16 && ToVT == MVT::i16 && FromVT == MVT::v8f16)
+ if (VT == MVT::i32 && ToVT == MVT::i32 && FromVT == MVT::f16 &&
+ Subtarget->hasFullFP16())
+ return Op;
+ if (VT == MVT::v4i32 && ToVT == MVT::i32 && FromVT == MVT::v4f32 &&
+ Subtarget->hasMVEFloatOps())
+ return Op;
+ if (VT == MVT::v8i16 && ToVT == MVT::i16 && FromVT == MVT::v8f16 &&
+ Subtarget->hasMVEFloatOps())
return Op;
if (FromVT != MVT::v4f32 && FromVT != MVT::v8f16)
@@ -10194,7 +10211,7 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
case ISD::FP_TO_SINT_SAT:
- case ISD::FP_TO_UINT_SAT: return LowerFP_TO_INT_SAT(Op, DAG);
+ case ISD::FP_TO_UINT_SAT: return LowerFP_TO_INT_SAT(Op, DAG, Subtarget);
case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
@@ -10381,6 +10398,10 @@ void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
case ISD::ZERO_EXTEND:
Res = LowerVectorExtend(N, DAG, Subtarget);
break;
+ case ISD::FP_TO_SINT_SAT:
+ case ISD::FP_TO_UINT_SAT:
+ Res = LowerFP_TO_INT_SAT(SDValue(N, 0), DAG, Subtarget);
+ break;
}
if (Res.getNode())
Results.push_back(Res);
diff --git a/llvm/lib/Target/ARM/ARMInstrVFP.td b/llvm/lib/Target/ARM/ARMInstrVFP.td
index bcd6433a579b4..30ded00ff0b4c 100644
--- a/llvm/lib/Target/ARM/ARMInstrVFP.td
+++ b/llvm/lib/Target/ARM/ARMInstrVFP.td
@@ -1600,6 +1600,8 @@ def VTOSIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
let Predicates=[HasVFP2, HasDPVFP] in {
def : VFPPat<(i32 (fp_to_sint (f64 DPR:$a))),
(COPY_TO_REGCLASS (VTOSIZD DPR:$a), GPR)>;
+ def : VFPPat<(i32 (fp_to_sint_sat (f64 DPR:$a), i32)),
+ (COPY_TO_REGCLASS (VTOSIZD DPR:$a), GPR)>;
def : VFPPat<(alignedstore32 (i32 (fp_to_sint (f64 DPR:$a))), addrmode5:$ptr),
(VSTRS (VTOSIZD DPR:$a), addrmode5:$ptr)>;
@@ -1619,6 +1621,8 @@ def VTOSIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
def : VFPNoNEONPat<(i32 (fp_to_sint SPR:$a)),
(COPY_TO_REGCLASS (VTOSIZS SPR:$a), GPR)>;
+def : VFPPat<(i32 (fp_to_sint_sat SPR:$a, i32)),
+ (COPY_TO_REGCLASS (VTOSIZS SPR:$a), GPR)>;
def : VFPNoNEONPat<(alignedstore32 (i32 (fp_to_sint (f32 SPR:$a))),
addrmode5:$ptr),
@@ -1635,6 +1639,8 @@ def VTOSIZH : AVConv1IsH_Encode<0b11101, 0b11, 0b1101, 0b1001,
def : VFPNoNEONPat<(i32 (fp_to_sint (f16 HPR:$a))),
(COPY_TO_REGCLASS (VTOSIZH (f16 HPR:$a)), GPR)>;
+def : VFPPat<(i32 (fp_to_sint_sat (f16 HPR:$a), i32)),
+ (COPY_TO_REGCLASS (VTOSIZH (f16 HPR:$a)), GPR)>;
def VTOUIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
(outs SPR:$Sd), (ins DPR:$Dm),
@@ -1647,6 +1653,8 @@ def VTOUIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
let Predicates=[HasVFP2, HasDPVFP] in {
def : VFPPat<(i32 (fp_to_uint (f64 DPR:$a))),
(COPY_TO_REGCLASS (VTOUIZD DPR:$a), GPR)>;
+ def : VFPPat<(i32 (fp_to_uint_sat (f64 DPR:$a), i32)),
+ (COPY_TO_REGCLASS (VTOUIZD DPR:$a), GPR)>;
def : VFPPat<(alignedstore32 (i32 (fp_to_uint (f64 DPR:$a))), addrmode5:$ptr),
(VSTRS (VTOUIZD DPR:$a), addrmode5:$ptr)>;
@@ -1666,6 +1674,8 @@ def VTOUIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
def : VFPNoNEONPat<(i32 (fp_to_uint SPR:$a)),
(COPY_TO_REGCLASS (VTOUIZS SPR:$a), GPR)>;
+def : VFPPat<(i32 (fp_to_uint_sat SPR:$a, i32)),
+ (COPY_TO_REGCLASS (VTOUIZS SPR:$a), GPR)>;
def : VFPNoNEONPat<(alignedstore32 (i32 (fp_to_uint (f32 SPR:$a))),
addrmode5:$ptr),
@@ -1682,6 +1692,8 @@ def VTOUIZH : AVConv1IsH_Encode<0b11101, 0b11, 0b1100, 0b1001,
def : VFPNoNEONPat<(i32 (fp_to_uint (f16 HPR:$a))),
(COPY_TO_REGCLASS (VTOUIZH (f16 HPR:$a)), GPR)>;
+def : VFPPat<(i32 (fp_to_uint_sat (f16 HPR:$a), i32)),
+ (COPY_TO_REGCLASS (VTOUIZH (f16 HPR:$a)), GPR)>;
// And the Z bit '0' variants, i.e. use the rounding mode specified by FPSCR.
let Uses = [FPSCR] in {
diff --git a/llvm/test/CodeGen/ARM/fptoi-sat-store.ll b/llvm/test/CodeGen/ARM/fptoi-sat-store.ll
index 3a6386d8f890b..a17aafb4155f7 100644
--- a/llvm/test/CodeGen/ARM/fptoi-sat-store.ll
+++ b/llvm/test/CodeGen/ARM/fptoi-sat-store.ll
@@ -62,30 +62,10 @@ define void @test_signed_i32_f32(i32* %d, float %f) nounwind {
; VFP-LABEL: test_signed_i32_f32:
; VFP: @ %bb.0:
; VFP-NEXT: vmov s0, r1
-; VFP-NEXT: vldr s2, .LCPI0_0
-; VFP-NEXT: vldr s6, .LCPI0_1
-; VFP-NEXT: vcvt.s32.f32 s4, s0
-; VFP-NEXT: vcmp.f32 s0, s2
-; VFP-NEXT: vmrs APSR_nzcv, fpscr
-; VFP-NEXT: vcmp.f32 s0, s6
-; VFP-NEXT: vmov r1, s4
-; VFP-NEXT: it lt
-; VFP-NEXT: movlt.w r1, #-2147483648
-; VFP-NEXT: vmrs APSR_nzcv, fpscr
-; VFP-NEXT: it gt
-; VFP-NEXT: mvngt r1, #-2147483648
-; VFP-NEXT: vcmp.f32 s0, s0
-; VFP-NEXT: vmrs APSR_nzcv, fpscr
-; VFP-NEXT: it vs
-; VFP-NEXT: movvs r1, #0
+; VFP-NEXT: vcvt.s32.f32 s0, s0
+; VFP-NEXT: vmov r1, s0
; VFP-NEXT: str r1, [r0]
; VFP-NEXT: bx lr
-; VFP-NEXT: .p2align 2
-; VFP-NEXT: @ %bb.1:
-; VFP-NEXT: .LCPI0_0:
-; VFP-NEXT: .long 0xcf000000 @ float -2.14748365E+9
-; VFP-NEXT: .LCPI0_1:
-; VFP-NEXT: .long 0x4effffff @ float 2.14748352E+9
%r = call i32 @llvm.fptosi.sat.i32.f32(float %f)
store i32 %r, i32* %d, align 4
ret void
@@ -160,56 +140,18 @@ define void @test_signed_i32_f64(i32* %d, double %f) nounwind {
; VFP2-LABEL: test_signed_i32_f64:
; VFP2: @ %bb.0:
; VFP2-NEXT: vmov d16, r2, r3
-; VFP2-NEXT: vldr d17, .LCPI1_0
-; VFP2-NEXT: vldr d18, .LCPI1_1
; VFP2-NEXT: vcvt.s32.f64 s0, d16
-; VFP2-NEXT: vcmp.f64 d16, d17
-; VFP2-NEXT: vmrs APSR_nzcv, fpscr
; VFP2-NEXT: vmov r1, s0
-; VFP2-NEXT: vcmp.f64 d16, d18
-; VFP2-NEXT: it lt
-; VFP2-NEXT: movlt.w r1, #-2147483648
-; VFP2-NEXT: vmrs APSR_nzcv, fpscr
-; VFP2-NEXT: it gt
-; VFP2-NEXT: mvngt r1, #-2147483648
-; VFP2-NEXT: vcmp.f64 d16, d16
-; VFP2-NEXT: vmrs APSR_nzcv, fpscr
-; VFP2-NEXT: it vs
-; VFP2-NEXT: movvs r1, #0
; VFP2-NEXT: str r1, [r0]
; VFP2-NEXT: bx lr
-; VFP2-NEXT: .p2align 3
-; VFP2-NEXT: @ %bb.1:
-; VFP2-NEXT: .LCPI1_0:
-; VFP2-NEXT: .long 0 @ double -2147483648
-; VFP2-NEXT: .long 3252682752
-; VFP2-NEXT: .LCPI1_1:
-; VFP2-NEXT: .long 4290772992 @ double 2147483647
-; VFP2-NEXT: .long 1105199103
;
; FP16-LABEL: test_signed_i32_f64:
; FP16: @ %bb.0:
-; FP16-NEXT: vldr d0, .LCPI1_0
-; FP16-NEXT: vmov d1, r2, r3
-; FP16-NEXT: vldr d2, .LCPI1_1
-; FP16-NEXT: vmaxnm.f64 d0, d1, d0
-; FP16-NEXT: vcmp.f64 d1, d1
-; FP16-NEXT: vminnm.f64 d0, d0, d2
+; FP16-NEXT: vmov d0, r2, r3
; FP16-NEXT: vcvt.s32.f64 s0, d0
; FP16-NEXT: vmov r1, s0
-; FP16-NEXT: vmrs APSR_nzcv, fpscr
-; FP16-NEXT: it vs
-; FP16-NEXT: movvs r1, #0
; FP16-NEXT: str r1, [r0]
; FP16-NEXT: bx lr
-; FP16-NEXT: .p2align 3
-; FP16-NEXT: @ %bb.1:
-; FP16-NEXT: .LCPI1_0:
-; FP16-NEXT: .long 0 @ double -2147483648
-; FP16-NEXT: .long 3252682752
-; FP16-NEXT: .LCPI1_1:
-; FP16-NEXT: .long 4290772992 @ double 2147483647
-; FP16-NEXT: .long 1105199103
%r = call i32 @llvm.fptosi.sat.i32.f64(double %f)
store i32 %r, i32* %d, align 4
ret void
@@ -257,23 +199,10 @@ define void @test_unsigned_i32_f32(i32* %d, float %f) nounwind {
; VFP-LABEL: test_unsigned_i32_f32:
; VFP: @ %bb.0:
; VFP-NEXT: vmov s0, r1
-; VFP-NEXT: vldr s4, .LCPI2_0
-; VFP-NEXT: vcvt.u32.f32 s2, s0
-; VFP-NEXT: vcmp.f32 s0, #0
-; VFP-NEXT: vmrs APSR_nzcv, fpscr
-; VFP-NEXT: vcmp.f32 s0, s4
-; VFP-NEXT: vmov r1, s2
-; VFP-NEXT: it lt
-; VFP-NEXT: movlt r1, #0
-; VFP-NEXT: vmrs APSR_nzcv, fpscr
-; VFP-NEXT: it gt
-; VFP-NEXT: movgt.w r1, #-1
+; VFP-NEXT: vcvt.u32.f32 s0, s0
+; VFP-NEXT: vmov r1, s0
; VFP-NEXT: str r1, [r0]
; VFP-NEXT: bx lr
-; VFP-NEXT: .p2align 2
-; VFP-NEXT: @ %bb.1:
-; VFP-NEXT: .LCPI2_0:
-; VFP-NEXT: .long 0x4f7fffff @ float 4.29496704E+9
%r = call i32 @llvm.fptoui.sat.i32.f32(float %f)
store i32 %r, i32* %d, align 4
ret void
@@ -330,43 +259,18 @@ define void @test_unsigned_i32_f64(i32* %d, double %f) nounwind {
; VFP2-LABEL: test_unsigned_i32_f64:
; VFP2: @ %bb.0:
; VFP2-NEXT: vmov d16, r2, r3
-; VFP2-NEXT: vldr d17, .LCPI3_0
-; VFP2-NEXT: vcmp.f64 d16, #0
; VFP2-NEXT: vcvt.u32.f64 s0, d16
-; VFP2-NEXT: vmrs APSR_nzcv, fpscr
; VFP2-NEXT: vmov r1, s0
-; VFP2-NEXT: vcmp.f64 d16, d17
-; VFP2-NEXT: it lt
-; VFP2-NEXT: movlt r1, #0
-; VFP2-NEXT: vmrs APSR_nzcv, fpscr
-; VFP2-NEXT: it gt
-; VFP2-NEXT: movgt.w r1, #-1
; VFP2-NEXT: str r1, [r0]
; VFP2-NEXT: bx lr
-; VFP2-NEXT: .p2align 3
-; VFP2-NEXT: @ %bb.1:
-; VFP2-NEXT: .LCPI3_0:
-; VFP2-NEXT: .long 4292870144 @ double 4294967295
-; VFP2-NEXT: .long 1106247679
;
; FP16-LABEL: test_unsigned_i32_f64:
; FP16: @ %bb.0:
-; FP16-NEXT: vldr d0, .LCPI3_0
-; FP16-NEXT: vmov d1, r2, r3
-; FP16-NEXT: vldr d2, .LCPI3_1
-; FP16-NEXT: vmaxnm.f64 d0, d1, d0
-; FP16-NEXT: vminnm.f64 d0, d0, d2
+; FP16-NEXT: vmov d0, r2, r3
; FP16-NEXT: vcvt.u32.f64 s0, d0
-; FP16-NEXT: vstr s0, [r0]
+; FP16-NEXT: vmov r1, s0
+; FP16-NEXT: str r1, [r0]
; FP16-NEXT: bx lr
-; FP16-NEXT: .p2align 3
-; FP16-NEXT: @ %bb.1:
-; FP16-NEXT: .LCPI3_0:
-; FP16-NEXT: .long 0 @ double 0
-; FP16-NEXT: .long 0
-; FP16-NEXT: .LCPI3_1:
-; FP16-NEXT: .long 4292870144 @ double 4294967295
-; FP16-NEXT: .long 1106247679
%r = call i32 @llvm.fptoui.sat.i32.f64(double %f)
store i32 %r, i32* %d, align 4
ret void
diff --git a/llvm/test/CodeGen/ARM/fptosi-sat-scalar.ll b/llvm/test/CodeGen/ARM/fptosi-sat-scalar.ll
index 97255c5503f69..bcbd88288c587 100644
--- a/llvm/test/CodeGen/ARM/fptosi-sat-scalar.ll
+++ b/llvm/test/CodeGen/ARM/fptosi-sat-scalar.ll
@@ -557,29 +557,9 @@ define i32 @test_signed_i32_f32(float %f) nounwind {
; VFP-LABEL: test_signed_i32_f32:
; VFP: @ %bb.0:
; VFP-NEXT: vmov s0, r0
-; VFP-NEXT: vldr s2, .LCPI5_0
-; VFP-NEXT: vldr s6, .LCPI5_1
-; VFP-NEXT: vcvt.s32.f32 s4, s0
-; VFP-NEXT: vcmp.f32 s0, s2
-; VFP-NEXT: vmrs APSR_nzcv, fpscr
-; VFP-NEXT: vcmp.f32 s0, s6
-; VFP-NEXT: vmov r0, s4
-; VFP-NEXT: it lt
-; VFP-NEXT: movlt.w r0, #-2147483648
-; VFP-NEXT: vmrs APSR_nzcv, fpscr
-; VFP-NEXT: it gt
-; VFP-NEXT: mvngt r0, #-2147483648
-; VFP-NEXT: vcmp.f32 s0, s0
-; VFP-NEXT: vmrs APSR_nzcv, fpscr
-; VFP-NEXT: it vs
-; VFP-NEXT: movvs r0, #0
+; VFP-NEXT: vcvt.s32.f32 s0, s0
+; VFP-NEXT: vmov r0, s0
; VFP-NEXT: bx lr
-; VFP-NEXT: .p2align 2
-; VFP-NEXT: @ %bb.1:
-; VFP-NEXT: .LCPI5_0:
-; VFP-NEXT: .long 0xcf000000 @ float -2.14748365E+9
-; VFP-NEXT: .LCPI5_1:
-; VFP-NEXT: .long 0x4effffff @ float 2.14748352E+9
%x = call i32 @llvm.fptosi.sat.i32.f32(float %f)
ret i32 %x
}
@@ -1892,54 +1872,16 @@ define i32 @test_signed_i32_f64(double %f) nounwind {
; VFP2-LABEL: test_signed_i32_f64:
; VFP2: @ %bb.0:
; VFP2-NEXT: vmov d16, r0, r1
-; VFP2-NEXT: vldr d17, .LCPI15_0
-; VFP2-NEXT: vldr d18, .LCPI15_1
; VFP2-NEXT: vcvt.s32.f64 s0, d16
-; VFP2-NEXT: vcmp.f64 d16, d17
-; VFP2-NEXT: vmrs APSR_nzcv, fpscr
; VFP2-NEXT: vmov r0, s0
-; VFP2-NEXT: vcmp.f64 d16, d18
-; VFP2-NEXT: it lt
-; VFP2-NEXT: movlt.w r0, #-2147483648
-; VFP2-NEXT: vmrs APSR_nzcv, fpscr
-; VFP2-NEXT: it gt
-; VFP2-NEXT: mvngt r0, #-2147483648
-; VFP2-NEXT: vcmp.f64 d16, d16
-; VFP2-NEXT: vmrs APSR_nzcv, fpscr
-; VFP2-NEXT: it vs
-; VFP2-NEXT: movvs r0, #0
; VFP2-NEXT: bx lr
-; VFP2-NEXT: .p2align 3
-; VFP2-NEXT: @ %bb.1:
-; VFP2-NEXT: .LCPI15_0:
-; VFP2-NEXT: .long 0 @ double -2147483648
-; VFP2-NEXT: .long 3252682752
-; VFP2-NEXT: .LCPI15_1:
-; VFP2-NEXT: .long 4290772992 @ double 2147483647
-; VFP2-NEXT: .long 1105199103
;
; FP16-LABEL: test_signed_i32_f64:
; FP16: @ %bb.0:
-; FP16-NEXT: vldr d0, .LCPI15_0
-; FP16-NEXT: vmov d1, r0, r1
-; FP16-NEXT: vldr d2, .LCPI15_1
-; FP16-NEXT: vmaxnm.f64 d0, d1, d0
-; FP16-NEXT: vcmp.f64 d1, d1
-; FP16-NEXT: vminnm.f64 d0, d0, d2
+; FP16-NEXT: vmov d0, r0, r1
; FP16-NEXT: vcvt.s32.f64 s0, d0
; FP16-NEXT: vmov r0, s0
-; FP16-NEXT: vmrs APSR_nzcv, fpscr
-; FP16-NEXT: it vs
-; FP16-NEXT: movvs r0, #0
; FP16-NEXT: bx lr
-; FP16-NEXT: .p2align 3
-; FP16-NEXT: @ %bb.1:
-; FP16-NEXT: .LCPI15_0:
-; FP16-NEXT: .long 0 @ double -2147483648
-; FP16-NEXT: .long 3252682752
-; FP16-NEXT: .LCPI15_1:
-; FP16-NEXT: .long 4290772992 @ double 2147483647
-; FP16-NEXT: .long 1105199103
%x = call i32 @llvm.fptosi.sat.i32.f64(double %f)
ret i32 %x
}
@@ -3520,57 +3462,16 @@ define i32 @test_signed_i32_f16(half %f) nounwind {
; VFP2-NEXT: push {r7, lr}
; VFP2-NEXT: bl __aeabi_h2f
; VFP2-NEXT: vmov s0, r0
-; VFP2-NEXT: vldr s2, .LCPI25_0
-; VFP2-NEXT: vldr s6, .LCPI25_1
-; VFP2-NEXT: vcvt.s32.f32 s4, s0
-; VFP2-NEXT: vcmp.f32 s0, s2
-; VFP2-NEXT: vmrs APSR_nzcv, fpscr
-; VFP2-NEXT: vcmp.f32 s0, s6
-; VFP2-NEXT: vmov r0, s4
-; VFP2-NEXT: it lt
-; VFP2-NEXT: movlt.w r0, #-2147483648
-; VFP2-NEXT: vmrs APSR_nzcv, fpscr
-; VFP2-NEXT: it gt
-; VFP2-NEXT: mvngt r0, #-2147483648
-; VFP2-NEXT: vcmp.f32 s0, s0
-; VFP2-NEXT: vmrs APSR_nzcv, fpscr
-; VFP2-NEXT: it vs
-; VFP2-NEXT: movvs r0, #0
+; VFP2-NEXT: vcvt.s32.f32 s0, s0
+; VFP2-NEXT: vmov r0, s0
; VFP2-NEXT: pop {r7, pc}
-; VFP2-NEXT: .p2align 2
-; VFP2-NEXT: @ %bb.1:
-; VFP2-NEXT: .LCPI25_0:
-; VFP2-NEXT: .long 0xcf000000 @ float -2.14748365E+9
-; VFP2-NEXT: .LCPI25_1:
-; VFP2-NEXT: .long 0x4effffff @ float 2.14748352E+9
;
; FP16-LABEL: test_signed_i32_f16:
; FP16: @ %bb.0:
; FP16-NEXT: vmov.f16 s0, r0
-; FP16-NEXT: vldr s2, .LCPI25_0
-; FP16-NEXT: vcvtb.f32.f16 s0, s0
-; FP16-NEXT: vldr s6, .LCPI25_1
-; FP16-NEXT: vcvt.s32.f32 s4, s0
-; FP16-NEXT: vcmp.f32 s0, s2
-; FP16-NEXT: vmrs APSR_nzcv, fpscr
-; FP16-NEXT: vcmp.f32 s0, s6
-; FP16-NEXT: vmov r0, s4
-; FP16-NEXT: it lt
-; FP16-NEXT: movlt.w r0, #-2147483648
-; FP16-NEXT: vmrs APSR_nzcv, fpscr
-; FP16-NEXT: it gt
-; FP16-NEXT: mvngt r0, #-2147483648
-; FP16-NEXT: vcmp.f32 s0, s0
-; FP16-NEXT: vmrs APSR_nzcv, fpscr
-; FP16-NEXT: it vs
-; FP16-NEXT: movvs r0, #0
+; FP16-NEXT: vcvt.s32.f16 s0, s0
+; FP16-NEXT: vmov r0, s0
; FP16-NEXT: bx lr
-; FP16-NEXT: .p2align 2
-; FP16-NEXT: @ %bb.1:
-; FP16-NEXT: .LCPI25_0:
-; FP16-NEXT: .long 0xcf000000 @ float -2.14748365E+9
-; FP16-NEXT: .LCPI25_1:
-; FP16-NEXT: .long 0x4effffff @ float 2.14748352E+9
%x = call i32 @llvm.fptosi.sat.i32.f16(half %f)
ret i32 %x
}
diff --git a/llvm/test/CodeGen/ARM/fptoui-sat-scalar.ll b/llvm/test/CodeGen/ARM/fptoui-sat-scalar.ll
index f547f8d3b97ef..0fc1eabe9b9f0 100644
--- a/llvm/test/CodeGen/ARM/fptoui-sat-scalar.ll
+++ b/llvm/test/CodeGen/ARM/fptoui-sat-scalar.ll
@@ -429,22 +429,9 @@ define i32 @test_signed_i32_f32(float %f) nounwind {
; VFP-LABEL: test_signed_i32_f32:
; VFP: @ %bb.0:
; VFP-NEXT: vmov s0, r0
-; VFP-NEXT: vldr s4, .LCPI5_0
-; VFP-NEXT: vcvt.u32.f32 s2, s0
-; VFP-NEXT: vcmp.f32 s0, #0
-; VFP-NEXT: vmrs APSR_nzcv, fpscr
-; VFP-NEXT: vcmp.f32 s0, s4
-; VFP-NEXT: vmov r0, s2
-; VFP-NEXT: it lt
-; VFP-NEXT: movlt r0, #0
-; VFP-NEXT: vmrs APSR_nzcv, fpscr
-; VFP-NEXT: it gt
-; VFP-NEXT: movgt.w r0, #-1
+; VFP-NEXT: vcvt.u32.f32 s0, s0
+; VFP-NEXT: vmov r0, s0
; VFP-NEXT: bx lr
-; VFP-NEXT: .p2align 2
-; VFP-NEXT: @ %bb.1:
-; VFP-NEXT: .LCPI5_0:
-; VFP-NEXT: .long 0x4f7fffff @ float 4.29496704E+9
%x = call i32 @llvm.fptoui.sat.i32.f32(float %f)
ret i32 %x
}
@@ -1425,42 +1412,16 @@ define i32 @test_signed_i32_f64(double %f) nounwind {
; VFP2-LABEL: test_signed_i32_f64:
; VFP2: @ %bb.0:
; VFP2-NEXT: vmov d16, r0, r1
-; VFP2-NEXT: vldr d17, .LCPI15_0
-; VFP2-NEXT: vcmp.f64 d16, #0
; VFP2-NEXT: vcvt.u32.f64 s0, d16
-; VFP2-NEXT: vmrs APSR_nzcv, fpscr
; VFP2-NEXT: vmov r0, s0
-; VFP2-NEXT: vcmp.f64 d16, d17
-; VFP2-NEXT: it lt
-; VFP2-NEXT: movlt r0, #0
-; VFP2-NEXT: vmrs APSR_nzcv, fpscr
-; VFP2-NEXT: it gt
-; VFP2-NEXT: movgt.w r0, #-1
; VFP2-NEXT: bx lr
-; VFP2-NEXT: .p2align 3
-; VFP2-NEXT: @ %bb.1:
-; VFP2-NEXT: .LCPI15_0:
-; VFP2-NEXT: .long 4292870144 @ double 4294967295
-; VFP2-NEXT: .long 1106247679
;
; FP16-LABEL: test_signed_i32_f64:
; FP16: @ %bb.0:
-; FP16-NEXT: vldr d0, .LCPI15_0
-; FP16-NEXT: vmov d1, r0, r1
-; FP16-NEXT: vldr d2, .LCPI15_1
-; FP16-NEXT: vmaxnm.f64 d0, d1, d0
-; FP16-NEXT: vminnm.f64 d0, d0, d2
+; FP16-NEXT: vmov d0, r0, r1
; FP16-NEXT: vcvt.u32.f64 s0, d0
; FP16-NEXT: vmov r0, s0
; FP16-NEXT: bx lr
-; FP16-NEXT: .p2align 3
-; FP16-NEXT: @ %bb.1:
-; FP16-NEXT: .LCPI15_0:
-; FP16-NEXT: .long 0 @ double 0
-; FP16-NEXT: .long 0
-; FP16-NEXT: .LCPI15_1:
-; FP16-NEXT: .long 4292870144 @ double 4294967295
-; FP16-NEXT: .long 1106247679
%x = call i32 @llvm.fptoui.sat.i32.f64(double %f)
ret i32 %x
}
@@ -2633,43 +2594,16 @@ define i32 @test_signed_i32_f16(half %f) nounwind {
; VFP2-NEXT: push {r7, lr}
; VFP2-NEXT: bl __aeabi_h2f
; VFP2-NEXT: vmov s0, r0
-; VFP2-NEXT: vldr s4, .LCPI25_0
-; VFP2-NEXT: vcvt.u32.f32 s2, s0
-; VFP2-NEXT: vcmp.f32 s0, #0
-; VFP2-NEXT: vmrs APSR_nzcv, fpscr
-; VFP2-NEXT: vcmp.f32 s0, s4
-; VFP2-NEXT: vmov r0, s2
-; VFP2-NEXT: it lt
-; VFP2-NEXT: movlt r0, #0
-; VFP2-NEXT: vmrs APSR_nzcv, fpscr
-; VFP2-NEXT: it gt
-; VFP2-NEXT: movgt.w r0, #-1
+; VFP2-NEXT: vcvt.u32.f32 s0, s0
+; VFP2-NEXT: vmov r0, s0
; VFP2-NEXT: pop {r7, pc}
-; VFP2-NEXT: .p2align 2
-; VFP2-NEXT: @ %bb.1:
-; VFP2-NEXT: .LCPI25_0:
-; VFP2-NEXT: .long 0x4f7fffff @ float 4.29496704E+9
;
; FP16-LABEL: test_signed_i32_f16:
; FP16: @ %bb.0:
; FP16-NEXT: vmov.f16 s0, r0
-; FP16-NEXT: vldr s4, .LCPI25_0
-; FP16-NEXT: vcvtb.f32.f16 s0, s0
-; FP16-NEXT: vcvt.u32.f32 s2, s0
-; FP16-NEXT: vcmp.f32 s0, #0
-; FP16-NEXT: vmrs APSR_nzcv, fpscr
-; FP16-NEXT: vcmp.f32 s0, s4
-; FP16-NEXT: vmov r0, s2
-; FP16-NEXT: it lt
-; FP16-NEXT: movlt r0, #0
-; FP16-NEXT: vmrs APSR_nzcv, fpscr
-; FP16-NEXT: it gt
-; FP16-NEXT: movgt.w r0, #-1
+; FP16-NEXT: vcvt.u32.f16 s0, s0
+; FP16-NEXT: vmov r0, s0
; FP16-NEXT: bx lr
-; FP16-NEXT: .p2align 2
-; FP16-NEXT: @ %bb.1:
-; FP16-NEXT: .LCPI25_0:
-; FP16-NEXT: .long 0x4f7fffff @ float 4.29496704E+9
%x = call i32 @llvm.fptoui.sat.i32.f16(half %f)
ret i32 %x
}
diff --git a/llvm/test/CodeGen/Thumb2/mve-fptosi-sat-vector.ll b/llvm/test/CodeGen/Thumb2/mve-fptosi-sat-vector.ll
index 0bce93be7f2da..1c2f64e06905d 100644
--- a/llvm/test/CodeGen/Thumb2/mve-fptosi-sat-vector.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-fptosi-sat-vector.ll
@@ -18,29 +18,9 @@ declare <8 x i32> @llvm.fptosi.sat.v8f32.v8i32 (<8 x float>)
define arm_aapcs_vfpcc <1 x i32> @test_signed_v1f32_v1i32(<1 x float> %f) {
; CHECK-LABEL: test_signed_v1f32_v1i32:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vcvt.s32.f32 s4, s0
-; CHECK-NEXT: vldr s2, .LCPI0_0
-; CHECK-NEXT: vldr s6, .LCPI0_1
-; CHECK-NEXT: vcmp.f32 s0, s2
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s0, s6
-; CHECK-NEXT: vmov r0, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r0, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r0, #-2147483648
-; CHECK-NEXT: vcmp.f32 s0, s0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r0, #0
+; CHECK-NEXT: vcvt.s32.f32 s0, s0
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bx lr
-; CHECK-NEXT: .p2align 2
-; CHECK-NEXT: @ %bb.1:
-; CHECK-NEXT: .LCPI0_0:
-; CHECK-NEXT: .long 0xcf000000 @ float -2.14748365E+9
-; CHECK-NEXT: .LCPI0_1:
-; CHECK-NEXT: .long 0x4effffff @ float 2.14748352E+9
%x = call <1 x i32> @llvm.fptosi.sat.v1f32.v1i32(<1 x float> %f)
ret <1 x i32> %x
}
@@ -126,73 +106,17 @@ define arm_aapcs_vfpcc <2 x i32> @test_signed_v2f32_v2i32(<2 x float> %f) {
define arm_aapcs_vfpcc <3 x i32> @test_signed_v3f32_v3i32(<3 x float> %f) {
; CHECK-MVE-LABEL: test_signed_v3f32_v3i32:
; CHECK-MVE: @ %bb.0:
-; CHECK-MVE-NEXT: vcvt.s32.f32 s12, s2
-; CHECK-MVE-NEXT: vldr s6, .LCPI2_0
-; CHECK-MVE-NEXT: vcvt.s32.f32 s14, s0
-; CHECK-MVE-NEXT: vldr s10, .LCPI2_1
-; CHECK-MVE-NEXT: vcvt.s32.f32 s8, s3
-; CHECK-MVE-NEXT: vcmp.f32 s2, s6
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s2, s10
-; CHECK-MVE-NEXT: vcvt.s32.f32 s4, s1
-; CHECK-MVE-NEXT: vmov r0, s12
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r0, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s2, s2
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r0, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s0, s6
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r0, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r1, s14
-; CHECK-MVE-NEXT: vcmp.f32 s0, s10
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r1, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s0, s0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r1, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s3, s6
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r1, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r2, s8
-; CHECK-MVE-NEXT: vcmp.f32 s3, s10
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r2, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s3, s3
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r2, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r3, s4
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r2, #0
-; CHECK-MVE-NEXT: vcmp.f32 s1, s6
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r3, #-2147483648
-; CHECK-MVE-NEXT: vcmp.f32 s1, s10
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s1, s1
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r3, #-2147483648
+; CHECK-MVE-NEXT: vcvt.s32.f32 s2, s2
+; CHECK-MVE-NEXT: vcvt.s32.f32 s0, s0
+; CHECK-MVE-NEXT: vcvt.s32.f32 s4, s3
+; CHECK-MVE-NEXT: vcvt.s32.f32 s6, s1
+; CHECK-MVE-NEXT: vmov r0, s2
+; CHECK-MVE-NEXT: vmov r1, s0
; CHECK-MVE-NEXT: vmov q0[2], q0[0], r1, r0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r3, #0
-; CHECK-MVE-NEXT: vmov q0[3], q0[1], r3, r2
+; CHECK-MVE-NEXT: vmov r0, s4
+; CHECK-MVE-NEXT: vmov r1, s6
+; CHECK-MVE-NEXT: vmov q0[3], q0[1], r1, r0
; CHECK-MVE-NEXT: bx lr
-; CHECK-MVE-NEXT: .p2align 2
-; CHECK-MVE-NEXT: @ %bb.1:
-; CHECK-MVE-NEXT: .LCPI2_0:
-; CHECK-MVE-NEXT: .long 0xcf000000 @ float -2.14748365E+9
-; CHECK-MVE-NEXT: .LCPI2_1:
-; CHECK-MVE-NEXT: .long 0x4effffff @ float 2.14748352E+9
;
; CHECK-MVEFP-LABEL: test_signed_v3f32_v3i32:
; CHECK-MVEFP: @ %bb.0:
@@ -205,73 +129,17 @@ define arm_aapcs_vfpcc <3 x i32> @test_signed_v3f32_v3i32(<3 x float> %f) {
define arm_aapcs_vfpcc <4 x i32> @test_signed_v4f32_v4i32(<4 x float> %f) {
; CHECK-MVE-LABEL: test_signed_v4f32_v4i32:
; CHECK-MVE: @ %bb.0:
-; CHECK-MVE-NEXT: vcvt.s32.f32 s12, s2
-; CHECK-MVE-NEXT: vldr s6, .LCPI3_0
-; CHECK-MVE-NEXT: vcvt.s32.f32 s14, s0
-; CHECK-MVE-NEXT: vldr s10, .LCPI3_1
-; CHECK-MVE-NEXT: vcvt.s32.f32 s8, s3
-; CHECK-MVE-NEXT: vcmp.f32 s2, s6
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s2, s10
-; CHECK-MVE-NEXT: vcvt.s32.f32 s4, s1
-; CHECK-MVE-NEXT: vmov r0, s12
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r0, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s2, s2
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r0, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s0, s6
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r0, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r1, s14
-; CHECK-MVE-NEXT: vcmp.f32 s0, s10
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r1, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s0, s0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r1, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s3, s6
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r1, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r2, s8
-; CHECK-MVE-NEXT: vcmp.f32 s3, s10
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r2, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s3, s3
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r2, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r3, s4
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r2, #0
-; CHECK-MVE-NEXT: vcmp.f32 s1, s6
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r3, #-2147483648
-; CHECK-MVE-NEXT: vcmp.f32 s1, s10
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s1, s1
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r3, #-2147483648
+; CHECK-MVE-NEXT: vcvt.s32.f32 s2, s2
+; CHECK-MVE-NEXT: vcvt.s32.f32 s0, s0
+; CHECK-MVE-NEXT: vcvt.s32.f32 s4, s3
+; CHECK-MVE-NEXT: vcvt.s32.f32 s6, s1
+; CHECK-MVE-NEXT: vmov r0, s2
+; CHECK-MVE-NEXT: vmov r1, s0
; CHECK-MVE-NEXT: vmov q0[2], q0[0], r1, r0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r3, #0
-; CHECK-MVE-NEXT: vmov q0[3], q0[1], r3, r2
+; CHECK-MVE-NEXT: vmov r0, s4
+; CHECK-MVE-NEXT: vmov r1, s6
+; CHECK-MVE-NEXT: vmov q0[3], q0[1], r1, r0
; CHECK-MVE-NEXT: bx lr
-; CHECK-MVE-NEXT: .p2align 2
-; CHECK-MVE-NEXT: @ %bb.1:
-; CHECK-MVE-NEXT: .LCPI3_0:
-; CHECK-MVE-NEXT: .long 0xcf000000 @ float -2.14748365E+9
-; CHECK-MVE-NEXT: .LCPI3_1:
-; CHECK-MVE-NEXT: .long 0x4effffff @ float 2.14748352E+9
;
; CHECK-MVEFP-LABEL: test_signed_v4f32_v4i32:
; CHECK-MVEFP: @ %bb.0:
@@ -284,89 +152,21 @@ define arm_aapcs_vfpcc <4 x i32> @test_signed_v4f32_v4i32(<4 x float> %f) {
define arm_aapcs_vfpcc <5 x i32> @test_signed_v5f32_v5i32(<5 x float> %f) {
; CHECK-MVE-LABEL: test_signed_v5f32_v5i32:
; CHECK-MVE: @ %bb.0:
-; CHECK-MVE-NEXT: vcvt.s32.f32 s5, s4
-; CHECK-MVE-NEXT: vldr s10, .LCPI4_0
-; CHECK-MVE-NEXT: vcvt.s32.f32 s7, s3
-; CHECK-MVE-NEXT: vldr s14, .LCPI4_1
-; CHECK-MVE-NEXT: vcvt.s32.f32 s12, s1
-; CHECK-MVE-NEXT: vcmp.f32 s4, s10
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s4, s14
-; CHECK-MVE-NEXT: vcvt.s32.f32 s8, s2
-; CHECK-MVE-NEXT: vcvt.s32.f32 s6, s0
-; CHECK-MVE-NEXT: vmov r1, s5
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r1, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s4, s4
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r1, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s3, s10
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r1, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r12, s7
+; CHECK-MVE-NEXT: vcvt.s32.f32 s4, s4
+; CHECK-MVE-NEXT: vcvt.s32.f32 s0, s0
+; CHECK-MVE-NEXT: vcvt.s32.f32 s2, s2
+; CHECK-MVE-NEXT: vcvt.s32.f32 s6, s3
+; CHECK-MVE-NEXT: vcvt.s32.f32 s8, s1
+; CHECK-MVE-NEXT: vmov r1, s4
+; CHECK-MVE-NEXT: vmov r2, s0
; CHECK-MVE-NEXT: str r1, [r0, #16]
-; CHECK-MVE-NEXT: vcmp.f32 s3, s14
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r12, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s3, s3
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r12, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s1, s10
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs.w r12, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r2, s12
-; CHECK-MVE-NEXT: vcmp.f32 s1, s14
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r2, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s1, s1
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r2, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s2, s10
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r2, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r3, s8
-; CHECK-MVE-NEXT: vcmp.f32 s2, s14
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r3, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s2, s2
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r3, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-MVE-NEXT: vmov r1, s2
+; CHECK-MVE-NEXT: vmov q0[2], q0[0], r2, r1
; CHECK-MVE-NEXT: vmov r1, s6
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r3, #0
-; CHECK-MVE-NEXT: vcmp.f32 s0, s10
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r1, #-2147483648
-; CHECK-MVE-NEXT: vcmp.f32 s0, s14
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r1, #-2147483648
-; CHECK-MVE-NEXT: vcmp.f32 s0, s0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r1, #0
-; CHECK-MVE-NEXT: vmov q0[2], q0[0], r1, r3
-; CHECK-MVE-NEXT: vmov q0[3], q0[1], r2, r12
+; CHECK-MVE-NEXT: vmov r2, s8
+; CHECK-MVE-NEXT: vmov q0[3], q0[1], r2, r1
; CHECK-MVE-NEXT: vstrw.32 q0, [r0]
; CHECK-MVE-NEXT: bx lr
-; CHECK-MVE-NEXT: .p2align 2
-; CHECK-MVE-NEXT: @ %bb.1:
-; CHECK-MVE-NEXT: .LCPI4_0:
-; CHECK-MVE-NEXT: .long 0xcf000000 @ float -2.14748365E+9
-; CHECK-MVE-NEXT: .LCPI4_1:
-; CHECK-MVE-NEXT: .long 0x4effffff @ float 2.14748352E+9
;
; CHECK-MVEFP-LABEL: test_signed_v5f32_v5i32:
; CHECK-MVEFP: @ %bb.0:
@@ -383,104 +183,23 @@ define arm_aapcs_vfpcc <5 x i32> @test_signed_v5f32_v5i32(<5 x float> %f) {
define arm_aapcs_vfpcc <6 x i32> @test_signed_v6f32_v6i32(<6 x float> %f) {
; CHECK-MVE-LABEL: test_signed_v6f32_v6i32:
; CHECK-MVE: @ %bb.0:
-; CHECK-MVE-NEXT: vcvt.s32.f32 s9, s5
-; CHECK-MVE-NEXT: vldr s10, .LCPI5_0
-; CHECK-MVE-NEXT: vcvt.s32.f32 s11, s4
-; CHECK-MVE-NEXT: vldr s6, .LCPI5_1
-; CHECK-MVE-NEXT: vcvt.s32.f32 s7, s3
-; CHECK-MVE-NEXT: vcmp.f32 s5, s10
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s5, s6
-; CHECK-MVE-NEXT: vcvt.s32.f32 s14, s1
-; CHECK-MVE-NEXT: vcvt.s32.f32 s12, s2
-; CHECK-MVE-NEXT: vmov r1, s9
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r1, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r1, #-2147483648
-; CHECK-MVE-NEXT: vcmp.f32 s5, s5
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r1, #0
-; CHECK-MVE-NEXT: vcmp.f32 s4, s10
-; CHECK-MVE-NEXT: str r1, [r0, #20]
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r1, s11
-; CHECK-MVE-NEXT: vcmp.f32 s4, s6
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r1, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s4, s4
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r1, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s3, s10
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r1, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r12, s7
-; CHECK-MVE-NEXT: vcmp.f32 s3, s6
-; CHECK-MVE-NEXT: str r1, [r0, #16]
-; CHECK-MVE-NEXT: vcvt.s32.f32 s8, s0
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r12, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s3, s3
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r12, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s1, s10
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs.w r12, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r2, s14
-; CHECK-MVE-NEXT: vcmp.f32 s1, s6
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r2, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s1, s1
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r2, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s2, s10
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r2, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r3, s12
-; CHECK-MVE-NEXT: vcmp.f32 s2, s6
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r3, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s2, s2
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r3, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-MVE-NEXT: vcvt.s32.f32 s6, s5
+; CHECK-MVE-NEXT: vcvt.s32.f32 s4, s4
+; CHECK-MVE-NEXT: vcvt.s32.f32 s2, s2
+; CHECK-MVE-NEXT: vcvt.s32.f32 s0, s0
+; CHECK-MVE-NEXT: vcvt.s32.f32 s8, s3
+; CHECK-MVE-NEXT: vcvt.s32.f32 s10, s1
+; CHECK-MVE-NEXT: vmov r1, s6
+; CHECK-MVE-NEXT: vmov r2, s4
+; CHECK-MVE-NEXT: strd r2, r1, [r0, #16]
+; CHECK-MVE-NEXT: vmov r1, s2
+; CHECK-MVE-NEXT: vmov r2, s0
+; CHECK-MVE-NEXT: vmov q0[2], q0[0], r2, r1
; CHECK-MVE-NEXT: vmov r1, s8
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r3, #0
-; CHECK-MVE-NEXT: vcmp.f32 s0, s10
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r1, #-2147483648
-; CHECK-MVE-NEXT: vcmp.f32 s0, s6
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r1, #-2147483648
-; CHECK-MVE-NEXT: vcmp.f32 s0, s0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r1, #0
-; CHECK-MVE-NEXT: vmov q0[2], q0[0], r1, r3
-; CHECK-MVE-NEXT: vmov q0[3], q0[1], r2, r12
+; CHECK-MVE-NEXT: vmov r2, s10
+; CHECK-MVE-NEXT: vmov q0[3], q0[1], r2, r1
; CHECK-MVE-NEXT: vstrw.32 q0, [r0]
; CHECK-MVE-NEXT: bx lr
-; CHECK-MVE-NEXT: .p2align 2
-; CHECK-MVE-NEXT: @ %bb.1:
-; CHECK-MVE-NEXT: .LCPI5_0:
-; CHECK-MVE-NEXT: .long 0xcf000000 @ float -2.14748365E+9
-; CHECK-MVE-NEXT: .LCPI5_1:
-; CHECK-MVE-NEXT: .long 0x4effffff @ float 2.14748352E+9
;
; CHECK-MVEFP-LABEL: test_signed_v6f32_v6i32:
; CHECK-MVEFP: @ %bb.0:
@@ -499,119 +218,26 @@ define arm_aapcs_vfpcc <6 x i32> @test_signed_v6f32_v6i32(<6 x float> %f) {
define arm_aapcs_vfpcc <7 x i32> @test_signed_v7f32_v7i32(<7 x float> %f) {
; CHECK-MVE-LABEL: test_signed_v7f32_v7i32:
; CHECK-MVE: @ %bb.0:
-; CHECK-MVE-NEXT: vcvt.s32.f32 s13, s5
-; CHECK-MVE-NEXT: vldr s12, .LCPI6_0
-; CHECK-MVE-NEXT: vcvt.s32.f32 s15, s4
-; CHECK-MVE-NEXT: vldr s8, .LCPI6_1
-; CHECK-MVE-NEXT: vcmp.f32 s5, s12
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcvt.s32.f32 s11, s6
-; CHECK-MVE-NEXT: vcmp.f32 s5, s8
-; CHECK-MVE-NEXT: vcvt.s32.f32 s9, s3
-; CHECK-MVE-NEXT: vcvt.s32.f32 s7, s1
-; CHECK-MVE-NEXT: vmov r1, s13
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r1, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r1, #-2147483648
-; CHECK-MVE-NEXT: vcmp.f32 s5, s5
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r1, #0
-; CHECK-MVE-NEXT: str r1, [r0, #20]
-; CHECK-MVE-NEXT: vcmp.f32 s4, s12
-; CHECK-MVE-NEXT: vmov r1, s15
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r1, #-2147483648
-; CHECK-MVE-NEXT: vcmp.f32 s4, s8
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r1, #-2147483648
-; CHECK-MVE-NEXT: vcmp.f32 s4, s4
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r1, #0
-; CHECK-MVE-NEXT: vcmp.f32 s6, s12
-; CHECK-MVE-NEXT: str r1, [r0, #16]
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r1, s11
-; CHECK-MVE-NEXT: vcmp.f32 s6, s8
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r1, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s6, s6
-; CHECK-MVE-NEXT: vcvt.s32.f32 s14, s2
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r1, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s3, s12
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r1, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r12, s9
-; CHECK-MVE-NEXT: str r1, [r0, #24]
-; CHECK-MVE-NEXT: vcmp.f32 s3, s8
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r12, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcvt.s32.f32 s10, s0
-; CHECK-MVE-NEXT: vcmp.f32 s3, s3
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r12, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s1, s12
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs.w r12, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r2, s7
-; CHECK-MVE-NEXT: vcmp.f32 s1, s8
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r2, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s1, s1
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r2, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s2, s12
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r2, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r3, s14
-; CHECK-MVE-NEXT: vcmp.f32 s2, s8
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r3, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s2, s2
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r3, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-MVE-NEXT: vcvt.s32.f32 s8, s5
+; CHECK-MVE-NEXT: vcvt.s32.f32 s4, s4
+; CHECK-MVE-NEXT: vcvt.s32.f32 s6, s6
+; CHECK-MVE-NEXT: vcvt.s32.f32 s2, s2
+; CHECK-MVE-NEXT: vcvt.s32.f32 s0, s0
+; CHECK-MVE-NEXT: vcvt.s32.f32 s10, s3
+; CHECK-MVE-NEXT: vcvt.s32.f32 s12, s1
+; CHECK-MVE-NEXT: vmov r1, s8
+; CHECK-MVE-NEXT: vmov r2, s4
+; CHECK-MVE-NEXT: vmov r3, s6
+; CHECK-MVE-NEXT: strd r2, r1, [r0, #16]
+; CHECK-MVE-NEXT: vmov r1, s2
+; CHECK-MVE-NEXT: vmov r2, s0
+; CHECK-MVE-NEXT: str r3, [r0, #24]
+; CHECK-MVE-NEXT: vmov q0[2], q0[0], r2, r1
; CHECK-MVE-NEXT: vmov r1, s10
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r3, #0
-; CHECK-MVE-NEXT: vcmp.f32 s0, s12
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r1, #-2147483648
-; CHECK-MVE-NEXT: vcmp.f32 s0, s8
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r1, #-2147483648
-; CHECK-MVE-NEXT: vcmp.f32 s0, s0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r1, #0
-; CHECK-MVE-NEXT: vmov q0[2], q0[0], r1, r3
-; CHECK-MVE-NEXT: vmov q0[3], q0[1], r2, r12
+; CHECK-MVE-NEXT: vmov r2, s12
+; CHECK-MVE-NEXT: vmov q0[3], q0[1], r2, r1
; CHECK-MVE-NEXT: vstrw.32 q0, [r0]
; CHECK-MVE-NEXT: bx lr
-; CHECK-MVE-NEXT: .p2align 2
-; CHECK-MVE-NEXT: @ %bb.1:
-; CHECK-MVE-NEXT: .LCPI6_0:
-; CHECK-MVE-NEXT: .long 0xcf000000 @ float -2.14748365E+9
-; CHECK-MVE-NEXT: .LCPI6_1:
-; CHECK-MVE-NEXT: .long 0x4effffff @ float 2.14748352E+9
;
; CHECK-MVEFP-LABEL: test_signed_v7f32_v7i32:
; CHECK-MVEFP: @ %bb.0:
@@ -632,136 +258,27 @@ define arm_aapcs_vfpcc <7 x i32> @test_signed_v7f32_v7i32(<7 x float> %f) {
define arm_aapcs_vfpcc <8 x i32> @test_signed_v8f32_v8i32(<8 x float> %f) {
; CHECK-MVE-LABEL: test_signed_v8f32_v8i32:
; CHECK-MVE: @ %bb.0:
-; CHECK-MVE-NEXT: .save {r4, r5, r7, lr}
-; CHECK-MVE-NEXT: push {r4, r5, r7, lr}
-; CHECK-MVE-NEXT: .vsave {d8, d9}
-; CHECK-MVE-NEXT: vpush {d8, d9}
-; CHECK-MVE-NEXT: vcvt.s32.f32 s16, s6
-; CHECK-MVE-NEXT: vldr s12, .LCPI7_0
-; CHECK-MVE-NEXT: vcvt.s32.f32 s18, s4
-; CHECK-MVE-NEXT: vldr s10, .LCPI7_1
-; CHECK-MVE-NEXT: vcvt.s32.f32 s15, s7
-; CHECK-MVE-NEXT: vcmp.f32 s6, s12
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s6, s10
-; CHECK-MVE-NEXT: vcvt.s32.f32 s13, s5
-; CHECK-MVE-NEXT: vcvt.s32.f32 s11, s2
-; CHECK-MVE-NEXT: vmov r12, s16
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r12, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s6, s6
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r12, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s4, s12
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs.w r12, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov lr, s18
-; CHECK-MVE-NEXT: vcmp.f32 s4, s10
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w lr, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s4, s4
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt lr, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s7, s12
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs.w lr, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r2, s15
-; CHECK-MVE-NEXT: vcmp.f32 s7, s10
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r2, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s7, s7
-; CHECK-MVE-NEXT: vcvt.s32.f32 s9, s0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r2, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s5, s12
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r2, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r3, s13
-; CHECK-MVE-NEXT: vcmp.f32 s5, s10
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r3, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s5, s5
-; CHECK-MVE-NEXT: vcvt.s32.f32 s14, s3
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r3, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s2, s12
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r3, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r0, s11
-; CHECK-MVE-NEXT: vmov q1[2], q1[0], lr, r12
-; CHECK-MVE-NEXT: vcmp.f32 s2, s10
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r0, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcvt.s32.f32 s8, s1
-; CHECK-MVE-NEXT: vcmp.f32 s2, s2
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r0, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s0, s12
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r0, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r1, s9
-; CHECK-MVE-NEXT: vmov q1[3], q1[1], r3, r2
-; CHECK-MVE-NEXT: vcmp.f32 s0, s10
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r1, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s0, s0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r1, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s3, s12
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r1, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r4, s14
-; CHECK-MVE-NEXT: vcmp.f32 s3, s10
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r4, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s3, s3
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r4, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r5, s8
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r4, #0
-; CHECK-MVE-NEXT: vcmp.f32 s1, s12
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r5, #-2147483648
-; CHECK-MVE-NEXT: vcmp.f32 s1, s10
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s1, s1
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r5, #-2147483648
+; CHECK-MVE-NEXT: vcvt.s32.f32 s2, s2
+; CHECK-MVE-NEXT: vcvt.s32.f32 s0, s0
+; CHECK-MVE-NEXT: vcvt.s32.f32 s8, s3
+; CHECK-MVE-NEXT: vcvt.s32.f32 s10, s1
+; CHECK-MVE-NEXT: vcvt.s32.f32 s6, s6
+; CHECK-MVE-NEXT: vcvt.s32.f32 s4, s4
+; CHECK-MVE-NEXT: vcvt.s32.f32 s12, s7
+; CHECK-MVE-NEXT: vcvt.s32.f32 s14, s5
+; CHECK-MVE-NEXT: vmov r0, s2
+; CHECK-MVE-NEXT: vmov r1, s0
; CHECK-MVE-NEXT: vmov q0[2], q0[0], r1, r0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r5, #0
-; CHECK-MVE-NEXT: vmov q0[3], q0[1], r5, r4
-; CHECK-MVE-NEXT: vpop {d8, d9}
-; CHECK-MVE-NEXT: pop {r4, r5, r7, pc}
-; CHECK-MVE-NEXT: .p2align 2
-; CHECK-MVE-NEXT: @ %bb.1:
-; CHECK-MVE-NEXT: .LCPI7_0:
-; CHECK-MVE-NEXT: .long 0xcf000000 @ float -2.14748365E+9
-; CHECK-MVE-NEXT: .LCPI7_1:
-; CHECK-MVE-NEXT: .long 0x4effffff @ float 2.14748352E+9
+; CHECK-MVE-NEXT: vmov r0, s8
+; CHECK-MVE-NEXT: vmov r1, s10
+; CHECK-MVE-NEXT: vmov q0[3], q0[1], r1, r0
+; CHECK-MVE-NEXT: vmov r0, s6
+; CHECK-MVE-NEXT: vmov r1, s4
+; CHECK-MVE-NEXT: vmov q1[2], q1[0], r1, r0
+; CHECK-MVE-NEXT: vmov r0, s12
+; CHECK-MVE-NEXT: vmov r1, s14
+; CHECK-MVE-NEXT: vmov q1[3], q1[1], r1, r0
+; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: test_signed_v8f32_v8i32:
; CHECK-MVEFP: @ %bb.0:
@@ -1758,30 +1275,9 @@ declare <8 x i32> @llvm.fptosi.sat.v8f16.v8i32 (<8 x half>)
define arm_aapcs_vfpcc <1 x i32> @test_signed_v1f16_v1i32(<1 x half> %f) {
; CHECK-LABEL: test_signed_v1f16_v1i32:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-NEXT: vldr s2, .LCPI14_0
-; CHECK-NEXT: vcvt.s32.f32 s4, s0
-; CHECK-NEXT: vldr s6, .LCPI14_1
-; CHECK-NEXT: vcmp.f32 s0, s2
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s0, s6
-; CHECK-NEXT: vmov r0, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r0, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r0, #-2147483648
-; CHECK-NEXT: vcmp.f32 s0, s0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r0, #0
+; CHECK-NEXT: vcvt.s32.f16 s0, s0
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bx lr
-; CHECK-NEXT: .p2align 2
-; CHECK-NEXT: @ %bb.1:
-; CHECK-NEXT: .LCPI14_0:
-; CHECK-NEXT: .long 0xcf000000 @ float -2.14748365E+9
-; CHECK-NEXT: .LCPI14_1:
-; CHECK-NEXT: .long 0x4effffff @ float 2.14748352E+9
%x = call <1 x i32> @llvm.fptosi.sat.v1f16.v1i32(<1 x half> %f)
ret <1 x i32> %x
}
@@ -1793,694 +1289,224 @@ define arm_aapcs_vfpcc <2 x i32> @test_signed_v2f16_v2i32(<2 x half> %f) {
; CHECK-NEXT: push {r4, r5, r7, lr}
; CHECK-NEXT: .vsave {d8, d9, d10, d11}
; CHECK-NEXT: vpush {d8, d9, d10, d11}
-; CHECK-NEXT: vmov q4, q0
-; CHECK-NEXT: vcvtt.f32.f16 s18, s16
-; CHECK-NEXT: vmov r0, s18
-; CHECK-NEXT: bl __aeabi_f2lz
-; CHECK-NEXT: vcvtb.f32.f16 s16, s16
-; CHECK-NEXT: mov r5, r0
-; CHECK-NEXT: vmov r0, s16
-; CHECK-NEXT: vldr s20, .LCPI15_0
-; CHECK-NEXT: vldr s22, .LCPI15_1
-; CHECK-NEXT: mov r4, r1
-; CHECK-NEXT: vcmp.f32 s18, s20
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r5, #-2147483648
-; CHECK-NEXT: vcmp.f32 s18, s22
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r5, #-2147483648
-; CHECK-NEXT: vcmp.f32 s18, s18
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r5, #0
-; CHECK-NEXT: bl __aeabi_f2lz
-; CHECK-NEXT: vcmp.f32 s16, s20
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s16, s22
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r0, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s16, s16
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r0, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s18, s20
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r0, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s18, s22
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r4, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s18, s18
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt r4, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s16, s20
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r4, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r1, #-1
-; CHECK-NEXT: vcmp.f32 s16, s22
-; CHECK-NEXT: vmov q0[2], q0[0], r0, r5
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt r1, #0
-; CHECK-NEXT: vcmp.f32 s16, s16
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r1, #0
-; CHECK-NEXT: vmov q0[3], q0[1], r1, r4
-; CHECK-NEXT: vpop {d8, d9, d10, d11}
-; CHECK-NEXT: pop {r4, r5, r7, pc}
-; CHECK-NEXT: .p2align 2
-; CHECK-NEXT: @ %bb.1:
-; CHECK-NEXT: .LCPI15_0:
-; CHECK-NEXT: .long 0xcf000000 @ float -2.14748365E+9
-; CHECK-NEXT: .LCPI15_1:
-; CHECK-NEXT: .long 0x4effffff @ float 2.14748352E+9
- %x = call <2 x i32> @llvm.fptosi.sat.v2f16.v2i32(<2 x half> %f)
- ret <2 x i32> %x
-}
-
-define arm_aapcs_vfpcc <3 x i32> @test_signed_v3f16_v3i32(<3 x half> %f) {
-; CHECK-LABEL: test_signed_v3f16_v3i32:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: vcvtb.f32.f16 s10, s1
-; CHECK-NEXT: vcvtb.f32.f16 s2, s2
-; CHECK-NEXT: vcvt.s32.f32 s12, s10
-; CHECK-NEXT: vldr s6, .LCPI16_1
-; CHECK-NEXT: vcvt.s32.f32 s14, s2
-; CHECK-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-NEXT: vcvt.s32.f32 s8, s0
-; CHECK-NEXT: vldr s4, .LCPI16_0
-; CHECK-NEXT: vcmp.f32 s10, s6
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s10, s4
-; CHECK-NEXT: vmov r0, s12
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r0, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s10, s10
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r0, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s2, s6
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r0, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r1, s14
-; CHECK-NEXT: vcmp.f32 s2, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r1, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s2, s2
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r1, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r2, s8
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r1, #0
-; CHECK-NEXT: vcmp.f32 s0, s6
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r2, #-2147483648
-; CHECK-NEXT: vcmp.f32 s0, s4
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s0, s0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r2, #-2147483648
-; CHECK-NEXT: vmov.32 q0[1], r0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r2, #0
-; CHECK-NEXT: vmov q0[2], q0[0], r2, r1
-; CHECK-NEXT: bx lr
-; CHECK-NEXT: .p2align 2
-; CHECK-NEXT: @ %bb.1:
-; CHECK-NEXT: .LCPI16_0:
-; CHECK-NEXT: .long 0x4effffff @ float 2.14748352E+9
-; CHECK-NEXT: .LCPI16_1:
-; CHECK-NEXT: .long 0xcf000000 @ float -2.14748365E+9
- %x = call <3 x i32> @llvm.fptosi.sat.v3f16.v3i32(<3 x half> %f)
- ret <3 x i32> %x
-}
-
-define arm_aapcs_vfpcc <4 x i32> @test_signed_v4f16_v4i32(<4 x half> %f) {
-; CHECK-LABEL: test_signed_v4f16_v4i32:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: vcvtb.f32.f16 s14, s1
-; CHECK-NEXT: vcvtt.f32.f16 s10, s1
-; CHECK-NEXT: vcvt.s32.f32 s1, s14
-; CHECK-NEXT: vcvtt.f32.f16 s6, s0
-; CHECK-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-NEXT: vldr s4, .LCPI17_1
-; CHECK-NEXT: vcvt.s32.f32 s3, s0
-; CHECK-NEXT: vldr s2, .LCPI17_0
-; CHECK-NEXT: vcvt.s32.f32 s12, s10
-; CHECK-NEXT: vcmp.f32 s14, s4
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s14, s2
-; CHECK-NEXT: vcvt.s32.f32 s8, s6
-; CHECK-NEXT: vmov r0, s1
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r0, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s14, s14
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r0, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s0, s4
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r0, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r1, s3
-; CHECK-NEXT: vcmp.f32 s0, s2
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r1, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s0, s0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r1, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s10, s4
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r1, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r2, s12
-; CHECK-NEXT: vcmp.f32 s10, s2
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r2, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s10, s10
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r2, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r3, s8
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r2, #0
-; CHECK-NEXT: vcmp.f32 s6, s4
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s6, s2
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r3, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r3, #-2147483648
-; CHECK-NEXT: vcmp.f32 s6, s6
-; CHECK-NEXT: vmov q0[2], q0[0], r1, r0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r3, #0
-; CHECK-NEXT: vmov q0[3], q0[1], r3, r2
-; CHECK-NEXT: bx lr
-; CHECK-NEXT: .p2align 2
-; CHECK-NEXT: @ %bb.1:
-; CHECK-NEXT: .LCPI17_0:
-; CHECK-NEXT: .long 0x4effffff @ float 2.14748352E+9
-; CHECK-NEXT: .LCPI17_1:
-; CHECK-NEXT: .long 0xcf000000 @ float -2.14748365E+9
- %x = call <4 x i32> @llvm.fptosi.sat.v4f16.v4i32(<4 x half> %f)
- ret <4 x i32> %x
-}
-
-define arm_aapcs_vfpcc <5 x i32> @test_signed_v5f16_v5i32(<5 x half> %f) {
-; CHECK-LABEL: test_signed_v5f16_v5i32:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: vcvtb.f32.f16 s2, s2
-; CHECK-NEXT: vcvtb.f32.f16 s12, s1
-; CHECK-NEXT: vcvt.s32.f32 s5, s2
-; CHECK-NEXT: vcvtt.f32.f16 s1, s1
-; CHECK-NEXT: vcvt.s32.f32 s7, s1
-; CHECK-NEXT: vldr s8, .LCPI18_1
-; CHECK-NEXT: vcvtb.f32.f16 s4, s0
-; CHECK-NEXT: vcvtt.f32.f16 s0, s0
-; CHECK-NEXT: vldr s6, .LCPI18_0
-; CHECK-NEXT: vcvt.s32.f32 s3, s0
-; CHECK-NEXT: vcmp.f32 s2, s8
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s2, s6
-; CHECK-NEXT: vcvt.s32.f32 s14, s12
-; CHECK-NEXT: vmov r1, s5
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r1, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s2, s2
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r1, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s1, s8
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r1, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r12, s7
-; CHECK-NEXT: str r1, [r0, #16]
-; CHECK-NEXT: vcmp.f32 s1, s6
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r12, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcvt.s32.f32 s10, s4
-; CHECK-NEXT: vcmp.f32 s1, s1
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r12, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s0, s8
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs.w r12, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r2, s3
-; CHECK-NEXT: vcmp.f32 s0, s6
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r2, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s0, s0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r2, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s12, s8
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r2, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r3, s14
-; CHECK-NEXT: vcmp.f32 s12, s6
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r3, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s12, s12
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r3, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r1, s10
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r3, #0
-; CHECK-NEXT: vcmp.f32 s4, s8
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r1, #-2147483648
-; CHECK-NEXT: vcmp.f32 s4, s6
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r1, #-2147483648
-; CHECK-NEXT: vcmp.f32 s4, s4
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r1, #0
-; CHECK-NEXT: vmov q0[2], q0[0], r1, r3
-; CHECK-NEXT: vmov q0[3], q0[1], r2, r12
-; CHECK-NEXT: vstrw.32 q0, [r0]
-; CHECK-NEXT: bx lr
-; CHECK-NEXT: .p2align 2
-; CHECK-NEXT: @ %bb.1:
-; CHECK-NEXT: .LCPI18_0:
-; CHECK-NEXT: .long 0x4effffff @ float 2.14748352E+9
-; CHECK-NEXT: .LCPI18_1:
-; CHECK-NEXT: .long 0xcf000000 @ float -2.14748365E+9
- %x = call <5 x i32> @llvm.fptosi.sat.v5f16.v5i32(<5 x half> %f)
- ret <5 x i32> %x
-}
-
-define arm_aapcs_vfpcc <6 x i32> @test_signed_v6f16_v6i32(<6 x half> %f) {
-; CHECK-LABEL: test_signed_v6f16_v6i32:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: vcvtt.f32.f16 s7, s2
-; CHECK-NEXT: vcvtb.f32.f16 s2, s2
-; CHECK-NEXT: vcvt.s32.f32 s9, s7
-; CHECK-NEXT: vldr s8, .LCPI19_1
-; CHECK-NEXT: vcvt.s32.f32 s11, s2
-; CHECK-NEXT: vcvtb.f32.f16 s12, s1
-; CHECK-NEXT: vcvtt.f32.f16 s1, s1
-; CHECK-NEXT: vldr s6, .LCPI19_0
-; CHECK-NEXT: vcvt.s32.f32 s5, s1
-; CHECK-NEXT: vcvtb.f32.f16 s4, s0
-; CHECK-NEXT: vcmp.f32 s7, s8
-; CHECK-NEXT: vcvtt.f32.f16 s0, s0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s7, s6
-; CHECK-NEXT: vcvt.s32.f32 s3, s0
-; CHECK-NEXT: vmov r1, s9
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r1, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r1, #-2147483648
-; CHECK-NEXT: vcmp.f32 s7, s7
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r1, #0
-; CHECK-NEXT: vcmp.f32 s2, s8
-; CHECK-NEXT: str r1, [r0, #20]
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r1, s11
-; CHECK-NEXT: vcmp.f32 s2, s6
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r1, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s2, s2
-; CHECK-NEXT: vcvt.s32.f32 s14, s12
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r1, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s1, s8
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r1, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r12, s5
-; CHECK-NEXT: str r1, [r0, #16]
-; CHECK-NEXT: vcmp.f32 s1, s6
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r12, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcvt.s32.f32 s10, s4
-; CHECK-NEXT: vcmp.f32 s1, s1
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r12, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s0, s8
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs.w r12, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r2, s3
-; CHECK-NEXT: vcmp.f32 s0, s6
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r2, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s0, s0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r2, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s12, s8
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r2, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r3, s14
-; CHECK-NEXT: vcmp.f32 s12, s6
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r3, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s12, s12
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r3, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r1, s10
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r3, #0
-; CHECK-NEXT: vcmp.f32 s4, s8
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r1, #-2147483648
-; CHECK-NEXT: vcmp.f32 s4, s6
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r1, #-2147483648
-; CHECK-NEXT: vcmp.f32 s4, s4
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r1, #0
-; CHECK-NEXT: vmov q0[2], q0[0], r1, r3
-; CHECK-NEXT: vmov q0[3], q0[1], r2, r12
-; CHECK-NEXT: vstrw.32 q0, [r0]
-; CHECK-NEXT: bx lr
-; CHECK-NEXT: .p2align 2
-; CHECK-NEXT: @ %bb.1:
-; CHECK-NEXT: .LCPI19_0:
-; CHECK-NEXT: .long 0x4effffff @ float 2.14748352E+9
-; CHECK-NEXT: .LCPI19_1:
-; CHECK-NEXT: .long 0xcf000000 @ float -2.14748365E+9
- %x = call <6 x i32> @llvm.fptosi.sat.v6f16.v6i32(<6 x half> %f)
- ret <6 x i32> %x
-}
-
-define arm_aapcs_vfpcc <7 x i32> @test_signed_v7f16_v7i32(<7 x half> %f) {
-; CHECK-LABEL: test_signed_v7f16_v7i32:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: vcvtt.f32.f16 s11, s2
-; CHECK-NEXT: vcvtb.f32.f16 s2, s2
-; CHECK-NEXT: vcvt.s32.f32 s13, s11
-; CHECK-NEXT: vldr s8, .LCPI20_1
-; CHECK-NEXT: vcvt.s32.f32 s15, s2
-; CHECK-NEXT: vldr s6, .LCPI20_0
-; CHECK-NEXT: vcvtb.f32.f16 s3, s3
-; CHECK-NEXT: vcmp.f32 s11, s8
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcvt.s32.f32 s9, s3
-; CHECK-NEXT: vcmp.f32 s11, s6
-; CHECK-NEXT: vcvtb.f32.f16 s12, s1
-; CHECK-NEXT: vcvtt.f32.f16 s1, s1
-; CHECK-NEXT: vcvtb.f32.f16 s4, s0
-; CHECK-NEXT: vcvt.s32.f32 s7, s1
-; CHECK-NEXT: vcvtt.f32.f16 s0, s0
-; CHECK-NEXT: vmov r1, s13
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r1, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r1, #-2147483648
-; CHECK-NEXT: vcmp.f32 s11, s11
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r1, #0
-; CHECK-NEXT: str r1, [r0, #20]
-; CHECK-NEXT: vcmp.f32 s2, s8
-; CHECK-NEXT: vmov r1, s15
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r1, #-2147483648
-; CHECK-NEXT: vcmp.f32 s2, s6
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r1, #-2147483648
-; CHECK-NEXT: vcmp.f32 s2, s2
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r1, #0
-; CHECK-NEXT: vcvt.s32.f32 s5, s0
-; CHECK-NEXT: str r1, [r0, #16]
-; CHECK-NEXT: vcmp.f32 s3, s8
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r1, s9
-; CHECK-NEXT: vcmp.f32 s3, s6
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r1, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s3, s3
-; CHECK-NEXT: vcvt.s32.f32 s14, s12
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r1, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s1, s8
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r1, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r12, s7
-; CHECK-NEXT: str r1, [r0, #24]
-; CHECK-NEXT: vcmp.f32 s1, s6
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r12, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcvt.s32.f32 s10, s4
-; CHECK-NEXT: vcmp.f32 s1, s1
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r12, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s0, s8
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs.w r12, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r2, s5
-; CHECK-NEXT: vcmp.f32 s0, s6
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r2, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s0, s0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r2, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s12, s8
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r2, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r3, s14
-; CHECK-NEXT: vcmp.f32 s12, s6
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r3, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s12, s12
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r3, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r1, s10
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r3, #0
-; CHECK-NEXT: vcmp.f32 s4, s8
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r1, #-2147483648
-; CHECK-NEXT: vcmp.f32 s4, s6
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r1, #-2147483648
-; CHECK-NEXT: vcmp.f32 s4, s4
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r1, #0
-; CHECK-NEXT: vmov q0[2], q0[0], r1, r3
-; CHECK-NEXT: vmov q0[3], q0[1], r2, r12
-; CHECK-NEXT: vstrw.32 q0, [r0]
-; CHECK-NEXT: bx lr
-; CHECK-NEXT: .p2align 2
-; CHECK-NEXT: @ %bb.1:
-; CHECK-NEXT: .LCPI20_0:
-; CHECK-NEXT: .long 0x4effffff @ float 2.14748352E+9
-; CHECK-NEXT: .LCPI20_1:
-; CHECK-NEXT: .long 0xcf000000 @ float -2.14748365E+9
- %x = call <7 x i32> @llvm.fptosi.sat.v7f16.v7i32(<7 x half> %f)
- ret <7 x i32> %x
-}
-
-define arm_aapcs_vfpcc <8 x i32> @test_signed_v8f16_v8i32(<8 x half> %f) {
-; CHECK-LABEL: test_signed_v8f16_v8i32:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: .save {r4, r5, r7, lr}
-; CHECK-NEXT: push {r4, r5, r7, lr}
-; CHECK-NEXT: .vsave {d8, d9}
-; CHECK-NEXT: vpush {d8, d9}
-; CHECK-NEXT: vcvtt.f32.f16 s13, s3
-; CHECK-NEXT: vcvtb.f32.f16 s3, s3
-; CHECK-NEXT: vcvt.s32.f32 s16, s3
-; CHECK-NEXT: vcvtt.f32.f16 s9, s2
-; CHECK-NEXT: vcvtb.f32.f16 s2, s2
-; CHECK-NEXT: vldr s8, .LCPI21_1
-; CHECK-NEXT: vcvt.s32.f32 s18, s2
-; CHECK-NEXT: vldr s6, .LCPI21_0
-; CHECK-NEXT: vcvt.s32.f32 s15, s13
-; CHECK-NEXT: vcvtt.f32.f16 s12, s1
-; CHECK-NEXT: vcmp.f32 s3, s8
-; CHECK-NEXT: vcvtb.f32.f16 s1, s1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s3, s6
-; CHECK-NEXT: vcvt.s32.f32 s11, s9
-; CHECK-NEXT: vcvtt.f32.f16 s4, s0
-; CHECK-NEXT: vmov r12, s16
-; CHECK-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r12, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s3, s3
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r12, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s2, s8
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs.w r12, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov lr, s18
-; CHECK-NEXT: vcmp.f32 s2, s6
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w lr, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s2, s2
-; CHECK-NEXT: vcvt.s32.f32 s7, s1
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt lr, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s13, s8
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs.w lr, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r2, s15
-; CHECK-NEXT: vcmp.f32 s13, s6
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r2, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s13, s13
-; CHECK-NEXT: vcvt.s32.f32 s5, s0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r2, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s9, s8
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r2, #0
+; CHECK-NEXT: vmov q4, q0
+; CHECK-NEXT: vcvtt.f32.f16 s18, s16
+; CHECK-NEXT: vmov r0, s18
+; CHECK-NEXT: bl __aeabi_f2lz
+; CHECK-NEXT: vcvtb.f32.f16 s16, s16
+; CHECK-NEXT: mov r5, r0
+; CHECK-NEXT: vmov r0, s16
+; CHECK-NEXT: vldr s20, .LCPI15_0
+; CHECK-NEXT: vldr s22, .LCPI15_1
+; CHECK-NEXT: mov r4, r1
+; CHECK-NEXT: vcmp.f32 s18, s20
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r3, s11
-; CHECK-NEXT: vcmp.f32 s9, s6
; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r3, #-2147483648
+; CHECK-NEXT: movlt.w r5, #-2147483648
+; CHECK-NEXT: vcmp.f32 s18, s22
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s9, s9
-; CHECK-NEXT: vcvt.s32.f32 s14, s12
; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r3, #-2147483648
+; CHECK-NEXT: mvngt r5, #-2147483648
+; CHECK-NEXT: vcmp.f32 s18, s18
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s1, s8
; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r3, #0
+; CHECK-NEXT: movvs r5, #0
+; CHECK-NEXT: bl __aeabi_f2lz
+; CHECK-NEXT: vcmp.f32 s16, s20
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r0, s7
-; CHECK-NEXT: vcmp.f32 s1, s6
+; CHECK-NEXT: vcmp.f32 s16, s22
; CHECK-NEXT: it lt
; CHECK-NEXT: movlt.w r0, #-2147483648
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcvt.s32.f32 s10, s4
-; CHECK-NEXT: vcmp.f32 s1, s1
+; CHECK-NEXT: vcmp.f32 s16, s16
; CHECK-NEXT: it gt
; CHECK-NEXT: mvngt r0, #-2147483648
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s0, s8
+; CHECK-NEXT: vcmp.f32 s18, s20
; CHECK-NEXT: it vs
; CHECK-NEXT: movvs r0, #0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r1, s5
-; CHECK-NEXT: vcmp.f32 s0, s6
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r1, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s0, s0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r1, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s12, s8
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r1, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r4, s14
-; CHECK-NEXT: vcmp.f32 s12, s6
+; CHECK-NEXT: vcmp.f32 s18, s22
; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r4, #-2147483648
+; CHECK-NEXT: movlt.w r4, #-1
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s12, s12
+; CHECK-NEXT: vcmp.f32 s18, s18
; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r4, #-2147483648
+; CHECK-NEXT: movgt r4, #0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r5, s10
+; CHECK-NEXT: vcmp.f32 s16, s20
; CHECK-NEXT: it vs
; CHECK-NEXT: movvs r4, #0
-; CHECK-NEXT: vcmp.f32 s4, s8
-; CHECK-NEXT: vmov q0[2], q0[0], r1, r0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r5, #-2147483648
-; CHECK-NEXT: vcmp.f32 s4, s6
+; CHECK-NEXT: movlt.w r1, #-1
+; CHECK-NEXT: vcmp.f32 s16, s22
+; CHECK-NEXT: vmov q0[2], q0[0], r0, r5
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s4, s4
-; CHECK-NEXT: vmov q1[2], q1[0], lr, r12
; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r5, #-2147483648
+; CHECK-NEXT: movgt r1, #0
+; CHECK-NEXT: vcmp.f32 s16, s16
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r5, #0
-; CHECK-NEXT: vmov q0[3], q0[1], r5, r4
-; CHECK-NEXT: vmov q1[3], q1[1], r3, r2
-; CHECK-NEXT: vpop {d8, d9}
+; CHECK-NEXT: movvs r1, #0
+; CHECK-NEXT: vmov q0[3], q0[1], r1, r4
+; CHECK-NEXT: vpop {d8, d9, d10, d11}
; CHECK-NEXT: pop {r4, r5, r7, pc}
; CHECK-NEXT: .p2align 2
; CHECK-NEXT: @ %bb.1:
-; CHECK-NEXT: .LCPI21_0:
-; CHECK-NEXT: .long 0x4effffff @ float 2.14748352E+9
-; CHECK-NEXT: .LCPI21_1:
+; CHECK-NEXT: .LCPI15_0:
; CHECK-NEXT: .long 0xcf000000 @ float -2.14748365E+9
+; CHECK-NEXT: .LCPI15_1:
+; CHECK-NEXT: .long 0x4effffff @ float 2.14748352E+9
+ %x = call <2 x i32> @llvm.fptosi.sat.v2f16.v2i32(<2 x half> %f)
+ ret <2 x i32> %x
+}
+
+define arm_aapcs_vfpcc <3 x i32> @test_signed_v3f16_v3i32(<3 x half> %f) {
+; CHECK-LABEL: test_signed_v3f16_v3i32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vcvt.s32.f16 s6, s0
+; CHECK-NEXT: vcvt.s32.f16 s0, s1
+; CHECK-NEXT: vcvt.s32.f16 s4, s2
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: vmov.32 q0[1], r0
+; CHECK-NEXT: vmov r0, s4
+; CHECK-NEXT: vmov r1, s6
+; CHECK-NEXT: vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT: bx lr
+ %x = call <3 x i32> @llvm.fptosi.sat.v3f16.v3i32(<3 x half> %f)
+ ret <3 x i32> %x
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_signed_v4f16_v4i32(<4 x half> %f) {
+; CHECK-LABEL: test_signed_v4f16_v4i32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmovx.f16 s2, s1
+; CHECK-NEXT: vcvt.s32.f16 s4, s2
+; CHECK-NEXT: vmovx.f16 s2, s0
+; CHECK-NEXT: vcvt.s32.f16 s6, s2
+; CHECK-NEXT: vcvt.s32.f16 s2, s1
+; CHECK-NEXT: vcvt.s32.f16 s0, s0
+; CHECK-NEXT: vmov r0, s2
+; CHECK-NEXT: vmov r1, s0
+; CHECK-NEXT: vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT: vmov r0, s4
+; CHECK-NEXT: vmov r1, s6
+; CHECK-NEXT: vmov q0[3], q0[1], r1, r0
+; CHECK-NEXT: bx lr
+ %x = call <4 x i32> @llvm.fptosi.sat.v4f16.v4i32(<4 x half> %f)
+ ret <4 x i32> %x
+}
+
+define arm_aapcs_vfpcc <5 x i32> @test_signed_v5f16_v5i32(<5 x half> %f) {
+; CHECK-LABEL: test_signed_v5f16_v5i32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmovx.f16 s6, s0
+; CHECK-NEXT: vmovx.f16 s4, s1
+; CHECK-NEXT: vcvt.s32.f16 s8, s1
+; CHECK-NEXT: vcvt.s32.f16 s0, s0
+; CHECK-NEXT: vcvt.s32.f16 s4, s4
+; CHECK-NEXT: vcvt.s32.f16 s6, s6
+; CHECK-NEXT: vmov r1, s8
+; CHECK-NEXT: vcvt.s32.f16 s2, s2
+; CHECK-NEXT: vmov r2, s0
+; CHECK-NEXT: vmov q2[2], q2[0], r2, r1
+; CHECK-NEXT: vmov r1, s4
+; CHECK-NEXT: vmov r2, s6
+; CHECK-NEXT: vmov q2[3], q2[1], r2, r1
+; CHECK-NEXT: vmov r1, s2
+; CHECK-NEXT: str r1, [r0, #16]
+; CHECK-NEXT: vstrw.32 q2, [r0]
+; CHECK-NEXT: bx lr
+ %x = call <5 x i32> @llvm.fptosi.sat.v5f16.v5i32(<5 x half> %f)
+ ret <5 x i32> %x
+}
+
+define arm_aapcs_vfpcc <6 x i32> @test_signed_v6f16_v6i32(<6 x half> %f) {
+; CHECK-LABEL: test_signed_v6f16_v6i32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmovx.f16 s8, s0
+; CHECK-NEXT: vmovx.f16 s6, s1
+; CHECK-NEXT: vcvt.s32.f16 s10, s1
+; CHECK-NEXT: vcvt.s32.f16 s0, s0
+; CHECK-NEXT: vcvt.s32.f16 s4, s2
+; CHECK-NEXT: vmovx.f16 s2, s2
+; CHECK-NEXT: vcvt.s32.f16 s6, s6
+; CHECK-NEXT: vcvt.s32.f16 s8, s8
+; CHECK-NEXT: vmov r1, s10
+; CHECK-NEXT: vcvt.s32.f16 s2, s2
+; CHECK-NEXT: vmov r2, s0
+; CHECK-NEXT: vmov q3[2], q3[0], r2, r1
+; CHECK-NEXT: vmov r1, s6
+; CHECK-NEXT: vmov r2, s8
+; CHECK-NEXT: vmov q3[3], q3[1], r2, r1
+; CHECK-NEXT: vmov r1, s2
+; CHECK-NEXT: vmov r2, s4
+; CHECK-NEXT: strd r2, r1, [r0, #16]
+; CHECK-NEXT: vstrw.32 q3, [r0]
+; CHECK-NEXT: bx lr
+ %x = call <6 x i32> @llvm.fptosi.sat.v6f16.v6i32(<6 x half> %f)
+ ret <6 x i32> %x
+}
+
+define arm_aapcs_vfpcc <7 x i32> @test_signed_v7f16_v7i32(<7 x half> %f) {
+; CHECK-LABEL: test_signed_v7f16_v7i32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmovx.f16 s10, s0
+; CHECK-NEXT: vmovx.f16 s8, s1
+; CHECK-NEXT: vcvt.s32.f16 s12, s1
+; CHECK-NEXT: vcvt.s32.f16 s0, s0
+; CHECK-NEXT: vcvt.s32.f16 s4, s2
+; CHECK-NEXT: vmovx.f16 s2, s2
+; CHECK-NEXT: vcvt.s32.f16 s8, s8
+; CHECK-NEXT: vcvt.s32.f16 s10, s10
+; CHECK-NEXT: vmov r1, s12
+; CHECK-NEXT: vcvt.s32.f16 s2, s2
+; CHECK-NEXT: vmov r2, s0
+; CHECK-NEXT: vcvt.s32.f16 s6, s3
+; CHECK-NEXT: vmov q3[2], q3[0], r2, r1
+; CHECK-NEXT: vmov r1, s8
+; CHECK-NEXT: vmov r2, s10
+; CHECK-NEXT: vmov q3[3], q3[1], r2, r1
+; CHECK-NEXT: vmov r1, s2
+; CHECK-NEXT: vmov r2, s4
+; CHECK-NEXT: vmov r3, s6
+; CHECK-NEXT: strd r2, r1, [r0, #16]
+; CHECK-NEXT: str r3, [r0, #24]
+; CHECK-NEXT: vstrw.32 q3, [r0]
+; CHECK-NEXT: bx lr
+ %x = call <7 x i32> @llvm.fptosi.sat.v7f16.v7i32(<7 x half> %f)
+ ret <7 x i32> %x
+}
+
+define arm_aapcs_vfpcc <8 x i32> @test_signed_v8f16_v8i32(<8 x half> %f) {
+; CHECK-LABEL: test_signed_v8f16_v8i32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmovx.f16 s4, s3
+; CHECK-NEXT: vmovx.f16 s6, s0
+; CHECK-NEXT: vcvt.s32.f16 s8, s4
+; CHECK-NEXT: vmovx.f16 s4, s2
+; CHECK-NEXT: vcvt.s32.f16 s10, s4
+; CHECK-NEXT: vmovx.f16 s4, s1
+; CHECK-NEXT: vcvt.s32.f16 s14, s2
+; CHECK-NEXT: vcvt.s32.f16 s2, s1
+; CHECK-NEXT: vcvt.s32.f16 s0, s0
+; CHECK-NEXT: vcvt.s32.f16 s4, s4
+; CHECK-NEXT: vcvt.s32.f16 s6, s6
+; CHECK-NEXT: vmov r0, s2
+; CHECK-NEXT: vmov r1, s0
+; CHECK-NEXT: vcvt.s32.f16 s12, s3
+; CHECK-NEXT: vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT: vmov r0, s4
+; CHECK-NEXT: vmov r1, s6
+; CHECK-NEXT: vmov q0[3], q0[1], r1, r0
+; CHECK-NEXT: vmov r0, s12
+; CHECK-NEXT: vmov r1, s14
+; CHECK-NEXT: vmov q1[2], q1[0], r1, r0
+; CHECK-NEXT: vmov r0, s8
+; CHECK-NEXT: vmov r1, s10
+; CHECK-NEXT: vmov q1[3], q1[1], r1, r0
+; CHECK-NEXT: bx lr
%x = call <8 x i32> @llvm.fptosi.sat.v8f16.v8i32(<8 x half> %f)
ret <8 x i32> %x
}
@@ -2798,73 +1824,17 @@ define arm_aapcs_vfpcc <4 x i19> @test_signed_v4f32_v4i19(<4 x float> %f) {
define arm_aapcs_vfpcc <4 x i32> @test_signed_v4f32_v4i32_duplicate(<4 x float> %f) {
; CHECK-MVE-LABEL: test_signed_v4f32_v4i32_duplicate:
; CHECK-MVE: @ %bb.0:
-; CHECK-MVE-NEXT: vcvt.s32.f32 s12, s2
-; CHECK-MVE-NEXT: vldr s6, .LCPI27_0
-; CHECK-MVE-NEXT: vcvt.s32.f32 s14, s0
-; CHECK-MVE-NEXT: vldr s10, .LCPI27_1
-; CHECK-MVE-NEXT: vcvt.s32.f32 s8, s3
-; CHECK-MVE-NEXT: vcmp.f32 s2, s6
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s2, s10
-; CHECK-MVE-NEXT: vcvt.s32.f32 s4, s1
-; CHECK-MVE-NEXT: vmov r0, s12
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r0, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s2, s2
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r0, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s0, s6
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r0, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r1, s14
-; CHECK-MVE-NEXT: vcmp.f32 s0, s10
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r1, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s0, s0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r1, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s3, s6
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r1, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r2, s8
-; CHECK-MVE-NEXT: vcmp.f32 s3, s10
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r2, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s3, s3
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r2, #-2147483648
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r3, s4
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r2, #0
-; CHECK-MVE-NEXT: vcmp.f32 s1, s6
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r3, #-2147483648
-; CHECK-MVE-NEXT: vcmp.f32 s1, s10
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s1, s1
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: mvngt r3, #-2147483648
+; CHECK-MVE-NEXT: vcvt.s32.f32 s2, s2
+; CHECK-MVE-NEXT: vcvt.s32.f32 s0, s0
+; CHECK-MVE-NEXT: vcvt.s32.f32 s4, s3
+; CHECK-MVE-NEXT: vcvt.s32.f32 s6, s1
+; CHECK-MVE-NEXT: vmov r0, s2
+; CHECK-MVE-NEXT: vmov r1, s0
; CHECK-MVE-NEXT: vmov q0[2], q0[0], r1, r0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it vs
-; CHECK-MVE-NEXT: movvs r3, #0
-; CHECK-MVE-NEXT: vmov q0[3], q0[1], r3, r2
+; CHECK-MVE-NEXT: vmov r0, s4
+; CHECK-MVE-NEXT: vmov r1, s6
+; CHECK-MVE-NEXT: vmov q0[3], q0[1], r1, r0
; CHECK-MVE-NEXT: bx lr
-; CHECK-MVE-NEXT: .p2align 2
-; CHECK-MVE-NEXT: @ %bb.1:
-; CHECK-MVE-NEXT: .LCPI27_0:
-; CHECK-MVE-NEXT: .long 0xcf000000 @ float -2.14748365E+9
-; CHECK-MVE-NEXT: .LCPI27_1:
-; CHECK-MVE-NEXT: .long 0x4effffff @ float 2.14748352E+9
;
; CHECK-MVEFP-LABEL: test_signed_v4f32_v4i32_duplicate:
; CHECK-MVEFP: @ %bb.0:
@@ -6086,144 +5056,31 @@ define arm_aapcs_vfpcc <8 x i19> @test_signed_v8f16_v8i19(<8 x half> %f) {
define arm_aapcs_vfpcc <8 x i32> @test_signed_v8f16_v8i32_duplicate(<8 x half> %f) {
; CHECK-LABEL: test_signed_v8f16_v8i32_duplicate:
; CHECK: @ %bb.0:
-; CHECK-NEXT: .save {r4, r5, r7, lr}
-; CHECK-NEXT: push {r4, r5, r7, lr}
-; CHECK-NEXT: .vsave {d8, d9}
-; CHECK-NEXT: vpush {d8, d9}
-; CHECK-NEXT: vcvtt.f32.f16 s13, s3
-; CHECK-NEXT: vcvtb.f32.f16 s3, s3
-; CHECK-NEXT: vcvt.s32.f32 s16, s3
-; CHECK-NEXT: vcvtt.f32.f16 s9, s2
-; CHECK-NEXT: vcvtb.f32.f16 s2, s2
-; CHECK-NEXT: vldr s8, .LCPI47_1
-; CHECK-NEXT: vcvt.s32.f32 s18, s2
-; CHECK-NEXT: vldr s6, .LCPI47_0
-; CHECK-NEXT: vcvt.s32.f32 s15, s13
-; CHECK-NEXT: vcvtt.f32.f16 s12, s1
-; CHECK-NEXT: vcmp.f32 s3, s8
-; CHECK-NEXT: vcvtb.f32.f16 s1, s1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s3, s6
-; CHECK-NEXT: vcvt.s32.f32 s11, s9
-; CHECK-NEXT: vcvtt.f32.f16 s4, s0
-; CHECK-NEXT: vmov r12, s16
-; CHECK-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r12, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s3, s3
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r12, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s2, s8
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs.w r12, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov lr, s18
-; CHECK-NEXT: vcmp.f32 s2, s6
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w lr, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s2, s2
-; CHECK-NEXT: vcvt.s32.f32 s7, s1
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt lr, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s13, s8
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs.w lr, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r2, s15
-; CHECK-NEXT: vcmp.f32 s13, s6
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r2, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s13, s13
-; CHECK-NEXT: vcvt.s32.f32 s5, s0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r2, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s9, s8
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r2, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r3, s11
-; CHECK-NEXT: vcmp.f32 s9, s6
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r3, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s9, s9
-; CHECK-NEXT: vcvt.s32.f32 s14, s12
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r3, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s1, s8
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r3, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r0, s7
-; CHECK-NEXT: vcmp.f32 s1, s6
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r0, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcvt.s32.f32 s10, s4
-; CHECK-NEXT: vcmp.f32 s1, s1
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r0, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s0, s8
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r0, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r1, s5
-; CHECK-NEXT: vcmp.f32 s0, s6
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r1, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s0, s0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r1, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s12, s8
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r1, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r4, s14
-; CHECK-NEXT: vcmp.f32 s12, s6
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r4, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s12, s12
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r4, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r5, s10
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r4, #0
-; CHECK-NEXT: vcmp.f32 s4, s8
+; CHECK-NEXT: vmovx.f16 s4, s3
+; CHECK-NEXT: vmovx.f16 s6, s0
+; CHECK-NEXT: vcvt.s32.f16 s8, s4
+; CHECK-NEXT: vmovx.f16 s4, s2
+; CHECK-NEXT: vcvt.s32.f16 s10, s4
+; CHECK-NEXT: vmovx.f16 s4, s1
+; CHECK-NEXT: vcvt.s32.f16 s14, s2
+; CHECK-NEXT: vcvt.s32.f16 s2, s1
+; CHECK-NEXT: vcvt.s32.f16 s0, s0
+; CHECK-NEXT: vcvt.s32.f16 s4, s4
+; CHECK-NEXT: vcvt.s32.f16 s6, s6
+; CHECK-NEXT: vmov r0, s2
+; CHECK-NEXT: vmov r1, s0
+; CHECK-NEXT: vcvt.s32.f16 s12, s3
; CHECK-NEXT: vmov q0[2], q0[0], r1, r0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r5, #-2147483648
-; CHECK-NEXT: vcmp.f32 s4, s6
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s4, s4
-; CHECK-NEXT: vmov q1[2], q1[0], lr, r12
-; CHECK-NEXT: it gt
-; CHECK-NEXT: mvngt r5, #-2147483648
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it vs
-; CHECK-NEXT: movvs r5, #0
-; CHECK-NEXT: vmov q0[3], q0[1], r5, r4
-; CHECK-NEXT: vmov q1[3], q1[1], r3, r2
-; CHECK-NEXT: vpop {d8, d9}
-; CHECK-NEXT: pop {r4, r5, r7, pc}
-; CHECK-NEXT: .p2align 2
-; CHECK-NEXT: @ %bb.1:
-; CHECK-NEXT: .LCPI47_0:
-; CHECK-NEXT: .long 0x4effffff @ float 2.14748352E+9
-; CHECK-NEXT: .LCPI47_1:
-; CHECK-NEXT: .long 0xcf000000 @ float -2.14748365E+9
+; CHECK-NEXT: vmov r0, s4
+; CHECK-NEXT: vmov r1, s6
+; CHECK-NEXT: vmov q0[3], q0[1], r1, r0
+; CHECK-NEXT: vmov r0, s12
+; CHECK-NEXT: vmov r1, s14
+; CHECK-NEXT: vmov q1[2], q1[0], r1, r0
+; CHECK-NEXT: vmov r0, s8
+; CHECK-NEXT: vmov r1, s10
+; CHECK-NEXT: vmov q1[3], q1[1], r1, r0
+; CHECK-NEXT: bx lr
%x = call <8 x i32> @llvm.fptosi.sat.v8f16.v8i32(<8 x half> %f)
ret <8 x i32> %x
}
diff --git a/llvm/test/CodeGen/Thumb2/mve-fptoui-sat-vector.ll b/llvm/test/CodeGen/Thumb2/mve-fptoui-sat-vector.ll
index 1d79493a2ae6d..e36674f6bfcbc 100644
--- a/llvm/test/CodeGen/Thumb2/mve-fptoui-sat-vector.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-fptoui-sat-vector.ll
@@ -18,22 +18,9 @@ declare <8 x i32> @llvm.fptoui.sat.v8f32.v8i32 (<8 x float>)
define arm_aapcs_vfpcc <1 x i32> @test_unsigned_v1f32_v1i32(<1 x float> %f) {
; CHECK-LABEL: test_unsigned_v1f32_v1i32:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vcvt.u32.f32 s2, s0
-; CHECK-NEXT: vldr s4, .LCPI0_0
-; CHECK-NEXT: vcmp.f32 s0, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s0, s4
-; CHECK-NEXT: vmov r0, s2
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r0, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r0, #-1
+; CHECK-NEXT: vcvt.u32.f32 s0, s0
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bx lr
-; CHECK-NEXT: .p2align 2
-; CHECK-NEXT: @ %bb.1:
-; CHECK-NEXT: .LCPI0_0:
-; CHECK-NEXT: .long 0x4f7fffff @ float 4.29496704E+9
%x = call <1 x i32> @llvm.fptoui.sat.v1f32.v1i32(<1 x float> %f)
ret <1 x i32> %x
}
@@ -100,54 +87,17 @@ define arm_aapcs_vfpcc <2 x i32> @test_unsigned_v2f32_v2i32(<2 x float> %f) {
define arm_aapcs_vfpcc <3 x i32> @test_unsigned_v3f32_v3i32(<3 x float> %f) {
; CHECK-MVE-LABEL: test_unsigned_v3f32_v3i32:
; CHECK-MVE: @ %bb.0:
-; CHECK-MVE-NEXT: vcvt.u32.f32 s10, s2
-; CHECK-MVE-NEXT: vldr s8, .LCPI2_0
-; CHECK-MVE-NEXT: vcvt.u32.f32 s12, s0
-; CHECK-MVE-NEXT: vcvt.u32.f32 s6, s3
-; CHECK-MVE-NEXT: vcvt.u32.f32 s4, s1
-; CHECK-MVE-NEXT: vcmp.f32 s2, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s2, s8
-; CHECK-MVE-NEXT: vmov r0, s10
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r0, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s0, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r0, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r1, s12
-; CHECK-MVE-NEXT: vcmp.f32 s0, s8
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r1, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s3, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r1, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r2, s6
-; CHECK-MVE-NEXT: vcmp.f32 s3, s8
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r2, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r3, s4
-; CHECK-MVE-NEXT: vcmp.f32 s1, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r2, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s1, s8
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r3, #0
+; CHECK-MVE-NEXT: vcvt.u32.f32 s2, s2
+; CHECK-MVE-NEXT: vcvt.u32.f32 s0, s0
+; CHECK-MVE-NEXT: vcvt.u32.f32 s4, s3
+; CHECK-MVE-NEXT: vcvt.u32.f32 s6, s1
+; CHECK-MVE-NEXT: vmov r0, s2
+; CHECK-MVE-NEXT: vmov r1, s0
; CHECK-MVE-NEXT: vmov q0[2], q0[0], r1, r0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r3, #-1
-; CHECK-MVE-NEXT: vmov q0[3], q0[1], r3, r2
+; CHECK-MVE-NEXT: vmov r0, s4
+; CHECK-MVE-NEXT: vmov r1, s6
+; CHECK-MVE-NEXT: vmov q0[3], q0[1], r1, r0
; CHECK-MVE-NEXT: bx lr
-; CHECK-MVE-NEXT: .p2align 2
-; CHECK-MVE-NEXT: @ %bb.1:
-; CHECK-MVE-NEXT: .LCPI2_0:
-; CHECK-MVE-NEXT: .long 0x4f7fffff @ float 4.29496704E+9
;
; CHECK-MVEFP-LABEL: test_unsigned_v3f32_v3i32:
; CHECK-MVEFP: @ %bb.0:
@@ -160,54 +110,17 @@ define arm_aapcs_vfpcc <3 x i32> @test_unsigned_v3f32_v3i32(<3 x float> %f) {
define arm_aapcs_vfpcc <4 x i32> @test_unsigned_v4f32_v4i32(<4 x float> %f) {
; CHECK-MVE-LABEL: test_unsigned_v4f32_v4i32:
; CHECK-MVE: @ %bb.0:
-; CHECK-MVE-NEXT: vcvt.u32.f32 s10, s2
-; CHECK-MVE-NEXT: vldr s8, .LCPI3_0
-; CHECK-MVE-NEXT: vcvt.u32.f32 s12, s0
-; CHECK-MVE-NEXT: vcvt.u32.f32 s6, s3
-; CHECK-MVE-NEXT: vcvt.u32.f32 s4, s1
-; CHECK-MVE-NEXT: vcmp.f32 s2, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s2, s8
-; CHECK-MVE-NEXT: vmov r0, s10
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r0, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s0, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r0, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r1, s12
-; CHECK-MVE-NEXT: vcmp.f32 s0, s8
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r1, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s3, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r1, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r2, s6
-; CHECK-MVE-NEXT: vcmp.f32 s3, s8
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r2, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r3, s4
-; CHECK-MVE-NEXT: vcmp.f32 s1, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r2, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s1, s8
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r3, #0
+; CHECK-MVE-NEXT: vcvt.u32.f32 s2, s2
+; CHECK-MVE-NEXT: vcvt.u32.f32 s0, s0
+; CHECK-MVE-NEXT: vcvt.u32.f32 s4, s3
+; CHECK-MVE-NEXT: vcvt.u32.f32 s6, s1
+; CHECK-MVE-NEXT: vmov r0, s2
+; CHECK-MVE-NEXT: vmov r1, s0
; CHECK-MVE-NEXT: vmov q0[2], q0[0], r1, r0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r3, #-1
-; CHECK-MVE-NEXT: vmov q0[3], q0[1], r3, r2
+; CHECK-MVE-NEXT: vmov r0, s4
+; CHECK-MVE-NEXT: vmov r1, s6
+; CHECK-MVE-NEXT: vmov q0[3], q0[1], r1, r0
; CHECK-MVE-NEXT: bx lr
-; CHECK-MVE-NEXT: .p2align 2
-; CHECK-MVE-NEXT: @ %bb.1:
-; CHECK-MVE-NEXT: .LCPI3_0:
-; CHECK-MVE-NEXT: .long 0x4f7fffff @ float 4.29496704E+9
;
; CHECK-MVEFP-LABEL: test_unsigned_v4f32_v4i32:
; CHECK-MVEFP: @ %bb.0:
@@ -220,66 +133,21 @@ define arm_aapcs_vfpcc <4 x i32> @test_unsigned_v4f32_v4i32(<4 x float> %f) {
define arm_aapcs_vfpcc <5 x i32> @test_unsigned_v5f32_v5i32(<5 x float> %f) {
; CHECK-MVE-LABEL: test_unsigned_v5f32_v5i32:
; CHECK-MVE: @ %bb.0:
-; CHECK-MVE-NEXT: vcvt.u32.f32 s14, s4
-; CHECK-MVE-NEXT: vldr s12, .LCPI4_0
-; CHECK-MVE-NEXT: vcvt.u32.f32 s5, s3
-; CHECK-MVE-NEXT: vcvt.u32.f32 s10, s1
-; CHECK-MVE-NEXT: vcvt.u32.f32 s6, s2
-; CHECK-MVE-NEXT: vcmp.f32 s4, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcvt.u32.f32 s8, s0
-; CHECK-MVE-NEXT: vcmp.f32 s4, s12
-; CHECK-MVE-NEXT: vmov r1, s14
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r1, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s3, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r1, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r12, s5
-; CHECK-MVE-NEXT: vcmp.f32 s3, s12
+; CHECK-MVE-NEXT: vcvt.u32.f32 s4, s4
+; CHECK-MVE-NEXT: vcvt.u32.f32 s0, s0
+; CHECK-MVE-NEXT: vcvt.u32.f32 s2, s2
+; CHECK-MVE-NEXT: vcvt.u32.f32 s6, s3
+; CHECK-MVE-NEXT: vcvt.u32.f32 s8, s1
+; CHECK-MVE-NEXT: vmov r1, s4
+; CHECK-MVE-NEXT: vmov r2, s0
; CHECK-MVE-NEXT: str r1, [r0, #16]
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r12, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s1, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r12, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r2, s10
-; CHECK-MVE-NEXT: vcmp.f32 s1, s12
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r2, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s2, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r2, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r3, s6
-; CHECK-MVE-NEXT: vcmp.f32 s2, s12
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r3, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r1, s8
-; CHECK-MVE-NEXT: vcmp.f32 s0, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r3, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r1, #0
-; CHECK-MVE-NEXT: vcmp.f32 s0, s12
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r1, #-1
-; CHECK-MVE-NEXT: vmov q0[2], q0[0], r1, r3
-; CHECK-MVE-NEXT: vmov q0[3], q0[1], r2, r12
+; CHECK-MVE-NEXT: vmov r1, s2
+; CHECK-MVE-NEXT: vmov q0[2], q0[0], r2, r1
+; CHECK-MVE-NEXT: vmov r1, s6
+; CHECK-MVE-NEXT: vmov r2, s8
+; CHECK-MVE-NEXT: vmov q0[3], q0[1], r2, r1
; CHECK-MVE-NEXT: vstrw.32 q0, [r0]
; CHECK-MVE-NEXT: bx lr
-; CHECK-MVE-NEXT: .p2align 2
-; CHECK-MVE-NEXT: @ %bb.1:
-; CHECK-MVE-NEXT: .LCPI4_0:
-; CHECK-MVE-NEXT: .long 0x4f7fffff @ float 4.29496704E+9
;
; CHECK-MVEFP-LABEL: test_unsigned_v5f32_v5i32:
; CHECK-MVEFP: @ %bb.0:
@@ -296,76 +164,23 @@ define arm_aapcs_vfpcc <5 x i32> @test_unsigned_v5f32_v5i32(<5 x float> %f) {
define arm_aapcs_vfpcc <6 x i32> @test_unsigned_v6f32_v6i32(<6 x float> %f) {
; CHECK-MVE-LABEL: test_unsigned_v6f32_v6i32:
; CHECK-MVE: @ %bb.0:
-; CHECK-MVE-NEXT: vcvt.u32.f32 s7, s5
-; CHECK-MVE-NEXT: vldr s14, .LCPI5_0
-; CHECK-MVE-NEXT: vcvt.u32.f32 s9, s4
-; CHECK-MVE-NEXT: vcvt.u32.f32 s12, s3
-; CHECK-MVE-NEXT: vcmp.f32 s5, #0
-; CHECK-MVE-NEXT: vcvt.u32.f32 s6, s1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s5, s14
-; CHECK-MVE-NEXT: vcvt.u32.f32 s8, s2
-; CHECK-MVE-NEXT: vmov r1, s7
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r1, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s4, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r1, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r2, s9
-; CHECK-MVE-NEXT: vcvt.u32.f32 s10, s0
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r2, #0
-; CHECK-MVE-NEXT: vcmp.f32 s4, s14
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s3, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r2, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r12, s12
+; CHECK-MVE-NEXT: vcvt.u32.f32 s6, s5
+; CHECK-MVE-NEXT: vcvt.u32.f32 s4, s4
+; CHECK-MVE-NEXT: vcvt.u32.f32 s2, s2
+; CHECK-MVE-NEXT: vcvt.u32.f32 s0, s0
+; CHECK-MVE-NEXT: vcvt.u32.f32 s8, s3
+; CHECK-MVE-NEXT: vcvt.u32.f32 s10, s1
+; CHECK-MVE-NEXT: vmov r1, s6
+; CHECK-MVE-NEXT: vmov r2, s4
; CHECK-MVE-NEXT: strd r2, r1, [r0, #16]
-; CHECK-MVE-NEXT: vcmp.f32 s3, s14
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r12, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s1, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r12, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r2, s6
-; CHECK-MVE-NEXT: vcmp.f32 s1, s14
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r2, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s2, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r2, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r3, s8
-; CHECK-MVE-NEXT: vcmp.f32 s2, s14
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r3, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r1, s10
-; CHECK-MVE-NEXT: vcmp.f32 s0, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r3, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r1, #0
-; CHECK-MVE-NEXT: vcmp.f32 s0, s14
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r1, #-1
-; CHECK-MVE-NEXT: vmov q0[2], q0[0], r1, r3
-; CHECK-MVE-NEXT: vmov q0[3], q0[1], r2, r12
+; CHECK-MVE-NEXT: vmov r1, s2
+; CHECK-MVE-NEXT: vmov r2, s0
+; CHECK-MVE-NEXT: vmov q0[2], q0[0], r2, r1
+; CHECK-MVE-NEXT: vmov r1, s8
+; CHECK-MVE-NEXT: vmov r2, s10
+; CHECK-MVE-NEXT: vmov q0[3], q0[1], r2, r1
; CHECK-MVE-NEXT: vstrw.32 q0, [r0]
; CHECK-MVE-NEXT: bx lr
-; CHECK-MVE-NEXT: .p2align 2
-; CHECK-MVE-NEXT: @ %bb.1:
-; CHECK-MVE-NEXT: .LCPI5_0:
-; CHECK-MVE-NEXT: .long 0x4f7fffff @ float 4.29496704E+9
;
; CHECK-MVEFP-LABEL: test_unsigned_v6f32_v6i32:
; CHECK-MVEFP: @ %bb.0:
@@ -384,88 +199,26 @@ define arm_aapcs_vfpcc <6 x i32> @test_unsigned_v6f32_v6i32(<6 x float> %f) {
define arm_aapcs_vfpcc <7 x i32> @test_unsigned_v7f32_v7i32(<7 x float> %f) {
; CHECK-MVE-LABEL: test_unsigned_v7f32_v7i32:
; CHECK-MVE: @ %bb.0:
-; CHECK-MVE-NEXT: vcvt.u32.f32 s11, s5
-; CHECK-MVE-NEXT: vldr s8, .LCPI6_0
-; CHECK-MVE-NEXT: vcvt.u32.f32 s13, s4
-; CHECK-MVE-NEXT: vcvt.u32.f32 s9, s6
-; CHECK-MVE-NEXT: vcmp.f32 s5, #0
+; CHECK-MVE-NEXT: vcvt.u32.f32 s8, s5
+; CHECK-MVE-NEXT: vcvt.u32.f32 s4, s4
+; CHECK-MVE-NEXT: vcvt.u32.f32 s6, s6
+; CHECK-MVE-NEXT: vcvt.u32.f32 s2, s2
+; CHECK-MVE-NEXT: vcvt.u32.f32 s0, s0
; CHECK-MVE-NEXT: vcvt.u32.f32 s10, s3
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s5, s8
; CHECK-MVE-NEXT: vcvt.u32.f32 s12, s1
-; CHECK-MVE-NEXT: vmov r1, s11
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r1, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r1, #-1
-; CHECK-MVE-NEXT: str r1, [r0, #20]
-; CHECK-MVE-NEXT: vcmp.f32 s4, #0
-; CHECK-MVE-NEXT: vmov r1, s13
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r1, #0
-; CHECK-MVE-NEXT: vcmp.f32 s4, s8
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r1, #-1
-; CHECK-MVE-NEXT: vcvt.u32.f32 s14, s2
-; CHECK-MVE-NEXT: str r1, [r0, #16]
-; CHECK-MVE-NEXT: vcmp.f32 s6, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r1, s9
-; CHECK-MVE-NEXT: vcvt.u32.f32 s7, s0
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r1, #0
-; CHECK-MVE-NEXT: vcmp.f32 s6, s8
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s3, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r1, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r12, s10
-; CHECK-MVE-NEXT: str r1, [r0, #24]
-; CHECK-MVE-NEXT: vcmp.f32 s3, s8
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r12, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s1, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r12, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-MVE-NEXT: vmov r1, s8
+; CHECK-MVE-NEXT: vmov r2, s4
+; CHECK-MVE-NEXT: vmov r3, s6
+; CHECK-MVE-NEXT: strd r2, r1, [r0, #16]
+; CHECK-MVE-NEXT: vmov r1, s2
+; CHECK-MVE-NEXT: vmov r2, s0
+; CHECK-MVE-NEXT: str r3, [r0, #24]
+; CHECK-MVE-NEXT: vmov q0[2], q0[0], r2, r1
+; CHECK-MVE-NEXT: vmov r1, s10
; CHECK-MVE-NEXT: vmov r2, s12
-; CHECK-MVE-NEXT: vcmp.f32 s1, s8
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r2, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s2, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r2, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r3, s14
-; CHECK-MVE-NEXT: vcmp.f32 s2, s8
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r3, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r1, s7
-; CHECK-MVE-NEXT: vcmp.f32 s0, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r3, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r1, #0
-; CHECK-MVE-NEXT: vcmp.f32 s0, s8
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r1, #-1
-; CHECK-MVE-NEXT: vmov q0[2], q0[0], r1, r3
-; CHECK-MVE-NEXT: vmov q0[3], q0[1], r2, r12
+; CHECK-MVE-NEXT: vmov q0[3], q0[1], r2, r1
; CHECK-MVE-NEXT: vstrw.32 q0, [r0]
; CHECK-MVE-NEXT: bx lr
-; CHECK-MVE-NEXT: .p2align 2
-; CHECK-MVE-NEXT: @ %bb.1:
-; CHECK-MVE-NEXT: .LCPI6_0:
-; CHECK-MVE-NEXT: .long 0x4f7fffff @ float 4.29496704E+9
;
; CHECK-MVEFP-LABEL: test_unsigned_v7f32_v7i32:
; CHECK-MVEFP: @ %bb.0:
@@ -486,101 +239,27 @@ define arm_aapcs_vfpcc <7 x i32> @test_unsigned_v7f32_v7i32(<7 x float> %f) {
define arm_aapcs_vfpcc <8 x i32> @test_unsigned_v8f32_v8i32(<8 x float> %f) {
; CHECK-MVE-LABEL: test_unsigned_v8f32_v8i32:
; CHECK-MVE: @ %bb.0:
-; CHECK-MVE-NEXT: .save {r4, r5, r7, lr}
-; CHECK-MVE-NEXT: push {r4, r5, r7, lr}
-; CHECK-MVE-NEXT: .vsave {d8}
-; CHECK-MVE-NEXT: vpush {d8}
-; CHECK-MVE-NEXT: vcvt.u32.f32 s15, s6
-; CHECK-MVE-NEXT: vldr s8, .LCPI7_0
-; CHECK-MVE-NEXT: vcvt.u32.f32 s16, s4
-; CHECK-MVE-NEXT: vcvt.u32.f32 s13, s7
-; CHECK-MVE-NEXT: vcvt.u32.f32 s12, s5
-; CHECK-MVE-NEXT: vcmp.f32 s6, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s6, s8
-; CHECK-MVE-NEXT: vcvt.u32.f32 s14, s2
-; CHECK-MVE-NEXT: vmov r12, s15
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w r12, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s4, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r12, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov lr, s16
-; CHECK-MVE-NEXT: vcmp.f32 s4, s8
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt.w lr, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcvt.u32.f32 s9, s0
-; CHECK-MVE-NEXT: vcmp.f32 s7, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w lr, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r2, s13
-; CHECK-MVE-NEXT: vcmp.f32 s7, s8
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r2, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcvt.u32.f32 s11, s3
-; CHECK-MVE-NEXT: vcmp.f32 s5, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r2, #-1
+; CHECK-MVE-NEXT: vcvt.u32.f32 s2, s2
+; CHECK-MVE-NEXT: vcvt.u32.f32 s0, s0
+; CHECK-MVE-NEXT: vcvt.u32.f32 s8, s3
; CHECK-MVE-NEXT: vcvt.u32.f32 s10, s1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r3, s12
-; CHECK-MVE-NEXT: vcmp.f32 s5, s8
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r3, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s2, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r3, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r0, s14
-; CHECK-MVE-NEXT: vmov q1[2], q1[0], lr, r12
-; CHECK-MVE-NEXT: vcmp.f32 s2, s8
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r0, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s0, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r0, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r1, s9
-; CHECK-MVE-NEXT: vmov q1[3], q1[1], r3, r2
-; CHECK-MVE-NEXT: vcmp.f32 s0, s8
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r1, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s3, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r1, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r4, s11
-; CHECK-MVE-NEXT: vcmp.f32 s3, s8
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r4, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r5, s10
-; CHECK-MVE-NEXT: vcmp.f32 s1, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r4, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s1, s8
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r5, #0
+; CHECK-MVE-NEXT: vcvt.u32.f32 s6, s6
+; CHECK-MVE-NEXT: vcvt.u32.f32 s4, s4
+; CHECK-MVE-NEXT: vcvt.u32.f32 s12, s7
+; CHECK-MVE-NEXT: vcvt.u32.f32 s14, s5
+; CHECK-MVE-NEXT: vmov r0, s2
+; CHECK-MVE-NEXT: vmov r1, s0
; CHECK-MVE-NEXT: vmov q0[2], q0[0], r1, r0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r5, #-1
-; CHECK-MVE-NEXT: vmov q0[3], q0[1], r5, r4
-; CHECK-MVE-NEXT: vpop {d8}
-; CHECK-MVE-NEXT: pop {r4, r5, r7, pc}
-; CHECK-MVE-NEXT: .p2align 2
-; CHECK-MVE-NEXT: @ %bb.1:
-; CHECK-MVE-NEXT: .LCPI7_0:
-; CHECK-MVE-NEXT: .long 0x4f7fffff @ float 4.29496704E+9
+; CHECK-MVE-NEXT: vmov r0, s8
+; CHECK-MVE-NEXT: vmov r1, s10
+; CHECK-MVE-NEXT: vmov q0[3], q0[1], r1, r0
+; CHECK-MVE-NEXT: vmov r0, s6
+; CHECK-MVE-NEXT: vmov r1, s4
+; CHECK-MVE-NEXT: vmov q1[2], q1[0], r1, r0
+; CHECK-MVE-NEXT: vmov r0, s12
+; CHECK-MVE-NEXT: vmov r1, s14
+; CHECK-MVE-NEXT: vmov q1[3], q1[1], r1, r0
+; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: test_unsigned_v8f32_v8i32:
; CHECK-MVEFP: @ %bb.0:
@@ -1378,23 +1057,9 @@ declare <8 x i32> @llvm.fptoui.sat.v8f16.v8i32 (<8 x half>)
define arm_aapcs_vfpcc <1 x i32> @test_unsigned_v1f16_v1i32(<1 x half> %f) {
; CHECK-LABEL: test_unsigned_v1f16_v1i32:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-NEXT: vldr s4, .LCPI14_0
-; CHECK-NEXT: vcvt.u32.f32 s2, s0
-; CHECK-NEXT: vcmp.f32 s0, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s0, s4
-; CHECK-NEXT: vmov r0, s2
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r0, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r0, #-1
+; CHECK-NEXT: vcvt.u32.f16 s0, s0
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bx lr
-; CHECK-NEXT: .p2align 2
-; CHECK-NEXT: @ %bb.1:
-; CHECK-NEXT: .LCPI14_0:
-; CHECK-NEXT: .long 0x4f7fffff @ float 4.29496704E+9
%x = call <1 x i32> @llvm.fptoui.sat.v1f16.v1i32(<1 x half> %f)
ret <1 x i32> %x
}
@@ -1463,47 +1128,15 @@ define arm_aapcs_vfpcc <2 x i32> @test_unsigned_v2f16_v2i32(<2 x half> %f) {
define arm_aapcs_vfpcc <3 x i32> @test_unsigned_v3f16_v3i32(<3 x half> %f) {
; CHECK-LABEL: test_unsigned_v3f16_v3i32:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vcvtb.f32.f16 s8, s1
-; CHECK-NEXT: vcvtb.f32.f16 s2, s2
-; CHECK-NEXT: vcvt.u32.f32 s10, s8
-; CHECK-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-NEXT: vcvt.u32.f32 s12, s2
-; CHECK-NEXT: vldr s4, .LCPI16_0
-; CHECK-NEXT: vcvt.u32.f32 s6, s0
-; CHECK-NEXT: vcmp.f32 s8, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s8, s4
-; CHECK-NEXT: vmov r0, s10
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r0, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s2, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r0, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r1, s12
-; CHECK-NEXT: vcmp.f32 s2, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r1, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r2, s6
-; CHECK-NEXT: vcmp.f32 s0, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r1, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s0, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r2, #0
+; CHECK-NEXT: vcvt.u32.f16 s6, s0
+; CHECK-NEXT: vcvt.u32.f16 s0, s1
+; CHECK-NEXT: vcvt.u32.f16 s4, s2
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vmov.32 q0[1], r0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r2, #-1
-; CHECK-NEXT: vmov q0[2], q0[0], r2, r1
+; CHECK-NEXT: vmov r0, s4
+; CHECK-NEXT: vmov r1, s6
+; CHECK-NEXT: vmov q0[2], q0[0], r1, r0
; CHECK-NEXT: bx lr
-; CHECK-NEXT: .p2align 2
-; CHECK-NEXT: @ %bb.1:
-; CHECK-NEXT: .LCPI16_0:
-; CHECK-NEXT: .long 0x4f7fffff @ float 4.29496704E+9
%x = call <3 x i32> @llvm.fptoui.sat.v3f16.v3i32(<3 x half> %f)
ret <3 x i32> %x
}
@@ -1511,58 +1144,19 @@ define arm_aapcs_vfpcc <3 x i32> @test_unsigned_v3f16_v3i32(<3 x half> %f) {
define arm_aapcs_vfpcc <4 x i32> @test_unsigned_v4f16_v4i32(<4 x half> %f) {
; CHECK-LABEL: test_unsigned_v4f16_v4i32:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vcvtb.f32.f16 s12, s1
-; CHECK-NEXT: vcvtt.f32.f16 s4, s0
-; CHECK-NEXT: vcvt.u32.f32 s14, s12
-; CHECK-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-NEXT: vcvtt.f32.f16 s8, s1
-; CHECK-NEXT: vcvt.u32.f32 s1, s0
-; CHECK-NEXT: vcvt.u32.f32 s10, s8
-; CHECK-NEXT: vldr s2, .LCPI17_0
-; CHECK-NEXT: vcvt.u32.f32 s6, s4
-; CHECK-NEXT: vcmp.f32 s12, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s12, s2
-; CHECK-NEXT: vmov r0, s14
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r0, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s0, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r0, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r1, s1
-; CHECK-NEXT: vcmp.f32 s0, s2
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r1, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s8, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r1, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r2, s10
-; CHECK-NEXT: vcmp.f32 s8, s2
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r2, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r3, s6
-; CHECK-NEXT: vcmp.f32 s4, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r2, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s4, s2
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r3, #0
+; CHECK-NEXT: vmovx.f16 s2, s1
+; CHECK-NEXT: vcvt.u32.f16 s4, s2
+; CHECK-NEXT: vmovx.f16 s2, s0
+; CHECK-NEXT: vcvt.u32.f16 s6, s2
+; CHECK-NEXT: vcvt.u32.f16 s2, s1
+; CHECK-NEXT: vcvt.u32.f16 s0, s0
+; CHECK-NEXT: vmov r0, s2
+; CHECK-NEXT: vmov r1, s0
; CHECK-NEXT: vmov q0[2], q0[0], r1, r0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r3, #-1
-; CHECK-NEXT: vmov q0[3], q0[1], r3, r2
+; CHECK-NEXT: vmov r0, s4
+; CHECK-NEXT: vmov r1, s6
+; CHECK-NEXT: vmov q0[3], q0[1], r1, r0
; CHECK-NEXT: bx lr
-; CHECK-NEXT: .p2align 2
-; CHECK-NEXT: @ %bb.1:
-; CHECK-NEXT: .LCPI17_0:
-; CHECK-NEXT: .long 0x4f7fffff @ float 4.29496704E+9
%x = call <4 x i32> @llvm.fptoui.sat.v4f16.v4i32(<4 x half> %f)
ret <4 x i32> %x
}
@@ -1570,71 +1164,23 @@ define arm_aapcs_vfpcc <4 x i32> @test_unsigned_v4f16_v4i32(<4 x half> %f) {
define arm_aapcs_vfpcc <5 x i32> @test_unsigned_v5f16_v5i32(<5 x half> %f) {
; CHECK-LABEL: test_unsigned_v5f16_v5i32:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vcvtb.f32.f16 s2, s2
-; CHECK-NEXT: vcvtb.f32.f16 s10, s1
-; CHECK-NEXT: vcvt.u32.f32 s3, s2
-; CHECK-NEXT: vcvtt.f32.f16 s1, s1
-; CHECK-NEXT: vcvt.u32.f32 s5, s1
-; CHECK-NEXT: vcvtb.f32.f16 s6, s0
-; CHECK-NEXT: vcvtt.f32.f16 s0, s0
-; CHECK-NEXT: vldr s4, .LCPI18_0
-; CHECK-NEXT: vcvt.u32.f32 s14, s0
-; CHECK-NEXT: vcvt.u32.f32 s12, s10
-; CHECK-NEXT: vcmp.f32 s2, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcvt.u32.f32 s8, s6
-; CHECK-NEXT: vmov r1, s3
-; CHECK-NEXT: vcmp.f32 s2, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r1, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s1, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r1, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r12, s5
-; CHECK-NEXT: str r1, [r0, #16]
-; CHECK-NEXT: vcmp.f32 s1, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r12, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s0, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r12, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r2, s14
-; CHECK-NEXT: vcmp.f32 s0, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r2, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s10, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r2, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r3, s12
-; CHECK-NEXT: vcmp.f32 s10, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r3, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vmovx.f16 s6, s0
+; CHECK-NEXT: vmovx.f16 s4, s1
+; CHECK-NEXT: vcvt.u32.f16 s8, s1
+; CHECK-NEXT: vcvt.u32.f16 s0, s0
+; CHECK-NEXT: vcvt.u32.f16 s4, s4
+; CHECK-NEXT: vcvt.u32.f16 s6, s6
; CHECK-NEXT: vmov r1, s8
-; CHECK-NEXT: vcmp.f32 s6, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r3, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r1, #0
-; CHECK-NEXT: vcmp.f32 s6, s4
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r1, #-1
-; CHECK-NEXT: vmov q0[2], q0[0], r1, r3
-; CHECK-NEXT: vmov q0[3], q0[1], r2, r12
-; CHECK-NEXT: vstrw.32 q0, [r0]
+; CHECK-NEXT: vcvt.u32.f16 s2, s2
+; CHECK-NEXT: vmov r2, s0
+; CHECK-NEXT: vmov q2[2], q2[0], r2, r1
+; CHECK-NEXT: vmov r1, s4
+; CHECK-NEXT: vmov r2, s6
+; CHECK-NEXT: vmov q2[3], q2[1], r2, r1
+; CHECK-NEXT: vmov r1, s2
+; CHECK-NEXT: str r1, [r0, #16]
+; CHECK-NEXT: vstrw.32 q2, [r0]
; CHECK-NEXT: bx lr
-; CHECK-NEXT: .p2align 2
-; CHECK-NEXT: @ %bb.1:
-; CHECK-NEXT: .LCPI18_0:
-; CHECK-NEXT: .long 0x4f7fffff @ float 4.29496704E+9
%x = call <5 x i32> @llvm.fptoui.sat.v5f16.v5i32(<5 x half> %f)
ret <5 x i32> %x
}
@@ -1642,82 +1188,26 @@ define arm_aapcs_vfpcc <5 x i32> @test_unsigned_v5f16_v5i32(<5 x half> %f) {
define arm_aapcs_vfpcc <6 x i32> @test_unsigned_v6f16_v6i32(<6 x half> %f) {
; CHECK-LABEL: test_unsigned_v6f16_v6i32:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vcvtt.f32.f16 s5, s2
-; CHECK-NEXT: vcvtb.f32.f16 s2, s2
-; CHECK-NEXT: vcvt.u32.f32 s7, s5
-; CHECK-NEXT: vcvtb.f32.f16 s10, s1
-; CHECK-NEXT: vcvt.u32.f32 s9, s2
-; CHECK-NEXT: vcvtt.f32.f16 s1, s1
-; CHECK-NEXT: vcvt.u32.f32 s3, s1
-; CHECK-NEXT: vcvtb.f32.f16 s6, s0
-; CHECK-NEXT: vcvtt.f32.f16 s0, s0
-; CHECK-NEXT: vldr s4, .LCPI19_0
-; CHECK-NEXT: vcmp.f32 s5, #0
-; CHECK-NEXT: vcvt.u32.f32 s14, s0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s5, s4
-; CHECK-NEXT: vmov r1, s7
-; CHECK-NEXT: vcvt.u32.f32 s12, s10
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r1, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s2, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r1, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r2, s9
-; CHECK-NEXT: vcvt.u32.f32 s8, s6
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r2, #0
-; CHECK-NEXT: vcmp.f32 s2, s4
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s1, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r2, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r12, s3
+; CHECK-NEXT: vmovx.f16 s8, s0
+; CHECK-NEXT: vmovx.f16 s6, s1
+; CHECK-NEXT: vcvt.u32.f16 s10, s1
+; CHECK-NEXT: vcvt.u32.f16 s0, s0
+; CHECK-NEXT: vcvt.u32.f16 s4, s2
+; CHECK-NEXT: vmovx.f16 s2, s2
+; CHECK-NEXT: vcvt.u32.f16 s6, s6
+; CHECK-NEXT: vcvt.u32.f16 s8, s8
+; CHECK-NEXT: vmov r1, s10
+; CHECK-NEXT: vcvt.u32.f16 s2, s2
+; CHECK-NEXT: vmov r2, s0
+; CHECK-NEXT: vmov q3[2], q3[0], r2, r1
+; CHECK-NEXT: vmov r1, s6
+; CHECK-NEXT: vmov r2, s8
+; CHECK-NEXT: vmov q3[3], q3[1], r2, r1
+; CHECK-NEXT: vmov r1, s2
+; CHECK-NEXT: vmov r2, s4
; CHECK-NEXT: strd r2, r1, [r0, #16]
-; CHECK-NEXT: vcmp.f32 s1, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r12, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s0, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r12, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r2, s14
-; CHECK-NEXT: vcmp.f32 s0, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r2, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s10, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r2, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r3, s12
-; CHECK-NEXT: vcmp.f32 s10, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r3, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r1, s8
-; CHECK-NEXT: vcmp.f32 s6, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r3, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r1, #0
-; CHECK-NEXT: vcmp.f32 s6, s4
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r1, #-1
-; CHECK-NEXT: vmov q0[2], q0[0], r1, r3
-; CHECK-NEXT: vmov q0[3], q0[1], r2, r12
-; CHECK-NEXT: vstrw.32 q0, [r0]
+; CHECK-NEXT: vstrw.32 q3, [r0]
; CHECK-NEXT: bx lr
-; CHECK-NEXT: .p2align 2
-; CHECK-NEXT: @ %bb.1:
-; CHECK-NEXT: .LCPI19_0:
-; CHECK-NEXT: .long 0x4f7fffff @ float 4.29496704E+9
%x = call <6 x i32> @llvm.fptoui.sat.v6f16.v6i32(<6 x half> %f)
ret <6 x i32> %x
}
@@ -1725,95 +1215,29 @@ define arm_aapcs_vfpcc <6 x i32> @test_unsigned_v6f16_v6i32(<6 x half> %f) {
define arm_aapcs_vfpcc <7 x i32> @test_unsigned_v7f16_v7i32(<7 x half> %f) {
; CHECK-LABEL: test_unsigned_v7f16_v7i32:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vcvtt.f32.f16 s9, s2
-; CHECK-NEXT: vcvtb.f32.f16 s2, s2
-; CHECK-NEXT: vcvt.u32.f32 s11, s9
-; CHECK-NEXT: vcvtb.f32.f16 s3, s3
-; CHECK-NEXT: vcvt.u32.f32 s13, s2
-; CHECK-NEXT: vldr s4, .LCPI20_0
-; CHECK-NEXT: vcvt.u32.f32 s7, s3
-; CHECK-NEXT: vcvtb.f32.f16 s10, s1
-; CHECK-NEXT: vcvtt.f32.f16 s1, s1
-; CHECK-NEXT: vcmp.f32 s9, #0
-; CHECK-NEXT: vcvt.u32.f32 s5, s1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s9, s4
-; CHECK-NEXT: vcvtb.f32.f16 s6, s0
-; CHECK-NEXT: vcvtt.f32.f16 s0, s0
-; CHECK-NEXT: vcvt.u32.f32 s12, s10
-; CHECK-NEXT: vmov r1, s11
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r1, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r1, #-1
-; CHECK-NEXT: vcvt.u32.f32 s14, s0
-; CHECK-NEXT: str r1, [r0, #20]
-; CHECK-NEXT: vcmp.f32 s2, #0
-; CHECK-NEXT: vmov r1, s13
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r1, #0
-; CHECK-NEXT: vcmp.f32 s2, s4
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r1, #-1
-; CHECK-NEXT: vcmp.f32 s3, #0
-; CHECK-NEXT: str r1, [r0, #16]
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r1, s7
-; CHECK-NEXT: vcvt.u32.f32 s8, s6
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r1, #0
-; CHECK-NEXT: vcmp.f32 s3, s4
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s1, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r1, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r12, s5
-; CHECK-NEXT: str r1, [r0, #24]
-; CHECK-NEXT: vcmp.f32 s1, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r12, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s0, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r12, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r2, s14
-; CHECK-NEXT: vcmp.f32 s0, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r2, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s10, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r2, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r3, s12
-; CHECK-NEXT: vcmp.f32 s10, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r3, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vmovx.f16 s10, s0
+; CHECK-NEXT: vmovx.f16 s8, s1
+; CHECK-NEXT: vcvt.u32.f16 s12, s1
+; CHECK-NEXT: vcvt.u32.f16 s0, s0
+; CHECK-NEXT: vcvt.u32.f16 s4, s2
+; CHECK-NEXT: vmovx.f16 s2, s2
+; CHECK-NEXT: vcvt.u32.f16 s8, s8
+; CHECK-NEXT: vcvt.u32.f16 s10, s10
+; CHECK-NEXT: vmov r1, s12
+; CHECK-NEXT: vcvt.u32.f16 s2, s2
+; CHECK-NEXT: vmov r2, s0
+; CHECK-NEXT: vcvt.u32.f16 s6, s3
+; CHECK-NEXT: vmov q3[2], q3[0], r2, r1
; CHECK-NEXT: vmov r1, s8
-; CHECK-NEXT: vcmp.f32 s6, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r3, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r1, #0
-; CHECK-NEXT: vcmp.f32 s6, s4
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r1, #-1
-; CHECK-NEXT: vmov q0[2], q0[0], r1, r3
-; CHECK-NEXT: vmov q0[3], q0[1], r2, r12
-; CHECK-NEXT: vstrw.32 q0, [r0]
+; CHECK-NEXT: vmov r2, s10
+; CHECK-NEXT: vmov q3[3], q3[1], r2, r1
+; CHECK-NEXT: vmov r1, s2
+; CHECK-NEXT: vmov r2, s4
+; CHECK-NEXT: vmov r3, s6
+; CHECK-NEXT: strd r2, r1, [r0, #16]
+; CHECK-NEXT: str r3, [r0, #24]
+; CHECK-NEXT: vstrw.32 q3, [r0]
; CHECK-NEXT: bx lr
-; CHECK-NEXT: .p2align 2
-; CHECK-NEXT: @ %bb.1:
-; CHECK-NEXT: .LCPI20_0:
-; CHECK-NEXT: .long 0x4f7fffff @ float 4.29496704E+9
%x = call <7 x i32> @llvm.fptoui.sat.v7f16.v7i32(<7 x half> %f)
ret <7 x i32> %x
}
@@ -1821,109 +1245,31 @@ define arm_aapcs_vfpcc <7 x i32> @test_unsigned_v7f16_v7i32(<7 x half> %f) {
define arm_aapcs_vfpcc <8 x i32> @test_unsigned_v8f16_v8i32(<8 x half> %f) {
; CHECK-LABEL: test_unsigned_v8f16_v8i32:
; CHECK: @ %bb.0:
-; CHECK-NEXT: .save {r4, r5, r7, lr}
-; CHECK-NEXT: push {r4, r5, r7, lr}
-; CHECK-NEXT: .vsave {d8}
-; CHECK-NEXT: vpush {d8}
-; CHECK-NEXT: vcvtt.f32.f16 s11, s3
-; CHECK-NEXT: vcvtb.f32.f16 s3, s3
-; CHECK-NEXT: vcvt.u32.f32 s15, s3
-; CHECK-NEXT: vcvtt.f32.f16 s7, s2
-; CHECK-NEXT: vcvtb.f32.f16 s2, s2
-; CHECK-NEXT: vcvt.u32.f32 s13, s11
-; CHECK-NEXT: vcvt.u32.f32 s16, s2
-; CHECK-NEXT: vldr s4, .LCPI21_0
-; CHECK-NEXT: vcvt.u32.f32 s9, s7
-; CHECK-NEXT: vcvtt.f32.f16 s10, s1
-; CHECK-NEXT: vcmp.f32 s3, #0
-; CHECK-NEXT: vcvtb.f32.f16 s1, s1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s3, s4
-; CHECK-NEXT: vcvt.u32.f32 s5, s1
-; CHECK-NEXT: vcvtt.f32.f16 s6, s0
-; CHECK-NEXT: vmov r12, s15
-; CHECK-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r12, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s2, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r12, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov lr, s16
-; CHECK-NEXT: vcmp.f32 s2, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w lr, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcvt.u32.f32 s14, s0
-; CHECK-NEXT: vcmp.f32 s11, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w lr, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r2, s13
-; CHECK-NEXT: vcmp.f32 s11, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r2, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcvt.u32.f32 s12, s10
-; CHECK-NEXT: vcmp.f32 s7, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r2, #-1
-; CHECK-NEXT: vcvt.u32.f32 s8, s6
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r3, s9
-; CHECK-NEXT: vcmp.f32 s7, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r3, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s1, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r3, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r0, s5
-; CHECK-NEXT: vcmp.f32 s1, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r0, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s0, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r0, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r1, s14
-; CHECK-NEXT: vcmp.f32 s0, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r1, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s10, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r1, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r4, s12
+; CHECK-NEXT: vmovx.f16 s4, s3
+; CHECK-NEXT: vmovx.f16 s6, s0
+; CHECK-NEXT: vcvt.u32.f16 s8, s4
+; CHECK-NEXT: vmovx.f16 s4, s2
+; CHECK-NEXT: vcvt.u32.f16 s10, s4
+; CHECK-NEXT: vmovx.f16 s4, s1
+; CHECK-NEXT: vcvt.u32.f16 s14, s2
+; CHECK-NEXT: vcvt.u32.f16 s2, s1
+; CHECK-NEXT: vcvt.u32.f16 s0, s0
+; CHECK-NEXT: vcvt.u32.f16 s4, s4
+; CHECK-NEXT: vcvt.u32.f16 s6, s6
+; CHECK-NEXT: vmov r0, s2
+; CHECK-NEXT: vmov r1, s0
+; CHECK-NEXT: vcvt.u32.f16 s12, s3
; CHECK-NEXT: vmov q0[2], q0[0], r1, r0
-; CHECK-NEXT: vcmp.f32 s10, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r4, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r5, s8
-; CHECK-NEXT: vcmp.f32 s6, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r4, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s6, s4
-; CHECK-NEXT: vmov q1[2], q1[0], lr, r12
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r5, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r5, #-1
-; CHECK-NEXT: vmov q0[3], q0[1], r5, r4
-; CHECK-NEXT: vmov q1[3], q1[1], r3, r2
-; CHECK-NEXT: vpop {d8}
-; CHECK-NEXT: pop {r4, r5, r7, pc}
-; CHECK-NEXT: .p2align 2
-; CHECK-NEXT: @ %bb.1:
-; CHECK-NEXT: .LCPI21_0:
-; CHECK-NEXT: .long 0x4f7fffff @ float 4.29496704E+9
+; CHECK-NEXT: vmov r0, s4
+; CHECK-NEXT: vmov r1, s6
+; CHECK-NEXT: vmov q0[3], q0[1], r1, r0
+; CHECK-NEXT: vmov r0, s12
+; CHECK-NEXT: vmov r1, s14
+; CHECK-NEXT: vmov q1[2], q1[0], r1, r0
+; CHECK-NEXT: vmov r0, s8
+; CHECK-NEXT: vmov r1, s10
+; CHECK-NEXT: vmov q1[3], q1[1], r1, r0
+; CHECK-NEXT: bx lr
%x = call <8 x i32> @llvm.fptoui.sat.v8f16.v8i32(<8 x half> %f)
ret <8 x i32> %x
}
@@ -2153,54 +1499,17 @@ define arm_aapcs_vfpcc <4 x i19> @test_unsigned_v4f32_v4i19(<4 x float> %f) {
define arm_aapcs_vfpcc <4 x i32> @test_unsigned_v4f32_v4i32_duplicate(<4 x float> %f) {
; CHECK-MVE-LABEL: test_unsigned_v4f32_v4i32_duplicate:
; CHECK-MVE: @ %bb.0:
-; CHECK-MVE-NEXT: vcvt.u32.f32 s10, s2
-; CHECK-MVE-NEXT: vldr s8, .LCPI27_0
-; CHECK-MVE-NEXT: vcvt.u32.f32 s12, s0
-; CHECK-MVE-NEXT: vcvt.u32.f32 s6, s3
-; CHECK-MVE-NEXT: vcvt.u32.f32 s4, s1
-; CHECK-MVE-NEXT: vcmp.f32 s2, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s2, s8
-; CHECK-MVE-NEXT: vmov r0, s10
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r0, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s0, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r0, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r1, s12
-; CHECK-MVE-NEXT: vcmp.f32 s0, s8
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r1, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s3, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r1, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r2, s6
-; CHECK-MVE-NEXT: vcmp.f32 s3, s8
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r2, #0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vmov r3, s4
-; CHECK-MVE-NEXT: vcmp.f32 s1, #0
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r2, #-1
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: vcmp.f32 s1, s8
-; CHECK-MVE-NEXT: it lt
-; CHECK-MVE-NEXT: movlt r3, #0
+; CHECK-MVE-NEXT: vcvt.u32.f32 s2, s2
+; CHECK-MVE-NEXT: vcvt.u32.f32 s0, s0
+; CHECK-MVE-NEXT: vcvt.u32.f32 s4, s3
+; CHECK-MVE-NEXT: vcvt.u32.f32 s6, s1
+; CHECK-MVE-NEXT: vmov r0, s2
+; CHECK-MVE-NEXT: vmov r1, s0
; CHECK-MVE-NEXT: vmov q0[2], q0[0], r1, r0
-; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-MVE-NEXT: it gt
-; CHECK-MVE-NEXT: movgt.w r3, #-1
-; CHECK-MVE-NEXT: vmov q0[3], q0[1], r3, r2
+; CHECK-MVE-NEXT: vmov r0, s4
+; CHECK-MVE-NEXT: vmov r1, s6
+; CHECK-MVE-NEXT: vmov q0[3], q0[1], r1, r0
; CHECK-MVE-NEXT: bx lr
-; CHECK-MVE-NEXT: .p2align 2
-; CHECK-MVE-NEXT: @ %bb.1:
-; CHECK-MVE-NEXT: .LCPI27_0:
-; CHECK-MVE-NEXT: .long 0x4f7fffff @ float 4.29496704E+9
;
; CHECK-MVEFP-LABEL: test_unsigned_v4f32_v4i32_duplicate:
; CHECK-MVEFP: @ %bb.0:
@@ -4611,109 +3920,31 @@ define arm_aapcs_vfpcc <8 x i19> @test_unsigned_v8f16_v8i19(<8 x half> %f) {
define arm_aapcs_vfpcc <8 x i32> @test_unsigned_v8f16_v8i32_duplicate(<8 x half> %f) {
; CHECK-LABEL: test_unsigned_v8f16_v8i32_duplicate:
; CHECK: @ %bb.0:
-; CHECK-NEXT: .save {r4, r5, r7, lr}
-; CHECK-NEXT: push {r4, r5, r7, lr}
-; CHECK-NEXT: .vsave {d8}
-; CHECK-NEXT: vpush {d8}
-; CHECK-NEXT: vcvtt.f32.f16 s11, s3
-; CHECK-NEXT: vcvtb.f32.f16 s3, s3
-; CHECK-NEXT: vcvt.u32.f32 s15, s3
-; CHECK-NEXT: vcvtt.f32.f16 s7, s2
-; CHECK-NEXT: vcvtb.f32.f16 s2, s2
-; CHECK-NEXT: vcvt.u32.f32 s13, s11
-; CHECK-NEXT: vcvt.u32.f32 s16, s2
-; CHECK-NEXT: vldr s4, .LCPI47_0
-; CHECK-NEXT: vcvt.u32.f32 s9, s7
-; CHECK-NEXT: vcvtt.f32.f16 s10, s1
-; CHECK-NEXT: vcmp.f32 s3, #0
-; CHECK-NEXT: vcvtb.f32.f16 s1, s1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s3, s4
-; CHECK-NEXT: vcvt.u32.f32 s5, s1
-; CHECK-NEXT: vcvtt.f32.f16 s6, s0
-; CHECK-NEXT: vmov r12, s15
-; CHECK-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r12, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s2, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r12, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov lr, s16
-; CHECK-NEXT: vcmp.f32 s2, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w lr, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcvt.u32.f32 s14, s0
-; CHECK-NEXT: vcmp.f32 s11, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w lr, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r2, s13
-; CHECK-NEXT: vcmp.f32 s11, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r2, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcvt.u32.f32 s12, s10
-; CHECK-NEXT: vcmp.f32 s7, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r2, #-1
-; CHECK-NEXT: vcvt.u32.f32 s8, s6
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r3, s9
-; CHECK-NEXT: vcmp.f32 s7, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r3, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s1, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r3, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r0, s5
-; CHECK-NEXT: vcmp.f32 s1, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r0, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s0, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r0, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r1, s14
-; CHECK-NEXT: vcmp.f32 s0, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r1, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s10, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r1, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r4, s12
+; CHECK-NEXT: vmovx.f16 s4, s3
+; CHECK-NEXT: vmovx.f16 s6, s0
+; CHECK-NEXT: vcvt.u32.f16 s8, s4
+; CHECK-NEXT: vmovx.f16 s4, s2
+; CHECK-NEXT: vcvt.u32.f16 s10, s4
+; CHECK-NEXT: vmovx.f16 s4, s1
+; CHECK-NEXT: vcvt.u32.f16 s14, s2
+; CHECK-NEXT: vcvt.u32.f16 s2, s1
+; CHECK-NEXT: vcvt.u32.f16 s0, s0
+; CHECK-NEXT: vcvt.u32.f16 s4, s4
+; CHECK-NEXT: vcvt.u32.f16 s6, s6
+; CHECK-NEXT: vmov r0, s2
+; CHECK-NEXT: vmov r1, s0
+; CHECK-NEXT: vcvt.u32.f16 s12, s3
; CHECK-NEXT: vmov q0[2], q0[0], r1, r0
-; CHECK-NEXT: vcmp.f32 s10, s4
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r4, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vmov r5, s8
-; CHECK-NEXT: vcmp.f32 s6, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r4, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s6, s4
-; CHECK-NEXT: vmov q1[2], q1[0], lr, r12
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r5, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r5, #-1
-; CHECK-NEXT: vmov q0[3], q0[1], r5, r4
-; CHECK-NEXT: vmov q1[3], q1[1], r3, r2
-; CHECK-NEXT: vpop {d8}
-; CHECK-NEXT: pop {r4, r5, r7, pc}
-; CHECK-NEXT: .p2align 2
-; CHECK-NEXT: @ %bb.1:
-; CHECK-NEXT: .LCPI47_0:
-; CHECK-NEXT: .long 0x4f7fffff @ float 4.29496704E+9
+; CHECK-NEXT: vmov r0, s4
+; CHECK-NEXT: vmov r1, s6
+; CHECK-NEXT: vmov q0[3], q0[1], r1, r0
+; CHECK-NEXT: vmov r0, s12
+; CHECK-NEXT: vmov r1, s14
+; CHECK-NEXT: vmov q1[2], q1[0], r1, r0
+; CHECK-NEXT: vmov r0, s8
+; CHECK-NEXT: vmov r1, s10
+; CHECK-NEXT: vmov q1[3], q1[1], r1, r0
+; CHECK-NEXT: bx lr
%x = call <8 x i32> @llvm.fptoui.sat.v8f16.v8i32(<8 x half> %f)
ret <8 x i32> %x
}
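
For anyone reading the updated CHECK lines rather than regenerating them: the change visible throughout these tests is that the old vcmp / it / movlt / movgt clamping sequence around each vcvt.u32.f32 disappears, and a bare vcvt.u32.f16 or vcvt.u32.f32 is emitted instead, since that instruction already saturates out-of-range inputs. A minimal scalar sketch of the IR shape being exercised (the function name @example_fptoui_sat is invented for illustration and does not appear in the tests):

declare i32 @llvm.fptoui.sat.i32.f32(float)

define i32 @example_fptoui_sat(float %x) {
  ; With VFP available this is now expected to select a single
  ; saturating vcvt.u32.f32 rather than a compare-and-select sequence.
  %r = call i32 @llvm.fptoui.sat.i32.f32(float %x)
  ret i32 %r
}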