[llvm] bd09593 - [ARM] Add Extra FpToIntSat tests.

David Green via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 25 12:10:32 PDT 2021


Author: David Green
Date: 2021-08-25T20:10:18+01:00
New Revision: bd0959354f43ac6aa24d1a7de9a3af518a21b72d

URL: https://github.com/llvm/llvm-project/commit/bd0959354f43ac6aa24d1a7de9a3af518a21b72d
DIFF: https://github.com/llvm/llvm-project/commit/bd0959354f43ac6aa24d1a7de9a3af518a21b72d.diff

LOG: [ARM] Add Extra FpToIntSat tests.

This adds extra MVE vector fptosi.sat and fptoui.sat tests, and adds or
adjusts the existing scalar tests to cover more architectures and
instruction combinations.
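
For context, the intrinsics under test saturate instead of wrapping on
out-of-range inputs: llvm.fptosi.sat.iN clamps to the signed N-bit range,
llvm.fptoui.sat.iN clamps to [0, 2^N - 1], and both return 0 for NaN. A
minimal IR sketch of the semantics being exercised (the function names are
illustrative only, not part of this patch):

    declare i8 @llvm.fptosi.sat.i8.f32(float)
    declare i8 @llvm.fptoui.sat.i8.f32(float)

    ; Signed: inputs below -128.0 give -128, above 127.0 give 127, NaN gives 0.
    define i8 @clamp_signed(float %f) {
      %r = call i8 @llvm.fptosi.sat.i8.f32(float %f)
      ret i8 %r
    }

    ; Unsigned: inputs below 0.0 give 0, above 255.0 give 255, NaN gives 0.
    define i8 @clamp_unsigned(float %f) {
      %r = call i8 @llvm.fptoui.sat.i8.f32(float %f)
      ret i8 %r
    }

The compare/select (or vmaxnm/vminnm plus NaN check) sequences in the
generated assembly below implement exactly this clamping.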

Added: 
    llvm/test/CodeGen/ARM/fptoi-sat-store.ll
    llvm/test/CodeGen/ARM/fptoui-sat-scalar.ll
    llvm/test/CodeGen/Thumb2/mve-fptosi-sat-vector.ll
    llvm/test/CodeGen/Thumb2/mve-fptoui-sat-vector.ll

Modified: 
    llvm/test/CodeGen/ARM/fptosi-sat-scalar.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/ARM/fptoi-sat-store.ll b/llvm/test/CodeGen/ARM/fptoi-sat-store.ll
new file mode 100644
index 0000000000000..3a6386d8f890b
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/fptoi-sat-store.ll
@@ -0,0 +1,373 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv6-none-eabi -float-abi=soft %s -o - | FileCheck %s --check-prefixes=SOFT
+; RUN: llc -mtriple=thumbv7-none-eabi -mattr=+vfp2 %s -o - | FileCheck %s --check-prefixes=VFP,VFP2
+; RUN: llc -mtriple=thumbv8.1m.main-eabi -mattr=+fullfp16,+fp64 %s -o - | FileCheck %s --check-prefixes=VFP,FP16
+
+declare i32 @llvm.fptosi.sat.i32.f64(double)
+declare i32 @llvm.fptosi.sat.i32.f32(float)
+declare i32 @llvm.fptoui.sat.i32.f64(double)
+declare i32 @llvm.fptoui.sat.i32.f32(float)
+
+define void @test_signed_i32_f32(i32* %d, float %f) nounwind {
+; SOFT-LABEL: test_signed_i32_f32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #4
+; SOFT-NEXT:    sub sp, #4
+; SOFT-NEXT:    mov r5, r1
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    movs r0, #207
+; SOFT-NEXT:    lsls r1, r0, #24
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    bl __aeabi_f2iz
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    beq .LBB0_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    b .LBB0_3
+; SOFT-NEXT:  .LBB0_2:
+; SOFT-NEXT:    movs r0, #1
+; SOFT-NEXT:    lsls r6, r0, #31
+; SOFT-NEXT:  .LBB0_3:
+; SOFT-NEXT:    ldr r1, .LCPI0_0
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    beq .LBB0_5
+; SOFT-NEXT:  @ %bb.4:
+; SOFT-NEXT:    ldr r6, .LCPI0_1
+; SOFT-NEXT:  .LBB0_5:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    bl __aeabi_fcmpun
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    beq .LBB0_7
+; SOFT-NEXT:  @ %bb.6:
+; SOFT-NEXT:    movs r6, #0
+; SOFT-NEXT:  .LBB0_7:
+; SOFT-NEXT:    str r6, [r4]
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.8:
+; SOFT-NEXT:  .LCPI0_0:
+; SOFT-NEXT:    .long 1325400063 @ 0x4effffff
+; SOFT-NEXT:  .LCPI0_1:
+; SOFT-NEXT:    .long 2147483647 @ 0x7fffffff
+;
+; VFP-LABEL: test_signed_i32_f32:
+; VFP:       @ %bb.0:
+; VFP-NEXT:    vmov s0, r1
+; VFP-NEXT:    vldr s2, .LCPI0_0
+; VFP-NEXT:    vldr s6, .LCPI0_1
+; VFP-NEXT:    vcvt.s32.f32 s4, s0
+; VFP-NEXT:    vcmp.f32 s0, s2
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s0, s6
+; VFP-NEXT:    vmov r1, s4
+; VFP-NEXT:    it lt
+; VFP-NEXT:    movlt.w r1, #-2147483648
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it gt
+; VFP-NEXT:    mvngt r1, #-2147483648
+; VFP-NEXT:    vcmp.f32 s0, s0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it vs
+; VFP-NEXT:    movvs r1, #0
+; VFP-NEXT:    str r1, [r0]
+; VFP-NEXT:    bx lr
+; VFP-NEXT:    .p2align 2
+; VFP-NEXT:  @ %bb.1:
+; VFP-NEXT:  .LCPI0_0:
+; VFP-NEXT:    .long 0xcf000000 @ float -2.14748365E+9
+; VFP-NEXT:  .LCPI0_1:
+; VFP-NEXT:    .long 0x4effffff @ float 2.14748352E+9
+    %r = call i32 @llvm.fptosi.sat.i32.f32(float %f)
+    store i32 %r, i32* %d, align 4
+    ret void
+}
+
+define void @test_signed_i32_f64(i32* %d, double %f) nounwind {
+; SOFT-LABEL: test_signed_i32_f64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #12
+; SOFT-NEXT:    sub sp, #12
+; SOFT-NEXT:    mov r5, r3
+; SOFT-NEXT:    mov r6, r2
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    ldr r2, .LCPI1_0
+; SOFT-NEXT:    ldr r3, .LCPI1_1
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    bl __aeabi_dcmpgt
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    movs r7, #0
+; SOFT-NEXT:    ldr r3, .LCPI1_2
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r7
+; SOFT-NEXT:    bl __aeabi_dcmpge
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    bl __aeabi_d2iz
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    bne .LBB1_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    movs r0, #1
+; SOFT-NEXT:    lsls r0, r0, #31
+; SOFT-NEXT:  .LBB1_2:
+; SOFT-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    cmp r1, #0
+; SOFT-NEXT:    bne .LBB1_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    b .LBB1_5
+; SOFT-NEXT:  .LBB1_4:
+; SOFT-NEXT:    ldr r4, .LCPI1_3
+; SOFT-NEXT:  .LBB1_5:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r6
+; SOFT-NEXT:    mov r3, r5
+; SOFT-NEXT:    bl __aeabi_dcmpun
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB1_7
+; SOFT-NEXT:  @ %bb.6:
+; SOFT-NEXT:    mov r7, r4
+; SOFT-NEXT:  .LBB1_7:
+; SOFT-NEXT:    ldr r0, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:    str r7, [r0]
+; SOFT-NEXT:    add sp, #12
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.8:
+; SOFT-NEXT:  .LCPI1_0:
+; SOFT-NEXT:    .long 4290772992 @ 0xffc00000
+; SOFT-NEXT:  .LCPI1_1:
+; SOFT-NEXT:    .long 1105199103 @ 0x41dfffff
+; SOFT-NEXT:  .LCPI1_2:
+; SOFT-NEXT:    .long 3252682752 @ 0xc1e00000
+; SOFT-NEXT:  .LCPI1_3:
+; SOFT-NEXT:    .long 2147483647 @ 0x7fffffff
+;
+; VFP2-LABEL: test_signed_i32_f64:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    vmov d16, r2, r3
+; VFP2-NEXT:    vldr d17, .LCPI1_0
+; VFP2-NEXT:    vldr d18, .LCPI1_1
+; VFP2-NEXT:    vcvt.s32.f64 s0, d16
+; VFP2-NEXT:    vcmp.f64 d16, d17
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vmov r1, s0
+; VFP2-NEXT:    vcmp.f64 d16, d18
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt.w r1, #-2147483648
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    mvngt r1, #-2147483648
+; VFP2-NEXT:    vcmp.f64 d16, d16
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it vs
+; VFP2-NEXT:    movvs r1, #0
+; VFP2-NEXT:    str r1, [r0]
+; VFP2-NEXT:    bx lr
+; VFP2-NEXT:    .p2align 3
+; VFP2-NEXT:  @ %bb.1:
+; VFP2-NEXT:  .LCPI1_0:
+; VFP2-NEXT:    .long 0 @ double -2147483648
+; VFP2-NEXT:    .long 3252682752
+; VFP2-NEXT:  .LCPI1_1:
+; VFP2-NEXT:    .long 4290772992 @ double 2147483647
+; VFP2-NEXT:    .long 1105199103
+;
+; FP16-LABEL: test_signed_i32_f64:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vldr d0, .LCPI1_0
+; FP16-NEXT:    vmov d1, r2, r3
+; FP16-NEXT:    vldr d2, .LCPI1_1
+; FP16-NEXT:    vmaxnm.f64 d0, d1, d0
+; FP16-NEXT:    vcmp.f64 d1, d1
+; FP16-NEXT:    vminnm.f64 d0, d0, d2
+; FP16-NEXT:    vcvt.s32.f64 s0, d0
+; FP16-NEXT:    vmov r1, s0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r1, #0
+; FP16-NEXT:    str r1, [r0]
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 3
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI1_0:
+; FP16-NEXT:    .long 0 @ double -2147483648
+; FP16-NEXT:    .long 3252682752
+; FP16-NEXT:  .LCPI1_1:
+; FP16-NEXT:    .long 4290772992 @ double 2147483647
+; FP16-NEXT:    .long 1105199103
+    %r = call i32 @llvm.fptosi.sat.i32.f64(double %f)
+    store i32 %r, i32* %d, align 4
+    ret void
+}
+
+define void @test_unsigned_i32_f32(i32* %d, float %f) nounwind {
+; SOFT-LABEL: test_unsigned_i32_f32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #4
+; SOFT-NEXT:    sub sp, #4
+; SOFT-NEXT:    mov r7, r1
+; SOFT-NEXT:    str r0, [sp] @ 4-byte Spill
+; SOFT-NEXT:    ldr r1, .LCPI2_0
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    movs r5, #0
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    bl __aeabi_f2uiz
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    bne .LBB2_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:  .LBB2_2:
+; SOFT-NEXT:    cmp r6, #0
+; SOFT-NEXT:    beq .LBB2_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mvns r0, r5
+; SOFT-NEXT:  .LBB2_4:
+; SOFT-NEXT:    ldr r1, [sp] @ 4-byte Reload
+; SOFT-NEXT:    str r0, [r1]
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:  .LCPI2_0:
+; SOFT-NEXT:    .long 1333788671 @ 0x4f7fffff
+;
+; VFP-LABEL: test_unsigned_i32_f32:
+; VFP:       @ %bb.0:
+; VFP-NEXT:    vmov s0, r1
+; VFP-NEXT:    vldr s4, .LCPI2_0
+; VFP-NEXT:    vcvt.u32.f32 s2, s0
+; VFP-NEXT:    vcmp.f32 s0, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s0, s4
+; VFP-NEXT:    vmov r1, s2
+; VFP-NEXT:    it lt
+; VFP-NEXT:    movlt r1, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it gt
+; VFP-NEXT:    movgt.w r1, #-1
+; VFP-NEXT:    str r1, [r0]
+; VFP-NEXT:    bx lr
+; VFP-NEXT:    .p2align 2
+; VFP-NEXT:  @ %bb.1:
+; VFP-NEXT:  .LCPI2_0:
+; VFP-NEXT:    .long 0x4f7fffff @ float 4.29496704E+9
+    %r = call i32 @llvm.fptoui.sat.i32.f32(float %f)
+    store i32 %r, i32* %d, align 4
+    ret void
+}
+
+define void @test_unsigned_i32_f64(i32* %d, double %f) nounwind {
+; SOFT-LABEL: test_unsigned_i32_f64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #12
+; SOFT-NEXT:    sub sp, #12
+; SOFT-NEXT:    mov r5, r3
+; SOFT-NEXT:    mov r4, r2
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    ldr r2, .LCPI3_0
+; SOFT-NEXT:    ldr r3, .LCPI3_1
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    bl __aeabi_dcmpgt
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    movs r6, #0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r6
+; SOFT-NEXT:    mov r3, r6
+; SOFT-NEXT:    bl __aeabi_dcmpge
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    bl __aeabi_d2uiz
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    bne .LBB3_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:  .LBB3_2:
+; SOFT-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    cmp r1, #0
+; SOFT-NEXT:    beq .LBB3_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mvns r0, r6
+; SOFT-NEXT:  .LBB3_4:
+; SOFT-NEXT:    ldr r1, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:    str r0, [r1]
+; SOFT-NEXT:    add sp, #12
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:  .LCPI3_0:
+; SOFT-NEXT:    .long 4292870144 @ 0xffe00000
+; SOFT-NEXT:  .LCPI3_1:
+; SOFT-NEXT:    .long 1106247679 @ 0x41efffff
+;
+; VFP2-LABEL: test_unsigned_i32_f64:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    vmov d16, r2, r3
+; VFP2-NEXT:    vldr d17, .LCPI3_0
+; VFP2-NEXT:    vcmp.f64 d16, #0
+; VFP2-NEXT:    vcvt.u32.f64 s0, d16
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vmov r1, s0
+; VFP2-NEXT:    vcmp.f64 d16, d17
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r1, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r1, #-1
+; VFP2-NEXT:    str r1, [r0]
+; VFP2-NEXT:    bx lr
+; VFP2-NEXT:    .p2align 3
+; VFP2-NEXT:  @ %bb.1:
+; VFP2-NEXT:  .LCPI3_0:
+; VFP2-NEXT:    .long 4292870144 @ double 4294967295
+; VFP2-NEXT:    .long 1106247679
+;
+; FP16-LABEL: test_unsigned_i32_f64:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vldr d0, .LCPI3_0
+; FP16-NEXT:    vmov d1, r2, r3
+; FP16-NEXT:    vldr d2, .LCPI3_1
+; FP16-NEXT:    vmaxnm.f64 d0, d1, d0
+; FP16-NEXT:    vminnm.f64 d0, d0, d2
+; FP16-NEXT:    vcvt.u32.f64 s0, d0
+; FP16-NEXT:    vstr s0, [r0]
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 3
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI3_0:
+; FP16-NEXT:    .long 0 @ double 0
+; FP16-NEXT:    .long 0
+; FP16-NEXT:  .LCPI3_1:
+; FP16-NEXT:    .long 4292870144 @ double 4294967295
+; FP16-NEXT:    .long 1106247679
+    %r = call i32 @llvm.fptoui.sat.i32.f64(double %f)
+    store i32 %r, i32* %d, align 4
+    ret void
+}

diff --git a/llvm/test/CodeGen/ARM/fptosi-sat-scalar.ll b/llvm/test/CodeGen/ARM/fptosi-sat-scalar.ll
index 6a0e38f744d05..97255c5503f69 100644
--- a/llvm/test/CodeGen/ARM/fptosi-sat-scalar.ll
+++ b/llvm/test/CodeGen/ARM/fptosi-sat-scalar.ll
@@ -1,6 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=arm-eabi -float-abi=soft %s -o - | FileCheck %s --check-prefixes=SOFT
-; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o - | FileCheck %s --check-prefixes=VFP2
+; RUN: llc -mtriple=thumbv6-none-eabi -float-abi=soft %s -o - | FileCheck %s --check-prefixes=SOFT
+; RUN: llc -mtriple=thumbv7-none-eabi -mattr=+vfp2 %s -o - | FileCheck %s --check-prefixes=VFP,VFP2
+; RUN: llc -mtriple=thumbv8.1m.main-eabi -mattr=+fullfp16,+fp64 %s -o - | FileCheck %s --check-prefixes=VFP,FP16
 
 ;
 ; 32-bit float to signed integer
@@ -20,53 +21,88 @@ declare i128 @llvm.fptosi.sat.i128.f32(float)
 define i1 @test_signed_i1_f32(float %f) nounwind {
 ; SOFT-LABEL: test_signed_i1_f32:
 ; SOFT:       @ %bb.0:
-; SOFT-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; SOFT-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; SOFT-NEXT:    mov r1, #0
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #4
+; SOFT-NEXT:    sub sp, #4
 ; SOFT-NEXT:    mov r4, r0
-; SOFT-NEXT:    bl __aeabi_fcmpgt
-; SOFT-NEXT:    mov r1, #1065353216
-; SOFT-NEXT:    mov r5, r0
-; SOFT-NEXT:    orr r1, r1, #-2147483648
-; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    ldr r1, .LCPI0_0
 ; SOFT-NEXT:    bl __aeabi_fcmpge
-; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    mov r7, r0
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_f2iz
-; SOFT-NEXT:    mov r7, r0
-; SOFT-NEXT:    cmp r6, #0
-; SOFT-NEXT:    mvneq r7, #0
-; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    movs r5, #0
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    beq .LBB0_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    b .LBB0_3
+; SOFT-NEXT:  .LBB0_2:
+; SOFT-NEXT:    mvns r6, r5
+; SOFT-NEXT:  .LBB0_3:
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    mov r7, r5
+; SOFT-NEXT:    bne .LBB0_5
+; SOFT-NEXT:  @ %bb.4:
+; SOFT-NEXT:    mov r7, r6
+; SOFT-NEXT:  .LBB0_5:
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    movne r7, #0
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    movne r7, #0
-; SOFT-NEXT:    mov r0, r7
-; SOFT-NEXT:    pop {r4, r5, r6, r7, r11, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    bne .LBB0_7
+; SOFT-NEXT:  @ %bb.6:
+; SOFT-NEXT:    mov r5, r7
+; SOFT-NEXT:  .LBB0_7:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.8:
+; SOFT-NEXT:  .LCPI0_0:
+; SOFT-NEXT:    .long 3212836864 @ 0xbf800000
 ;
 ; VFP2-LABEL: test_signed_i1_f32:
 ; VFP2:       @ %bb.0:
-; VFP2-NEXT:    vmov s0, r0
-; VFP2-NEXT:    vldr s2, .LCPI0_0
-; VFP2-NEXT:    vcmp.f32 s0, s2
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcvt.s32.f32 s4, s0
-; VFP2-NEXT:    vcmp.f32 s0, #0
+; VFP2-NEXT:    vmov s2, r0
+; VFP2-NEXT:    vmov.f32 s0, #-1.000000e+00
+; VFP2-NEXT:    vcvt.s32.f32 s4, s2
+; VFP2-NEXT:    vcmp.f32 s2, s0
 ; VFP2-NEXT:    vmov r0, s4
-; VFP2-NEXT:    mvnlt r0, #0
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s0, s0
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt.w r0, #-1
+; VFP2-NEXT:    vcmp.f32 s2, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
 ; VFP2-NEXT:    movgt r0, #0
+; VFP2-NEXT:    vcmp.f32 s2, s2
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r0, #0
-; VFP2-NEXT:    mov pc, lr
-; VFP2-NEXT:    .p2align 2
-; VFP2-NEXT:  @ %bb.1:
-; VFP2-NEXT:  .LCPI0_0:
-; VFP2-NEXT:    .long 0xbf800000 @ float -1
+; VFP2-NEXT:    bx lr
+;
+; FP16-LABEL: test_signed_i1_f32:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vmov s2, r0
+; FP16-NEXT:    vldr s4, .LCPI0_0
+; FP16-NEXT:    vmov.f32 s0, #-1.000000e+00
+; FP16-NEXT:    vmaxnm.f32 s0, s2, s0
+; FP16-NEXT:    vcmp.f32 s2, s2
+; FP16-NEXT:    vminnm.f32 s0, s0, s4
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcvt.s32.f32 s0, s0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r0, #0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI0_0:
+; FP16-NEXT:    .long 0x00000000 @ float 0
     %x = call i1 @llvm.fptosi.sat.i1.f32(float %f)
     ret i1 %x
 }
@@ -74,56 +110,98 @@ define i1 @test_signed_i1_f32(float %f) nounwind {
 define i8 @test_signed_i8_f32(float %f) nounwind {
 ; SOFT-LABEL: test_signed_i8_f32:
 ; SOFT:       @ %bb.0:
-; SOFT-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; SOFT-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; SOFT-NEXT:    mov r1, #16646144
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #4
+; SOFT-NEXT:    sub sp, #4
 ; SOFT-NEXT:    mov r4, r0
-; SOFT-NEXT:    orr r1, r1, #1107296256
-; SOFT-NEXT:    bl __aeabi_fcmpgt
-; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    movs r0, #195
+; SOFT-NEXT:    lsls r1, r0, #24
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mov r1, #-1023410176
 ; SOFT-NEXT:    bl __aeabi_fcmpge
-; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    mov r7, r0
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_f2iz
-; SOFT-NEXT:    mov r7, r0
-; SOFT-NEXT:    cmp r6, #0
-; SOFT-NEXT:    mvneq r7, #127
-; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    movs r5, #127
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    beq .LBB1_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    b .LBB1_3
+; SOFT-NEXT:  .LBB1_2:
+; SOFT-NEXT:    mvns r6, r5
+; SOFT-NEXT:  .LBB1_3:
+; SOFT-NEXT:    ldr r1, .LCPI1_0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB1_5
+; SOFT-NEXT:  @ %bb.4:
+; SOFT-NEXT:    mov r5, r6
+; SOFT-NEXT:  .LBB1_5:
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    movne r7, #127
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    movne r7, #0
-; SOFT-NEXT:    mov r0, r7
-; SOFT-NEXT:    pop {r4, r5, r6, r7, r11, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    beq .LBB1_7
+; SOFT-NEXT:  @ %bb.6:
+; SOFT-NEXT:    movs r5, #0
+; SOFT-NEXT:  .LBB1_7:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.8:
+; SOFT-NEXT:  .LCPI1_0:
+; SOFT-NEXT:    .long 1123942400 @ 0x42fe0000
 ;
 ; VFP2-LABEL: test_signed_i8_f32:
 ; VFP2:       @ %bb.0:
 ; VFP2-NEXT:    vmov s0, r0
 ; VFP2-NEXT:    vldr s2, .LCPI1_0
 ; VFP2-NEXT:    vldr s6, .LCPI1_1
+; VFP2-NEXT:    vcvt.s32.f32 s4, s0
 ; VFP2-NEXT:    vcmp.f32 s0, s2
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcvt.s32.f32 s4, s0
 ; VFP2-NEXT:    vcmp.f32 s0, s6
 ; VFP2-NEXT:    vmov r0, s4
+; VFP2-NEXT:    it lt
 ; VFP2-NEXT:    mvnlt r0, #127
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s0, s0
+; VFP2-NEXT:    it gt
 ; VFP2-NEXT:    movgt r0, #127
+; VFP2-NEXT:    vcmp.f32 s0, s0
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r0, #0
-; VFP2-NEXT:    mov pc, lr
+; VFP2-NEXT:    bx lr
 ; VFP2-NEXT:    .p2align 2
 ; VFP2-NEXT:  @ %bb.1:
 ; VFP2-NEXT:  .LCPI1_0:
 ; VFP2-NEXT:    .long 0xc3000000 @ float -128
 ; VFP2-NEXT:  .LCPI1_1:
 ; VFP2-NEXT:    .long 0x42fe0000 @ float 127
+;
+; FP16-LABEL: test_signed_i8_f32:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vldr s0, .LCPI1_0
+; FP16-NEXT:    vmov s2, r0
+; FP16-NEXT:    vldr s4, .LCPI1_1
+; FP16-NEXT:    vmaxnm.f32 s0, s2, s0
+; FP16-NEXT:    vcmp.f32 s2, s2
+; FP16-NEXT:    vminnm.f32 s0, s0, s4
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcvt.s32.f32 s0, s0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r0, #0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI1_0:
+; FP16-NEXT:    .long 0xc3000000 @ float -128
+; FP16-NEXT:  .LCPI1_1:
+; FP16-NEXT:    .long 0x42fe0000 @ float 127
     %x = call i8 @llvm.fptosi.sat.i8.f32(float %f)
     ret i8 %x
 }
@@ -133,67 +211,97 @@ define i13 @test_signed_i13_f32(float %f) nounwind {
 ; SOFT:       @ %bb.0:
 ; SOFT-NEXT:    .save {r4, r5, r6, lr}
 ; SOFT-NEXT:    push {r4, r5, r6, lr}
-; SOFT-NEXT:    mov r1, #92274688
 ; SOFT-NEXT:    mov r4, r0
-; SOFT-NEXT:    orr r1, r1, #-1073741824
+; SOFT-NEXT:    ldr r1, .LCPI2_0
 ; SOFT-NEXT:    bl __aeabi_fcmpge
-; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r6, r0
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_f2iz
-; SOFT-NEXT:    mov r6, r0
-; SOFT-NEXT:    ldr r0, .LCPI2_0
-; SOFT-NEXT:    ldr r1, .LCPI2_1
-; SOFT-NEXT:    cmp r5, #0
-; SOFT-NEXT:    moveq r6, r0
+; SOFT-NEXT:    cmp r6, #0
+; SOFT-NEXT:    beq .LBB2_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    b .LBB2_3
+; SOFT-NEXT:  .LBB2_2:
+; SOFT-NEXT:    ldr r5, .LCPI2_1
+; SOFT-NEXT:  .LBB2_3:
+; SOFT-NEXT:    ldr r1, .LCPI2_2
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_fcmpgt
-; SOFT-NEXT:    mov r1, #255
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    orr r1, r1, #3840
+; SOFT-NEXT:    beq .LBB2_5
+; SOFT-NEXT:  @ %bb.4:
+; SOFT-NEXT:    ldr r5, .LCPI2_3
+; SOFT-NEXT:  .LBB2_5:
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    movne r6, r1
 ; SOFT-NEXT:    mov r1, r4
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    movne r6, #0
-; SOFT-NEXT:    mov r0, r6
-; SOFT-NEXT:    pop {r4, r5, r6, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    beq .LBB2_7
+; SOFT-NEXT:  @ %bb.6:
+; SOFT-NEXT:    movs r5, #0
+; SOFT-NEXT:  .LBB2_7:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
 ; SOFT-NEXT:    .p2align 2
-; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:  @ %bb.8:
 ; SOFT-NEXT:  .LCPI2_0:
-; SOFT-NEXT:    .long 4294963200 @ 0xfffff000
+; SOFT-NEXT:    .long 3313500160 @ 0xc5800000
 ; SOFT-NEXT:  .LCPI2_1:
+; SOFT-NEXT:    .long 4294963200 @ 0xfffff000
+; SOFT-NEXT:  .LCPI2_2:
 ; SOFT-NEXT:    .long 1166012416 @ 0x457ff000
+; SOFT-NEXT:  .LCPI2_3:
+; SOFT-NEXT:    .long 4095 @ 0xfff
 ;
 ; VFP2-LABEL: test_signed_i13_f32:
 ; VFP2:       @ %bb.0:
 ; VFP2-NEXT:    vmov s0, r0
 ; VFP2-NEXT:    vldr s2, .LCPI2_0
-; VFP2-NEXT:    vldr s6, .LCPI2_1
+; VFP2-NEXT:    vcvt.s32.f32 s4, s0
 ; VFP2-NEXT:    vcmp.f32 s0, s2
-; VFP2-NEXT:    ldr r0, .LCPI2_2
+; VFP2-NEXT:    vldr s2, .LCPI2_1
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcvt.s32.f32 s4, s0
-; VFP2-NEXT:    vcmp.f32 s0, s6
-; VFP2-NEXT:    vmov r1, s4
-; VFP2-NEXT:    movlt r1, r0
+; VFP2-NEXT:    vcmp.f32 s0, s2
+; VFP2-NEXT:    vmov r0, s4
+; VFP2-NEXT:    itt lt
+; VFP2-NEXT:    movwlt r0, #61440
+; VFP2-NEXT:    movtlt r0, #65535
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    mov r0, #255
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movwgt r0, #4095
 ; VFP2-NEXT:    vcmp.f32 s0, s0
-; VFP2-NEXT:    orr r0, r0, #3840
-; VFP2-NEXT:    movle r0, r1
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r0, #0
-; VFP2-NEXT:    mov pc, lr
+; VFP2-NEXT:    bx lr
 ; VFP2-NEXT:    .p2align 2
 ; VFP2-NEXT:  @ %bb.1:
 ; VFP2-NEXT:  .LCPI2_0:
 ; VFP2-NEXT:    .long 0xc5800000 @ float -4096
 ; VFP2-NEXT:  .LCPI2_1:
 ; VFP2-NEXT:    .long 0x457ff000 @ float 4095
-; VFP2-NEXT:  .LCPI2_2:
-; VFP2-NEXT:    .long 4294963200 @ 0xfffff000
+;
+; FP16-LABEL: test_signed_i13_f32:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vldr s0, .LCPI2_0
+; FP16-NEXT:    vmov s2, r0
+; FP16-NEXT:    vldr s4, .LCPI2_1
+; FP16-NEXT:    vmaxnm.f32 s0, s2, s0
+; FP16-NEXT:    vcmp.f32 s2, s2
+; FP16-NEXT:    vminnm.f32 s0, s0, s4
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcvt.s32.f32 s0, s0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r0, #0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI2_0:
+; FP16-NEXT:    .long 0xc5800000 @ float -4096
+; FP16-NEXT:  .LCPI2_1:
+; FP16-NEXT:    .long 0x457ff000 @ float 4095
     %x = call i13 @llvm.fptosi.sat.i13.f32(float %f)
     ret i13 %x
 }
@@ -203,66 +311,97 @@ define i16 @test_signed_i16_f32(float %f) nounwind {
 ; SOFT:       @ %bb.0:
 ; SOFT-NEXT:    .save {r4, r5, r6, lr}
 ; SOFT-NEXT:    push {r4, r5, r6, lr}
-; SOFT-NEXT:    mov r1, #-956301312
 ; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    movs r0, #199
+; SOFT-NEXT:    lsls r1, r0, #24
+; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_fcmpge
-; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r6, r0
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_f2iz
-; SOFT-NEXT:    mov r6, r0
-; SOFT-NEXT:    ldr r0, .LCPI3_0
+; SOFT-NEXT:    cmp r6, #0
+; SOFT-NEXT:    beq .LBB3_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    b .LBB3_3
+; SOFT-NEXT:  .LBB3_2:
+; SOFT-NEXT:    ldr r5, .LCPI3_0
+; SOFT-NEXT:  .LBB3_3:
 ; SOFT-NEXT:    ldr r1, .LCPI3_1
-; SOFT-NEXT:    cmp r5, #0
-; SOFT-NEXT:    moveq r6, r0
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_fcmpgt
-; SOFT-NEXT:    mov r1, #255
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    orr r1, r1, #32512
+; SOFT-NEXT:    beq .LBB3_5
+; SOFT-NEXT:  @ %bb.4:
+; SOFT-NEXT:    ldr r5, .LCPI3_2
+; SOFT-NEXT:  .LBB3_5:
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    movne r6, r1
 ; SOFT-NEXT:    mov r1, r4
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    movne r6, #0
-; SOFT-NEXT:    mov r0, r6
-; SOFT-NEXT:    pop {r4, r5, r6, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    beq .LBB3_7
+; SOFT-NEXT:  @ %bb.6:
+; SOFT-NEXT:    movs r5, #0
+; SOFT-NEXT:  .LBB3_7:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
 ; SOFT-NEXT:    .p2align 2
-; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:  @ %bb.8:
 ; SOFT-NEXT:  .LCPI3_0:
 ; SOFT-NEXT:    .long 4294934528 @ 0xffff8000
 ; SOFT-NEXT:  .LCPI3_1:
 ; SOFT-NEXT:    .long 1191181824 @ 0x46fffe00
+; SOFT-NEXT:  .LCPI3_2:
+; SOFT-NEXT:    .long 32767 @ 0x7fff
 ;
 ; VFP2-LABEL: test_signed_i16_f32:
 ; VFP2:       @ %bb.0:
 ; VFP2-NEXT:    vmov s0, r0
 ; VFP2-NEXT:    vldr s2, .LCPI3_0
-; VFP2-NEXT:    vldr s6, .LCPI3_1
+; VFP2-NEXT:    vcvt.s32.f32 s4, s0
 ; VFP2-NEXT:    vcmp.f32 s0, s2
-; VFP2-NEXT:    ldr r0, .LCPI3_2
+; VFP2-NEXT:    vldr s2, .LCPI3_1
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcvt.s32.f32 s4, s0
-; VFP2-NEXT:    vcmp.f32 s0, s6
-; VFP2-NEXT:    vmov r1, s4
-; VFP2-NEXT:    movlt r1, r0
+; VFP2-NEXT:    vcmp.f32 s0, s2
+; VFP2-NEXT:    vmov r0, s4
+; VFP2-NEXT:    itt lt
+; VFP2-NEXT:    movwlt r0, #32768
+; VFP2-NEXT:    movtlt r0, #65535
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    mov r0, #255
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movwgt r0, #32767
 ; VFP2-NEXT:    vcmp.f32 s0, s0
-; VFP2-NEXT:    orr r0, r0, #32512
-; VFP2-NEXT:    movle r0, r1
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r0, #0
-; VFP2-NEXT:    mov pc, lr
+; VFP2-NEXT:    bx lr
 ; VFP2-NEXT:    .p2align 2
 ; VFP2-NEXT:  @ %bb.1:
 ; VFP2-NEXT:  .LCPI3_0:
 ; VFP2-NEXT:    .long 0xc7000000 @ float -32768
 ; VFP2-NEXT:  .LCPI3_1:
 ; VFP2-NEXT:    .long 0x46fffe00 @ float 32767
-; VFP2-NEXT:  .LCPI3_2:
-; VFP2-NEXT:    .long 4294934528 @ 0xffff8000
+;
+; FP16-LABEL: test_signed_i16_f32:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vldr s0, .LCPI3_0
+; FP16-NEXT:    vmov s2, r0
+; FP16-NEXT:    vldr s4, .LCPI3_1
+; FP16-NEXT:    vmaxnm.f32 s0, s2, s0
+; FP16-NEXT:    vcmp.f32 s2, s2
+; FP16-NEXT:    vminnm.f32 s0, s0, s4
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcvt.s32.f32 s0, s0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r0, #0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI3_0:
+; FP16-NEXT:    .long 0xc7000000 @ float -32768
+; FP16-NEXT:  .LCPI3_1:
+; FP16-NEXT:    .long 0x46fffe00 @ float 32767
     %x = call i16 @llvm.fptosi.sat.i16.f32(float %f)
     ret i16 %x
 }
@@ -272,67 +411,98 @@ define i19 @test_signed_i19_f32(float %f) nounwind {
 ; SOFT:       @ %bb.0:
 ; SOFT-NEXT:    .save {r4, r5, r6, lr}
 ; SOFT-NEXT:    push {r4, r5, r6, lr}
-; SOFT-NEXT:    mov r1, #142606336
 ; SOFT-NEXT:    mov r4, r0
-; SOFT-NEXT:    orr r1, r1, #-1073741824
+; SOFT-NEXT:    ldr r1, .LCPI4_0
 ; SOFT-NEXT:    bl __aeabi_fcmpge
-; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r6, r0
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_f2iz
-; SOFT-NEXT:    mov r6, r0
-; SOFT-NEXT:    mov r0, #66846720
-; SOFT-NEXT:    orr r0, r0, #-67108864
-; SOFT-NEXT:    ldr r1, .LCPI4_0
-; SOFT-NEXT:    cmp r5, #0
-; SOFT-NEXT:    moveq r6, r0
+; SOFT-NEXT:    cmp r6, #0
+; SOFT-NEXT:    beq .LBB4_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    b .LBB4_3
+; SOFT-NEXT:  .LBB4_2:
+; SOFT-NEXT:    ldr r5, .LCPI4_1
+; SOFT-NEXT:  .LBB4_3:
+; SOFT-NEXT:    ldr r1, .LCPI4_2
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_fcmpgt
-; SOFT-NEXT:    ldr r1, .LCPI4_1
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    beq .LBB4_5
+; SOFT-NEXT:  @ %bb.4:
+; SOFT-NEXT:    ldr r5, .LCPI4_3
+; SOFT-NEXT:  .LBB4_5:
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    movne r6, r1
 ; SOFT-NEXT:    mov r1, r4
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    movne r6, #0
-; SOFT-NEXT:    mov r0, r6
-; SOFT-NEXT:    pop {r4, r5, r6, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    beq .LBB4_7
+; SOFT-NEXT:  @ %bb.6:
+; SOFT-NEXT:    movs r5, #0
+; SOFT-NEXT:  .LBB4_7:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
 ; SOFT-NEXT:    .p2align 2
-; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:  @ %bb.8:
 ; SOFT-NEXT:  .LCPI4_0:
-; SOFT-NEXT:    .long 1216348096 @ 0x487fffc0
+; SOFT-NEXT:    .long 3363831808 @ 0xc8800000
 ; SOFT-NEXT:  .LCPI4_1:
+; SOFT-NEXT:    .long 4294705152 @ 0xfffc0000
+; SOFT-NEXT:  .LCPI4_2:
+; SOFT-NEXT:    .long 1216348096 @ 0x487fffc0
+; SOFT-NEXT:  .LCPI4_3:
 ; SOFT-NEXT:    .long 262143 @ 0x3ffff
 ;
 ; VFP2-LABEL: test_signed_i19_f32:
 ; VFP2:       @ %bb.0:
 ; VFP2-NEXT:    vmov s0, r0
-; VFP2-NEXT:    vldr s6, .LCPI4_2
 ; VFP2-NEXT:    vldr s2, .LCPI4_0
-; VFP2-NEXT:    mov r0, #66846720
 ; VFP2-NEXT:    vcvt.s32.f32 s4, s0
-; VFP2-NEXT:    orr r0, r0, #-67108864
-; VFP2-NEXT:    vcmp.f32 s0, s6
-; VFP2-NEXT:    ldr r1, .LCPI4_1
+; VFP2-NEXT:    vcmp.f32 s0, s2
+; VFP2-NEXT:    vldr s2, .LCPI4_1
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
 ; VFP2-NEXT:    vcmp.f32 s0, s2
-; VFP2-NEXT:    vmov r2, s4
-; VFP2-NEXT:    movge r0, r2
+; VFP2-NEXT:    vmov r0, s4
+; VFP2-NEXT:    itt lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    movtlt r0, #65532
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    itt gt
+; VFP2-NEXT:    movwgt r0, #65535
+; VFP2-NEXT:    movtgt r0, #3
 ; VFP2-NEXT:    vcmp.f32 s0, s0
-; VFP2-NEXT:    movgt r0, r1
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r0, #0
-; VFP2-NEXT:    mov pc, lr
+; VFP2-NEXT:    bx lr
 ; VFP2-NEXT:    .p2align 2
 ; VFP2-NEXT:  @ %bb.1:
 ; VFP2-NEXT:  .LCPI4_0:
-; VFP2-NEXT:    .long 0x487fffc0 @ float 262143
-; VFP2-NEXT:  .LCPI4_1:
-; VFP2-NEXT:    .long 262143 @ 0x3ffff
-; VFP2-NEXT:  .LCPI4_2:
 ; VFP2-NEXT:    .long 0xc8800000 @ float -262144
+; VFP2-NEXT:  .LCPI4_1:
+; VFP2-NEXT:    .long 0x487fffc0 @ float 262143
+;
+; FP16-LABEL: test_signed_i19_f32:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vldr s0, .LCPI4_0
+; FP16-NEXT:    vmov s2, r0
+; FP16-NEXT:    vldr s4, .LCPI4_1
+; FP16-NEXT:    vmaxnm.f32 s0, s2, s0
+; FP16-NEXT:    vcmp.f32 s2, s2
+; FP16-NEXT:    vminnm.f32 s0, s0, s4
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcvt.s32.f32 s0, s0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r0, #0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI4_0:
+; FP16-NEXT:    .long 0xc8800000 @ float -262144
+; FP16-NEXT:  .LCPI4_1:
+; FP16-NEXT:    .long 0x487fffc0 @ float 262143
     %x = call i19 @llvm.fptosi.sat.i19.f32(float %f)
     ret i19 %x
 }
@@ -340,55 +510,76 @@ define i19 @test_signed_i19_f32(float %f) nounwind {
 define i32 @test_signed_i32_f32(float %f) nounwind {
 ; SOFT-LABEL: test_signed_i32_f32:
 ; SOFT:       @ %bb.0:
-; SOFT-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; SOFT-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; SOFT-NEXT:    mvn r1, #-1325400064
+; SOFT-NEXT:    .save {r4, r5, r6, lr}
+; SOFT-NEXT:    push {r4, r5, r6, lr}
 ; SOFT-NEXT:    mov r4, r0
-; SOFT-NEXT:    bl __aeabi_fcmpgt
-; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    movs r0, #207
+; SOFT-NEXT:    lsls r1, r0, #24
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mov r1, #-822083584
 ; SOFT-NEXT:    bl __aeabi_fcmpge
 ; SOFT-NEXT:    mov r6, r0
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_f2iz
-; SOFT-NEXT:    mov r7, r0
 ; SOFT-NEXT:    cmp r6, #0
-; SOFT-NEXT:    moveq r7, #-2147483648
-; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    beq .LBB5_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    b .LBB5_3
+; SOFT-NEXT:  .LBB5_2:
+; SOFT-NEXT:    movs r0, #1
+; SOFT-NEXT:    lsls r5, r0, #31
+; SOFT-NEXT:  .LBB5_3:
+; SOFT-NEXT:    ldr r1, .LCPI5_0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    beq .LBB5_5
+; SOFT-NEXT:  @ %bb.4:
+; SOFT-NEXT:    ldr r5, .LCPI5_1
+; SOFT-NEXT:  .LBB5_5:
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mvnne r7, #-2147483648
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    movne r7, #0
-; SOFT-NEXT:    mov r0, r7
-; SOFT-NEXT:    pop {r4, r5, r6, r7, r11, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    beq .LBB5_7
+; SOFT-NEXT:  @ %bb.6:
+; SOFT-NEXT:    movs r5, #0
+; SOFT-NEXT:  .LBB5_7:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.8:
+; SOFT-NEXT:  .LCPI5_0:
+; SOFT-NEXT:    .long 1325400063 @ 0x4effffff
+; SOFT-NEXT:  .LCPI5_1:
+; SOFT-NEXT:    .long 2147483647 @ 0x7fffffff
 ;
-; VFP2-LABEL: test_signed_i32_f32:
-; VFP2:       @ %bb.0:
-; VFP2-NEXT:    vmov s0, r0
-; VFP2-NEXT:    vldr s2, .LCPI5_0
-; VFP2-NEXT:    vldr s6, .LCPI5_1
-; VFP2-NEXT:    vcmp.f32 s0, s2
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcvt.s32.f32 s4, s0
-; VFP2-NEXT:    vcmp.f32 s0, s6
-; VFP2-NEXT:    vmov r0, s4
-; VFP2-NEXT:    movlt r0, #-2147483648
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s0, s0
-; VFP2-NEXT:    mvngt r0, #-2147483648
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    movvs r0, #0
-; VFP2-NEXT:    mov pc, lr
-; VFP2-NEXT:    .p2align 2
-; VFP2-NEXT:  @ %bb.1:
-; VFP2-NEXT:  .LCPI5_0:
-; VFP2-NEXT:    .long 0xcf000000 @ float -2.14748365E+9
-; VFP2-NEXT:  .LCPI5_1:
-; VFP2-NEXT:    .long 0x4effffff @ float 2.14748352E+9
+; VFP-LABEL: test_signed_i32_f32:
+; VFP:       @ %bb.0:
+; VFP-NEXT:    vmov s0, r0
+; VFP-NEXT:    vldr s2, .LCPI5_0
+; VFP-NEXT:    vldr s6, .LCPI5_1
+; VFP-NEXT:    vcvt.s32.f32 s4, s0
+; VFP-NEXT:    vcmp.f32 s0, s2
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s0, s6
+; VFP-NEXT:    vmov r0, s4
+; VFP-NEXT:    it lt
+; VFP-NEXT:    movlt.w r0, #-2147483648
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it gt
+; VFP-NEXT:    mvngt r0, #-2147483648
+; VFP-NEXT:    vcmp.f32 s0, s0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it vs
+; VFP-NEXT:    movvs r0, #0
+; VFP-NEXT:    bx lr
+; VFP-NEXT:    .p2align 2
+; VFP-NEXT:  @ %bb.1:
+; VFP-NEXT:  .LCPI5_0:
+; VFP-NEXT:    .long 0xcf000000 @ float -2.14748365E+9
+; VFP-NEXT:  .LCPI5_1:
+; VFP-NEXT:    .long 0x4effffff @ float 2.14748352E+9
     %x = call i32 @llvm.fptosi.sat.i32.f32(float %f)
     ret i32 %x
 }
@@ -396,98 +587,127 @@ define i32 @test_signed_i32_f32(float %f) nounwind {
 define i50 @test_signed_i50_f32(float %f) nounwind {
 ; SOFT-LABEL: test_signed_i50_f32:
 ; SOFT:       @ %bb.0:
-; SOFT-NEXT:    .save {r4, r5, r6, r7, r8, lr}
-; SOFT-NEXT:    push {r4, r5, r6, r7, r8, lr}
-; SOFT-NEXT:    mvn r1, #-1476395008
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #12
+; SOFT-NEXT:    sub sp, #12
 ; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    ldr r1, .LCPI6_0
 ; SOFT-NEXT:    bl __aeabi_fcmpgt
-; SOFT-NEXT:    mov r8, r0
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    movs r0, #27
+; SOFT-NEXT:    lsls r5, r0, #27
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mov r1, #-671088640
+; SOFT-NEXT:    mov r1, r5
 ; SOFT-NEXT:    bl __aeabi_fcmpge
 ; SOFT-NEXT:    mov r7, r0
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_f2lz
-; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    str r1, [sp, #4] @ 4-byte Spill
 ; SOFT-NEXT:    cmp r7, #0
-; SOFT-NEXT:    mov r6, r1
-; SOFT-NEXT:    moveq r5, r7
-; SOFT-NEXT:    cmp r8, #0
+; SOFT-NEXT:    bne .LBB6_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r6, r7
+; SOFT-NEXT:  .LBB6_2:
+; SOFT-NEXT:    movs r7, #0
+; SOFT-NEXT:    ldr r0, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    beq .LBB6_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mvns r6, r7
+; SOFT-NEXT:  .LBB6_4:
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mvnne r5, #0
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    bne .LBB6_6
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:  .LBB6_6:
+; SOFT-NEXT:    mov r6, r0
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mov r1, #-671088640
-; SOFT-NEXT:    movne r5, #0
+; SOFT-NEXT:    mov r1, r5
 ; SOFT-NEXT:    bl __aeabi_fcmpge
-; SOFT-NEXT:    mov r1, #16646144
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    orr r1, r1, #-16777216
+; SOFT-NEXT:    beq .LBB6_8
+; SOFT-NEXT:  @ %bb.7:
+; SOFT-NEXT:    ldr r5, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    b .LBB6_9
+; SOFT-NEXT:  .LBB6_8:
+; SOFT-NEXT:    ldr r5, .LCPI6_1
+; SOFT-NEXT:  .LBB6_9:
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    moveq r6, r1
-; SOFT-NEXT:    mvn r1, #-1476395008
-; SOFT-NEXT:    bl __aeabi_fcmpgt
 ; SOFT-NEXT:    ldr r1, .LCPI6_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    beq .LBB6_11
+; SOFT-NEXT:  @ %bb.10:
+; SOFT-NEXT:    ldr r5, .LCPI6_2
+; SOFT-NEXT:  .LBB6_11:
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    movne r6, r1
 ; SOFT-NEXT:    mov r1, r4
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    movne r6, #0
-; SOFT-NEXT:    mov r1, r6
-; SOFT-NEXT:    pop {r4, r5, r6, r7, r8, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    bne .LBB6_13
+; SOFT-NEXT:  @ %bb.12:
+; SOFT-NEXT:    mov r7, r5
+; SOFT-NEXT:  .LBB6_13:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    add sp, #12
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
 ; SOFT-NEXT:    .p2align 2
-; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:  @ %bb.14:
 ; SOFT-NEXT:  .LCPI6_0:
+; SOFT-NEXT:    .long 1476395007 @ 0x57ffffff
+; SOFT-NEXT:  .LCPI6_1:
+; SOFT-NEXT:    .long 4294836224 @ 0xfffe0000
+; SOFT-NEXT:  .LCPI6_2:
 ; SOFT-NEXT:    .long 131071 @ 0x1ffff
 ;
-; VFP2-LABEL: test_signed_i50_f32:
-; VFP2:       @ %bb.0:
-; VFP2-NEXT:    .save {r11, lr}
-; VFP2-NEXT:    push {r11, lr}
-; VFP2-NEXT:    .vsave {d8, d9}
-; VFP2-NEXT:    vpush {d8, d9}
-; VFP2-NEXT:    vldr s16, .LCPI6_0
-; VFP2-NEXT:    vmov s18, r0
-; VFP2-NEXT:    bl __aeabi_f2lz
-; VFP2-NEXT:    vcmp.f32 s18, s16
-; VFP2-NEXT:    mov r2, #16646144
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    orr r2, r2, #-16777216
-; VFP2-NEXT:    vldr s0, .LCPI6_1
-; VFP2-NEXT:    ldr r3, .LCPI6_2
-; VFP2-NEXT:    vcmp.f32 s18, s0
-; VFP2-NEXT:    movlt r1, r2
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s18, s18
-; VFP2-NEXT:    movgt r1, r3
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s18, s16
-; VFP2-NEXT:    movvs r1, #0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s18, s0
-; VFP2-NEXT:    movlt r0, #0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s18, s18
-; VFP2-NEXT:    mvngt r0, #0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    movvs r0, #0
-; VFP2-NEXT:    vpop {d8, d9}
-; VFP2-NEXT:    pop {r11, lr}
-; VFP2-NEXT:    mov pc, lr
-; VFP2-NEXT:    .p2align 2
-; VFP2-NEXT:  @ %bb.1:
-; VFP2-NEXT:  .LCPI6_0:
-; VFP2-NEXT:    .long 0xd8000000 @ float -5.62949953E+14
-; VFP2-NEXT:  .LCPI6_1:
-; VFP2-NEXT:    .long 0x57ffffff @ float 5.6294992E+14
-; VFP2-NEXT:  .LCPI6_2:
-; VFP2-NEXT:    .long 131071 @ 0x1ffff
+; VFP-LABEL: test_signed_i50_f32:
+; VFP:       @ %bb.0:
+; VFP-NEXT:    .save {r7, lr}
+; VFP-NEXT:    push {r7, lr}
+; VFP-NEXT:    .vsave {d8}
+; VFP-NEXT:    vpush {d8}
+; VFP-NEXT:    vmov s16, r0
+; VFP-NEXT:    bl __aeabi_f2lz
+; VFP-NEXT:    vldr s0, .LCPI6_0
+; VFP-NEXT:    vldr s2, .LCPI6_1
+; VFP-NEXT:    vcmp.f32 s16, s0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s16, s2
+; VFP-NEXT:    itt lt
+; VFP-NEXT:    movlt r1, #0
+; VFP-NEXT:    movtlt r1, #65534
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s16, s0
+; VFP-NEXT:    itt gt
+; VFP-NEXT:    movwgt r1, #65535
+; VFP-NEXT:    movtgt r1, #1
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it lt
+; VFP-NEXT:    movlt r0, #0
+; VFP-NEXT:    vcmp.f32 s16, s2
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it gt
+; VFP-NEXT:    movgt.w r0, #-1
+; VFP-NEXT:    vcmp.f32 s16, s16
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    itt vs
+; VFP-NEXT:    movvs r0, #0
+; VFP-NEXT:    movvs r1, #0
+; VFP-NEXT:    vpop {d8}
+; VFP-NEXT:    pop {r7, pc}
+; VFP-NEXT:    .p2align 2
+; VFP-NEXT:  @ %bb.1:
+; VFP-NEXT:  .LCPI6_0:
+; VFP-NEXT:    .long 0xd8000000 @ float -5.62949953E+14
+; VFP-NEXT:  .LCPI6_1:
+; VFP-NEXT:    .long 0x57ffffff @ float 5.6294992E+14
     %x = call i50 @llvm.fptosi.sat.i50.f32(float %f)
     ret i50 %x
 }
@@ -495,85 +715,125 @@ define i50 @test_signed_i50_f32(float %f) nounwind {
 define i64 @test_signed_i64_f32(float %f) nounwind {
 ; SOFT-LABEL: test_signed_i64_f32:
 ; SOFT:       @ %bb.0:
-; SOFT-NEXT:    .save {r4, r5, r6, r7, r8, lr}
-; SOFT-NEXT:    push {r4, r5, r6, r7, r8, lr}
-; SOFT-NEXT:    mvn r1, #-1593835520
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #12
+; SOFT-NEXT:    sub sp, #12
 ; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    ldr r1, .LCPI7_0
 ; SOFT-NEXT:    bl __aeabi_fcmpgt
-; SOFT-NEXT:    mov r8, r0
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    movs r0, #223
+; SOFT-NEXT:    lsls r5, r0, #24
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mov r1, #-553648128
+; SOFT-NEXT:    mov r1, r5
 ; SOFT-NEXT:    bl __aeabi_fcmpge
 ; SOFT-NEXT:    mov r7, r0
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_f2lz
-; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    str r1, [sp, #4] @ 4-byte Spill
 ; SOFT-NEXT:    cmp r7, #0
-; SOFT-NEXT:    mov r6, r1
-; SOFT-NEXT:    moveq r5, r7
-; SOFT-NEXT:    cmp r8, #0
+; SOFT-NEXT:    bne .LBB7_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r6, r7
+; SOFT-NEXT:  .LBB7_2:
+; SOFT-NEXT:    movs r7, #0
+; SOFT-NEXT:    ldr r0, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    beq .LBB7_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mvns r6, r7
+; SOFT-NEXT:  .LBB7_4:
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mvnne r5, #0
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    bne .LBB7_6
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:  .LBB7_6:
+; SOFT-NEXT:    mov r6, r0
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mvn r1, #-1593835520
-; SOFT-NEXT:    movne r5, #0
-; SOFT-NEXT:    bl __aeabi_fcmpgt
-; SOFT-NEXT:    mov r7, r0
-; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mov r1, #-553648128
+; SOFT-NEXT:    mov r1, r5
 ; SOFT-NEXT:    bl __aeabi_fcmpge
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    beq .LBB7_8
+; SOFT-NEXT:  @ %bb.7:
+; SOFT-NEXT:    ldr r5, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    b .LBB7_9
+; SOFT-NEXT:  .LBB7_8:
+; SOFT-NEXT:    movs r0, #1
+; SOFT-NEXT:    lsls r5, r0, #31
+; SOFT-NEXT:  .LBB7_9:
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    ldr r1, .LCPI7_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    beq .LBB7_11
+; SOFT-NEXT:  @ %bb.10:
+; SOFT-NEXT:    ldr r5, .LCPI7_1
+; SOFT-NEXT:  .LBB7_11:
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    moveq r6, #-2147483648
-; SOFT-NEXT:    cmp r7, #0
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mvnne r6, #-2147483648
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    movne r6, #0
-; SOFT-NEXT:    mov r1, r6
-; SOFT-NEXT:    pop {r4, r5, r6, r7, r8, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    bne .LBB7_13
+; SOFT-NEXT:  @ %bb.12:
+; SOFT-NEXT:    mov r7, r5
+; SOFT-NEXT:  .LBB7_13:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    add sp, #12
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.14:
+; SOFT-NEXT:  .LCPI7_0:
+; SOFT-NEXT:    .long 1593835519 @ 0x5effffff
+; SOFT-NEXT:  .LCPI7_1:
+; SOFT-NEXT:    .long 2147483647 @ 0x7fffffff
 ;
-; VFP2-LABEL: test_signed_i64_f32:
-; VFP2:       @ %bb.0:
-; VFP2-NEXT:    .save {r4, lr}
-; VFP2-NEXT:    push {r4, lr}
-; VFP2-NEXT:    mov r4, r0
-; VFP2-NEXT:    bl __aeabi_f2lz
-; VFP2-NEXT:    vldr s0, .LCPI7_0
-; VFP2-NEXT:    vmov s2, r4
-; VFP2-NEXT:    vldr s4, .LCPI7_1
-; VFP2-NEXT:    vcmp.f32 s2, s0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s4
-; VFP2-NEXT:    movlt r0, #0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s2
-; VFP2-NEXT:    mvngt r0, #0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s0
-; VFP2-NEXT:    movvs r0, #0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s4
-; VFP2-NEXT:    movlt r1, #-2147483648
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s2
-; VFP2-NEXT:    mvngt r1, #-2147483648
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    movvs r1, #0
-; VFP2-NEXT:    pop {r4, lr}
-; VFP2-NEXT:    mov pc, lr
-; VFP2-NEXT:    .p2align 2
-; VFP2-NEXT:  @ %bb.1:
-; VFP2-NEXT:  .LCPI7_0:
-; VFP2-NEXT:    .long 0xdf000000 @ float -9.22337203E+18
-; VFP2-NEXT:  .LCPI7_1:
-; VFP2-NEXT:    .long 0x5effffff @ float 9.22337149E+18
+; VFP-LABEL: test_signed_i64_f32:
+; VFP:       @ %bb.0:
+; VFP-NEXT:    .save {r4, lr}
+; VFP-NEXT:    push {r4, lr}
+; VFP-NEXT:    mov r4, r0
+; VFP-NEXT:    bl __aeabi_f2lz
+; VFP-NEXT:    vldr s0, .LCPI7_0
+; VFP-NEXT:    vmov s2, r4
+; VFP-NEXT:    vldr s4, .LCPI7_1
+; VFP-NEXT:    vcmp.f32 s2, s0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s2, s4
+; VFP-NEXT:    it lt
+; VFP-NEXT:    movlt r0, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s2, s2
+; VFP-NEXT:    it gt
+; VFP-NEXT:    movgt.w r0, #-1
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s2, s0
+; VFP-NEXT:    it vs
+; VFP-NEXT:    movvs r0, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it lt
+; VFP-NEXT:    movlt.w r1, #-2147483648
+; VFP-NEXT:    vcmp.f32 s2, s4
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it gt
+; VFP-NEXT:    mvngt r1, #-2147483648
+; VFP-NEXT:    vcmp.f32 s2, s2
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it vs
+; VFP-NEXT:    movvs r1, #0
+; VFP-NEXT:    pop {r4, pc}
+; VFP-NEXT:    .p2align 2
+; VFP-NEXT:  @ %bb.1:
+; VFP-NEXT:  .LCPI7_0:
+; VFP-NEXT:    .long 0xdf000000 @ float -9.22337203E+18
+; VFP-NEXT:  .LCPI7_1:
+; VFP-NEXT:    .long 0x5effffff @ float 9.22337149E+18
     %x = call i64 @llvm.fptosi.sat.i64.f32(float %f)
     ret i64 %x
 }
@@ -581,139 +841,205 @@ define i64 @test_signed_i64_f32(float %f) nounwind {
 define i100 @test_signed_i100_f32(float %f) nounwind {
 ; SOFT-LABEL: test_signed_i100_f32:
 ; SOFT:       @ %bb.0:
-; SOFT-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, lr}
-; SOFT-NEXT:    push {r4, r5, r6, r7, r8, r9, r10, lr}
-; SOFT-NEXT:    mvn r1, #-1895825408
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #20
+; SOFT-NEXT:    sub sp, #20
 ; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    ldr r1, .LCPI8_0
 ; SOFT-NEXT:    bl __aeabi_fcmpgt
-; SOFT-NEXT:    mov r9, r0
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    movs r0, #241
+; SOFT-NEXT:    lsls r5, r0, #24
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mov r1, #-251658240
+; SOFT-NEXT:    mov r1, r5
 ; SOFT-NEXT:    bl __aeabi_fcmpge
-; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r6, r0
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __fixsfti
-; SOFT-NEXT:    mov r10, r0
-; SOFT-NEXT:    cmp r5, #0
-; SOFT-NEXT:    mov r6, r1
-; SOFT-NEXT:    moveq r10, r5
-; SOFT-NEXT:    cmp r9, #0
+; SOFT-NEXT:    str r1, [sp] @ 4-byte Spill
+; SOFT-NEXT:    str r2, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    str r3, [sp, #12] @ 4-byte Spill
+; SOFT-NEXT:    cmp r6, #0
+; SOFT-NEXT:    bne .LBB8_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:  .LBB8_2:
+; SOFT-NEXT:    movs r6, #0
+; SOFT-NEXT:    mvns r1, r6
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    str r1, [sp, #16] @ 4-byte Spill
+; SOFT-NEXT:    mov r7, r1
+; SOFT-NEXT:    bne .LBB8_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:  .LBB8_4:
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r7, r2
-; SOFT-NEXT:    mov r8, r3
-; SOFT-NEXT:    mvnne r10, #0
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bne .LBB8_6
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:  .LBB8_6:
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mvn r1, #-1895825408
-; SOFT-NEXT:    movne r10, #0
+; SOFT-NEXT:    ldr r1, .LCPI8_0
 ; SOFT-NEXT:    bl __aeabi_fcmpgt
-; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r7, r0
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mov r1, #-251658240
+; SOFT-NEXT:    mov r1, r5
 ; SOFT-NEXT:    bl __aeabi_fcmpge
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    moveq r6, r0
-; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    bne .LBB8_8
+; SOFT-NEXT:  @ %bb.7:
+; SOFT-NEXT:    str r0, [sp] @ 4-byte Spill
+; SOFT-NEXT:  .LBB8_8:
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    ldr r7, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:    bne .LBB8_10
+; SOFT-NEXT:  @ %bb.9:
+; SOFT-NEXT:    ldr r7, [sp] @ 4-byte Reload
+; SOFT-NEXT:  .LBB8_10:
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mvnne r6, #0
+; SOFT-NEXT:    mov r1, r4
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bne .LBB8_12
+; SOFT-NEXT:  @ %bb.11:
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:  .LBB8_12:
+; SOFT-NEXT:    str r0, [sp] @ 4-byte Spill
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mvn r1, #-1895825408
-; SOFT-NEXT:    movne r6, #0
+; SOFT-NEXT:    ldr r1, .LCPI8_0
 ; SOFT-NEXT:    bl __aeabi_fcmpgt
-; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r7, r0
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mov r1, #-251658240
+; SOFT-NEXT:    mov r1, r5
 ; SOFT-NEXT:    bl __aeabi_fcmpge
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    moveq r7, r0
-; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    bne .LBB8_14
+; SOFT-NEXT:  @ %bb.13:
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:  .LBB8_14:
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    bne .LBB8_16
+; SOFT-NEXT:  @ %bb.15:
+; SOFT-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    str r0, [sp, #16] @ 4-byte Spill
+; SOFT-NEXT:  .LBB8_16:
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mvnne r7, #0
+; SOFT-NEXT:    mov r1, r4
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    mov r7, r6
+; SOFT-NEXT:    bne .LBB8_18
+; SOFT-NEXT:  @ %bb.17:
+; SOFT-NEXT:    ldr r7, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:  .LBB8_18:
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mvn r1, #-1895825408
-; SOFT-NEXT:    movne r7, #0
-; SOFT-NEXT:    bl __aeabi_fcmpgt
-; SOFT-NEXT:    mov r5, r0
-; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mov r1, #-251658240
+; SOFT-NEXT:    mov r1, r5
 ; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    movs r5, #7
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB8_20
+; SOFT-NEXT:  @ %bb.19:
+; SOFT-NEXT:    mvns r0, r5
+; SOFT-NEXT:    str r0, [sp, #12] @ 4-byte Spill
+; SOFT-NEXT:  .LBB8_20:
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    ldr r1, .LCPI8_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB8_22
+; SOFT-NEXT:  @ %bb.21:
+; SOFT-NEXT:    ldr r5, [sp, #12] @ 4-byte Reload
+; SOFT-NEXT:  .LBB8_22:
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mvneq r8, #7
-; SOFT-NEXT:    cmp r5, #0
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    movne r8, #7
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r0, r10
-; SOFT-NEXT:    movne r8, #0
-; SOFT-NEXT:    mov r1, r6
+; SOFT-NEXT:    bne .LBB8_24
+; SOFT-NEXT:  @ %bb.23:
+; SOFT-NEXT:    mov r6, r5
+; SOFT-NEXT:  .LBB8_24:
+; SOFT-NEXT:    ldr r0, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:    ldr r1, [sp] @ 4-byte Reload
 ; SOFT-NEXT:    mov r2, r7
-; SOFT-NEXT:    mov r3, r8
-; SOFT-NEXT:    pop {r4, r5, r6, r7, r8, r9, r10, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    mov r3, r6
+; SOFT-NEXT:    add sp, #20
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.25:
+; SOFT-NEXT:  .LCPI8_0:
+; SOFT-NEXT:    .long 1895825407 @ 0x70ffffff
 ;
-; VFP2-LABEL: test_signed_i100_f32:
-; VFP2:       @ %bb.0:
-; VFP2-NEXT:    .save {r4, lr}
-; VFP2-NEXT:    push {r4, lr}
-; VFP2-NEXT:    mov r4, r0
-; VFP2-NEXT:    bl __fixsfti
-; VFP2-NEXT:    vldr s0, .LCPI8_0
-; VFP2-NEXT:    vmov s2, r4
-; VFP2-NEXT:    vldr s4, .LCPI8_1
-; VFP2-NEXT:    vcmp.f32 s2, s0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s4
-; VFP2-NEXT:    movlt r0, #0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s2
-; VFP2-NEXT:    mvngt r0, #0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s0
-; VFP2-NEXT:    movvs r0, #0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s4
-; VFP2-NEXT:    movlt r1, #0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s2
-; VFP2-NEXT:    mvngt r1, #0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s0
-; VFP2-NEXT:    movvs r1, #0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s4
-; VFP2-NEXT:    movlt r2, #0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s2
-; VFP2-NEXT:    mvngt r2, #0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s0
-; VFP2-NEXT:    movvs r2, #0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s4
-; VFP2-NEXT:    mvnlt r3, #7
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s2
-; VFP2-NEXT:    movgt r3, #7
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    movvs r3, #0
-; VFP2-NEXT:    pop {r4, lr}
-; VFP2-NEXT:    mov pc, lr
-; VFP2-NEXT:    .p2align 2
-; VFP2-NEXT:  @ %bb.1:
-; VFP2-NEXT:  .LCPI8_0:
-; VFP2-NEXT:    .long 0xf1000000 @ float -6.338253E+29
-; VFP2-NEXT:  .LCPI8_1:
-; VFP2-NEXT:    .long 0x70ffffff @ float 6.33825262E+29
+; VFP-LABEL: test_signed_i100_f32:
+; VFP:       @ %bb.0:
+; VFP-NEXT:    .save {r4, lr}
+; VFP-NEXT:    push {r4, lr}
+; VFP-NEXT:    mov r4, r0
+; VFP-NEXT:    bl __fixsfti
+; VFP-NEXT:    vldr s0, .LCPI8_0
+; VFP-NEXT:    vmov s2, r4
+; VFP-NEXT:    vldr s4, .LCPI8_1
+; VFP-NEXT:    vcmp.f32 s2, s0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s2, s4
+; VFP-NEXT:    it lt
+; VFP-NEXT:    movlt r0, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s2, s2
+; VFP-NEXT:    it gt
+; VFP-NEXT:    movgt.w r0, #-1
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s2, s0
+; VFP-NEXT:    it vs
+; VFP-NEXT:    movvs r0, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s2, s4
+; VFP-NEXT:    it lt
+; VFP-NEXT:    movlt r1, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s2, s2
+; VFP-NEXT:    it gt
+; VFP-NEXT:    movgt.w r1, #-1
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s2, s0
+; VFP-NEXT:    it vs
+; VFP-NEXT:    movvs r1, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s2, s4
+; VFP-NEXT:    it lt
+; VFP-NEXT:    movlt r2, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s2, s2
+; VFP-NEXT:    it gt
+; VFP-NEXT:    movgt.w r2, #-1
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s2, s0
+; VFP-NEXT:    it vs
+; VFP-NEXT:    movvs r2, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it lt
+; VFP-NEXT:    mvnlt r3, #7
+; VFP-NEXT:    vcmp.f32 s2, s4
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it gt
+; VFP-NEXT:    movgt r3, #7
+; VFP-NEXT:    vcmp.f32 s2, s2
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it vs
+; VFP-NEXT:    movvs r3, #0
+; VFP-NEXT:    pop {r4, pc}
+; VFP-NEXT:    .p2align 2
+; VFP-NEXT:  @ %bb.1:
+; VFP-NEXT:  .LCPI8_0:
+; VFP-NEXT:    .long 0xf1000000 @ float -6.338253E+29
+; VFP-NEXT:  .LCPI8_1:
+; VFP-NEXT:    .long 0x70ffffff @ float 6.33825262E+29
     %x = call i100 @llvm.fptosi.sat.i100.f32(float %f)
     ret i100 %x
 }
@@ -721,139 +1047,209 @@ define i100 @test_signed_i100_f32(float %f) nounwind {
 define i128 @test_signed_i128_f32(float %f) nounwind {
 ; SOFT-LABEL: test_signed_i128_f32:
 ; SOFT:       @ %bb.0:
-; SOFT-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, lr}
-; SOFT-NEXT:    push {r4, r5, r6, r7, r8, r9, r10, lr}
-; SOFT-NEXT:    mvn r1, #-2130706432
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #20
+; SOFT-NEXT:    sub sp, #20
 ; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    ldr r1, .LCPI9_0
 ; SOFT-NEXT:    bl __aeabi_fcmpgt
-; SOFT-NEXT:    mov r9, r0
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    movs r0, #255
+; SOFT-NEXT:    lsls r7, r0, #24
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mov r1, #-16777216
+; SOFT-NEXT:    mov r1, r7
 ; SOFT-NEXT:    bl __aeabi_fcmpge
-; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r6, r0
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __fixsfti
-; SOFT-NEXT:    mov r10, r0
+; SOFT-NEXT:    str r1, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    str r2, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    str r3, [sp] @ 4-byte Spill
+; SOFT-NEXT:    cmp r6, #0
+; SOFT-NEXT:    bne .LBB9_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:  .LBB9_2:
+; SOFT-NEXT:    movs r6, #0
+; SOFT-NEXT:    mvns r1, r6
 ; SOFT-NEXT:    cmp r5, #0
-; SOFT-NEXT:    mov r6, r1
-; SOFT-NEXT:    moveq r10, r5
-; SOFT-NEXT:    cmp r9, #0
+; SOFT-NEXT:    str r1, [sp, #16] @ 4-byte Spill
+; SOFT-NEXT:    mov r5, r1
+; SOFT-NEXT:    bne .LBB9_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:  .LBB9_4:
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r7, r2
-; SOFT-NEXT:    mov r8, r3
-; SOFT-NEXT:    mvnne r10, #0
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bne .LBB9_6
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:  .LBB9_6:
+; SOFT-NEXT:    str r0, [sp, #12] @ 4-byte Spill
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mvn r1, #-2130706432
-; SOFT-NEXT:    movne r10, #0
+; SOFT-NEXT:    ldr r1, .LCPI9_0
 ; SOFT-NEXT:    bl __aeabi_fcmpgt
 ; SOFT-NEXT:    mov r5, r0
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mov r1, #-16777216
+; SOFT-NEXT:    mov r1, r7
 ; SOFT-NEXT:    bl __aeabi_fcmpge
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    moveq r6, r0
+; SOFT-NEXT:    bne .LBB9_8
+; SOFT-NEXT:  @ %bb.7:
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:  .LBB9_8:
 ; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    ldr r5, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:    bne .LBB9_10
+; SOFT-NEXT:  @ %bb.9:
+; SOFT-NEXT:    ldr r5, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:  .LBB9_10:
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mvnne r6, #0
+; SOFT-NEXT:    mov r1, r4
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bne .LBB9_12
+; SOFT-NEXT:  @ %bb.11:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:  .LBB9_12:
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mvn r1, #-2130706432
-; SOFT-NEXT:    movne r6, #0
+; SOFT-NEXT:    ldr r1, .LCPI9_0
 ; SOFT-NEXT:    bl __aeabi_fcmpgt
 ; SOFT-NEXT:    mov r5, r0
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mov r1, #-16777216
+; SOFT-NEXT:    mov r1, r7
 ; SOFT-NEXT:    bl __aeabi_fcmpge
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    moveq r7, r0
+; SOFT-NEXT:    bne .LBB9_14
+; SOFT-NEXT:  @ %bb.13:
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:  .LBB9_14:
 ; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    bne .LBB9_16
+; SOFT-NEXT:  @ %bb.15:
+; SOFT-NEXT:    ldr r0, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:    str r0, [sp, #16] @ 4-byte Spill
+; SOFT-NEXT:  .LBB9_16:
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mvnne r7, #0
+; SOFT-NEXT:    mov r1, r4
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    mov r5, r6
+; SOFT-NEXT:    bne .LBB9_18
+; SOFT-NEXT:  @ %bb.17:
+; SOFT-NEXT:    ldr r5, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:  .LBB9_18:
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mvn r1, #-2130706432
-; SOFT-NEXT:    movne r7, #0
-; SOFT-NEXT:    bl __aeabi_fcmpgt
-; SOFT-NEXT:    mov r5, r0
-; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mov r1, #-16777216
+; SOFT-NEXT:    mov r1, r7
 ; SOFT-NEXT:    bl __aeabi_fcmpge
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    beq .LBB9_20
+; SOFT-NEXT:  @ %bb.19:
+; SOFT-NEXT:    ldr r7, [sp] @ 4-byte Reload
+; SOFT-NEXT:    b .LBB9_21
+; SOFT-NEXT:  .LBB9_20:
+; SOFT-NEXT:    movs r0, #1
+; SOFT-NEXT:    lsls r7, r0, #31
+; SOFT-NEXT:  .LBB9_21:
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    ldr r1, .LCPI9_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    beq .LBB9_23
+; SOFT-NEXT:  @ %bb.22:
+; SOFT-NEXT:    ldr r7, .LCPI9_1
+; SOFT-NEXT:  .LBB9_23:
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    moveq r8, #-2147483648
-; SOFT-NEXT:    cmp r5, #0
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mvnne r8, #-2147483648
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r0, r10
-; SOFT-NEXT:    movne r8, #0
-; SOFT-NEXT:    mov r1, r6
-; SOFT-NEXT:    mov r2, r7
-; SOFT-NEXT:    mov r3, r8
-; SOFT-NEXT:    pop {r4, r5, r6, r7, r8, r9, r10, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    bne .LBB9_25
+; SOFT-NEXT:  @ %bb.24:
+; SOFT-NEXT:    mov r6, r7
+; SOFT-NEXT:  .LBB9_25:
+; SOFT-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
+; SOFT-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    mov r2, r5
+; SOFT-NEXT:    mov r3, r6
+; SOFT-NEXT:    add sp, #20
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.26:
+; SOFT-NEXT:  .LCPI9_0:
+; SOFT-NEXT:    .long 2130706431 @ 0x7effffff
+; SOFT-NEXT:  .LCPI9_1:
+; SOFT-NEXT:    .long 2147483647 @ 0x7fffffff
 ;
-; VFP2-LABEL: test_signed_i128_f32:
-; VFP2:       @ %bb.0:
-; VFP2-NEXT:    .save {r4, lr}
-; VFP2-NEXT:    push {r4, lr}
-; VFP2-NEXT:    mov r4, r0
-; VFP2-NEXT:    bl __fixsfti
-; VFP2-NEXT:    vldr s0, .LCPI9_0
-; VFP2-NEXT:    vmov s2, r4
-; VFP2-NEXT:    vldr s4, .LCPI9_1
-; VFP2-NEXT:    vcmp.f32 s2, s0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s4
-; VFP2-NEXT:    movlt r0, #0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s2
-; VFP2-NEXT:    mvngt r0, #0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s0
-; VFP2-NEXT:    movvs r0, #0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s4
-; VFP2-NEXT:    movlt r1, #0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s2
-; VFP2-NEXT:    mvngt r1, #0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s0
-; VFP2-NEXT:    movvs r1, #0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s4
-; VFP2-NEXT:    movlt r2, #0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s2
-; VFP2-NEXT:    mvngt r2, #0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s0
-; VFP2-NEXT:    movvs r2, #0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s4
-; VFP2-NEXT:    movlt r3, #-2147483648
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s2
-; VFP2-NEXT:    mvngt r3, #-2147483648
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    movvs r3, #0
-; VFP2-NEXT:    pop {r4, lr}
-; VFP2-NEXT:    mov pc, lr
-; VFP2-NEXT:    .p2align 2
-; VFP2-NEXT:  @ %bb.1:
-; VFP2-NEXT:  .LCPI9_0:
-; VFP2-NEXT:    .long 0xff000000 @ float -1.70141183E+38
-; VFP2-NEXT:  .LCPI9_1:
-; VFP2-NEXT:    .long 0x7effffff @ float 1.70141173E+38
+; VFP-LABEL: test_signed_i128_f32:
+; VFP:       @ %bb.0:
+; VFP-NEXT:    .save {r4, lr}
+; VFP-NEXT:    push {r4, lr}
+; VFP-NEXT:    mov r4, r0
+; VFP-NEXT:    bl __fixsfti
+; VFP-NEXT:    vldr s0, .LCPI9_0
+; VFP-NEXT:    vmov s2, r4
+; VFP-NEXT:    vldr s4, .LCPI9_1
+; VFP-NEXT:    vcmp.f32 s2, s0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s2, s4
+; VFP-NEXT:    it lt
+; VFP-NEXT:    movlt r0, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s2, s2
+; VFP-NEXT:    it gt
+; VFP-NEXT:    movgt.w r0, #-1
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s2, s0
+; VFP-NEXT:    it vs
+; VFP-NEXT:    movvs r0, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s2, s4
+; VFP-NEXT:    it lt
+; VFP-NEXT:    movlt r1, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s2, s2
+; VFP-NEXT:    it gt
+; VFP-NEXT:    movgt.w r1, #-1
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s2, s0
+; VFP-NEXT:    it vs
+; VFP-NEXT:    movvs r1, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s2, s4
+; VFP-NEXT:    it lt
+; VFP-NEXT:    movlt r2, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s2, s2
+; VFP-NEXT:    it gt
+; VFP-NEXT:    movgt.w r2, #-1
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s2, s0
+; VFP-NEXT:    it vs
+; VFP-NEXT:    movvs r2, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it lt
+; VFP-NEXT:    movlt.w r3, #-2147483648
+; VFP-NEXT:    vcmp.f32 s2, s4
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it gt
+; VFP-NEXT:    mvngt r3, #-2147483648
+; VFP-NEXT:    vcmp.f32 s2, s2
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it vs
+; VFP-NEXT:    movvs r3, #0
+; VFP-NEXT:    pop {r4, pc}
+; VFP-NEXT:    .p2align 2
+; VFP-NEXT:  @ %bb.1:
+; VFP-NEXT:  .LCPI9_0:
+; VFP-NEXT:    .long 0xff000000 @ float -1.70141183E+38
+; VFP-NEXT:  .LCPI9_1:
+; VFP-NEXT:    .long 0x7effffff @ float 1.70141173E+38
     %x = call i128 @llvm.fptosi.sat.i128.f32(float %f)
     ret i128 %x
 }
@@ -876,60 +1272,94 @@ declare i128 @llvm.fptosi.sat.i128.f64(double)
 define i1 @test_signed_i1_f64(double %f) nounwind {
 ; SOFT-LABEL: test_signed_i1_f64:
 ; SOFT:       @ %bb.0:
-; SOFT-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; SOFT-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; SOFT-NEXT:    mov r3, #267386880
-; SOFT-NEXT:    mov r2, #0
-; SOFT-NEXT:    orr r3, r3, #-1342177280
-; SOFT-NEXT:    mov r4, r1
-; SOFT-NEXT:    mov r5, r0
-; SOFT-NEXT:    bl __aeabi_dcmpge
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #4
+; SOFT-NEXT:    sub sp, #4
+; SOFT-NEXT:    mov r5, r1
 ; SOFT-NEXT:    mov r6, r0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    bl __aeabi_d2iz
+; SOFT-NEXT:    movs r4, #0
+; SOFT-NEXT:    ldr r3, .LCPI10_0
+; SOFT-NEXT:    mov r2, r4
+; SOFT-NEXT:    bl __aeabi_dcmpge
 ; SOFT-NEXT:    mov r7, r0
-; SOFT-NEXT:    cmp r6, #0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, #0
-; SOFT-NEXT:    mov r3, #0
-; SOFT-NEXT:    mvneq r7, #0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    bl __aeabi_d2iz
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    bne .LBB10_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mvns r0, r4
+; SOFT-NEXT:  .LBB10_2:
+; SOFT-NEXT:    str r0, [sp] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r4
+; SOFT-NEXT:    mov r3, r4
 ; SOFT-NEXT:    bl __aeabi_dcmpgt
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, r5
-; SOFT-NEXT:    mov r3, r4
-; SOFT-NEXT:    movne r7, #0
+; SOFT-NEXT:    mov r7, r4
+; SOFT-NEXT:    bne .LBB10_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    ldr r7, [sp] @ 4-byte Reload
+; SOFT-NEXT:  .LBB10_4:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r6
+; SOFT-NEXT:    mov r3, r5
 ; SOFT-NEXT:    bl __aeabi_dcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    movne r7, #0
-; SOFT-NEXT:    mov r0, r7
-; SOFT-NEXT:    pop {r4, r5, r6, r7, r11, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    bne .LBB10_6
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:    mov r4, r7
+; SOFT-NEXT:  .LBB10_6:
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.7:
+; SOFT-NEXT:  .LCPI10_0:
+; SOFT-NEXT:    .long 3220176896 @ 0xbff00000
 ;
 ; VFP2-LABEL: test_signed_i1_f64:
 ; VFP2:       @ %bb.0:
-; VFP2-NEXT:    vldr d2, .LCPI10_0
-; VFP2-NEXT:    vmov d0, r0, r1
-; VFP2-NEXT:    vcmp.f64 d0, d2
+; VFP2-NEXT:    vmov.f64 d17, #-1.000000e+00
+; VFP2-NEXT:    vmov d16, r0, r1
+; VFP2-NEXT:    vcmp.f64 d16, d17
+; VFP2-NEXT:    vcvt.s32.f64 s0, d16
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcvt.s32.f64 s2, d0
-; VFP2-NEXT:    vcmp.f64 d0, #0
-; VFP2-NEXT:    vmov r0, s2
-; VFP2-NEXT:    mvnlt r0, #0
+; VFP2-NEXT:    vmov r0, s0
+; VFP2-NEXT:    vcmp.f64 d16, #0
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt.w r0, #-1
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d0, d0
+; VFP2-NEXT:    it gt
 ; VFP2-NEXT:    movgt r0, #0
+; VFP2-NEXT:    vcmp.f64 d16, d16
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r0, #0
-; VFP2-NEXT:    mov pc, lr
-; VFP2-NEXT:    .p2align 3
-; VFP2-NEXT:  @ %bb.1:
-; VFP2-NEXT:  .LCPI10_0:
-; VFP2-NEXT:    .long 0 @ double -1
-; VFP2-NEXT:    .long 3220176896
+; VFP2-NEXT:    bx lr
+;
+; FP16-LABEL: test_signed_i1_f64:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vmov.f64 d0, #-1.000000e+00
+; FP16-NEXT:    vmov d1, r0, r1
+; FP16-NEXT:    vldr d2, .LCPI10_0
+; FP16-NEXT:    vmaxnm.f64 d0, d1, d0
+; FP16-NEXT:    vminnm.f64 d0, d0, d2
+; FP16-NEXT:    vcmp.f64 d1, d1
+; FP16-NEXT:    vcvt.s32.f64 s0, d0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r0, #0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 3
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI10_0:
+; FP16-NEXT:    .long 0 @ double 0
+; FP16-NEXT:    .long 0
     %x = call i1 @llvm.fptosi.sat.i1.f64(double %f)
     ret i1 %x
 }
@@ -937,61 +1367,79 @@ define i1 @test_signed_i1_f64(double %f) nounwind {
 define i8 @test_signed_i8_f64(double %f) nounwind {
 ; SOFT-LABEL: test_signed_i8_f64:
 ; SOFT:       @ %bb.0:
-; SOFT-NEXT:    .save {r4, r5, r6, r7, r8, lr}
-; SOFT-NEXT:    push {r4, r5, r6, r7, r8, lr}
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #4
+; SOFT-NEXT:    sub sp, #4
+; SOFT-NEXT:    mov r5, r1
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    movs r4, #0
 ; SOFT-NEXT:    ldr r3, .LCPI11_0
-; SOFT-NEXT:    mov r2, #0
-; SOFT-NEXT:    mov r4, r1
-; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r2, r4
 ; SOFT-NEXT:    bl __aeabi_dcmpgt
-; SOFT-NEXT:    mov r3, #6291456
-; SOFT-NEXT:    mov r8, r0
-; SOFT-NEXT:    orr r3, r3, #-1073741824
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, #0
+; SOFT-NEXT:    str r0, [sp] @ 4-byte Spill
+; SOFT-NEXT:    ldr r3, .LCPI11_1
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r4
 ; SOFT-NEXT:    bl __aeabi_dcmpge
 ; SOFT-NEXT:    mov r7, r0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
 ; SOFT-NEXT:    bl __aeabi_d2iz
-; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    movs r1, #127
 ; SOFT-NEXT:    cmp r7, #0
-; SOFT-NEXT:    mvneq r6, #127
-; SOFT-NEXT:    cmp r8, #0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, r5
-; SOFT-NEXT:    mov r3, r4
-; SOFT-NEXT:    movne r6, #127
+; SOFT-NEXT:    bne .LBB11_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mvns r0, r1
+; SOFT-NEXT:  .LBB11_2:
+; SOFT-NEXT:    ldr r2, [sp] @ 4-byte Reload
+; SOFT-NEXT:    cmp r2, #0
+; SOFT-NEXT:    bne .LBB11_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mov r1, r0
+; SOFT-NEXT:  .LBB11_4:
+; SOFT-NEXT:    mov r7, r1
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r6
+; SOFT-NEXT:    mov r3, r5
 ; SOFT-NEXT:    bl __aeabi_dcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    movne r6, #0
-; SOFT-NEXT:    mov r0, r6
-; SOFT-NEXT:    pop {r4, r5, r6, r7, r8, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    bne .LBB11_6
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:    mov r4, r7
+; SOFT-NEXT:  .LBB11_6:
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
 ; SOFT-NEXT:    .p2align 2
-; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:  @ %bb.7:
 ; SOFT-NEXT:  .LCPI11_0:
 ; SOFT-NEXT:    .long 1080016896 @ 0x405fc000
+; SOFT-NEXT:  .LCPI11_1:
+; SOFT-NEXT:    .long 3227516928 @ 0xc0600000
 ;
 ; VFP2-LABEL: test_signed_i8_f64:
 ; VFP2:       @ %bb.0:
-; VFP2-NEXT:    vldr d2, .LCPI11_0
-; VFP2-NEXT:    vmov d0, r0, r1
-; VFP2-NEXT:    vldr d3, .LCPI11_1
-; VFP2-NEXT:    vcmp.f64 d0, d2
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcvt.s32.f64 s2, d0
-; VFP2-NEXT:    vcmp.f64 d0, d3
-; VFP2-NEXT:    vmov r0, s2
+; VFP2-NEXT:    vmov d16, r0, r1
+; VFP2-NEXT:    vldr d17, .LCPI11_0
+; VFP2-NEXT:    vldr d18, .LCPI11_1
+; VFP2-NEXT:    vcvt.s32.f64 s0, d16
+; VFP2-NEXT:    vcmp.f64 d16, d17
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vmov r0, s0
+; VFP2-NEXT:    vcmp.f64 d16, d18
+; VFP2-NEXT:    it lt
 ; VFP2-NEXT:    mvnlt r0, #127
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d0, d0
+; VFP2-NEXT:    it gt
 ; VFP2-NEXT:    movgt r0, #127
+; VFP2-NEXT:    vcmp.f64 d16, d16
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r0, #0
-; VFP2-NEXT:    mov pc, lr
+; VFP2-NEXT:    bx lr
 ; VFP2-NEXT:    .p2align 3
 ; VFP2-NEXT:  @ %bb.1:
 ; VFP2-NEXT:  .LCPI11_0:
@@ -1000,6 +1448,29 @@ define i8 @test_signed_i8_f64(double %f) nounwind {
 ; VFP2-NEXT:  .LCPI11_1:
 ; VFP2-NEXT:    .long 0 @ double 127
 ; VFP2-NEXT:    .long 1080016896
+;
+; FP16-LABEL: test_signed_i8_f64:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vldr d0, .LCPI11_0
+; FP16-NEXT:    vmov d1, r0, r1
+; FP16-NEXT:    vldr d2, .LCPI11_1
+; FP16-NEXT:    vmaxnm.f64 d0, d1, d0
+; FP16-NEXT:    vcmp.f64 d1, d1
+; FP16-NEXT:    vminnm.f64 d0, d0, d2
+; FP16-NEXT:    vcvt.s32.f64 s0, d0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r0, #0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 3
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI11_0:
+; FP16-NEXT:    .long 0 @ double -128
+; FP16-NEXT:    .long 3227516928
+; FP16-NEXT:  .LCPI11_1:
+; FP16-NEXT:    .long 0 @ double 127
+; FP16-NEXT:    .long 1080016896
     %x = call i8 @llvm.fptosi.sat.i8.f64(double %f)
     ret i8 %x
 }
@@ -1007,69 +1478,85 @@ define i8 @test_signed_i8_f64(double %f) nounwind {
 define i13 @test_signed_i13_f64(double %f) nounwind {
 ; SOFT-LABEL: test_signed_i13_f64:
 ; SOFT:       @ %bb.0:
-; SOFT-NEXT:    .save {r4, r5, r6, r7, r8, lr}
-; SOFT-NEXT:    push {r4, r5, r6, r7, r8, lr}
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #4
+; SOFT-NEXT:    sub sp, #4
+; SOFT-NEXT:    mov r5, r1
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    movs r4, #0
 ; SOFT-NEXT:    ldr r3, .LCPI12_0
-; SOFT-NEXT:    mov r2, #0
-; SOFT-NEXT:    mov r4, r1
-; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r2, r4
 ; SOFT-NEXT:    bl __aeabi_dcmpgt
-; SOFT-NEXT:    mov r3, #11534336
-; SOFT-NEXT:    mov r8, r0
-; SOFT-NEXT:    orr r3, r3, #-1073741824
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, #0
+; SOFT-NEXT:    str r0, [sp] @ 4-byte Spill
+; SOFT-NEXT:    ldr r3, .LCPI12_1
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r4
 ; SOFT-NEXT:    bl __aeabi_dcmpge
 ; SOFT-NEXT:    mov r7, r0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
 ; SOFT-NEXT:    bl __aeabi_d2iz
-; SOFT-NEXT:    mov r6, r0
-; SOFT-NEXT:    ldr r0, .LCPI12_1
 ; SOFT-NEXT:    cmp r7, #0
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, r5
-; SOFT-NEXT:    mov r3, r4
-; SOFT-NEXT:    moveq r6, r0
-; SOFT-NEXT:    mov r0, #255
-; SOFT-NEXT:    orr r0, r0, #3840
-; SOFT-NEXT:    cmp r8, #0
-; SOFT-NEXT:    movne r6, r0
-; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    bne .LBB12_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    ldr r0, .LCPI12_2
+; SOFT-NEXT:  .LBB12_2:
+; SOFT-NEXT:    ldr r1, [sp] @ 4-byte Reload
+; SOFT-NEXT:    cmp r1, #0
+; SOFT-NEXT:    bne .LBB12_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    b .LBB12_5
+; SOFT-NEXT:  .LBB12_4:
+; SOFT-NEXT:    ldr r7, .LCPI12_3
+; SOFT-NEXT:  .LBB12_5:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r6
+; SOFT-NEXT:    mov r3, r5
 ; SOFT-NEXT:    bl __aeabi_dcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    movne r6, #0
-; SOFT-NEXT:    mov r0, r6
-; SOFT-NEXT:    pop {r4, r5, r6, r7, r8, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    bne .LBB12_7
+; SOFT-NEXT:  @ %bb.6:
+; SOFT-NEXT:    mov r4, r7
+; SOFT-NEXT:  .LBB12_7:
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
 ; SOFT-NEXT:    .p2align 2
-; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:  @ %bb.8:
 ; SOFT-NEXT:  .LCPI12_0:
 ; SOFT-NEXT:    .long 1085275648 @ 0x40affe00
 ; SOFT-NEXT:  .LCPI12_1:
+; SOFT-NEXT:    .long 3232759808 @ 0xc0b00000
+; SOFT-NEXT:  .LCPI12_2:
 ; SOFT-NEXT:    .long 4294963200 @ 0xfffff000
+; SOFT-NEXT:  .LCPI12_3:
+; SOFT-NEXT:    .long 4095 @ 0xfff
 ;
 ; VFP2-LABEL: test_signed_i13_f64:
 ; VFP2:       @ %bb.0:
-; VFP2-NEXT:    vldr d2, .LCPI12_0
-; VFP2-NEXT:    vmov d0, r0, r1
-; VFP2-NEXT:    vldr d3, .LCPI12_1
-; VFP2-NEXT:    vcmp.f64 d0, d2
-; VFP2-NEXT:    ldr r0, .LCPI12_2
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcvt.s32.f64 s2, d0
-; VFP2-NEXT:    vcmp.f64 d0, d3
-; VFP2-NEXT:    vmov r1, s2
-; VFP2-NEXT:    movlt r1, r0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d0, d0
-; VFP2-NEXT:    mov r0, #255
-; VFP2-NEXT:    orr r0, r0, #3840
-; VFP2-NEXT:    movle r0, r1
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vmov d16, r0, r1
+; VFP2-NEXT:    vldr d17, .LCPI12_0
+; VFP2-NEXT:    vcvt.s32.f64 s0, d16
+; VFP2-NEXT:    vcmp.f64 d16, d17
+; VFP2-NEXT:    vldr d17, .LCPI12_1
+; VFP2-NEXT:    vmov r0, s0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    itt lt
+; VFP2-NEXT:    movwlt r0, #61440
+; VFP2-NEXT:    movtlt r0, #65535
+; VFP2-NEXT:    vcmp.f64 d16, d17
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movwgt r0, #4095
+; VFP2-NEXT:    vcmp.f64 d16, d16
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r0, #0
-; VFP2-NEXT:    mov pc, lr
+; VFP2-NEXT:    bx lr
 ; VFP2-NEXT:    .p2align 3
 ; VFP2-NEXT:  @ %bb.1:
 ; VFP2-NEXT:  .LCPI12_0:
@@ -1078,8 +1565,29 @@ define i13 @test_signed_i13_f64(double %f) nounwind {
 ; VFP2-NEXT:  .LCPI12_1:
 ; VFP2-NEXT:    .long 0 @ double 4095
 ; VFP2-NEXT:    .long 1085275648
-; VFP2-NEXT:  .LCPI12_2:
-; VFP2-NEXT:    .long 4294963200 @ 0xfffff000
+;
+; FP16-LABEL: test_signed_i13_f64:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vldr d0, .LCPI12_0
+; FP16-NEXT:    vmov d1, r0, r1
+; FP16-NEXT:    vldr d2, .LCPI12_1
+; FP16-NEXT:    vmaxnm.f64 d0, d1, d0
+; FP16-NEXT:    vcmp.f64 d1, d1
+; FP16-NEXT:    vminnm.f64 d0, d0, d2
+; FP16-NEXT:    vcvt.s32.f64 s0, d0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r0, #0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 3
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI12_0:
+; FP16-NEXT:    .long 0 @ double -4096
+; FP16-NEXT:    .long 3232759808
+; FP16-NEXT:  .LCPI12_1:
+; FP16-NEXT:    .long 0 @ double 4095
+; FP16-NEXT:    .long 1085275648
     %x = call i13 @llvm.fptosi.sat.i13.f64(double %f)
     ret i13 %x
 }
@@ -1087,69 +1595,85 @@ define i13 @test_signed_i13_f64(double %f) nounwind {
 define i16 @test_signed_i16_f64(double %f) nounwind {
 ; SOFT-LABEL: test_signed_i16_f64:
 ; SOFT:       @ %bb.0:
-; SOFT-NEXT:    .save {r4, r5, r6, r7, r8, lr}
-; SOFT-NEXT:    push {r4, r5, r6, r7, r8, lr}
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #4
+; SOFT-NEXT:    sub sp, #4
+; SOFT-NEXT:    mov r5, r1
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    movs r4, #0
 ; SOFT-NEXT:    ldr r3, .LCPI13_0
-; SOFT-NEXT:    mov r2, #0
-; SOFT-NEXT:    mov r4, r1
-; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r2, r4
 ; SOFT-NEXT:    bl __aeabi_dcmpgt
-; SOFT-NEXT:    mov r3, #14680064
-; SOFT-NEXT:    mov r8, r0
-; SOFT-NEXT:    orr r3, r3, #-1073741824
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, #0
+; SOFT-NEXT:    str r0, [sp] @ 4-byte Spill
+; SOFT-NEXT:    ldr r3, .LCPI13_1
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r4
 ; SOFT-NEXT:    bl __aeabi_dcmpge
 ; SOFT-NEXT:    mov r7, r0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
 ; SOFT-NEXT:    bl __aeabi_d2iz
-; SOFT-NEXT:    mov r6, r0
-; SOFT-NEXT:    ldr r0, .LCPI13_1
 ; SOFT-NEXT:    cmp r7, #0
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, r5
-; SOFT-NEXT:    mov r3, r4
-; SOFT-NEXT:    moveq r6, r0
-; SOFT-NEXT:    mov r0, #255
-; SOFT-NEXT:    orr r0, r0, #32512
-; SOFT-NEXT:    cmp r8, #0
-; SOFT-NEXT:    movne r6, r0
-; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    bne .LBB13_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    ldr r0, .LCPI13_2
+; SOFT-NEXT:  .LBB13_2:
+; SOFT-NEXT:    ldr r1, [sp] @ 4-byte Reload
+; SOFT-NEXT:    cmp r1, #0
+; SOFT-NEXT:    bne .LBB13_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    b .LBB13_5
+; SOFT-NEXT:  .LBB13_4:
+; SOFT-NEXT:    ldr r7, .LCPI13_3
+; SOFT-NEXT:  .LBB13_5:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r6
+; SOFT-NEXT:    mov r3, r5
 ; SOFT-NEXT:    bl __aeabi_dcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    movne r6, #0
-; SOFT-NEXT:    mov r0, r6
-; SOFT-NEXT:    pop {r4, r5, r6, r7, r8, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    bne .LBB13_7
+; SOFT-NEXT:  @ %bb.6:
+; SOFT-NEXT:    mov r4, r7
+; SOFT-NEXT:  .LBB13_7:
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
 ; SOFT-NEXT:    .p2align 2
-; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:  @ %bb.8:
 ; SOFT-NEXT:  .LCPI13_0:
 ; SOFT-NEXT:    .long 1088421824 @ 0x40dfffc0
 ; SOFT-NEXT:  .LCPI13_1:
+; SOFT-NEXT:    .long 3235905536 @ 0xc0e00000
+; SOFT-NEXT:  .LCPI13_2:
 ; SOFT-NEXT:    .long 4294934528 @ 0xffff8000
+; SOFT-NEXT:  .LCPI13_3:
+; SOFT-NEXT:    .long 32767 @ 0x7fff
 ;
 ; VFP2-LABEL: test_signed_i16_f64:
 ; VFP2:       @ %bb.0:
-; VFP2-NEXT:    vldr d2, .LCPI13_0
-; VFP2-NEXT:    vmov d0, r0, r1
-; VFP2-NEXT:    vldr d3, .LCPI13_1
-; VFP2-NEXT:    vcmp.f64 d0, d2
-; VFP2-NEXT:    ldr r0, .LCPI13_2
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcvt.s32.f64 s2, d0
-; VFP2-NEXT:    vcmp.f64 d0, d3
-; VFP2-NEXT:    vmov r1, s2
-; VFP2-NEXT:    movlt r1, r0
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d0, d0
-; VFP2-NEXT:    mov r0, #255
-; VFP2-NEXT:    orr r0, r0, #32512
-; VFP2-NEXT:    movle r0, r1
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vmov d16, r0, r1
+; VFP2-NEXT:    vldr d17, .LCPI13_0
+; VFP2-NEXT:    vcvt.s32.f64 s0, d16
+; VFP2-NEXT:    vcmp.f64 d16, d17
+; VFP2-NEXT:    vldr d17, .LCPI13_1
+; VFP2-NEXT:    vmov r0, s0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    itt lt
+; VFP2-NEXT:    movwlt r0, #32768
+; VFP2-NEXT:    movtlt r0, #65535
+; VFP2-NEXT:    vcmp.f64 d16, d17
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movwgt r0, #32767
+; VFP2-NEXT:    vcmp.f64 d16, d16
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r0, #0
-; VFP2-NEXT:    mov pc, lr
+; VFP2-NEXT:    bx lr
 ; VFP2-NEXT:    .p2align 3
 ; VFP2-NEXT:  @ %bb.1:
 ; VFP2-NEXT:  .LCPI13_0:
@@ -1158,8 +1682,29 @@ define i16 @test_signed_i16_f64(double %f) nounwind {
 ; VFP2-NEXT:  .LCPI13_1:
 ; VFP2-NEXT:    .long 0 @ double 32767
 ; VFP2-NEXT:    .long 1088421824
-; VFP2-NEXT:  .LCPI13_2:
-; VFP2-NEXT:    .long 4294934528 @ 0xffff8000
+;
+; FP16-LABEL: test_signed_i16_f64:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vldr d0, .LCPI13_0
+; FP16-NEXT:    vmov d1, r0, r1
+; FP16-NEXT:    vldr d2, .LCPI13_1
+; FP16-NEXT:    vmaxnm.f64 d0, d1, d0
+; FP16-NEXT:    vcmp.f64 d1, d1
+; FP16-NEXT:    vminnm.f64 d0, d0, d2
+; FP16-NEXT:    vcvt.s32.f64 s0, d0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r0, #0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 3
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI13_0:
+; FP16-NEXT:    .long 0 @ double -32768
+; FP16-NEXT:    .long 3235905536
+; FP16-NEXT:  .LCPI13_1:
+; FP16-NEXT:    .long 0 @ double 32767
+; FP16-NEXT:    .long 1088421824
     %x = call i16 @llvm.fptosi.sat.i16.f64(double %f)
     ret i16 %x
 }
@@ -1167,79 +1712,117 @@ define i16 @test_signed_i16_f64(double %f) nounwind {
 define i19 @test_signed_i19_f64(double %f) nounwind {
 ; SOFT-LABEL: test_signed_i19_f64:
 ; SOFT:       @ %bb.0:
-; SOFT-NEXT:    .save {r4, r5, r6, r7, r8, lr}
-; SOFT-NEXT:    push {r4, r5, r6, r7, r8, lr}
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #4
+; SOFT-NEXT:    sub sp, #4
+; SOFT-NEXT:    mov r5, r1
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    movs r4, #0
 ; SOFT-NEXT:    ldr r3, .LCPI14_0
-; SOFT-NEXT:    mov r2, #0
-; SOFT-NEXT:    mov r4, r1
-; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r2, r4
 ; SOFT-NEXT:    bl __aeabi_dcmpgt
-; SOFT-NEXT:    mov r3, #17825792
-; SOFT-NEXT:    mov r8, r0
-; SOFT-NEXT:    orr r3, r3, #-1073741824
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, #0
+; SOFT-NEXT:    str r0, [sp] @ 4-byte Spill
+; SOFT-NEXT:    ldr r3, .LCPI14_1
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r4
 ; SOFT-NEXT:    bl __aeabi_dcmpge
 ; SOFT-NEXT:    mov r7, r0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
 ; SOFT-NEXT:    bl __aeabi_d2iz
-; SOFT-NEXT:    mov r6, r0
-; SOFT-NEXT:    mov r0, #66846720
-; SOFT-NEXT:    orr r0, r0, #-67108864
 ; SOFT-NEXT:    cmp r7, #0
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, r5
-; SOFT-NEXT:    moveq r6, r0
-; SOFT-NEXT:    ldr r0, .LCPI14_1
-; SOFT-NEXT:    cmp r8, #0
-; SOFT-NEXT:    mov r3, r4
-; SOFT-NEXT:    movne r6, r0
-; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    bne .LBB14_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    ldr r0, .LCPI14_2
+; SOFT-NEXT:  .LBB14_2:
+; SOFT-NEXT:    ldr r1, [sp] @ 4-byte Reload
+; SOFT-NEXT:    cmp r1, #0
+; SOFT-NEXT:    bne .LBB14_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    b .LBB14_5
+; SOFT-NEXT:  .LBB14_4:
+; SOFT-NEXT:    ldr r7, .LCPI14_3
+; SOFT-NEXT:  .LBB14_5:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r6
+; SOFT-NEXT:    mov r3, r5
 ; SOFT-NEXT:    bl __aeabi_dcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    movne r6, #0
-; SOFT-NEXT:    mov r0, r6
-; SOFT-NEXT:    pop {r4, r5, r6, r7, r8, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    bne .LBB14_7
+; SOFT-NEXT:  @ %bb.6:
+; SOFT-NEXT:    mov r4, r7
+; SOFT-NEXT:  .LBB14_7:
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
 ; SOFT-NEXT:    .p2align 2
-; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:  @ %bb.8:
 ; SOFT-NEXT:  .LCPI14_0:
 ; SOFT-NEXT:    .long 1091567608 @ 0x410ffff8
 ; SOFT-NEXT:  .LCPI14_1:
+; SOFT-NEXT:    .long 3239051264 @ 0xc1100000
+; SOFT-NEXT:  .LCPI14_2:
+; SOFT-NEXT:    .long 4294705152 @ 0xfffc0000
+; SOFT-NEXT:  .LCPI14_3:
 ; SOFT-NEXT:    .long 262143 @ 0x3ffff
 ;
 ; VFP2-LABEL: test_signed_i19_f64:
 ; VFP2:       @ %bb.0:
-; VFP2-NEXT:    vmov d0, r0, r1
-; VFP2-NEXT:    vldr d3, .LCPI14_2
-; VFP2-NEXT:    vldr d2, .LCPI14_0
-; VFP2-NEXT:    mov r0, #66846720
-; VFP2-NEXT:    vcvt.s32.f64 s2, d0
-; VFP2-NEXT:    orr r0, r0, #-67108864
-; VFP2-NEXT:    ldr r1, .LCPI14_1
-; VFP2-NEXT:    vcmp.f64 d0, d3
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vmov r2, s2
-; VFP2-NEXT:    vcmp.f64 d0, d2
-; VFP2-NEXT:    movge r0, r2
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d0, d0
-; VFP2-NEXT:    movgt r0, r1
+; VFP2-NEXT:    vmov d16, r0, r1
+; VFP2-NEXT:    vldr d17, .LCPI14_0
+; VFP2-NEXT:    vcvt.s32.f64 s0, d16
+; VFP2-NEXT:    vcmp.f64 d16, d17
+; VFP2-NEXT:    vldr d17, .LCPI14_1
+; VFP2-NEXT:    vmov r0, s0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    itt lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    movtlt r0, #65532
+; VFP2-NEXT:    vcmp.f64 d16, d17
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    itt gt
+; VFP2-NEXT:    movwgt r0, #65535
+; VFP2-NEXT:    movtgt r0, #3
+; VFP2-NEXT:    vcmp.f64 d16, d16
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r0, #0
-; VFP2-NEXT:    mov pc, lr
+; VFP2-NEXT:    bx lr
 ; VFP2-NEXT:    .p2align 3
 ; VFP2-NEXT:  @ %bb.1:
 ; VFP2-NEXT:  .LCPI14_0:
-; VFP2-NEXT:    .long 0 @ double 262143
-; VFP2-NEXT:    .long 1091567608
-; VFP2-NEXT:  .LCPI14_2:
 ; VFP2-NEXT:    .long 0 @ double -262144
 ; VFP2-NEXT:    .long 3239051264
 ; VFP2-NEXT:  .LCPI14_1:
-; VFP2-NEXT:    .long 262143 @ 0x3ffff
+; VFP2-NEXT:    .long 0 @ double 262143
+; VFP2-NEXT:    .long 1091567608
+;
+; FP16-LABEL: test_signed_i19_f64:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vldr d0, .LCPI14_0
+; FP16-NEXT:    vmov d1, r0, r1
+; FP16-NEXT:    vldr d2, .LCPI14_1
+; FP16-NEXT:    vmaxnm.f64 d0, d1, d0
+; FP16-NEXT:    vcmp.f64 d1, d1
+; FP16-NEXT:    vminnm.f64 d0, d0, d2
+; FP16-NEXT:    vcvt.s32.f64 s0, d0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r0, #0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 3
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI14_0:
+; FP16-NEXT:    .long 0 @ double -262144
+; FP16-NEXT:    .long 3239051264
+; FP16-NEXT:  .LCPI14_1:
+; FP16-NEXT:    .long 0 @ double 262143
+; FP16-NEXT:    .long 1091567608
     %x = call i19 @llvm.fptosi.sat.i19.f64(double %f)
     ret i19 %x
 }
@@ -1247,62 +1830,85 @@ define i19 @test_signed_i19_f64(double %f) nounwind {
 define i32 @test_signed_i32_f64(double %f) nounwind {
 ; SOFT-LABEL: test_signed_i32_f64:
 ; SOFT:       @ %bb.0:
-; SOFT-NEXT:    .save {r4, r5, r6, r7, r8, lr}
-; SOFT-NEXT:    push {r4, r5, r6, r7, r8, lr}
-; SOFT-NEXT:    mov r2, #1069547520
-; SOFT-NEXT:    ldr r3, .LCPI15_0
-; SOFT-NEXT:    orr r2, r2, #-1073741824
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #4
+; SOFT-NEXT:    sub sp, #4
 ; SOFT-NEXT:    mov r4, r1
 ; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    ldr r2, .LCPI15_0
+; SOFT-NEXT:    ldr r3, .LCPI15_1
 ; SOFT-NEXT:    bl __aeabi_dcmpgt
-; SOFT-NEXT:    mov r3, #31457280
-; SOFT-NEXT:    mov r8, r0
-; SOFT-NEXT:    orr r3, r3, #-1073741824
+; SOFT-NEXT:    str r0, [sp] @ 4-byte Spill
+; SOFT-NEXT:    movs r6, #0
+; SOFT-NEXT:    ldr r3, .LCPI15_2
 ; SOFT-NEXT:    mov r0, r5
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, #0
+; SOFT-NEXT:    mov r2, r6
 ; SOFT-NEXT:    bl __aeabi_dcmpge
 ; SOFT-NEXT:    mov r7, r0
 ; SOFT-NEXT:    mov r0, r5
 ; SOFT-NEXT:    mov r1, r4
 ; SOFT-NEXT:    bl __aeabi_d2iz
-; SOFT-NEXT:    mov r6, r0
 ; SOFT-NEXT:    cmp r7, #0
-; SOFT-NEXT:    moveq r6, #-2147483648
-; SOFT-NEXT:    cmp r8, #0
+; SOFT-NEXT:    bne .LBB15_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    movs r0, #1
+; SOFT-NEXT:    lsls r0, r0, #31
+; SOFT-NEXT:  .LBB15_2:
+; SOFT-NEXT:    ldr r1, [sp] @ 4-byte Reload
+; SOFT-NEXT:    cmp r1, #0
+; SOFT-NEXT:    bne .LBB15_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    b .LBB15_5
+; SOFT-NEXT:  .LBB15_4:
+; SOFT-NEXT:    ldr r7, .LCPI15_3
+; SOFT-NEXT:  .LBB15_5:
 ; SOFT-NEXT:    mov r0, r5
 ; SOFT-NEXT:    mov r1, r4
 ; SOFT-NEXT:    mov r2, r5
 ; SOFT-NEXT:    mov r3, r4
-; SOFT-NEXT:    mvnne r6, #-2147483648
 ; SOFT-NEXT:    bl __aeabi_dcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    movne r6, #0
+; SOFT-NEXT:    bne .LBB15_7
+; SOFT-NEXT:  @ %bb.6:
+; SOFT-NEXT:    mov r6, r7
+; SOFT-NEXT:  .LBB15_7:
 ; SOFT-NEXT:    mov r0, r6
-; SOFT-NEXT:    pop {r4, r5, r6, r7, r8, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
 ; SOFT-NEXT:    .p2align 2
-; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:  @ %bb.8:
 ; SOFT-NEXT:  .LCPI15_0:
+; SOFT-NEXT:    .long 4290772992 @ 0xffc00000
+; SOFT-NEXT:  .LCPI15_1:
 ; SOFT-NEXT:    .long 1105199103 @ 0x41dfffff
+; SOFT-NEXT:  .LCPI15_2:
+; SOFT-NEXT:    .long 3252682752 @ 0xc1e00000
+; SOFT-NEXT:  .LCPI15_3:
+; SOFT-NEXT:    .long 2147483647 @ 0x7fffffff
 ;
 ; VFP2-LABEL: test_signed_i32_f64:
 ; VFP2:       @ %bb.0:
-; VFP2-NEXT:    vldr d2, .LCPI15_0
-; VFP2-NEXT:    vmov d0, r0, r1
-; VFP2-NEXT:    vldr d3, .LCPI15_1
-; VFP2-NEXT:    vcmp.f64 d0, d2
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcvt.s32.f64 s2, d0
-; VFP2-NEXT:    vcmp.f64 d0, d3
-; VFP2-NEXT:    vmov r0, s2
-; VFP2-NEXT:    movlt r0, #-2147483648
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d0, d0
+; VFP2-NEXT:    vmov d16, r0, r1
+; VFP2-NEXT:    vldr d17, .LCPI15_0
+; VFP2-NEXT:    vldr d18, .LCPI15_1
+; VFP2-NEXT:    vcvt.s32.f64 s0, d16
+; VFP2-NEXT:    vcmp.f64 d16, d17
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vmov r0, s0
+; VFP2-NEXT:    vcmp.f64 d16, d18
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt.w r0, #-2147483648
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
 ; VFP2-NEXT:    mvngt r0, #-2147483648
+; VFP2-NEXT:    vcmp.f64 d16, d16
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r0, #0
-; VFP2-NEXT:    mov pc, lr
+; VFP2-NEXT:    bx lr
 ; VFP2-NEXT:    .p2align 3
 ; VFP2-NEXT:  @ %bb.1:
 ; VFP2-NEXT:  .LCPI15_0:
@@ -1311,6 +1917,29 @@ define i32 @test_signed_i32_f64(double %f) nounwind {
 ; VFP2-NEXT:  .LCPI15_1:
 ; VFP2-NEXT:    .long 4290772992 @ double 2147483647
 ; VFP2-NEXT:    .long 1105199103
+;
+; FP16-LABEL: test_signed_i32_f64:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vldr d0, .LCPI15_0
+; FP16-NEXT:    vmov d1, r0, r1
+; FP16-NEXT:    vldr d2, .LCPI15_1
+; FP16-NEXT:    vmaxnm.f64 d0, d1, d0
+; FP16-NEXT:    vcmp.f64 d1, d1
+; FP16-NEXT:    vminnm.f64 d0, d0, d2
+; FP16-NEXT:    vcvt.s32.f64 s0, d0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r0, #0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 3
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI15_0:
+; FP16-NEXT:    .long 0 @ double -2147483648
+; FP16-NEXT:    .long 3252682752
+; FP16-NEXT:  .LCPI15_1:
+; FP16-NEXT:    .long 4290772992 @ double 2147483647
+; FP16-NEXT:    .long 1105199103
     %x = call i32 @llvm.fptosi.sat.i32.f64(double %f)
     ret i32 %x
 }
@@ -1318,103 +1947,144 @@ define i32 @test_signed_i32_f64(double %f) nounwind {
 define i50 @test_signed_i50_f64(double %f) nounwind {
 ; SOFT-LABEL: test_signed_i50_f64:
 ; SOFT:       @ %bb.0:
-; SOFT-NEXT:    .save {r4, r5, r6, r7, r8, r9, r11, lr}
-; SOFT-NEXT:    push {r4, r5, r6, r7, r8, r9, r11, lr}
-; SOFT-NEXT:    mvn r2, #15
-; SOFT-NEXT:    mvn r3, #-1124073472
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #20
+; SOFT-NEXT:    sub sp, #20
 ; SOFT-NEXT:    mov r4, r1
 ; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    movs r0, #15
+; SOFT-NEXT:    mvns r2, r0
+; SOFT-NEXT:    ldr r3, .LCPI16_0
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    str r2, [sp, #12] @ 4-byte Spill
 ; SOFT-NEXT:    bl __aeabi_dcmpgt
-; SOFT-NEXT:    mov r8, r0
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    movs r0, #195
+; SOFT-NEXT:    lsls r3, r0, #24
+; SOFT-NEXT:    movs r6, #0
 ; SOFT-NEXT:    mov r0, r5
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, #0
-; SOFT-NEXT:    mov r3, #-1023410176
+; SOFT-NEXT:    mov r2, r6
+; SOFT-NEXT:    str r3, [sp, #8] @ 4-byte Spill
 ; SOFT-NEXT:    bl __aeabi_dcmpge
-; SOFT-NEXT:    mov r9, r0
+; SOFT-NEXT:    mov r7, r0
 ; SOFT-NEXT:    mov r0, r5
 ; SOFT-NEXT:    mov r1, r4
 ; SOFT-NEXT:    bl __aeabi_d2lz
-; SOFT-NEXT:    mov r6, r0
-; SOFT-NEXT:    cmp r9, #0
-; SOFT-NEXT:    mov r7, r1
-; SOFT-NEXT:    moveq r6, r9
-; SOFT-NEXT:    cmp r8, #0
+; SOFT-NEXT:    str r1, [sp, #16] @ 4-byte Spill
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    bne .LBB16_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:  .LBB16_2:
+; SOFT-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    cmp r1, #0
+; SOFT-NEXT:    bne .LBB16_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    b .LBB16_5
+; SOFT-NEXT:  .LBB16_4:
+; SOFT-NEXT:    mvns r7, r6
+; SOFT-NEXT:  .LBB16_5:
 ; SOFT-NEXT:    mov r0, r5
 ; SOFT-NEXT:    mov r1, r4
 ; SOFT-NEXT:    mov r2, r5
 ; SOFT-NEXT:    mov r3, r4
-; SOFT-NEXT:    mvnne r6, #0
 ; SOFT-NEXT:    bl __aeabi_dcmpun
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bne .LBB16_7
+; SOFT-NEXT:  @ %bb.6:
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:  .LBB16_7:
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
 ; SOFT-NEXT:    mov r0, r5
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mvn r2, #15
-; SOFT-NEXT:    mvn r3, #-1124073472
-; SOFT-NEXT:    movne r6, #0
+; SOFT-NEXT:    ldr r2, [sp, #12] @ 4-byte Reload
+; SOFT-NEXT:    ldr r3, .LCPI16_0
 ; SOFT-NEXT:    bl __aeabi_dcmpgt
-; SOFT-NEXT:    mov r8, r0
+; SOFT-NEXT:    mov r7, r0
 ; SOFT-NEXT:    mov r0, r5
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, #0
-; SOFT-NEXT:    mov r3, #-1023410176
+; SOFT-NEXT:    mov r2, r6
+; SOFT-NEXT:    ldr r3, [sp, #8] @ 4-byte Reload
 ; SOFT-NEXT:    bl __aeabi_dcmpge
-; SOFT-NEXT:    mov r1, #16646144
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    orr r1, r1, #-16777216
-; SOFT-NEXT:    ldr r0, .LCPI16_0
-; SOFT-NEXT:    mov r2, r5
-; SOFT-NEXT:    mov r3, r4
-; SOFT-NEXT:    moveq r7, r1
-; SOFT-NEXT:    cmp r8, #0
-; SOFT-NEXT:    movne r7, r0
+; SOFT-NEXT:    bne .LBB16_9
+; SOFT-NEXT:  @ %bb.8:
+; SOFT-NEXT:    ldr r0, .LCPI16_1
+; SOFT-NEXT:    str r0, [sp, #16] @ 4-byte Spill
+; SOFT-NEXT:  .LBB16_9:
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    bne .LBB16_11
+; SOFT-NEXT:  @ %bb.10:
+; SOFT-NEXT:    ldr r7, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:    b .LBB16_12
+; SOFT-NEXT:  .LBB16_11:
+; SOFT-NEXT:    ldr r7, .LCPI16_2
+; SOFT-NEXT:  .LBB16_12:
 ; SOFT-NEXT:    mov r0, r5
 ; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    mov r2, r5
+; SOFT-NEXT:    mov r3, r4
 ; SOFT-NEXT:    bl __aeabi_dcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r0, r6
-; SOFT-NEXT:    movne r7, #0
-; SOFT-NEXT:    mov r1, r7
-; SOFT-NEXT:    pop {r4, r5, r6, r7, r8, r9, r11, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    bne .LBB16_14
+; SOFT-NEXT:  @ %bb.13:
+; SOFT-NEXT:    mov r6, r7
+; SOFT-NEXT:  .LBB16_14:
+; SOFT-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    mov r1, r6
+; SOFT-NEXT:    add sp, #20
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
 ; SOFT-NEXT:    .p2align 2
-; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:  @ %bb.15:
 ; SOFT-NEXT:  .LCPI16_0:
+; SOFT-NEXT:    .long 1124073471 @ 0x42ffffff
+; SOFT-NEXT:  .LCPI16_1:
+; SOFT-NEXT:    .long 4294836224 @ 0xfffe0000
+; SOFT-NEXT:  .LCPI16_2:
 ; SOFT-NEXT:    .long 131071 @ 0x1ffff
 ;
 ; VFP2-LABEL: test_signed_i50_f64:
 ; VFP2:       @ %bb.0:
-; VFP2-NEXT:    .save {r4, r5, r11, lr}
-; VFP2-NEXT:    push {r4, r5, r11, lr}
-; VFP2-NEXT:    mov r4, r1
-; VFP2-NEXT:    mov r5, r0
+; VFP2-NEXT:    .save {r7, lr}
+; VFP2-NEXT:    push {r7, lr}
+; VFP2-NEXT:    .vsave {d8}
+; VFP2-NEXT:    vpush {d8}
+; VFP2-NEXT:    vmov d8, r0, r1
 ; VFP2-NEXT:    bl __aeabi_d2lz
-; VFP2-NEXT:    vldr d0, .LCPI16_0
-; VFP2-NEXT:    vmov d2, r5, r4
-; VFP2-NEXT:    vldr d1, .LCPI16_1
-; VFP2-NEXT:    mov r2, #16646144
-; VFP2-NEXT:    vcmp.f64 d2, d0
-; VFP2-NEXT:    orr r2, r2, #-16777216
-; VFP2-NEXT:    ldr r3, .LCPI16_2
+; VFP2-NEXT:    vldr d16, .LCPI16_0
+; VFP2-NEXT:    vldr d17, .LCPI16_1
+; VFP2-NEXT:    vcmp.f64 d8, d16
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d2, d1
-; VFP2-NEXT:    movlt r1, r2
+; VFP2-NEXT:    itt lt
+; VFP2-NEXT:    movlt r1, #0
+; VFP2-NEXT:    movtlt r1, #65534
+; VFP2-NEXT:    vcmp.f64 d8, d17
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d2, d2
-; VFP2-NEXT:    movgt r1, r3
+; VFP2-NEXT:    itt gt
+; VFP2-NEXT:    movwgt r1, #65535
+; VFP2-NEXT:    movtgt r1, #1
+; VFP2-NEXT:    vcmp.f64 d8, d8
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d2, d0
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r1, #0
+; VFP2-NEXT:    vcmp.f64 d8, d16
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d2, d1
+; VFP2-NEXT:    it lt
 ; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vcmp.f64 d8, d17
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d2, d2
-; VFP2-NEXT:    mvngt r0, #0
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r0, #-1
+; VFP2-NEXT:    vcmp.f64 d8, d8
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r0, #0
-; VFP2-NEXT:    pop {r4, r5, r11, lr}
-; VFP2-NEXT:    mov pc, lr
+; VFP2-NEXT:    vpop {d8}
+; VFP2-NEXT:    pop {r7, pc}
 ; VFP2-NEXT:    .p2align 3
 ; VFP2-NEXT:  @ %bb.1:
 ; VFP2-NEXT:  .LCPI16_0:
@@ -1423,8 +2093,35 @@ define i50 @test_signed_i50_f64(double %f) nounwind {
 ; VFP2-NEXT:  .LCPI16_1:
 ; VFP2-NEXT:    .long 4294967280 @ double 562949953421311
 ; VFP2-NEXT:    .long 1124073471
-; VFP2-NEXT:  .LCPI16_2:
-; VFP2-NEXT:    .long 131071 @ 0x1ffff
+;
+; FP16-LABEL: test_signed_i50_f64:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    .save {r7, lr}
+; FP16-NEXT:    push {r7, lr}
+; FP16-NEXT:    .vsave {d8}
+; FP16-NEXT:    vpush {d8}
+; FP16-NEXT:    vldr d0, .LCPI16_0
+; FP16-NEXT:    vmov d8, r0, r1
+; FP16-NEXT:    vldr d1, .LCPI16_1
+; FP16-NEXT:    vmaxnm.f64 d0, d8, d0
+; FP16-NEXT:    vminnm.f64 d0, d0, d1
+; FP16-NEXT:    vmov r0, r1, d0
+; FP16-NEXT:    bl __aeabi_d2lz
+; FP16-NEXT:    vcmp.f64 d8, d8
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    itt vs
+; FP16-NEXT:    movvs r0, #0
+; FP16-NEXT:    movvs r1, #0
+; FP16-NEXT:    vpop {d8}
+; FP16-NEXT:    pop {r7, pc}
+; FP16-NEXT:    .p2align 3
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI16_0:
+; FP16-NEXT:    .long 0 @ double -562949953421312
+; FP16-NEXT:    .long 3271557120
+; FP16-NEXT:  .LCPI16_1:
+; FP16-NEXT:    .long 4294967280 @ double 562949953421311
+; FP16-NEXT:    .long 1124073471
     %x = call i50 @llvm.fptosi.sat.i50.f64(double %f)
     ret i50 %x
 }
@@ -1432,103 +2129,136 @@ define i50 @test_signed_i50_f64(double %f) nounwind {
 define i64 @test_signed_i64_f64(double %f) nounwind {
 ; SOFT-LABEL: test_signed_i64_f64:
 ; SOFT:       @ %bb.0:
-; SOFT-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-; SOFT-NEXT:    push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-; SOFT-NEXT:    .pad #4
-; SOFT-NEXT:    sub sp, sp, #4
-; SOFT-NEXT:    ldr r8, .LCPI17_0
-; SOFT-NEXT:    mvn r2, #0
-; SOFT-NEXT:    mov r4, r1
-; SOFT-NEXT:    mov r5, r0
-; SOFT-NEXT:    mov r3, r8
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #12
+; SOFT-NEXT:    sub sp, #12
+; SOFT-NEXT:    mov r5, r1
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    movs r4, #0
+; SOFT-NEXT:    mvns r2, r4
+; SOFT-NEXT:    ldr r3, .LCPI17_0
+; SOFT-NEXT:    str r2, [sp, #8] @ 4-byte Spill
 ; SOFT-NEXT:    bl __aeabi_dcmpgt
-; SOFT-NEXT:    mov r9, #65011712
-; SOFT-NEXT:    mov r10, r0
-; SOFT-NEXT:    orr r9, r9, #-1073741824
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, #0
-; SOFT-NEXT:    mov r3, r9
+; SOFT-NEXT:    str r0, [sp] @ 4-byte Spill
+; SOFT-NEXT:    ldr r3, .LCPI17_1
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r4
 ; SOFT-NEXT:    bl __aeabi_dcmpge
-; SOFT-NEXT:    mov r11, r0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
 ; SOFT-NEXT:    bl __aeabi_d2lz
-; SOFT-NEXT:    mov r6, r0
-; SOFT-NEXT:    cmp r11, #0
-; SOFT-NEXT:    mov r7, r1
-; SOFT-NEXT:    moveq r6, r11
-; SOFT-NEXT:    cmp r10, #0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, r5
-; SOFT-NEXT:    mov r3, r4
-; SOFT-NEXT:    mvnne r6, #0
+; SOFT-NEXT:    str r1, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    bne .LBB17_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:  .LBB17_2:
+; SOFT-NEXT:    ldr r1, [sp] @ 4-byte Reload
+; SOFT-NEXT:    cmp r1, #0
+; SOFT-NEXT:    ldr r7, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:    bne .LBB17_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:  .LBB17_4:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r6
+; SOFT-NEXT:    mov r3, r5
 ; SOFT-NEXT:    bl __aeabi_dcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mvn r2, #0
-; SOFT-NEXT:    mov r3, r8
-; SOFT-NEXT:    movne r6, #0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    bne .LBB17_6
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:  .LBB17_6:
+; SOFT-NEXT:    str r0, [sp] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    ldr r2, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:    ldr r3, .LCPI17_0
 ; SOFT-NEXT:    bl __aeabi_dcmpgt
-; SOFT-NEXT:    mov r8, r0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, #0
-; SOFT-NEXT:    mov r3, r9
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r4
+; SOFT-NEXT:    ldr r3, .LCPI17_1
 ; SOFT-NEXT:    bl __aeabi_dcmpge
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    moveq r7, #-2147483648
-; SOFT-NEXT:    cmp r8, #0
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, r5
-; SOFT-NEXT:    mov r3, r4
-; SOFT-NEXT:    mvnne r7, #-2147483648
+; SOFT-NEXT:    bne .LBB17_8
+; SOFT-NEXT:  @ %bb.7:
+; SOFT-NEXT:    movs r0, #1
+; SOFT-NEXT:    lsls r0, r0, #31
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:  .LBB17_8:
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    bne .LBB17_10
+; SOFT-NEXT:  @ %bb.9:
+; SOFT-NEXT:    ldr r7, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    b .LBB17_11
+; SOFT-NEXT:  .LBB17_10:
+; SOFT-NEXT:    ldr r7, .LCPI17_2
+; SOFT-NEXT:  .LBB17_11:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r6
+; SOFT-NEXT:    mov r3, r5
 ; SOFT-NEXT:    bl __aeabi_dcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r0, r6
-; SOFT-NEXT:    movne r7, #0
-; SOFT-NEXT:    mov r1, r7
-; SOFT-NEXT:    add sp, sp, #4
-; SOFT-NEXT:    pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    bne .LBB17_13
+; SOFT-NEXT:  @ %bb.12:
+; SOFT-NEXT:    mov r4, r7
+; SOFT-NEXT:  .LBB17_13:
+; SOFT-NEXT:    ldr r0, [sp] @ 4-byte Reload
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    add sp, #12
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
 ; SOFT-NEXT:    .p2align 2
-; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:  @ %bb.14:
 ; SOFT-NEXT:  .LCPI17_0:
 ; SOFT-NEXT:    .long 1138753535 @ 0x43dfffff
+; SOFT-NEXT:  .LCPI17_1:
+; SOFT-NEXT:    .long 3286237184 @ 0xc3e00000
+; SOFT-NEXT:  .LCPI17_2:
+; SOFT-NEXT:    .long 2147483647 @ 0x7fffffff
 ;
 ; VFP2-LABEL: test_signed_i64_f64:
 ; VFP2:       @ %bb.0:
-; VFP2-NEXT:    .save {r4, r5, r11, lr}
-; VFP2-NEXT:    push {r4, r5, r11, lr}
+; VFP2-NEXT:    .save {r4, r5, r7, lr}
+; VFP2-NEXT:    push {r4, r5, r7, lr}
 ; VFP2-NEXT:    mov r4, r1
 ; VFP2-NEXT:    mov r5, r0
 ; VFP2-NEXT:    bl __aeabi_d2lz
-; VFP2-NEXT:    vldr d0, .LCPI17_0
-; VFP2-NEXT:    vmov d1, r5, r4
-; VFP2-NEXT:    vldr d2, .LCPI17_1
-; VFP2-NEXT:    vcmp.f64 d1, d0
+; VFP2-NEXT:    vldr d16, .LCPI17_0
+; VFP2-NEXT:    vmov d17, r5, r4
+; VFP2-NEXT:    vldr d18, .LCPI17_1
+; VFP2-NEXT:    vcmp.f64 d17, d16
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d2
+; VFP2-NEXT:    it lt
 ; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vcmp.f64 d17, d18
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d1
-; VFP2-NEXT:    mvngt r0, #0
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r0, #-1
+; VFP2-NEXT:    vcmp.f64 d17, d17
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d0
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r0, #0
+; VFP2-NEXT:    vcmp.f64 d17, d16
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d2
-; VFP2-NEXT:    movlt r1, #-2147483648
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt.w r1, #-2147483648
+; VFP2-NEXT:    vcmp.f64 d17, d18
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d1
+; VFP2-NEXT:    it gt
 ; VFP2-NEXT:    mvngt r1, #-2147483648
+; VFP2-NEXT:    vcmp.f64 d17, d17
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r1, #0
-; VFP2-NEXT:    pop {r4, r5, r11, lr}
-; VFP2-NEXT:    mov pc, lr
+; VFP2-NEXT:    pop {r4, r5, r7, pc}
 ; VFP2-NEXT:    .p2align 3
 ; VFP2-NEXT:  @ %bb.1:
 ; VFP2-NEXT:  .LCPI17_0:
@@ -1537,6 +2267,50 @@ define i64 @test_signed_i64_f64(double %f) nounwind {
 ; VFP2-NEXT:  .LCPI17_1:
 ; VFP2-NEXT:    .long 4294967295 @ double 9.2233720368547748E+18
 ; VFP2-NEXT:    .long 1138753535
+;
+; FP16-LABEL: test_signed_i64_f64:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    .save {r4, r5, r7, lr}
+; FP16-NEXT:    push {r4, r5, r7, lr}
+; FP16-NEXT:    mov r4, r1
+; FP16-NEXT:    mov r5, r0
+; FP16-NEXT:    bl __aeabi_d2lz
+; FP16-NEXT:    vldr d0, .LCPI17_0
+; FP16-NEXT:    vmov d1, r5, r4
+; FP16-NEXT:    vldr d2, .LCPI17_1
+; FP16-NEXT:    vcmp.f64 d1, d0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r0, #0
+; FP16-NEXT:    vcmp.f64 d1, d2
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r0, #-1
+; FP16-NEXT:    vcmp.f64 d1, d1
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r0, #0
+; FP16-NEXT:    vcmp.f64 d1, d0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt.w r1, #-2147483648
+; FP16-NEXT:    vcmp.f64 d1, d2
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    mvngt r1, #-2147483648
+; FP16-NEXT:    vcmp.f64 d1, d1
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r1, #0
+; FP16-NEXT:    pop {r4, r5, r7, pc}
+; FP16-NEXT:    .p2align 3
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI17_0:
+; FP16-NEXT:    .long 0 @ double -9.2233720368547758E+18
+; FP16-NEXT:    .long 3286237184
+; FP16-NEXT:  .LCPI17_1:
+; FP16-NEXT:    .long 4294967295 @ double 9.2233720368547748E+18
+; FP16-NEXT:    .long 1138753535
     %x = call i64 @llvm.fptosi.sat.i64.f64(double %f)
     ret i64 %x
 }
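; --- Editorial sketch (illustrative, not part of the autogenerated diff) --
; The checks above pin down the LangRef semantics of the intrinsic:
; out-of-range inputs clamp to the integer limits and NaN maps to zero,
; so the expansion pairs the truncating __aeabi_d2lz libcall with explicit
; lt/gt/unordered fixups.  INT64_MIN (-2^63) is exactly representable as a
; double, but INT64_MAX is not, so the upper pool constant is the largest
; double below 2^63 (9223372036854774784).
declare i64 @llvm.fptosi.sat.i64.f64(double)

define i64 @sat_example_i64(double %d) {
  ; 1.0e30 -> 9223372036854775807, -1.0e30 -> -9223372036854775808, NaN -> 0
  %r = call i64 @llvm.fptosi.sat.i64.f64(double %d)
  ret i64 %r
}
; ---------------------------------------------------------------------------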
@@ -1544,171 +2318,230 @@ define i64 @test_signed_i64_f64(double %f) nounwind {
 define i100 @test_signed_i100_f64(double %f) nounwind {
 ; SOFT-LABEL: test_signed_i100_f64:
 ; SOFT:       @ %bb.0:
-; SOFT-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-; SOFT-NEXT:    push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-; SOFT-NEXT:    .pad #4
-; SOFT-NEXT:    sub sp, sp, #4
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #20
+; SOFT-NEXT:    sub sp, #20
+; SOFT-NEXT:    mov r5, r1
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    movs r4, #0
+; SOFT-NEXT:    mvns r2, r4
 ; SOFT-NEXT:    ldr r3, .LCPI18_0
-; SOFT-NEXT:    mvn r2, #0
-; SOFT-NEXT:    mov r4, r1
-; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    str r2, [sp, #16] @ 4-byte Spill
 ; SOFT-NEXT:    bl __aeabi_dcmpgt
-; SOFT-NEXT:    mov r3, #102760448
-; SOFT-NEXT:    mov r10, r0
-; SOFT-NEXT:    orr r3, r3, #-1073741824
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, #0
+; SOFT-NEXT:    str r0, [sp, #12] @ 4-byte Spill
+; SOFT-NEXT:    ldr r3, .LCPI18_1
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r4
 ; SOFT-NEXT:    bl __aeabi_dcmpge
-; SOFT-NEXT:    mov r11, r0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
 ; SOFT-NEXT:    bl __fixdfti
-; SOFT-NEXT:    mov r6, r0
-; SOFT-NEXT:    cmp r11, #0
-; SOFT-NEXT:    mov r7, r1
-; SOFT-NEXT:    mov r8, r2
-; SOFT-NEXT:    mov r9, r3
-; SOFT-NEXT:    moveq r6, r11
-; SOFT-NEXT:    cmp r10, #0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, r5
-; SOFT-NEXT:    mov r3, r4
-; SOFT-NEXT:    mvnne r6, #0
+; SOFT-NEXT:    str r1, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    str r2, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    bne .LBB18_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:  .LBB18_2:
+; SOFT-NEXT:    str r3, [sp] @ 4-byte Spill
+; SOFT-NEXT:    ldr r1, [sp, #12] @ 4-byte Reload
+; SOFT-NEXT:    cmp r1, #0
+; SOFT-NEXT:    ldr r7, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:    bne .LBB18_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:  .LBB18_4:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r6
+; SOFT-NEXT:    mov r3, r5
 ; SOFT-NEXT:    bl __aeabi_dcmpun
-; SOFT-NEXT:    ldr r11, .LCPI18_0
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mvn r2, #0
-; SOFT-NEXT:    movne r6, #0
-; SOFT-NEXT:    mov r3, r11
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    bne .LBB18_6
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:  .LBB18_6:
+; SOFT-NEXT:    str r0, [sp, #12] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    ldr r2, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:    ldr r3, .LCPI18_0
 ; SOFT-NEXT:    bl __aeabi_dcmpgt
-; SOFT-NEXT:    mov r3, #102760448
-; SOFT-NEXT:    mov r10, r0
-; SOFT-NEXT:    orr r3, r3, #-1073741824
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, #0
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r4
+; SOFT-NEXT:    ldr r3, .LCPI18_1
 ; SOFT-NEXT:    bl __aeabi_dcmpge
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    moveq r7, r0
-; SOFT-NEXT:    cmp r10, #0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r2, r5
-; SOFT-NEXT:    mov r3, r4
-; SOFT-NEXT:    mvnne r7, #0
+; SOFT-NEXT:    bne .LBB18_8
+; SOFT-NEXT:  @ %bb.7:
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:  .LBB18_8:
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    ldr r7, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:    bne .LBB18_10
+; SOFT-NEXT:  @ %bb.9:
+; SOFT-NEXT:    ldr r7, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:  .LBB18_10:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r6
+; SOFT-NEXT:    mov r3, r5
 ; SOFT-NEXT:    bl __aeabi_dcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mvn r2, #0
-; SOFT-NEXT:    mov r3, r11
-; SOFT-NEXT:    movne r7, #0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    bne .LBB18_12
+; SOFT-NEXT:  @ %bb.11:
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:  .LBB18_12:
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    ldr r2, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:    ldr r3, .LCPI18_0
 ; SOFT-NEXT:    bl __aeabi_dcmpgt
-; SOFT-NEXT:    mov r3, #102760448
-; SOFT-NEXT:    mov r10, r0
-; SOFT-NEXT:    orr r3, r3, #-1073741824
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, #0
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r4
+; SOFT-NEXT:    ldr r3, .LCPI18_1
 ; SOFT-NEXT:    bl __aeabi_dcmpge
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    moveq r8, r0
-; SOFT-NEXT:    cmp r10, #0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r2, r5
-; SOFT-NEXT:    mov r3, r4
-; SOFT-NEXT:    mvnne r8, #0
+; SOFT-NEXT:    bne .LBB18_14
+; SOFT-NEXT:  @ %bb.13:
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:  .LBB18_14:
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    ldr r7, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:    bne .LBB18_16
+; SOFT-NEXT:  @ %bb.15:
+; SOFT-NEXT:    ldr r7, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:  .LBB18_16:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r6
+; SOFT-NEXT:    mov r3, r5
 ; SOFT-NEXT:    bl __aeabi_dcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mvn r2, #0
-; SOFT-NEXT:    mov r3, r11
-; SOFT-NEXT:    movne r8, #0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    bne .LBB18_18
+; SOFT-NEXT:  @ %bb.17:
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:  .LBB18_18:
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    ldr r2, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:    ldr r3, .LCPI18_0
 ; SOFT-NEXT:    bl __aeabi_dcmpgt
-; SOFT-NEXT:    mov r3, #102760448
-; SOFT-NEXT:    mov r10, r0
-; SOFT-NEXT:    orr r3, r3, #-1073741824
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, #0
+; SOFT-NEXT:    str r0, [sp, #16] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r4
+; SOFT-NEXT:    ldr r3, .LCPI18_1
 ; SOFT-NEXT:    bl __aeabi_dcmpge
+; SOFT-NEXT:    movs r7, #7
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mvneq r9, #7
-; SOFT-NEXT:    cmp r10, #0
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, r5
-; SOFT-NEXT:    mov r3, r4
-; SOFT-NEXT:    movne r9, #7
+; SOFT-NEXT:    beq .LBB18_20
+; SOFT-NEXT:  @ %bb.19:
+; SOFT-NEXT:    ldr r0, [sp] @ 4-byte Reload
+; SOFT-NEXT:    b .LBB18_21
+; SOFT-NEXT:  .LBB18_20:
+; SOFT-NEXT:    mvns r0, r7
+; SOFT-NEXT:  .LBB18_21:
+; SOFT-NEXT:    ldr r1, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:    cmp r1, #0
+; SOFT-NEXT:    bne .LBB18_23
+; SOFT-NEXT:  @ %bb.22:
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:  .LBB18_23:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r6
+; SOFT-NEXT:    mov r3, r5
 ; SOFT-NEXT:    bl __aeabi_dcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r0, r6
-; SOFT-NEXT:    movne r9, #0
-; SOFT-NEXT:    mov r1, r7
-; SOFT-NEXT:    mov r2, r8
-; SOFT-NEXT:    mov r3, r9
-; SOFT-NEXT:    add sp, sp, #4
-; SOFT-NEXT:    pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    bne .LBB18_25
+; SOFT-NEXT:  @ %bb.24:
+; SOFT-NEXT:    mov r4, r7
+; SOFT-NEXT:  .LBB18_25:
+; SOFT-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
+; SOFT-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    ldr r2, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:    mov r3, r4
+; SOFT-NEXT:    add sp, #20
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
 ; SOFT-NEXT:    .p2align 2
-; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:  @ %bb.26:
 ; SOFT-NEXT:  .LCPI18_0:
 ; SOFT-NEXT:    .long 1176502271 @ 0x461fffff
+; SOFT-NEXT:  .LCPI18_1:
+; SOFT-NEXT:    .long 3323985920 @ 0xc6200000
 ;
 ; VFP2-LABEL: test_signed_i100_f64:
 ; VFP2:       @ %bb.0:
-; VFP2-NEXT:    .save {r4, r5, r11, lr}
-; VFP2-NEXT:    push {r4, r5, r11, lr}
+; VFP2-NEXT:    .save {r4, r5, r7, lr}
+; VFP2-NEXT:    push {r4, r5, r7, lr}
 ; VFP2-NEXT:    mov r4, r1
 ; VFP2-NEXT:    mov r5, r0
 ; VFP2-NEXT:    bl __fixdfti
-; VFP2-NEXT:    vldr d0, .LCPI18_0
-; VFP2-NEXT:    vmov d1, r5, r4
-; VFP2-NEXT:    vldr d2, .LCPI18_1
-; VFP2-NEXT:    vcmp.f64 d1, d0
+; VFP2-NEXT:    vldr d16, .LCPI18_0
+; VFP2-NEXT:    vmov d17, r5, r4
+; VFP2-NEXT:    vldr d18, .LCPI18_1
+; VFP2-NEXT:    vcmp.f64 d17, d16
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d2
+; VFP2-NEXT:    it lt
 ; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vcmp.f64 d17, d18
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d1
-; VFP2-NEXT:    mvngt r0, #0
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r0, #-1
+; VFP2-NEXT:    vcmp.f64 d17, d17
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d0
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r0, #0
+; VFP2-NEXT:    vcmp.f64 d17, d16
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d2
+; VFP2-NEXT:    it lt
 ; VFP2-NEXT:    movlt r1, #0
+; VFP2-NEXT:    vcmp.f64 d17, d18
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d1
-; VFP2-NEXT:    mvngt r1, #0
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r1, #-1
+; VFP2-NEXT:    vcmp.f64 d17, d17
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d0
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r1, #0
+; VFP2-NEXT:    vcmp.f64 d17, d16
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d2
+; VFP2-NEXT:    it lt
 ; VFP2-NEXT:    movlt r2, #0
+; VFP2-NEXT:    vcmp.f64 d17, d18
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d1
-; VFP2-NEXT:    mvngt r2, #0
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r2, #-1
+; VFP2-NEXT:    vcmp.f64 d17, d17
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d0
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r2, #0
+; VFP2-NEXT:    vcmp.f64 d17, d16
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d2
+; VFP2-NEXT:    it lt
 ; VFP2-NEXT:    mvnlt r3, #7
+; VFP2-NEXT:    vcmp.f64 d17, d18
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d1
+; VFP2-NEXT:    it gt
 ; VFP2-NEXT:    movgt r3, #7
+; VFP2-NEXT:    vcmp.f64 d17, d17
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r3, #0
-; VFP2-NEXT:    pop {r4, r5, r11, lr}
-; VFP2-NEXT:    mov pc, lr
+; VFP2-NEXT:    pop {r4, r5, r7, pc}
 ; VFP2-NEXT:    .p2align 3
 ; VFP2-NEXT:  @ %bb.1:
 ; VFP2-NEXT:  .LCPI18_0:
@@ -1717,6 +2550,74 @@ define i100 @test_signed_i100_f64(double %f) nounwind {
 ; VFP2-NEXT:  .LCPI18_1:
 ; VFP2-NEXT:    .long 4294967295 @ double 6.3382530011411463E+29
 ; VFP2-NEXT:    .long 1176502271
+;
+; FP16-LABEL: test_signed_i100_f64:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    .save {r4, r5, r7, lr}
+; FP16-NEXT:    push {r4, r5, r7, lr}
+; FP16-NEXT:    mov r4, r1
+; FP16-NEXT:    mov r5, r0
+; FP16-NEXT:    bl __fixdfti
+; FP16-NEXT:    vldr d0, .LCPI18_0
+; FP16-NEXT:    vmov d1, r5, r4
+; FP16-NEXT:    vldr d2, .LCPI18_1
+; FP16-NEXT:    vcmp.f64 d1, d0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r0, #0
+; FP16-NEXT:    vcmp.f64 d1, d2
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r0, #-1
+; FP16-NEXT:    vcmp.f64 d1, d1
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r0, #0
+; FP16-NEXT:    vcmp.f64 d1, d0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r1, #0
+; FP16-NEXT:    vcmp.f64 d1, d2
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r1, #-1
+; FP16-NEXT:    vcmp.f64 d1, d1
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r1, #0
+; FP16-NEXT:    vcmp.f64 d1, d0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r2, #0
+; FP16-NEXT:    vcmp.f64 d1, d2
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r2, #-1
+; FP16-NEXT:    vcmp.f64 d1, d1
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r2, #0
+; FP16-NEXT:    vcmp.f64 d1, d0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    mvnlt r3, #7
+; FP16-NEXT:    vcmp.f64 d1, d2
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt r3, #7
+; FP16-NEXT:    vcmp.f64 d1, d1
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r3, #0
+; FP16-NEXT:    pop {r4, r5, r7, pc}
+; FP16-NEXT:    .p2align 3
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI18_0:
+; FP16-NEXT:    .long 0 @ double -6.338253001141147E+29
+; FP16-NEXT:    .long 3323985920
+; FP16-NEXT:  .LCPI18_1:
+; FP16-NEXT:    .long 4294967295 @ double 6.3382530011411463E+29
+; FP16-NEXT:    .long 1176502271
     %x = call i100 @llvm.fptosi.sat.i100.f64(double %f)
     ret i100 %x
 }
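; (Editorial note) For an odd width like i100 the value is first computed
; as a full i128 by the compiler-rt __fixdfti call, then each 32-bit word
; is clamped separately against bounds of +/-2^99: on overflow the low
; three words become 0 or all-ones and the top word becomes 0x00000007
; (movgt r3, #7) or 0xfffffff8 (mvnlt r3, #7), i.e. 2^99-1 and -2^99
; sign-extended to 128 bits.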
@@ -1724,171 +2625,232 @@ define i100 @test_signed_i100_f64(double %f) nounwind {
 define i128 @test_signed_i128_f64(double %f) nounwind {
 ; SOFT-LABEL: test_signed_i128_f64:
 ; SOFT:       @ %bb.0:
-; SOFT-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-; SOFT-NEXT:    push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-; SOFT-NEXT:    .pad #4
-; SOFT-NEXT:    sub sp, sp, #4
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #20
+; SOFT-NEXT:    sub sp, #20
+; SOFT-NEXT:    mov r5, r1
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    movs r4, #0
+; SOFT-NEXT:    mvns r2, r4
 ; SOFT-NEXT:    ldr r3, .LCPI19_0
-; SOFT-NEXT:    mvn r2, #0
-; SOFT-NEXT:    mov r4, r1
-; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    str r2, [sp, #16] @ 4-byte Spill
 ; SOFT-NEXT:    bl __aeabi_dcmpgt
-; SOFT-NEXT:    mov r3, #132120576
-; SOFT-NEXT:    mov r10, r0
-; SOFT-NEXT:    orr r3, r3, #-1073741824
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, #0
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    ldr r3, .LCPI19_1
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r4
 ; SOFT-NEXT:    bl __aeabi_dcmpge
-; SOFT-NEXT:    mov r11, r0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
 ; SOFT-NEXT:    bl __fixdfti
-; SOFT-NEXT:    mov r6, r0
-; SOFT-NEXT:    cmp r11, #0
-; SOFT-NEXT:    mov r7, r1
-; SOFT-NEXT:    mov r8, r2
-; SOFT-NEXT:    mov r9, r3
-; SOFT-NEXT:    moveq r6, r11
-; SOFT-NEXT:    cmp r10, #0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, r5
-; SOFT-NEXT:    mov r3, r4
-; SOFT-NEXT:    mvnne r6, #0
+; SOFT-NEXT:    str r1, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    str r2, [sp] @ 4-byte Spill
+; SOFT-NEXT:    str r3, [sp, #12] @ 4-byte Spill
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    bne .LBB19_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:  .LBB19_2:
+; SOFT-NEXT:    ldr r1, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:    cmp r1, #0
+; SOFT-NEXT:    ldr r7, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:    bne .LBB19_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:  .LBB19_4:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r6
+; SOFT-NEXT:    mov r3, r5
 ; SOFT-NEXT:    bl __aeabi_dcmpun
-; SOFT-NEXT:    ldr r11, .LCPI19_0
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mvn r2, #0
-; SOFT-NEXT:    movne r6, #0
-; SOFT-NEXT:    mov r3, r11
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    bne .LBB19_6
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:  .LBB19_6:
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    ldr r2, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:    ldr r3, .LCPI19_0
 ; SOFT-NEXT:    bl __aeabi_dcmpgt
-; SOFT-NEXT:    mov r3, #132120576
-; SOFT-NEXT:    mov r10, r0
-; SOFT-NEXT:    orr r3, r3, #-1073741824
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, #0
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r4
+; SOFT-NEXT:    ldr r3, .LCPI19_1
 ; SOFT-NEXT:    bl __aeabi_dcmpge
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    moveq r7, r0
-; SOFT-NEXT:    cmp r10, #0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r2, r5
-; SOFT-NEXT:    mov r3, r4
-; SOFT-NEXT:    mvnne r7, #0
+; SOFT-NEXT:    bne .LBB19_8
+; SOFT-NEXT:  @ %bb.7:
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:  .LBB19_8:
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    ldr r7, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:    bne .LBB19_10
+; SOFT-NEXT:  @ %bb.9:
+; SOFT-NEXT:    ldr r7, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:  .LBB19_10:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r6
+; SOFT-NEXT:    mov r3, r5
 ; SOFT-NEXT:    bl __aeabi_dcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mvn r2, #0
-; SOFT-NEXT:    mov r3, r11
-; SOFT-NEXT:    movne r7, #0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    bne .LBB19_12
+; SOFT-NEXT:  @ %bb.11:
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:  .LBB19_12:
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    ldr r2, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:    ldr r3, .LCPI19_0
 ; SOFT-NEXT:    bl __aeabi_dcmpgt
-; SOFT-NEXT:    mov r3, #132120576
-; SOFT-NEXT:    mov r10, r0
-; SOFT-NEXT:    orr r3, r3, #-1073741824
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, #0
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r4
+; SOFT-NEXT:    ldr r3, .LCPI19_1
 ; SOFT-NEXT:    bl __aeabi_dcmpge
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    moveq r8, r0
-; SOFT-NEXT:    cmp r10, #0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r2, r5
-; SOFT-NEXT:    mov r3, r4
-; SOFT-NEXT:    mvnne r8, #0
+; SOFT-NEXT:    bne .LBB19_14
+; SOFT-NEXT:  @ %bb.13:
+; SOFT-NEXT:    str r0, [sp] @ 4-byte Spill
+; SOFT-NEXT:  .LBB19_14:
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    ldr r7, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:    bne .LBB19_16
+; SOFT-NEXT:  @ %bb.15:
+; SOFT-NEXT:    ldr r7, [sp] @ 4-byte Reload
+; SOFT-NEXT:  .LBB19_16:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r6
+; SOFT-NEXT:    mov r3, r5
 ; SOFT-NEXT:    bl __aeabi_dcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mvn r2, #0
-; SOFT-NEXT:    mov r3, r11
-; SOFT-NEXT:    movne r8, #0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    bne .LBB19_18
+; SOFT-NEXT:  @ %bb.17:
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:  .LBB19_18:
+; SOFT-NEXT:    str r0, [sp] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    ldr r2, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:    ldr r3, .LCPI19_0
 ; SOFT-NEXT:    bl __aeabi_dcmpgt
-; SOFT-NEXT:    mov r3, #132120576
-; SOFT-NEXT:    mov r10, r0
-; SOFT-NEXT:    orr r3, r3, #-1073741824
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, #0
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r4
+; SOFT-NEXT:    ldr r3, .LCPI19_1
 ; SOFT-NEXT:    bl __aeabi_dcmpge
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    moveq r9, #-2147483648
-; SOFT-NEXT:    cmp r10, #0
-; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mov r2, r5
-; SOFT-NEXT:    mov r3, r4
-; SOFT-NEXT:    mvnne r9, #-2147483648
+; SOFT-NEXT:    bne .LBB19_20
+; SOFT-NEXT:  @ %bb.19:
+; SOFT-NEXT:    movs r0, #1
+; SOFT-NEXT:    lsls r0, r0, #31
+; SOFT-NEXT:    str r0, [sp, #12] @ 4-byte Spill
+; SOFT-NEXT:  .LBB19_20:
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    bne .LBB19_22
+; SOFT-NEXT:  @ %bb.21:
+; SOFT-NEXT:    ldr r7, [sp, #12] @ 4-byte Reload
+; SOFT-NEXT:    b .LBB19_23
+; SOFT-NEXT:  .LBB19_22:
+; SOFT-NEXT:    ldr r7, .LCPI19_2
+; SOFT-NEXT:  .LBB19_23:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r6
+; SOFT-NEXT:    mov r3, r5
 ; SOFT-NEXT:    bl __aeabi_dcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r0, r6
-; SOFT-NEXT:    movne r9, #0
-; SOFT-NEXT:    mov r1, r7
-; SOFT-NEXT:    mov r2, r8
-; SOFT-NEXT:    mov r3, r9
-; SOFT-NEXT:    add sp, sp, #4
-; SOFT-NEXT:    pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    bne .LBB19_25
+; SOFT-NEXT:  @ %bb.24:
+; SOFT-NEXT:    mov r4, r7
+; SOFT-NEXT:  .LBB19_25:
+; SOFT-NEXT:    ldr r0, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    ldr r2, [sp] @ 4-byte Reload
+; SOFT-NEXT:    mov r3, r4
+; SOFT-NEXT:    add sp, #20
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
 ; SOFT-NEXT:    .p2align 2
-; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:  @ %bb.26:
 ; SOFT-NEXT:  .LCPI19_0:
 ; SOFT-NEXT:    .long 1205862399 @ 0x47dfffff
+; SOFT-NEXT:  .LCPI19_1:
+; SOFT-NEXT:    .long 3353346048 @ 0xc7e00000
+; SOFT-NEXT:  .LCPI19_2:
+; SOFT-NEXT:    .long 2147483647 @ 0x7fffffff
 ;
 ; VFP2-LABEL: test_signed_i128_f64:
 ; VFP2:       @ %bb.0:
-; VFP2-NEXT:    .save {r4, r5, r11, lr}
-; VFP2-NEXT:    push {r4, r5, r11, lr}
+; VFP2-NEXT:    .save {r4, r5, r7, lr}
+; VFP2-NEXT:    push {r4, r5, r7, lr}
 ; VFP2-NEXT:    mov r4, r1
 ; VFP2-NEXT:    mov r5, r0
 ; VFP2-NEXT:    bl __fixdfti
-; VFP2-NEXT:    vldr d0, .LCPI19_0
-; VFP2-NEXT:    vmov d1, r5, r4
-; VFP2-NEXT:    vldr d2, .LCPI19_1
-; VFP2-NEXT:    vcmp.f64 d1, d0
+; VFP2-NEXT:    vldr d16, .LCPI19_0
+; VFP2-NEXT:    vmov d17, r5, r4
+; VFP2-NEXT:    vldr d18, .LCPI19_1
+; VFP2-NEXT:    vcmp.f64 d17, d16
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d2
+; VFP2-NEXT:    it lt
 ; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vcmp.f64 d17, d18
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d1
-; VFP2-NEXT:    mvngt r0, #0
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r0, #-1
+; VFP2-NEXT:    vcmp.f64 d17, d17
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d0
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r0, #0
+; VFP2-NEXT:    vcmp.f64 d17, d16
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d2
+; VFP2-NEXT:    it lt
 ; VFP2-NEXT:    movlt r1, #0
+; VFP2-NEXT:    vcmp.f64 d17, d18
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d1
-; VFP2-NEXT:    mvngt r1, #0
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r1, #-1
+; VFP2-NEXT:    vcmp.f64 d17, d17
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d0
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r1, #0
+; VFP2-NEXT:    vcmp.f64 d17, d16
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d2
+; VFP2-NEXT:    it lt
 ; VFP2-NEXT:    movlt r2, #0
+; VFP2-NEXT:    vcmp.f64 d17, d18
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d1
-; VFP2-NEXT:    mvngt r2, #0
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r2, #-1
+; VFP2-NEXT:    vcmp.f64 d17, d17
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d0
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r2, #0
+; VFP2-NEXT:    vcmp.f64 d17, d16
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d2
-; VFP2-NEXT:    movlt r3, #-2147483648
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt.w r3, #-2147483648
+; VFP2-NEXT:    vcmp.f64 d17, d18
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f64 d1, d1
+; VFP2-NEXT:    it gt
 ; VFP2-NEXT:    mvngt r3, #-2147483648
+; VFP2-NEXT:    vcmp.f64 d17, d17
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r3, #0
-; VFP2-NEXT:    pop {r4, r5, r11, lr}
-; VFP2-NEXT:    mov pc, lr
+; VFP2-NEXT:    pop {r4, r5, r7, pc}
 ; VFP2-NEXT:    .p2align 3
 ; VFP2-NEXT:  @ %bb.1:
 ; VFP2-NEXT:  .LCPI19_0:
@@ -1897,6 +2859,74 @@ define i128 @test_signed_i128_f64(double %f) nounwind {
 ; VFP2-NEXT:  .LCPI19_1:
 ; VFP2-NEXT:    .long 4294967295 @ double 1.7014118346046921E+38
 ; VFP2-NEXT:    .long 1205862399
+;
+; FP16-LABEL: test_signed_i128_f64:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    .save {r4, r5, r7, lr}
+; FP16-NEXT:    push {r4, r5, r7, lr}
+; FP16-NEXT:    mov r4, r1
+; FP16-NEXT:    mov r5, r0
+; FP16-NEXT:    bl __fixdfti
+; FP16-NEXT:    vldr d0, .LCPI19_0
+; FP16-NEXT:    vmov d1, r5, r4
+; FP16-NEXT:    vldr d2, .LCPI19_1
+; FP16-NEXT:    vcmp.f64 d1, d0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r0, #0
+; FP16-NEXT:    vcmp.f64 d1, d2
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r0, #-1
+; FP16-NEXT:    vcmp.f64 d1, d1
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r0, #0
+; FP16-NEXT:    vcmp.f64 d1, d0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r1, #0
+; FP16-NEXT:    vcmp.f64 d1, d2
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r1, #-1
+; FP16-NEXT:    vcmp.f64 d1, d1
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r1, #0
+; FP16-NEXT:    vcmp.f64 d1, d0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r2, #0
+; FP16-NEXT:    vcmp.f64 d1, d2
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r2, #-1
+; FP16-NEXT:    vcmp.f64 d1, d1
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r2, #0
+; FP16-NEXT:    vcmp.f64 d1, d0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt.w r3, #-2147483648
+; FP16-NEXT:    vcmp.f64 d1, d2
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    mvngt r3, #-2147483648
+; FP16-NEXT:    vcmp.f64 d1, d1
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r3, #0
+; FP16-NEXT:    pop {r4, r5, r7, pc}
+; FP16-NEXT:    .p2align 3
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI19_0:
+; FP16-NEXT:    .long 0 @ double -1.7014118346046923E+38
+; FP16-NEXT:    .long 3353346048
+; FP16-NEXT:  .LCPI19_1:
+; FP16-NEXT:    .long 4294967295 @ double 1.7014118346046921E+38
+; FP16-NEXT:    .long 1205862399
     %x = call i128 @llvm.fptosi.sat.i128.f64(double %f)
     ret i128 %x
 }
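; (Editorial note) i128 follows the same per-word pattern with bounds of
; +/-2^127: the top word saturates to 0x80000000 (movlt.w r3, #-2147483648)
; or 0x7fffffff (mvngt r3, #-2147483648).  As with i64, the minimum is
; exact as a double while the upper pool constant is the largest double
; below 2^127.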
@@ -1919,60 +2949,94 @@ declare i128 @llvm.fptosi.sat.i128.f16(half)
 define i1 @test_signed_i1_f16(half %f) nounwind {
 ; SOFT-LABEL: test_signed_i1_f16:
 ; SOFT:       @ %bb.0:
-; SOFT-NEXT:    .save {r4, r5, r6, lr}
-; SOFT-NEXT:    push {r4, r5, r6, lr}
-; SOFT-NEXT:    mov r1, #255
-; SOFT-NEXT:    orr r1, r1, #65280
-; SOFT-NEXT:    and r0, r0, r1
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #4
+; SOFT-NEXT:    sub sp, #4
+; SOFT-NEXT:    uxth r0, r0
 ; SOFT-NEXT:    bl __aeabi_h2f
-; SOFT-NEXT:    mov r1, #1065353216
 ; SOFT-NEXT:    mov r4, r0
-; SOFT-NEXT:    orr r1, r1, #-2147483648
+; SOFT-NEXT:    ldr r1, .LCPI20_0
 ; SOFT-NEXT:    bl __aeabi_fcmpge
-; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r7, r0
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_f2iz
+; SOFT-NEXT:    movs r5, #0
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    beq .LBB20_2
+; SOFT-NEXT:  @ %bb.1:
 ; SOFT-NEXT:    mov r6, r0
-; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    b .LBB20_3
+; SOFT-NEXT:  .LBB20_2:
+; SOFT-NEXT:    mvns r6, r5
+; SOFT-NEXT:  .LBB20_3:
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mov r1, #0
-; SOFT-NEXT:    mvneq r6, #0
+; SOFT-NEXT:    mov r1, r5
 ; SOFT-NEXT:    bl __aeabi_fcmpgt
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    mov r7, r5
+; SOFT-NEXT:    bne .LBB20_5
+; SOFT-NEXT:  @ %bb.4:
+; SOFT-NEXT:    mov r7, r6
+; SOFT-NEXT:  .LBB20_5:
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    movne r6, #0
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    movne r6, #0
-; SOFT-NEXT:    mov r0, r6
-; SOFT-NEXT:    pop {r4, r5, r6, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    bne .LBB20_7
+; SOFT-NEXT:  @ %bb.6:
+; SOFT-NEXT:    mov r5, r7
+; SOFT-NEXT:  .LBB20_7:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.8:
+; SOFT-NEXT:  .LCPI20_0:
+; SOFT-NEXT:    .long 3212836864 @ 0xbf800000
 ;
 ; VFP2-LABEL: test_signed_i1_f16:
 ; VFP2:       @ %bb.0:
-; VFP2-NEXT:    .save {r11, lr}
-; VFP2-NEXT:    push {r11, lr}
+; VFP2-NEXT:    .save {r7, lr}
+; VFP2-NEXT:    push {r7, lr}
 ; VFP2-NEXT:    bl __aeabi_h2f
-; VFP2-NEXT:    vmov s0, r0
-; VFP2-NEXT:    vldr s2, .LCPI20_0
-; VFP2-NEXT:    vcmp.f32 s0, s2
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcvt.s32.f32 s4, s0
-; VFP2-NEXT:    vcmp.f32 s0, #0
+; VFP2-NEXT:    vmov s2, r0
+; VFP2-NEXT:    vmov.f32 s0, #-1.000000e+00
+; VFP2-NEXT:    vcvt.s32.f32 s4, s2
+; VFP2-NEXT:    vcmp.f32 s2, s0
 ; VFP2-NEXT:    vmov r0, s4
-; VFP2-NEXT:    mvnlt r0, #0
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s0, s0
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt.w r0, #-1
+; VFP2-NEXT:    vcmp.f32 s2, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
 ; VFP2-NEXT:    movgt r0, #0
+; VFP2-NEXT:    vcmp.f32 s2, s2
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r0, #0
-; VFP2-NEXT:    pop {r11, lr}
-; VFP2-NEXT:    mov pc, lr
-; VFP2-NEXT:    .p2align 2
-; VFP2-NEXT:  @ %bb.1:
-; VFP2-NEXT:  .LCPI20_0:
-; VFP2-NEXT:    .long 0xbf800000 @ float -1
+; VFP2-NEXT:    pop {r7, pc}
+;
+; FP16-LABEL: test_signed_i1_f16:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vmov.f16 s0, r0
+; FP16-NEXT:    vldr s2, .LCPI20_0
+; FP16-NEXT:    vcvtb.f32.f16 s0, s0
+; FP16-NEXT:    vmov.f32 s4, #-1.000000e+00
+; FP16-NEXT:    vmaxnm.f32 s4, s0, s4
+; FP16-NEXT:    vcmp.f32 s0, s0
+; FP16-NEXT:    vminnm.f32 s2, s4, s2
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcvt.s32.f32 s2, s2
+; FP16-NEXT:    vmov r0, s2
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r0, #0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI20_0:
+; FP16-NEXT:    .long 0x00000000 @ float 0
     %x = call i1 @llvm.fptosi.sat.i1.f16(half %f)
     ret i1 %x
 }
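; (Editorial note) A signed i1 saturates to the two-value range [-1, 0],
; hence the -1.0 and 0.0 bounds.  On the v8.1-M FP16 run the backend
; clamps in floating point with the NaN-suppressing vmaxnm/vminnm and
; converts once, leaving only the vcmp/movvs pair for the NaN -> 0 case.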
@@ -1980,63 +3044,104 @@ define i1 @test_signed_i1_f16(half %f) nounwind {
 define i8 @test_signed_i8_f16(half %f) nounwind {
 ; SOFT-LABEL: test_signed_i8_f16:
 ; SOFT:       @ %bb.0:
-; SOFT-NEXT:    .save {r4, r5, r6, lr}
-; SOFT-NEXT:    push {r4, r5, r6, lr}
-; SOFT-NEXT:    mov r1, #255
-; SOFT-NEXT:    orr r1, r1, #65280
-; SOFT-NEXT:    and r0, r0, r1
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #4
+; SOFT-NEXT:    sub sp, #4
+; SOFT-NEXT:    uxth r0, r0
 ; SOFT-NEXT:    bl __aeabi_h2f
-; SOFT-NEXT:    mov r1, #-1023410176
 ; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    movs r0, #195
+; SOFT-NEXT:    lsls r1, r0, #24
+; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_fcmpge
-; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r7, r0
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_f2iz
-; SOFT-NEXT:    mov r1, #16646144
+; SOFT-NEXT:    movs r5, #127
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    beq .LBB21_2
+; SOFT-NEXT:  @ %bb.1:
 ; SOFT-NEXT:    mov r6, r0
-; SOFT-NEXT:    orr r1, r1, #1107296256
-; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    b .LBB21_3
+; SOFT-NEXT:  .LBB21_2:
+; SOFT-NEXT:    mvns r6, r5
+; SOFT-NEXT:  .LBB21_3:
+; SOFT-NEXT:    ldr r1, .LCPI21_0
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mvneq r6, #127
 ; SOFT-NEXT:    bl __aeabi_fcmpgt
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB21_5
+; SOFT-NEXT:  @ %bb.4:
+; SOFT-NEXT:    mov r5, r6
+; SOFT-NEXT:  .LBB21_5:
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    movne r6, #127
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    movne r6, #0
-; SOFT-NEXT:    mov r0, r6
-; SOFT-NEXT:    pop {r4, r5, r6, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    beq .LBB21_7
+; SOFT-NEXT:  @ %bb.6:
+; SOFT-NEXT:    movs r5, #0
+; SOFT-NEXT:  .LBB21_7:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.8:
+; SOFT-NEXT:  .LCPI21_0:
+; SOFT-NEXT:    .long 1123942400 @ 0x42fe0000
 ;
 ; VFP2-LABEL: test_signed_i8_f16:
 ; VFP2:       @ %bb.0:
-; VFP2-NEXT:    .save {r11, lr}
-; VFP2-NEXT:    push {r11, lr}
+; VFP2-NEXT:    .save {r7, lr}
+; VFP2-NEXT:    push {r7, lr}
 ; VFP2-NEXT:    bl __aeabi_h2f
 ; VFP2-NEXT:    vmov s0, r0
 ; VFP2-NEXT:    vldr s2, .LCPI21_0
 ; VFP2-NEXT:    vldr s6, .LCPI21_1
+; VFP2-NEXT:    vcvt.s32.f32 s4, s0
 ; VFP2-NEXT:    vcmp.f32 s0, s2
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcvt.s32.f32 s4, s0
 ; VFP2-NEXT:    vcmp.f32 s0, s6
 ; VFP2-NEXT:    vmov r0, s4
+; VFP2-NEXT:    it lt
 ; VFP2-NEXT:    mvnlt r0, #127
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s0, s0
+; VFP2-NEXT:    it gt
 ; VFP2-NEXT:    movgt r0, #127
+; VFP2-NEXT:    vcmp.f32 s0, s0
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r0, #0
-; VFP2-NEXT:    pop {r11, lr}
-; VFP2-NEXT:    mov pc, lr
+; VFP2-NEXT:    pop {r7, pc}
 ; VFP2-NEXT:    .p2align 2
 ; VFP2-NEXT:  @ %bb.1:
 ; VFP2-NEXT:  .LCPI21_0:
 ; VFP2-NEXT:    .long 0xc3000000 @ float -128
 ; VFP2-NEXT:  .LCPI21_1:
 ; VFP2-NEXT:    .long 0x42fe0000 @ float 127
+;
+; FP16-LABEL: test_signed_i8_f16:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vmov.f16 s0, r0
+; FP16-NEXT:    vldr s4, .LCPI21_1
+; FP16-NEXT:    vcvtb.f32.f16 s0, s0
+; FP16-NEXT:    vldr s2, .LCPI21_0
+; FP16-NEXT:    vmaxnm.f32 s4, s0, s4
+; FP16-NEXT:    vcmp.f32 s0, s0
+; FP16-NEXT:    vminnm.f32 s2, s4, s2
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcvt.s32.f32 s2, s2
+; FP16-NEXT:    vmov r0, s2
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r0, #0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI21_0:
+; FP16-NEXT:    .long 0x42fe0000 @ float 127
+; FP16-NEXT:  .LCPI21_1:
+; FP16-NEXT:    .long 0xc3000000 @ float -128
     %x = call i8 @llvm.fptosi.sat.i8.f16(half %f)
     ret i8 %x
 }
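; (Editorial note) Both i8 bounds (-128 at 0xc3000000 and 127 at
; 0x42fe0000) are exactly representable as f32, so the FP16
; clamp-then-convert sequence is exact for every non-NaN input;
; vcvt.s32.f32 itself truncates toward zero.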
@@ -2046,75 +3151,103 @@ define i13 @test_signed_i13_f16(half %f) nounwind {
 ; SOFT:       @ %bb.0:
 ; SOFT-NEXT:    .save {r4, r5, r6, lr}
 ; SOFT-NEXT:    push {r4, r5, r6, lr}
-; SOFT-NEXT:    mov r1, #255
-; SOFT-NEXT:    orr r1, r1, #65280
-; SOFT-NEXT:    and r0, r0, r1
+; SOFT-NEXT:    uxth r0, r0
 ; SOFT-NEXT:    bl __aeabi_h2f
-; SOFT-NEXT:    mov r1, #92274688
 ; SOFT-NEXT:    mov r4, r0
-; SOFT-NEXT:    orr r1, r1, #-1073741824
+; SOFT-NEXT:    ldr r1, .LCPI22_0
 ; SOFT-NEXT:    bl __aeabi_fcmpge
-; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r6, r0
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_f2iz
-; SOFT-NEXT:    mov r6, r0
-; SOFT-NEXT:    ldr r0, .LCPI22_0
-; SOFT-NEXT:    ldr r1, .LCPI22_1
-; SOFT-NEXT:    cmp r5, #0
-; SOFT-NEXT:    moveq r6, r0
+; SOFT-NEXT:    cmp r6, #0
+; SOFT-NEXT:    beq .LBB22_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    b .LBB22_3
+; SOFT-NEXT:  .LBB22_2:
+; SOFT-NEXT:    ldr r5, .LCPI22_1
+; SOFT-NEXT:  .LBB22_3:
+; SOFT-NEXT:    ldr r1, .LCPI22_2
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_fcmpgt
-; SOFT-NEXT:    mov r1, #255
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    orr r1, r1, #3840
+; SOFT-NEXT:    beq .LBB22_5
+; SOFT-NEXT:  @ %bb.4:
+; SOFT-NEXT:    ldr r5, .LCPI22_3
+; SOFT-NEXT:  .LBB22_5:
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    movne r6, r1
 ; SOFT-NEXT:    mov r1, r4
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    movne r6, #0
-; SOFT-NEXT:    mov r0, r6
-; SOFT-NEXT:    pop {r4, r5, r6, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    beq .LBB22_7
+; SOFT-NEXT:  @ %bb.6:
+; SOFT-NEXT:    movs r5, #0
+; SOFT-NEXT:  .LBB22_7:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
 ; SOFT-NEXT:    .p2align 2
-; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:  @ %bb.8:
 ; SOFT-NEXT:  .LCPI22_0:
-; SOFT-NEXT:    .long 4294963200 @ 0xfffff000
+; SOFT-NEXT:    .long 3313500160 @ 0xc5800000
 ; SOFT-NEXT:  .LCPI22_1:
+; SOFT-NEXT:    .long 4294963200 @ 0xfffff000
+; SOFT-NEXT:  .LCPI22_2:
 ; SOFT-NEXT:    .long 1166012416 @ 0x457ff000
+; SOFT-NEXT:  .LCPI22_3:
+; SOFT-NEXT:    .long 4095 @ 0xfff
 ;
 ; VFP2-LABEL: test_signed_i13_f16:
 ; VFP2:       @ %bb.0:
-; VFP2-NEXT:    .save {r11, lr}
-; VFP2-NEXT:    push {r11, lr}
+; VFP2-NEXT:    .save {r7, lr}
+; VFP2-NEXT:    push {r7, lr}
 ; VFP2-NEXT:    bl __aeabi_h2f
 ; VFP2-NEXT:    vmov s0, r0
 ; VFP2-NEXT:    vldr s2, .LCPI22_0
-; VFP2-NEXT:    vldr s6, .LCPI22_1
+; VFP2-NEXT:    vcvt.s32.f32 s4, s0
 ; VFP2-NEXT:    vcmp.f32 s0, s2
-; VFP2-NEXT:    ldr r0, .LCPI22_2
+; VFP2-NEXT:    vldr s2, .LCPI22_1
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcvt.s32.f32 s4, s0
-; VFP2-NEXT:    vcmp.f32 s0, s6
-; VFP2-NEXT:    vmov r1, s4
-; VFP2-NEXT:    movlt r1, r0
+; VFP2-NEXT:    vcmp.f32 s0, s2
+; VFP2-NEXT:    vmov r0, s4
+; VFP2-NEXT:    itt lt
+; VFP2-NEXT:    movwlt r0, #61440
+; VFP2-NEXT:    movtlt r0, #65535
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    mov r0, #255
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movwgt r0, #4095
 ; VFP2-NEXT:    vcmp.f32 s0, s0
-; VFP2-NEXT:    orr r0, r0, #3840
-; VFP2-NEXT:    movle r0, r1
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r0, #0
-; VFP2-NEXT:    pop {r11, lr}
-; VFP2-NEXT:    mov pc, lr
+; VFP2-NEXT:    pop {r7, pc}
 ; VFP2-NEXT:    .p2align 2
 ; VFP2-NEXT:  @ %bb.1:
 ; VFP2-NEXT:  .LCPI22_0:
 ; VFP2-NEXT:    .long 0xc5800000 @ float -4096
 ; VFP2-NEXT:  .LCPI22_1:
 ; VFP2-NEXT:    .long 0x457ff000 @ float 4095
-; VFP2-NEXT:  .LCPI22_2:
-; VFP2-NEXT:    .long 4294963200 @ 0xfffff000
+;
+; FP16-LABEL: test_signed_i13_f16:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vmov.f16 s0, r0
+; FP16-NEXT:    vldr s4, .LCPI22_1
+; FP16-NEXT:    vcvtb.f32.f16 s0, s0
+; FP16-NEXT:    vldr s2, .LCPI22_0
+; FP16-NEXT:    vmaxnm.f32 s4, s0, s4
+; FP16-NEXT:    vcmp.f32 s0, s0
+; FP16-NEXT:    vminnm.f32 s2, s4, s2
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcvt.s32.f32 s2, s2
+; FP16-NEXT:    vmov r0, s2
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r0, #0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI22_0:
+; FP16-NEXT:    .long 0x457ff000 @ float 4095
+; FP16-NEXT:  .LCPI22_1:
+; FP16-NEXT:    .long 0xc5800000 @ float -4096
     %x = call i13 @llvm.fptosi.sat.i13.f16(half %f)
     ret i13 %x
 }
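; --- Editorial sketch (illustrative, not part of the autogenerated diff) --
; The saturating conversion intrinsics are defined for any integer bit
; width, which is why non-power-of-two types such as i13, i19, i50 and
; i100 appear in these tests.  A minimal module exercising one of them:
declare i13 @llvm.fptosi.sat.i13.f16(half)

define i13 @sat_example_i13(half %h) {
  ; clamps to [-4096, 4095]; NaN gives 0
  %r = call i13 @llvm.fptosi.sat.i13.f16(half %h)
  ret i13 %r
}
; ---------------------------------------------------------------------------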
@@ -2124,74 +3257,103 @@ define i16 @test_signed_i16_f16(half %f) nounwind {
 ; SOFT:       @ %bb.0:
 ; SOFT-NEXT:    .save {r4, r5, r6, lr}
 ; SOFT-NEXT:    push {r4, r5, r6, lr}
-; SOFT-NEXT:    mov r1, #255
-; SOFT-NEXT:    orr r1, r1, #65280
-; SOFT-NEXT:    and r0, r0, r1
+; SOFT-NEXT:    uxth r0, r0
 ; SOFT-NEXT:    bl __aeabi_h2f
-; SOFT-NEXT:    mov r1, #-956301312
 ; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    movs r0, #199
+; SOFT-NEXT:    lsls r1, r0, #24
+; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_fcmpge
-; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r6, r0
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_f2iz
-; SOFT-NEXT:    mov r6, r0
-; SOFT-NEXT:    ldr r0, .LCPI23_0
+; SOFT-NEXT:    cmp r6, #0
+; SOFT-NEXT:    beq .LBB23_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    b .LBB23_3
+; SOFT-NEXT:  .LBB23_2:
+; SOFT-NEXT:    ldr r5, .LCPI23_0
+; SOFT-NEXT:  .LBB23_3:
 ; SOFT-NEXT:    ldr r1, .LCPI23_1
-; SOFT-NEXT:    cmp r5, #0
-; SOFT-NEXT:    moveq r6, r0
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_fcmpgt
-; SOFT-NEXT:    mov r1, #255
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    orr r1, r1, #32512
+; SOFT-NEXT:    beq .LBB23_5
+; SOFT-NEXT:  @ %bb.4:
+; SOFT-NEXT:    ldr r5, .LCPI23_2
+; SOFT-NEXT:  .LBB23_5:
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    movne r6, r1
 ; SOFT-NEXT:    mov r1, r4
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    movne r6, #0
-; SOFT-NEXT:    mov r0, r6
-; SOFT-NEXT:    pop {r4, r5, r6, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    beq .LBB23_7
+; SOFT-NEXT:  @ %bb.6:
+; SOFT-NEXT:    movs r5, #0
+; SOFT-NEXT:  .LBB23_7:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
 ; SOFT-NEXT:    .p2align 2
-; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:  @ %bb.8:
 ; SOFT-NEXT:  .LCPI23_0:
 ; SOFT-NEXT:    .long 4294934528 @ 0xffff8000
 ; SOFT-NEXT:  .LCPI23_1:
 ; SOFT-NEXT:    .long 1191181824 @ 0x46fffe00
+; SOFT-NEXT:  .LCPI23_2:
+; SOFT-NEXT:    .long 32767 @ 0x7fff
 ;
 ; VFP2-LABEL: test_signed_i16_f16:
 ; VFP2:       @ %bb.0:
-; VFP2-NEXT:    .save {r11, lr}
-; VFP2-NEXT:    push {r11, lr}
+; VFP2-NEXT:    .save {r7, lr}
+; VFP2-NEXT:    push {r7, lr}
 ; VFP2-NEXT:    bl __aeabi_h2f
 ; VFP2-NEXT:    vmov s0, r0
 ; VFP2-NEXT:    vldr s2, .LCPI23_0
-; VFP2-NEXT:    vldr s6, .LCPI23_1
+; VFP2-NEXT:    vcvt.s32.f32 s4, s0
 ; VFP2-NEXT:    vcmp.f32 s0, s2
-; VFP2-NEXT:    ldr r0, .LCPI23_2
+; VFP2-NEXT:    vldr s2, .LCPI23_1
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcvt.s32.f32 s4, s0
-; VFP2-NEXT:    vcmp.f32 s0, s6
-; VFP2-NEXT:    vmov r1, s4
-; VFP2-NEXT:    movlt r1, r0
+; VFP2-NEXT:    vcmp.f32 s0, s2
+; VFP2-NEXT:    vmov r0, s4
+; VFP2-NEXT:    itt lt
+; VFP2-NEXT:    movwlt r0, #32768
+; VFP2-NEXT:    movtlt r0, #65535
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    mov r0, #255
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movwgt r0, #32767
 ; VFP2-NEXT:    vcmp.f32 s0, s0
-; VFP2-NEXT:    orr r0, r0, #32512
-; VFP2-NEXT:    movle r0, r1
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r0, #0
-; VFP2-NEXT:    pop {r11, lr}
-; VFP2-NEXT:    mov pc, lr
+; VFP2-NEXT:    pop {r7, pc}
 ; VFP2-NEXT:    .p2align 2
 ; VFP2-NEXT:  @ %bb.1:
 ; VFP2-NEXT:  .LCPI23_0:
 ; VFP2-NEXT:    .long 0xc7000000 @ float -32768
 ; VFP2-NEXT:  .LCPI23_1:
 ; VFP2-NEXT:    .long 0x46fffe00 @ float 32767
-; VFP2-NEXT:  .LCPI23_2:
-; VFP2-NEXT:    .long 4294934528 @ 0xffff8000
+;
+; FP16-LABEL: test_signed_i16_f16:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vmov.f16 s0, r0
+; FP16-NEXT:    vldr s4, .LCPI23_1
+; FP16-NEXT:    vcvtb.f32.f16 s0, s0
+; FP16-NEXT:    vldr s2, .LCPI23_0
+; FP16-NEXT:    vmaxnm.f32 s4, s0, s4
+; FP16-NEXT:    vcmp.f32 s0, s0
+; FP16-NEXT:    vminnm.f32 s2, s4, s2
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcvt.s32.f32 s2, s2
+; FP16-NEXT:    vmov r0, s2
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r0, #0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI23_0:
+; FP16-NEXT:    .long 0x46fffe00 @ float 32767
+; FP16-NEXT:  .LCPI23_1:
+; FP16-NEXT:    .long 0xc7000000 @ float -32768
     %x = call i16 @llvm.fptosi.sat.i16.f16(half %f)
     ret i16 %x
 }
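; (Editorial note) i16 is the same shape: -32768 (0xc7000000) and 32767
; (0x46fffe00) are both exact in f32, so clamping before the single
; convert loses nothing.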
@@ -2201,75 +3363,104 @@ define i19 @test_signed_i19_f16(half %f) nounwind {
 ; SOFT:       @ %bb.0:
 ; SOFT-NEXT:    .save {r4, r5, r6, lr}
 ; SOFT-NEXT:    push {r4, r5, r6, lr}
-; SOFT-NEXT:    mov r1, #255
-; SOFT-NEXT:    orr r1, r1, #65280
-; SOFT-NEXT:    and r0, r0, r1
+; SOFT-NEXT:    uxth r0, r0
 ; SOFT-NEXT:    bl __aeabi_h2f
-; SOFT-NEXT:    mov r1, #142606336
 ; SOFT-NEXT:    mov r4, r0
-; SOFT-NEXT:    orr r1, r1, #-1073741824
+; SOFT-NEXT:    ldr r1, .LCPI24_0
 ; SOFT-NEXT:    bl __aeabi_fcmpge
-; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r6, r0
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_f2iz
-; SOFT-NEXT:    mov r6, r0
-; SOFT-NEXT:    mov r0, #66846720
-; SOFT-NEXT:    orr r0, r0, #-67108864
-; SOFT-NEXT:    ldr r1, .LCPI24_0
-; SOFT-NEXT:    cmp r5, #0
-; SOFT-NEXT:    moveq r6, r0
+; SOFT-NEXT:    cmp r6, #0
+; SOFT-NEXT:    beq .LBB24_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    b .LBB24_3
+; SOFT-NEXT:  .LBB24_2:
+; SOFT-NEXT:    ldr r5, .LCPI24_1
+; SOFT-NEXT:  .LBB24_3:
+; SOFT-NEXT:    ldr r1, .LCPI24_2
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_fcmpgt
-; SOFT-NEXT:    ldr r1, .LCPI24_1
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    beq .LBB24_5
+; SOFT-NEXT:  @ %bb.4:
+; SOFT-NEXT:    ldr r5, .LCPI24_3
+; SOFT-NEXT:  .LBB24_5:
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    movne r6, r1
 ; SOFT-NEXT:    mov r1, r4
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    movne r6, #0
-; SOFT-NEXT:    mov r0, r6
-; SOFT-NEXT:    pop {r4, r5, r6, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    beq .LBB24_7
+; SOFT-NEXT:  @ %bb.6:
+; SOFT-NEXT:    movs r5, #0
+; SOFT-NEXT:  .LBB24_7:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
 ; SOFT-NEXT:    .p2align 2
-; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:  @ %bb.8:
 ; SOFT-NEXT:  .LCPI24_0:
-; SOFT-NEXT:    .long 1216348096 @ 0x487fffc0
+; SOFT-NEXT:    .long 3363831808 @ 0xc8800000
 ; SOFT-NEXT:  .LCPI24_1:
+; SOFT-NEXT:    .long 4294705152 @ 0xfffc0000
+; SOFT-NEXT:  .LCPI24_2:
+; SOFT-NEXT:    .long 1216348096 @ 0x487fffc0
+; SOFT-NEXT:  .LCPI24_3:
 ; SOFT-NEXT:    .long 262143 @ 0x3ffff
 ;
 ; VFP2-LABEL: test_signed_i19_f16:
 ; VFP2:       @ %bb.0:
-; VFP2-NEXT:    .save {r11, lr}
-; VFP2-NEXT:    push {r11, lr}
+; VFP2-NEXT:    .save {r7, lr}
+; VFP2-NEXT:    push {r7, lr}
 ; VFP2-NEXT:    bl __aeabi_h2f
 ; VFP2-NEXT:    vmov s0, r0
-; VFP2-NEXT:    vldr s6, .LCPI24_2
 ; VFP2-NEXT:    vldr s2, .LCPI24_0
-; VFP2-NEXT:    mov r0, #66846720
 ; VFP2-NEXT:    vcvt.s32.f32 s4, s0
-; VFP2-NEXT:    orr r0, r0, #-67108864
-; VFP2-NEXT:    vcmp.f32 s0, s6
-; VFP2-NEXT:    ldr r1, .LCPI24_1
+; VFP2-NEXT:    vcmp.f32 s0, s2
+; VFP2-NEXT:    vldr s2, .LCPI24_1
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
 ; VFP2-NEXT:    vcmp.f32 s0, s2
-; VFP2-NEXT:    vmov r2, s4
-; VFP2-NEXT:    movge r0, r2
+; VFP2-NEXT:    vmov r0, s4
+; VFP2-NEXT:    itt lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    movtlt r0, #65532
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    itt gt
+; VFP2-NEXT:    movwgt r0, #65535
+; VFP2-NEXT:    movtgt r0, #3
 ; VFP2-NEXT:    vcmp.f32 s0, s0
-; VFP2-NEXT:    movgt r0, r1
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r0, #0
-; VFP2-NEXT:    pop {r11, lr}
-; VFP2-NEXT:    mov pc, lr
+; VFP2-NEXT:    pop {r7, pc}
 ; VFP2-NEXT:    .p2align 2
 ; VFP2-NEXT:  @ %bb.1:
 ; VFP2-NEXT:  .LCPI24_0:
-; VFP2-NEXT:    .long 0x487fffc0 @ float 262143
-; VFP2-NEXT:  .LCPI24_1:
-; VFP2-NEXT:    .long 262143 @ 0x3ffff
-; VFP2-NEXT:  .LCPI24_2:
 ; VFP2-NEXT:    .long 0xc8800000 @ float -262144
+; VFP2-NEXT:  .LCPI24_1:
+; VFP2-NEXT:    .long 0x487fffc0 @ float 262143
+;
+; FP16-LABEL: test_signed_i19_f16:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vmov.f16 s0, r0
+; FP16-NEXT:    vldr s4, .LCPI24_1
+; FP16-NEXT:    vcvtb.f32.f16 s0, s0
+; FP16-NEXT:    vldr s2, .LCPI24_0
+; FP16-NEXT:    vmaxnm.f32 s4, s0, s4
+; FP16-NEXT:    vcmp.f32 s0, s0
+; FP16-NEXT:    vminnm.f32 s2, s4, s2
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcvt.s32.f32 s2, s2
+; FP16-NEXT:    vmov r0, s2
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r0, #0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI24_0:
+; FP16-NEXT:    .long 0x487fffc0 @ float 262143
+; FP16-NEXT:  .LCPI24_1:
+; FP16-NEXT:    .long 0xc8800000 @ float -262144
     %x = call i19 @llvm.fptosi.sat.i19.f16(half %f)
     ret i19 %x
 }
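; (Editorial note) On the Thumb2 targets the i19 limits are built with
; movw/movt pairs -- 0xfffc0000 (-262144 sign-extended) via movlt/movtlt
; and 0x0003ffff (262143) via movwgt/movtgt -- where the old ARM-mode
; sequence used mov/orr and a constant-pool load.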
@@ -2279,60 +3470,107 @@ define i32 @test_signed_i32_f16(half %f) nounwind {
 ; SOFT:       @ %bb.0:
 ; SOFT-NEXT:    .save {r4, r5, r6, lr}
 ; SOFT-NEXT:    push {r4, r5, r6, lr}
-; SOFT-NEXT:    mov r1, #255
-; SOFT-NEXT:    orr r1, r1, #65280
-; SOFT-NEXT:    and r0, r0, r1
+; SOFT-NEXT:    uxth r0, r0
 ; SOFT-NEXT:    bl __aeabi_h2f
-; SOFT-NEXT:    mov r1, #-822083584
 ; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    movs r0, #207
+; SOFT-NEXT:    lsls r1, r0, #24
+; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_fcmpge
-; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r6, r0
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_f2iz
-; SOFT-NEXT:    mov r6, r0
-; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    cmp r6, #0
+; SOFT-NEXT:    beq .LBB25_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    b .LBB25_3
+; SOFT-NEXT:  .LBB25_2:
+; SOFT-NEXT:    movs r0, #1
+; SOFT-NEXT:    lsls r5, r0, #31
+; SOFT-NEXT:  .LBB25_3:
+; SOFT-NEXT:    ldr r1, .LCPI25_0
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mvn r1, #-1325400064
-; SOFT-NEXT:    moveq r6, #-2147483648
 ; SOFT-NEXT:    bl __aeabi_fcmpgt
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    beq .LBB25_5
+; SOFT-NEXT:  @ %bb.4:
+; SOFT-NEXT:    ldr r5, .LCPI25_1
+; SOFT-NEXT:  .LBB25_5:
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mvnne r6, #-2147483648
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    movne r6, #0
-; SOFT-NEXT:    mov r0, r6
-; SOFT-NEXT:    pop {r4, r5, r6, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    beq .LBB25_7
+; SOFT-NEXT:  @ %bb.6:
+; SOFT-NEXT:    movs r5, #0
+; SOFT-NEXT:  .LBB25_7:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.8:
+; SOFT-NEXT:  .LCPI25_0:
+; SOFT-NEXT:    .long 1325400063 @ 0x4effffff
+; SOFT-NEXT:  .LCPI25_1:
+; SOFT-NEXT:    .long 2147483647 @ 0x7fffffff
 ;
 ; VFP2-LABEL: test_signed_i32_f16:
 ; VFP2:       @ %bb.0:
-; VFP2-NEXT:    .save {r11, lr}
-; VFP2-NEXT:    push {r11, lr}
+; VFP2-NEXT:    .save {r7, lr}
+; VFP2-NEXT:    push {r7, lr}
 ; VFP2-NEXT:    bl __aeabi_h2f
 ; VFP2-NEXT:    vmov s0, r0
 ; VFP2-NEXT:    vldr s2, .LCPI25_0
 ; VFP2-NEXT:    vldr s6, .LCPI25_1
+; VFP2-NEXT:    vcvt.s32.f32 s4, s0
 ; VFP2-NEXT:    vcmp.f32 s0, s2
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcvt.s32.f32 s4, s0
 ; VFP2-NEXT:    vcmp.f32 s0, s6
 ; VFP2-NEXT:    vmov r0, s4
-; VFP2-NEXT:    movlt r0, #-2147483648
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt.w r0, #-2147483648
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s0, s0
+; VFP2-NEXT:    it gt
 ; VFP2-NEXT:    mvngt r0, #-2147483648
+; VFP2-NEXT:    vcmp.f32 s0, s0
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r0, #0
-; VFP2-NEXT:    pop {r11, lr}
-; VFP2-NEXT:    mov pc, lr
+; VFP2-NEXT:    pop {r7, pc}
 ; VFP2-NEXT:    .p2align 2
 ; VFP2-NEXT:  @ %bb.1:
 ; VFP2-NEXT:  .LCPI25_0:
 ; VFP2-NEXT:    .long 0xcf000000 @ float -2.14748365E+9
 ; VFP2-NEXT:  .LCPI25_1:
 ; VFP2-NEXT:    .long 0x4effffff @ float 2.14748352E+9
+;
+; FP16-LABEL: test_signed_i32_f16:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vmov.f16 s0, r0
+; FP16-NEXT:    vldr s2, .LCPI25_0
+; FP16-NEXT:    vcvtb.f32.f16 s0, s0
+; FP16-NEXT:    vldr s6, .LCPI25_1
+; FP16-NEXT:    vcvt.s32.f32 s4, s0
+; FP16-NEXT:    vcmp.f32 s0, s2
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s0, s6
+; FP16-NEXT:    vmov r0, s4
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt.w r0, #-2147483648
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    mvngt r0, #-2147483648
+; FP16-NEXT:    vcmp.f32 s0, s0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r0, #0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI25_0:
+; FP16-NEXT:    .long 0xcf000000 @ float -2.14748365E+9
+; FP16-NEXT:  .LCPI25_1:
+; FP16-NEXT:    .long 0x4effffff @ float 2.14748352E+9
     %x = call i32 @llvm.fptosi.sat.i32.f16(half %f)
     ret i32 %x
 }
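; (Editorial note) Unlike the narrower widths, i32 does not use the
; vmaxnm/vminnm clamp here: INT32_MAX is not representable as f32 (the
; nearest float below 2^31 is 0x4effffff = 2147483520), so clamping first
; and converting would come up 127 short of 2147483647 for large inputs.
; That is presumably why the FP16 run keeps the compare-and-select
; expansion with exact INT32_MIN/INT32_MAX results.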
@@ -2340,102 +3578,174 @@ define i32 @test_signed_i32_f16(half %f) nounwind {
 define i50 @test_signed_i50_f16(half %f) nounwind {
 ; SOFT-LABEL: test_signed_i50_f16:
 ; SOFT:       @ %bb.0:
-; SOFT-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; SOFT-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; SOFT-NEXT:    mov r1, #255
-; SOFT-NEXT:    orr r1, r1, #65280
-; SOFT-NEXT:    and r0, r0, r1
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #12
+; SOFT-NEXT:    sub sp, #12
+; SOFT-NEXT:    uxth r0, r0
 ; SOFT-NEXT:    bl __aeabi_h2f
-; SOFT-NEXT:    mov r1, #-671088640
 ; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    ldr r1, .LCPI26_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    movs r0, #27
+; SOFT-NEXT:    lsls r5, r0, #27
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    mov r1, r5
 ; SOFT-NEXT:    bl __aeabi_fcmpge
 ; SOFT-NEXT:    mov r7, r0
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_f2lz
-; SOFT-NEXT:    mov r5, r0
-; SOFT-NEXT:    mov r6, r1
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    str r1, [sp, #4] @ 4-byte Spill
 ; SOFT-NEXT:    cmp r7, #0
-; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mvn r1, #-1476395008
-; SOFT-NEXT:    moveq r5, r7
-; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    bne .LBB26_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r6, r7
+; SOFT-NEXT:  .LBB26_2:
+; SOFT-NEXT:    movs r7, #0
+; SOFT-NEXT:    ldr r0, [sp, #8] @ 4-byte Reload
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    beq .LBB26_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mvns r6, r7
+; SOFT-NEXT:  .LBB26_4:
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mvnne r5, #0
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    bne .LBB26_6
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:  .LBB26_6:
+; SOFT-NEXT:    mov r6, r0
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mov r1, #-671088640
-; SOFT-NEXT:    movne r5, #0
+; SOFT-NEXT:    mov r1, r5
 ; SOFT-NEXT:    bl __aeabi_fcmpge
-; SOFT-NEXT:    mov r1, #16646144
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    orr r1, r1, #-16777216
+; SOFT-NEXT:    beq .LBB26_8
+; SOFT-NEXT:  @ %bb.7:
+; SOFT-NEXT:    ldr r5, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    b .LBB26_9
+; SOFT-NEXT:  .LBB26_8:
+; SOFT-NEXT:    ldr r5, .LCPI26_1
+; SOFT-NEXT:  .LBB26_9:
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    moveq r6, r1
-; SOFT-NEXT:    mvn r1, #-1476395008
-; SOFT-NEXT:    bl __aeabi_fcmpgt
 ; SOFT-NEXT:    ldr r1, .LCPI26_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    beq .LBB26_11
+; SOFT-NEXT:  @ %bb.10:
+; SOFT-NEXT:    ldr r5, .LCPI26_2
+; SOFT-NEXT:  .LBB26_11:
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    movne r6, r1
 ; SOFT-NEXT:    mov r1, r4
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    movne r6, #0
-; SOFT-NEXT:    mov r1, r6
-; SOFT-NEXT:    pop {r4, r5, r6, r7, r11, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    bne .LBB26_13
+; SOFT-NEXT:  @ %bb.12:
+; SOFT-NEXT:    mov r7, r5
+; SOFT-NEXT:  .LBB26_13:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    add sp, #12
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
 ; SOFT-NEXT:    .p2align 2
-; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:  @ %bb.14:
 ; SOFT-NEXT:  .LCPI26_0:
+; SOFT-NEXT:    .long 1476395007 @ 0x57ffffff
+; SOFT-NEXT:  .LCPI26_1:
+; SOFT-NEXT:    .long 4294836224 @ 0xfffe0000
+; SOFT-NEXT:  .LCPI26_2:
 ; SOFT-NEXT:    .long 131071 @ 0x1ffff
 ;
 ; VFP2-LABEL: test_signed_i50_f16:
 ; VFP2:       @ %bb.0:
-; VFP2-NEXT:    .save {r11, lr}
-; VFP2-NEXT:    push {r11, lr}
-; VFP2-NEXT:    .vsave {d8, d9}
-; VFP2-NEXT:    vpush {d8, d9}
+; VFP2-NEXT:    .save {r7, lr}
+; VFP2-NEXT:    push {r7, lr}
+; VFP2-NEXT:    .vsave {d8}
+; VFP2-NEXT:    vpush {d8}
 ; VFP2-NEXT:    bl __aeabi_h2f
-; VFP2-NEXT:    vldr s16, .LCPI26_0
-; VFP2-NEXT:    vmov s18, r0
+; VFP2-NEXT:    vmov s16, r0
 ; VFP2-NEXT:    bl __aeabi_f2lz
-; VFP2-NEXT:    vcmp.f32 s18, s16
-; VFP2-NEXT:    mov r2, #16646144
+; VFP2-NEXT:    vldr s0, .LCPI26_0
+; VFP2-NEXT:    vldr s2, .LCPI26_1
+; VFP2-NEXT:    vcmp.f32 s16, s0
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    orr r2, r2, #-16777216
-; VFP2-NEXT:    vldr s0, .LCPI26_1
-; VFP2-NEXT:    ldr r3, .LCPI26_2
-; VFP2-NEXT:    vcmp.f32 s18, s0
-; VFP2-NEXT:    movlt r1, r2
-; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s18, s18
-; VFP2-NEXT:    movgt r1, r3
+; VFP2-NEXT:    vcmp.f32 s16, s2
+; VFP2-NEXT:    itt lt
+; VFP2-NEXT:    movlt r1, #0
+; VFP2-NEXT:    movtlt r1, #65534
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s18, s16
-; VFP2-NEXT:    movvs r1, #0
+; VFP2-NEXT:    vcmp.f32 s16, s0
+; VFP2-NEXT:    itt gt
+; VFP2-NEXT:    movwgt r1, #65535
+; VFP2-NEXT:    movtgt r1, #1
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s18, s0
+; VFP2-NEXT:    it lt
 ; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vcmp.f32 s16, s2
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s18, s18
-; VFP2-NEXT:    mvngt r0, #0
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r0, #-1
+; VFP2-NEXT:    vcmp.f32 s16, s16
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    itt vs
 ; VFP2-NEXT:    movvs r0, #0
-; VFP2-NEXT:    vpop {d8, d9}
-; VFP2-NEXT:    pop {r11, lr}
-; VFP2-NEXT:    mov pc, lr
+; VFP2-NEXT:    movvs r1, #0
+; VFP2-NEXT:    vpop {d8}
+; VFP2-NEXT:    pop {r7, pc}
 ; VFP2-NEXT:    .p2align 2
 ; VFP2-NEXT:  @ %bb.1:
 ; VFP2-NEXT:  .LCPI26_0:
 ; VFP2-NEXT:    .long 0xd8000000 @ float -5.62949953E+14
 ; VFP2-NEXT:  .LCPI26_1:
 ; VFP2-NEXT:    .long 0x57ffffff @ float 5.6294992E+14
-; VFP2-NEXT:  .LCPI26_2:
-; VFP2-NEXT:    .long 131071 @ 0x1ffff
+;
+; FP16-LABEL: test_signed_i50_f16:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    .save {r7, lr}
+; FP16-NEXT:    push {r7, lr}
+; FP16-NEXT:    .vsave {d8}
+; FP16-NEXT:    vpush {d8}
+; FP16-NEXT:    vmov.f16 s0, r0
+; FP16-NEXT:    vcvtb.f32.f16 s16, s0
+; FP16-NEXT:    vmov r0, s16
+; FP16-NEXT:    bl __aeabi_f2lz
+; FP16-NEXT:    vldr s0, .LCPI26_0
+; FP16-NEXT:    vldr s2, .LCPI26_1
+; FP16-NEXT:    vcmp.f32 s16, s0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s2
+; FP16-NEXT:    itt lt
+; FP16-NEXT:    movlt r1, #0
+; FP16-NEXT:    movtlt r1, #65534
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s0
+; FP16-NEXT:    itt gt
+; FP16-NEXT:    movwgt r1, #65535
+; FP16-NEXT:    movtgt r1, #1
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r0, #0
+; FP16-NEXT:    vcmp.f32 s16, s2
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r0, #-1
+; FP16-NEXT:    vcmp.f32 s16, s16
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    itt vs
+; FP16-NEXT:    movvs r0, #0
+; FP16-NEXT:    movvs r1, #0
+; FP16-NEXT:    vpop {d8}
+; FP16-NEXT:    pop {r7, pc}
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI26_0:
+; FP16-NEXT:    .long 0xd8000000 @ float -5.62949953E+14
+; FP16-NEXT:  .LCPI26_1:
+; FP16-NEXT:    .long 0x57ffffff @ float 5.6294992E+14
     %x = call i50 @llvm.fptosi.sat.i50.f16(half %f)
     ret i50 %x
 }
@@ -2443,51 +3753,86 @@ define i50 @test_signed_i50_f16(half %f) nounwind {
 define i64 @test_signed_i64_f16(half %f) nounwind {
 ; SOFT-LABEL: test_signed_i64_f16:
 ; SOFT:       @ %bb.0:
-; SOFT-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; SOFT-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; SOFT-NEXT:    mov r1, #255
-; SOFT-NEXT:    orr r1, r1, #65280
-; SOFT-NEXT:    and r0, r0, r1
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #12
+; SOFT-NEXT:    sub sp, #12
+; SOFT-NEXT:    uxth r0, r0
 ; SOFT-NEXT:    bl __aeabi_h2f
-; SOFT-NEXT:    mov r1, #-553648128
 ; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    ldr r1, .LCPI27_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    movs r0, #223
+; SOFT-NEXT:    lsls r5, r0, #24
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    mov r1, r5
 ; SOFT-NEXT:    bl __aeabi_fcmpge
 ; SOFT-NEXT:    mov r7, r0
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __aeabi_f2lz
-; SOFT-NEXT:    mov r5, r0
-; SOFT-NEXT:    mov r6, r1
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    str r1, [sp, #4] @ 4-byte Spill
 ; SOFT-NEXT:    cmp r7, #0
-; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mvn r1, #-1593835520
-; SOFT-NEXT:    moveq r5, r7
-; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    bne .LBB27_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r6, r7
+; SOFT-NEXT:  .LBB27_2:
+; SOFT-NEXT:    movs r7, #0
+; SOFT-NEXT:    ldr r0, [sp, #8] @ 4-byte Reload
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    beq .LBB27_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mvns r6, r7
+; SOFT-NEXT:  .LBB27_4:
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mvnne r5, #0
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    bne .LBB27_6
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:  .LBB27_6:
+; SOFT-NEXT:    mov r6, r0
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mov r1, #-553648128
-; SOFT-NEXT:    movne r5, #0
+; SOFT-NEXT:    mov r1, r5
 ; SOFT-NEXT:    bl __aeabi_fcmpge
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    beq .LBB27_8
+; SOFT-NEXT:  @ %bb.7:
+; SOFT-NEXT:    ldr r5, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    b .LBB27_9
+; SOFT-NEXT:  .LBB27_8:
+; SOFT-NEXT:    movs r0, #1
+; SOFT-NEXT:    lsls r5, r0, #31
+; SOFT-NEXT:  .LBB27_9:
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mvn r1, #-1593835520
-; SOFT-NEXT:    moveq r6, #-2147483648
+; SOFT-NEXT:    ldr r1, .LCPI27_0
 ; SOFT-NEXT:    bl __aeabi_fcmpgt
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    beq .LBB27_11
+; SOFT-NEXT:  @ %bb.10:
+; SOFT-NEXT:    ldr r5, .LCPI27_1
+; SOFT-NEXT:  .LBB27_11:
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mvnne r6, #-2147483648
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r0, r5
-; SOFT-NEXT:    movne r6, #0
-; SOFT-NEXT:    mov r1, r6
-; SOFT-NEXT:    pop {r4, r5, r6, r7, r11, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    bne .LBB27_13
+; SOFT-NEXT:  @ %bb.12:
+; SOFT-NEXT:    mov r7, r5
+; SOFT-NEXT:  .LBB27_13:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    add sp, #12
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.14:
+; SOFT-NEXT:  .LCPI27_0:
+; SOFT-NEXT:    .long 1593835519 @ 0x5effffff
+; SOFT-NEXT:  .LCPI27_1:
+; SOFT-NEXT:    .long 2147483647 @ 0x7fffffff
 ;
 ; VFP2-LABEL: test_signed_i64_f16:
 ; VFP2:       @ %bb.0:
@@ -2502,29 +3847,79 @@ define i64 @test_signed_i64_f16(half %f) nounwind {
 ; VFP2-NEXT:    vcmp.f32 s2, s0
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
 ; VFP2-NEXT:    vcmp.f32 s2, s4
+; VFP2-NEXT:    it lt
 ; VFP2-NEXT:    movlt r0, #0
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
 ; VFP2-NEXT:    vcmp.f32 s2, s2
-; VFP2-NEXT:    mvngt r0, #0
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r0, #-1
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
 ; VFP2-NEXT:    vcmp.f32 s2, s0
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r0, #0
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt.w r1, #-2147483648
 ; VFP2-NEXT:    vcmp.f32 s2, s4
-; VFP2-NEXT:    movlt r1, #-2147483648
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s2
+; VFP2-NEXT:    it gt
 ; VFP2-NEXT:    mvngt r1, #-2147483648
+; VFP2-NEXT:    vcmp.f32 s2, s2
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r1, #0
-; VFP2-NEXT:    pop {r4, lr}
-; VFP2-NEXT:    mov pc, lr
+; VFP2-NEXT:    pop {r4, pc}
 ; VFP2-NEXT:    .p2align 2
 ; VFP2-NEXT:  @ %bb.1:
 ; VFP2-NEXT:  .LCPI27_0:
 ; VFP2-NEXT:    .long 0xdf000000 @ float -9.22337203E+18
 ; VFP2-NEXT:  .LCPI27_1:
 ; VFP2-NEXT:    .long 0x5effffff @ float 9.22337149E+18
+;
+; FP16-LABEL: test_signed_i64_f16:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    .save {r7, lr}
+; FP16-NEXT:    push {r7, lr}
+; FP16-NEXT:    .vsave {d8}
+; FP16-NEXT:    vpush {d8}
+; FP16-NEXT:    vmov.f16 s0, r0
+; FP16-NEXT:    vcvtb.f32.f16 s16, s0
+; FP16-NEXT:    vmov r0, s16
+; FP16-NEXT:    bl __aeabi_f2lz
+; FP16-NEXT:    vldr s0, .LCPI27_0
+; FP16-NEXT:    vldr s2, .LCPI27_1
+; FP16-NEXT:    vcmp.f32 s16, s0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s2
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r0, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s16
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r0, #-1
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s0
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r0, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt.w r1, #-2147483648
+; FP16-NEXT:    vcmp.f32 s16, s2
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    mvngt r1, #-2147483648
+; FP16-NEXT:    vcmp.f32 s16, s16
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r1, #0
+; FP16-NEXT:    vpop {d8}
+; FP16-NEXT:    pop {r7, pc}
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI27_0:
+; FP16-NEXT:    .long 0xdf000000 @ float -9.22337203E+18
+; FP16-NEXT:  .LCPI27_1:
+; FP16-NEXT:    .long 0x5effffff @ float 9.22337149E+18
     %x = call i64 @llvm.fptosi.sat.i64.f16(half %f)
     ret i64 %x
 }
@@ -2532,85 +3927,142 @@ define i64 @test_signed_i64_f16(half %f) nounwind {
 define i100 @test_signed_i100_f16(half %f) nounwind {
 ; SOFT-LABEL: test_signed_i100_f16:
 ; SOFT:       @ %bb.0:
-; SOFT-NEXT:    .save {r4, r5, r6, r7, r8, r9, r11, lr}
-; SOFT-NEXT:    push {r4, r5, r6, r7, r8, r9, r11, lr}
-; SOFT-NEXT:    mov r1, #255
-; SOFT-NEXT:    orr r1, r1, #65280
-; SOFT-NEXT:    and r0, r0, r1
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #20
+; SOFT-NEXT:    sub sp, #20
+; SOFT-NEXT:    uxth r0, r0
 ; SOFT-NEXT:    bl __aeabi_h2f
-; SOFT-NEXT:    mov r1, #-251658240
 ; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    ldr r1, .LCPI28_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    movs r0, #241
+; SOFT-NEXT:    lsls r5, r0, #24
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    mov r1, r5
 ; SOFT-NEXT:    bl __aeabi_fcmpge
-; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r6, r0
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __fixsfti
-; SOFT-NEXT:    mov r9, r0
-; SOFT-NEXT:    mov r6, r1
-; SOFT-NEXT:    cmp r5, #0
-; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mvn r1, #-1895825408
-; SOFT-NEXT:    mov r7, r2
-; SOFT-NEXT:    mov r8, r3
-; SOFT-NEXT:    moveq r9, r5
-; SOFT-NEXT:    bl __aeabi_fcmpgt
-; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    str r1, [sp] @ 4-byte Spill
+; SOFT-NEXT:    str r2, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    str r3, [sp, #12] @ 4-byte Spill
+; SOFT-NEXT:    cmp r6, #0
+; SOFT-NEXT:    bne .LBB28_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:  .LBB28_2:
+; SOFT-NEXT:    movs r6, #0
+; SOFT-NEXT:    mvns r1, r6
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    str r1, [sp, #16] @ 4-byte Spill
+; SOFT-NEXT:    mov r7, r1
+; SOFT-NEXT:    bne .LBB28_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:  .LBB28_4:
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mvnne r9, #0
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bne .LBB28_6
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:  .LBB28_6:
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mov r1, #-251658240
-; SOFT-NEXT:    movne r9, #0
-; SOFT-NEXT:    bl __aeabi_fcmpge
-; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mvn r1, #-1895825408
-; SOFT-NEXT:    moveq r6, r0
-; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    ldr r1, .LCPI28_0
 ; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    bl __aeabi_fcmpge
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB28_8
+; SOFT-NEXT:  @ %bb.7:
+; SOFT-NEXT:    str r0, [sp] @ 4-byte Spill
+; SOFT-NEXT:  .LBB28_8:
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    ldr r7, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:    bne .LBB28_10
+; SOFT-NEXT:  @ %bb.9:
+; SOFT-NEXT:    ldr r7, [sp] @ 4-byte Reload
+; SOFT-NEXT:  .LBB28_10:
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mvnne r6, #0
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bne .LBB28_12
+; SOFT-NEXT:  @ %bb.11:
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:  .LBB28_12:
+; SOFT-NEXT:    str r0, [sp] @ 4-byte Spill
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mov r1, #-251658240
-; SOFT-NEXT:    movne r6, #0
-; SOFT-NEXT:    bl __aeabi_fcmpge
-; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mvn r1, #-1895825408
-; SOFT-NEXT:    moveq r7, r0
-; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    ldr r1, .LCPI28_0
 ; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    bl __aeabi_fcmpge
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB28_14
+; SOFT-NEXT:  @ %bb.13:
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:  .LBB28_14:
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    bne .LBB28_16
+; SOFT-NEXT:  @ %bb.15:
+; SOFT-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    str r0, [sp, #16] @ 4-byte Spill
+; SOFT-NEXT:  .LBB28_16:
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mvnne r7, #0
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    mov r7, r6
+; SOFT-NEXT:    bne .LBB28_18
+; SOFT-NEXT:  @ %bb.17:
+; SOFT-NEXT:    ldr r7, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:  .LBB28_18:
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mov r1, #-251658240
-; SOFT-NEXT:    movne r7, #0
+; SOFT-NEXT:    mov r1, r5
 ; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    movs r5, #7
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB28_20
+; SOFT-NEXT:  @ %bb.19:
+; SOFT-NEXT:    mvns r0, r5
+; SOFT-NEXT:    str r0, [sp, #12] @ 4-byte Spill
+; SOFT-NEXT:  .LBB28_20:
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mvn r1, #-1895825408
-; SOFT-NEXT:    mvneq r8, #7
+; SOFT-NEXT:    ldr r1, .LCPI28_0
 ; SOFT-NEXT:    bl __aeabi_fcmpgt
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB28_22
+; SOFT-NEXT:  @ %bb.21:
+; SOFT-NEXT:    ldr r5, [sp, #12] @ 4-byte Reload
+; SOFT-NEXT:  .LBB28_22:
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    movne r8, #7
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r0, r9
-; SOFT-NEXT:    movne r8, #0
-; SOFT-NEXT:    mov r1, r6
+; SOFT-NEXT:    bne .LBB28_24
+; SOFT-NEXT:  @ %bb.23:
+; SOFT-NEXT:    mov r6, r5
+; SOFT-NEXT:  .LBB28_24:
+; SOFT-NEXT:    ldr r0, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:    ldr r1, [sp] @ 4-byte Reload
 ; SOFT-NEXT:    mov r2, r7
-; SOFT-NEXT:    mov r3, r8
-; SOFT-NEXT:    pop {r4, r5, r6, r7, r8, r9, r11, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    mov r3, r6
+; SOFT-NEXT:    add sp, #20
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.25:
+; SOFT-NEXT:  .LCPI28_0:
+; SOFT-NEXT:    .long 1895825407 @ 0x70ffffff
 ;
 ; VFP2-LABEL: test_signed_i100_f16:
 ; VFP2:       @ %bb.0:
@@ -2625,47 +4077,127 @@ define i100 @test_signed_i100_f16(half %f) nounwind {
 ; VFP2-NEXT:    vcmp.f32 s2, s0
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
 ; VFP2-NEXT:    vcmp.f32 s2, s4
+; VFP2-NEXT:    it lt
 ; VFP2-NEXT:    movlt r0, #0
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
 ; VFP2-NEXT:    vcmp.f32 s2, s2
-; VFP2-NEXT:    mvngt r0, #0
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r0, #-1
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
 ; VFP2-NEXT:    vcmp.f32 s2, s0
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r0, #0
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
 ; VFP2-NEXT:    vcmp.f32 s2, s4
+; VFP2-NEXT:    it lt
 ; VFP2-NEXT:    movlt r1, #0
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
 ; VFP2-NEXT:    vcmp.f32 s2, s2
-; VFP2-NEXT:    mvngt r1, #0
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r1, #-1
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
 ; VFP2-NEXT:    vcmp.f32 s2, s0
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r1, #0
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
 ; VFP2-NEXT:    vcmp.f32 s2, s4
+; VFP2-NEXT:    it lt
 ; VFP2-NEXT:    movlt r2, #0
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
 ; VFP2-NEXT:    vcmp.f32 s2, s2
-; VFP2-NEXT:    mvngt r2, #0
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r2, #-1
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
 ; VFP2-NEXT:    vcmp.f32 s2, s0
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r2, #0
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s4
+; VFP2-NEXT:    it lt
 ; VFP2-NEXT:    mvnlt r3, #7
+; VFP2-NEXT:    vcmp.f32 s2, s4
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s2
+; VFP2-NEXT:    it gt
 ; VFP2-NEXT:    movgt r3, #7
+; VFP2-NEXT:    vcmp.f32 s2, s2
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r3, #0
-; VFP2-NEXT:    pop {r4, lr}
-; VFP2-NEXT:    mov pc, lr
+; VFP2-NEXT:    pop {r4, pc}
 ; VFP2-NEXT:    .p2align 2
 ; VFP2-NEXT:  @ %bb.1:
 ; VFP2-NEXT:  .LCPI28_0:
 ; VFP2-NEXT:    .long 0xf1000000 @ float -6.338253E+29
 ; VFP2-NEXT:  .LCPI28_1:
 ; VFP2-NEXT:    .long 0x70ffffff @ float 6.33825262E+29
+;
+; FP16-LABEL: test_signed_i100_f16:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    .save {r7, lr}
+; FP16-NEXT:    push {r7, lr}
+; FP16-NEXT:    .vsave {d8}
+; FP16-NEXT:    vpush {d8}
+; FP16-NEXT:    vmov.f16 s0, r0
+; FP16-NEXT:    vcvtb.f32.f16 s16, s0
+; FP16-NEXT:    vmov r0, s16
+; FP16-NEXT:    bl __fixsfti
+; FP16-NEXT:    vldr s0, .LCPI28_0
+; FP16-NEXT:    vldr s2, .LCPI28_1
+; FP16-NEXT:    vcmp.f32 s16, s0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s2
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r0, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s16
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r0, #-1
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s0
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r0, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s2
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r1, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s16
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r1, #-1
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s0
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r1, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s2
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r2, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s16
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r2, #-1
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s0
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r2, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    mvnlt r3, #7
+; FP16-NEXT:    vcmp.f32 s16, s2
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt r3, #7
+; FP16-NEXT:    vcmp.f32 s16, s16
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r3, #0
+; FP16-NEXT:    vpop {d8}
+; FP16-NEXT:    pop {r7, pc}
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI28_0:
+; FP16-NEXT:    .long 0xf1000000 @ float -6.338253E+29
+; FP16-NEXT:  .LCPI28_1:
+; FP16-NEXT:    .long 0x70ffffff @ float 6.33825262E+29
     %x = call i100 @llvm.fptosi.sat.i100.f16(half %f)
     ret i100 %x
 }
@@ -2673,85 +4205,146 @@ define i100 @test_signed_i100_f16(half %f) nounwind {
 define i128 @test_signed_i128_f16(half %f) nounwind {
 ; SOFT-LABEL: test_signed_i128_f16:
 ; SOFT:       @ %bb.0:
-; SOFT-NEXT:    .save {r4, r5, r6, r7, r8, r9, r11, lr}
-; SOFT-NEXT:    push {r4, r5, r6, r7, r8, r9, r11, lr}
-; SOFT-NEXT:    mov r1, #255
-; SOFT-NEXT:    orr r1, r1, #65280
-; SOFT-NEXT:    and r0, r0, r1
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #20
+; SOFT-NEXT:    sub sp, #20
+; SOFT-NEXT:    uxth r0, r0
 ; SOFT-NEXT:    bl __aeabi_h2f
-; SOFT-NEXT:    mov r1, #-16777216
 ; SOFT-NEXT:    mov r4, r0
-; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    ldr r1, .LCPI29_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
 ; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    movs r0, #255
+; SOFT-NEXT:    lsls r7, r0, #24
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    mov r6, r0
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    bl __fixsfti
-; SOFT-NEXT:    mov r9, r0
-; SOFT-NEXT:    mov r6, r1
+; SOFT-NEXT:    str r1, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    str r2, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    str r3, [sp] @ 4-byte Spill
+; SOFT-NEXT:    cmp r6, #0
+; SOFT-NEXT:    bne .LBB29_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:  .LBB29_2:
+; SOFT-NEXT:    movs r6, #0
+; SOFT-NEXT:    mvns r1, r6
 ; SOFT-NEXT:    cmp r5, #0
-; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mvn r1, #-2130706432
-; SOFT-NEXT:    mov r7, r2
-; SOFT-NEXT:    mov r8, r3
-; SOFT-NEXT:    moveq r9, r5
-; SOFT-NEXT:    bl __aeabi_fcmpgt
-; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    str r1, [sp, #16] @ 4-byte Spill
+; SOFT-NEXT:    mov r5, r1
+; SOFT-NEXT:    bne .LBB29_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:  .LBB29_4:
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mvnne r9, #0
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bne .LBB29_6
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:  .LBB29_6:
+; SOFT-NEXT:    str r0, [sp, #12] @ 4-byte Spill
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mov r1, #-16777216
-; SOFT-NEXT:    movne r9, #0
-; SOFT-NEXT:    bl __aeabi_fcmpge
-; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mvn r1, #-2130706432
-; SOFT-NEXT:    moveq r6, r0
-; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    ldr r1, .LCPI29_0
 ; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    bl __aeabi_fcmpge
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB29_8
+; SOFT-NEXT:  @ %bb.7:
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:  .LBB29_8:
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    ldr r5, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:    bne .LBB29_10
+; SOFT-NEXT:  @ %bb.9:
+; SOFT-NEXT:    ldr r5, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:  .LBB29_10:
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mvnne r6, #0
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bne .LBB29_12
+; SOFT-NEXT:  @ %bb.11:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:  .LBB29_12:
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mov r1, #-16777216
-; SOFT-NEXT:    movne r6, #0
-; SOFT-NEXT:    bl __aeabi_fcmpge
-; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mvn r1, #-2130706432
-; SOFT-NEXT:    moveq r7, r0
-; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    ldr r1, .LCPI29_0
 ; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    bl __aeabi_fcmpge
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB29_14
+; SOFT-NEXT:  @ %bb.13:
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:  .LBB29_14:
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    bne .LBB29_16
+; SOFT-NEXT:  @ %bb.15:
+; SOFT-NEXT:    ldr r0, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:    str r0, [sp, #16] @ 4-byte Spill
+; SOFT-NEXT:  .LBB29_16:
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mvnne r7, #0
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    mov r5, r6
+; SOFT-NEXT:    bne .LBB29_18
+; SOFT-NEXT:  @ %bb.17:
+; SOFT-NEXT:    ldr r5, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:  .LBB29_18:
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mov r1, #-16777216
-; SOFT-NEXT:    movne r7, #0
+; SOFT-NEXT:    mov r1, r7
 ; SOFT-NEXT:    bl __aeabi_fcmpge
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    beq .LBB29_20
+; SOFT-NEXT:  @ %bb.19:
+; SOFT-NEXT:    ldr r7, [sp] @ 4-byte Reload
+; SOFT-NEXT:    b .LBB29_21
+; SOFT-NEXT:  .LBB29_20:
+; SOFT-NEXT:    movs r0, #1
+; SOFT-NEXT:    lsls r7, r0, #31
+; SOFT-NEXT:  .LBB29_21:
 ; SOFT-NEXT:    mov r0, r4
-; SOFT-NEXT:    mvn r1, #-2130706432
-; SOFT-NEXT:    moveq r8, #-2147483648
+; SOFT-NEXT:    ldr r1, .LCPI29_0
 ; SOFT-NEXT:    bl __aeabi_fcmpgt
 ; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    beq .LBB29_23
+; SOFT-NEXT:  @ %bb.22:
+; SOFT-NEXT:    ldr r7, .LCPI29_1
+; SOFT-NEXT:  .LBB29_23:
 ; SOFT-NEXT:    mov r0, r4
 ; SOFT-NEXT:    mov r1, r4
-; SOFT-NEXT:    mvnne r8, #-2147483648
 ; SOFT-NEXT:    bl __aeabi_fcmpun
 ; SOFT-NEXT:    cmp r0, #0
-; SOFT-NEXT:    mov r0, r9
-; SOFT-NEXT:    movne r8, #0
-; SOFT-NEXT:    mov r1, r6
-; SOFT-NEXT:    mov r2, r7
-; SOFT-NEXT:    mov r3, r8
-; SOFT-NEXT:    pop {r4, r5, r6, r7, r8, r9, r11, lr}
-; SOFT-NEXT:    mov pc, lr
+; SOFT-NEXT:    bne .LBB29_25
+; SOFT-NEXT:  @ %bb.24:
+; SOFT-NEXT:    mov r6, r7
+; SOFT-NEXT:  .LBB29_25:
+; SOFT-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
+; SOFT-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    mov r2, r5
+; SOFT-NEXT:    mov r3, r6
+; SOFT-NEXT:    add sp, #20
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.26:
+; SOFT-NEXT:  .LCPI29_0:
+; SOFT-NEXT:    .long 2130706431 @ 0x7effffff
+; SOFT-NEXT:  .LCPI29_1:
+; SOFT-NEXT:    .long 2147483647 @ 0x7fffffff
 ;
 ; VFP2-LABEL: test_signed_i128_f16:
 ; VFP2:       @ %bb.0:
@@ -2766,47 +4359,127 @@ define i128 @test_signed_i128_f16(half %f) nounwind {
 ; VFP2-NEXT:    vcmp.f32 s2, s0
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
 ; VFP2-NEXT:    vcmp.f32 s2, s4
+; VFP2-NEXT:    it lt
 ; VFP2-NEXT:    movlt r0, #0
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
 ; VFP2-NEXT:    vcmp.f32 s2, s2
-; VFP2-NEXT:    mvngt r0, #0
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r0, #-1
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
 ; VFP2-NEXT:    vcmp.f32 s2, s0
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r0, #0
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
 ; VFP2-NEXT:    vcmp.f32 s2, s4
+; VFP2-NEXT:    it lt
 ; VFP2-NEXT:    movlt r1, #0
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
 ; VFP2-NEXT:    vcmp.f32 s2, s2
-; VFP2-NEXT:    mvngt r1, #0
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r1, #-1
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
 ; VFP2-NEXT:    vcmp.f32 s2, s0
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r1, #0
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
 ; VFP2-NEXT:    vcmp.f32 s2, s4
+; VFP2-NEXT:    it lt
 ; VFP2-NEXT:    movlt r2, #0
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
 ; VFP2-NEXT:    vcmp.f32 s2, s2
-; VFP2-NEXT:    mvngt r2, #0
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r2, #-1
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
 ; VFP2-NEXT:    vcmp.f32 s2, s0
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r2, #0
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt.w r3, #-2147483648
 ; VFP2-NEXT:    vcmp.f32 s2, s4
-; VFP2-NEXT:    movlt r3, #-2147483648
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
-; VFP2-NEXT:    vcmp.f32 s2, s2
+; VFP2-NEXT:    it gt
 ; VFP2-NEXT:    mvngt r3, #-2147483648
+; VFP2-NEXT:    vcmp.f32 s2, s2
 ; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it vs
 ; VFP2-NEXT:    movvs r3, #0
-; VFP2-NEXT:    pop {r4, lr}
-; VFP2-NEXT:    mov pc, lr
+; VFP2-NEXT:    pop {r4, pc}
 ; VFP2-NEXT:    .p2align 2
 ; VFP2-NEXT:  @ %bb.1:
 ; VFP2-NEXT:  .LCPI29_0:
 ; VFP2-NEXT:    .long 0xff000000 @ float -1.70141183E+38
 ; VFP2-NEXT:  .LCPI29_1:
 ; VFP2-NEXT:    .long 0x7effffff @ float 1.70141173E+38
+;
+; FP16-LABEL: test_signed_i128_f16:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    .save {r7, lr}
+; FP16-NEXT:    push {r7, lr}
+; FP16-NEXT:    .vsave {d8}
+; FP16-NEXT:    vpush {d8}
+; FP16-NEXT:    vmov.f16 s0, r0
+; FP16-NEXT:    vcvtb.f32.f16 s16, s0
+; FP16-NEXT:    vmov r0, s16
+; FP16-NEXT:    bl __fixsfti
+; FP16-NEXT:    vldr s0, .LCPI29_0
+; FP16-NEXT:    vldr s2, .LCPI29_1
+; FP16-NEXT:    vcmp.f32 s16, s0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s2
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r0, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s16
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r0, #-1
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s0
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r0, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s2
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r1, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s16
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r1, #-1
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s0
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r1, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s2
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r2, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s16
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r2, #-1
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s0
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r2, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt.w r3, #-2147483648
+; FP16-NEXT:    vcmp.f32 s16, s2
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    mvngt r3, #-2147483648
+; FP16-NEXT:    vcmp.f32 s16, s16
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it vs
+; FP16-NEXT:    movvs r3, #0
+; FP16-NEXT:    vpop {d8}
+; FP16-NEXT:    pop {r7, pc}
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI29_0:
+; FP16-NEXT:    .long 0xff000000 @ float -1.70141183E+38
+; FP16-NEXT:  .LCPI29_1:
+; FP16-NEXT:    .long 0x7effffff @ float 1.70141173E+38
     %x = call i128 @llvm.fptosi.sat.i128.f16(half %f)
     ret i128 %x
 }

diff  --git a/llvm/test/CodeGen/ARM/fptoui-sat-scalar.ll b/llvm/test/CodeGen/ARM/fptoui-sat-scalar.ll
new file mode 100644
index 0000000000000..f547f8d3b97ef
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/fptoui-sat-scalar.ll
@@ -0,0 +1,3331 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv6-none-eabi -float-abi=soft %s -o - | FileCheck %s --check-prefixes=SOFT
+; RUN: llc -mtriple=thumbv7-none-eabi -mattr=+vfp2 %s -o - | FileCheck %s --check-prefixes=VFP,VFP2
+; RUN: llc -mtriple=thumbv8.1m.main-eabi -mattr=+fullfp16,+fp64 %s -o - | FileCheck %s --check-prefixes=VFP,FP16
+
+;
+; 32-bit float to unsigned integer
+;
+
+declare   i1 @llvm.fptoui.sat.i1.f32  (float)
+declare   i8 @llvm.fptoui.sat.i8.f32  (float)
+declare  i13 @llvm.fptoui.sat.i13.f32 (float)
+declare  i16 @llvm.fptoui.sat.i16.f32 (float)
+declare  i19 @llvm.fptoui.sat.i19.f32 (float)
+declare  i32 @llvm.fptoui.sat.i32.f32 (float)
+declare  i50 @llvm.fptoui.sat.i50.f32 (float)
+declare  i64 @llvm.fptoui.sat.i64.f32 (float)
+declare i100 @llvm.fptoui.sat.i100.f32(float)
+declare i128 @llvm.fptoui.sat.i128.f32(float)
+
+define i1 @test_signed_i1_f32(float %f) nounwind {
+; SOFT-LABEL: test_signed_i1_f32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, lr}
+; SOFT-NEXT:    push {r4, r5, r6, lr}
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    movs r0, #127
+; SOFT-NEXT:    lsls r1, r0, #23
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    movs r1, #0
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    bl __aeabi_f2uiz
+; SOFT-NEXT:    cmp r6, #0
+; SOFT-NEXT:    beq .LBB0_3
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    bne .LBB0_4
+; SOFT-NEXT:  .LBB0_2:
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
+; SOFT-NEXT:  .LBB0_3:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    beq .LBB0_2
+; SOFT-NEXT:  .LBB0_4:
+; SOFT-NEXT:    movs r0, #1
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
+;
+; VFP2-LABEL: test_signed_i1_f32:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    vmov s2, r0
+; VFP2-NEXT:    vmov.f32 s0, #1.000000e+00
+; VFP2-NEXT:    vcvt.u32.f32 s4, s2
+; VFP2-NEXT:    vcmp.f32 s2, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vcmp.f32 s2, s0
+; VFP2-NEXT:    vmov r0, s4
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt r0, #1
+; VFP2-NEXT:    bx lr
+;
+; FP16-LABEL: test_signed_i1_f32:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vldr s0, .LCPI0_0
+; FP16-NEXT:    vmov s2, r0
+; FP16-NEXT:    vmov.f32 s4, #1.000000e+00
+; FP16-NEXT:    vmaxnm.f32 s0, s2, s0
+; FP16-NEXT:    vminnm.f32 s0, s0, s4
+; FP16-NEXT:    vcvt.u32.f32 s0, s0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI0_0:
+; FP16-NEXT:    .long 0x00000000 @ float 0
+    %x = call i1 @llvm.fptoui.sat.i1.f32(float %f)
+    ret i1 %x
+}
+
+define i8 @test_signed_i8_f32(float %f) nounwind {
+; SOFT-LABEL: test_signed_i8_f32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, lr}
+; SOFT-NEXT:    push {r4, r5, r6, lr}
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    ldr r1, .LCPI1_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    movs r1, #0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bl __aeabi_f2uiz
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    beq .LBB1_3
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    bne .LBB1_4
+; SOFT-NEXT:  .LBB1_2:
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
+; SOFT-NEXT:  .LBB1_3:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    beq .LBB1_2
+; SOFT-NEXT:  .LBB1_4:
+; SOFT-NEXT:    movs r0, #255
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:  .LCPI1_0:
+; SOFT-NEXT:    .long 1132396544 @ 0x437f0000
+;
+; VFP2-LABEL: test_signed_i8_f32:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    vmov s0, r0
+; VFP2-NEXT:    vldr s4, .LCPI1_0
+; VFP2-NEXT:    vcvt.u32.f32 s2, s0
+; VFP2-NEXT:    vcmp.f32 s0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vcmp.f32 s0, s4
+; VFP2-NEXT:    vmov r0, s2
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt r0, #255
+; VFP2-NEXT:    bx lr
+; VFP2-NEXT:    .p2align 2
+; VFP2-NEXT:  @ %bb.1:
+; VFP2-NEXT:  .LCPI1_0:
+; VFP2-NEXT:    .long 0x437f0000 @ float 255
+;
+; FP16-LABEL: test_signed_i8_f32:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vldr s0, .LCPI1_0
+; FP16-NEXT:    vmov s2, r0
+; FP16-NEXT:    vldr s4, .LCPI1_1
+; FP16-NEXT:    vmaxnm.f32 s0, s2, s0
+; FP16-NEXT:    vminnm.f32 s0, s0, s4
+; FP16-NEXT:    vcvt.u32.f32 s0, s0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI1_0:
+; FP16-NEXT:    .long 0x00000000 @ float 0
+; FP16-NEXT:  .LCPI1_1:
+; FP16-NEXT:    .long 0x437f0000 @ float 255
+    %x = call i8 @llvm.fptoui.sat.i8.f32(float %f)
+    ret i8 %x
+}
+
+define i13 @test_signed_i13_f32(float %f) nounwind {
+; SOFT-LABEL: test_signed_i13_f32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, lr}
+; SOFT-NEXT:    push {r4, r5, r6, lr}
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    ldr r1, .LCPI2_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    movs r1, #0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bl __aeabi_f2uiz
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    beq .LBB2_3
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    bne .LBB2_4
+; SOFT-NEXT:  .LBB2_2:
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
+; SOFT-NEXT:  .LBB2_3:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    beq .LBB2_2
+; SOFT-NEXT:  .LBB2_4:
+; SOFT-NEXT:    ldr r0, .LCPI2_1
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:  .LCPI2_0:
+; SOFT-NEXT:    .long 1174403072 @ 0x45fff800
+; SOFT-NEXT:  .LCPI2_1:
+; SOFT-NEXT:    .long 8191 @ 0x1fff
+;
+; VFP2-LABEL: test_signed_i13_f32:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    vmov s0, r0
+; VFP2-NEXT:    vldr s4, .LCPI2_0
+; VFP2-NEXT:    vcvt.u32.f32 s2, s0
+; VFP2-NEXT:    vcmp.f32 s0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vcmp.f32 s0, s4
+; VFP2-NEXT:    vmov r0, s2
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movwgt r0, #8191
+; VFP2-NEXT:    bx lr
+; VFP2-NEXT:    .p2align 2
+; VFP2-NEXT:  @ %bb.1:
+; VFP2-NEXT:  .LCPI2_0:
+; VFP2-NEXT:    .long 0x45fff800 @ float 8191
+;
+; FP16-LABEL: test_signed_i13_f32:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vldr s0, .LCPI2_0
+; FP16-NEXT:    vmov s2, r0
+; FP16-NEXT:    vldr s4, .LCPI2_1
+; FP16-NEXT:    vmaxnm.f32 s0, s2, s0
+; FP16-NEXT:    vminnm.f32 s0, s0, s4
+; FP16-NEXT:    vcvt.u32.f32 s0, s0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI2_0:
+; FP16-NEXT:    .long 0x00000000 @ float 0
+; FP16-NEXT:  .LCPI2_1:
+; FP16-NEXT:    .long 0x45fff800 @ float 8191
+    %x = call i13 @llvm.fptoui.sat.i13.f32(float %f)
+    ret i13 %x
+}
+
+define i16 @test_signed_i16_f32(float %f) nounwind {
+; SOFT-LABEL: test_signed_i16_f32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, lr}
+; SOFT-NEXT:    push {r4, r5, r6, lr}
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    ldr r1, .LCPI3_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    movs r1, #0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bl __aeabi_f2uiz
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    beq .LBB3_3
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    bne .LBB3_4
+; SOFT-NEXT:  .LBB3_2:
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
+; SOFT-NEXT:  .LBB3_3:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    beq .LBB3_2
+; SOFT-NEXT:  .LBB3_4:
+; SOFT-NEXT:    ldr r0, .LCPI3_1
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:  .LCPI3_0:
+; SOFT-NEXT:    .long 1199570688 @ 0x477fff00
+; SOFT-NEXT:  .LCPI3_1:
+; SOFT-NEXT:    .long 65535 @ 0xffff
+;
+; VFP2-LABEL: test_signed_i16_f32:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    vmov s0, r0
+; VFP2-NEXT:    vldr s4, .LCPI3_0
+; VFP2-NEXT:    vcvt.u32.f32 s2, s0
+; VFP2-NEXT:    vcmp.f32 s0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vcmp.f32 s0, s4
+; VFP2-NEXT:    vmov r0, s2
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movwgt r0, #65535
+; VFP2-NEXT:    bx lr
+; VFP2-NEXT:    .p2align 2
+; VFP2-NEXT:  @ %bb.1:
+; VFP2-NEXT:  .LCPI3_0:
+; VFP2-NEXT:    .long 0x477fff00 @ float 65535
+;
+; FP16-LABEL: test_signed_i16_f32:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vldr s0, .LCPI3_0
+; FP16-NEXT:    vmov s2, r0
+; FP16-NEXT:    vldr s4, .LCPI3_1
+; FP16-NEXT:    vmaxnm.f32 s0, s2, s0
+; FP16-NEXT:    vminnm.f32 s0, s0, s4
+; FP16-NEXT:    vcvt.u32.f32 s0, s0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI3_0:
+; FP16-NEXT:    .long 0x00000000 @ float 0
+; FP16-NEXT:  .LCPI3_1:
+; FP16-NEXT:    .long 0x477fff00 @ float 65535
+    %x = call i16 @llvm.fptoui.sat.i16.f32(float %f)
+    ret i16 %x
+}
+
+define i19 @test_signed_i19_f32(float %f) nounwind {
+; SOFT-LABEL: test_signed_i19_f32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, lr}
+; SOFT-NEXT:    push {r4, r5, r6, lr}
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    ldr r1, .LCPI4_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    movs r1, #0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bl __aeabi_f2uiz
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    beq .LBB4_3
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    bne .LBB4_4
+; SOFT-NEXT:  .LBB4_2:
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
+; SOFT-NEXT:  .LBB4_3:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    beq .LBB4_2
+; SOFT-NEXT:  .LBB4_4:
+; SOFT-NEXT:    ldr r0, .LCPI4_1
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:  .LCPI4_0:
+; SOFT-NEXT:    .long 1224736736 @ 0x48ffffe0
+; SOFT-NEXT:  .LCPI4_1:
+; SOFT-NEXT:    .long 524287 @ 0x7ffff
+;
+; VFP2-LABEL: test_signed_i19_f32:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    vmov s0, r0
+; VFP2-NEXT:    vldr s4, .LCPI4_0
+; VFP2-NEXT:    vcvt.u32.f32 s2, s0
+; VFP2-NEXT:    vcmp.f32 s0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vcmp.f32 s0, s4
+; VFP2-NEXT:    vmov r0, s2
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    itt gt
+; VFP2-NEXT:    movwgt r0, #65535
+; VFP2-NEXT:    movtgt r0, #7
+; VFP2-NEXT:    bx lr
+; VFP2-NEXT:    .p2align 2
+; VFP2-NEXT:  @ %bb.1:
+; VFP2-NEXT:  .LCPI4_0:
+; VFP2-NEXT:    .long 0x48ffffe0 @ float 524287
+;
+; FP16-LABEL: test_signed_i19_f32:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vldr s0, .LCPI4_0
+; FP16-NEXT:    vmov s2, r0
+; FP16-NEXT:    vldr s4, .LCPI4_1
+; FP16-NEXT:    vmaxnm.f32 s0, s2, s0
+; FP16-NEXT:    vminnm.f32 s0, s0, s4
+; FP16-NEXT:    vcvt.u32.f32 s0, s0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI4_0:
+; FP16-NEXT:    .long 0x00000000 @ float 0
+; FP16-NEXT:  .LCPI4_1:
+; FP16-NEXT:    .long 0x48ffffe0 @ float 524287
+    %x = call i19 @llvm.fptoui.sat.i19.f32(float %f)
+    ret i19 %x
+}
+
+define i32 @test_signed_i32_f32(float %f) nounwind {
+; SOFT-LABEL: test_signed_i32_f32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #4
+; SOFT-NEXT:    sub sp, #4
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    ldr r1, .LCPI5_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    movs r4, #0
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    bl __aeabi_f2uiz
+; SOFT-NEXT:    cmp r6, #0
+; SOFT-NEXT:    beq .LBB5_3
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    bne .LBB5_4
+; SOFT-NEXT:  .LBB5_2:
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:  .LBB5_3:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    beq .LBB5_2
+; SOFT-NEXT:  .LBB5_4:
+; SOFT-NEXT:    mvns r0, r4
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:  .LCPI5_0:
+; SOFT-NEXT:    .long 1333788671 @ 0x4f7fffff
+;
+; VFP-LABEL: test_signed_i32_f32:
+; VFP:       @ %bb.0:
+; VFP-NEXT:    vmov s0, r0
+; VFP-NEXT:    vldr s4, .LCPI5_0
+; VFP-NEXT:    vcvt.u32.f32 s2, s0
+; VFP-NEXT:    vcmp.f32 s0, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s0, s4
+; VFP-NEXT:    vmov r0, s2
+; VFP-NEXT:    it lt
+; VFP-NEXT:    movlt r0, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it gt
+; VFP-NEXT:    movgt.w r0, #-1
+; VFP-NEXT:    bx lr
+; VFP-NEXT:    .p2align 2
+; VFP-NEXT:  @ %bb.1:
+; VFP-NEXT:  .LCPI5_0:
+; VFP-NEXT:    .long 0x4f7fffff @ float 4.29496704E+9
+    %x = call i32 @llvm.fptoui.sat.i32.f32(float %f)
+    ret i32 %x
+}
+
+define i50 @test_signed_i50_f32(float %f) nounwind {
+; SOFT-LABEL: test_signed_i50_f32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #4
+; SOFT-NEXT:    sub sp, #4
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    ldr r1, .LCPI6_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    movs r7, #0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bl __aeabi_f2ulz
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    bne .LBB6_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:  .LBB6_2:
+; SOFT-NEXT:    mov r5, r1
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    beq .LBB6_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mvns r0, r7
+; SOFT-NEXT:  .LBB6_4:
+; SOFT-NEXT:    str r0, [sp] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    ldr r1, .LCPI6_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB6_6
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:  .LBB6_6:
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    beq .LBB6_8
+; SOFT-NEXT:  @ %bb.7:
+; SOFT-NEXT:    ldr r5, .LCPI6_1
+; SOFT-NEXT:  .LBB6_8:
+; SOFT-NEXT:    ldr r0, [sp] @ 4-byte Reload
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.9:
+; SOFT-NEXT:  .LCPI6_0:
+; SOFT-NEXT:    .long 1484783615 @ 0x587fffff
+; SOFT-NEXT:  .LCPI6_1:
+; SOFT-NEXT:    .long 262143 @ 0x3ffff
+;
+; VFP-LABEL: test_signed_i50_f32:
+; VFP:       @ %bb.0:
+; VFP-NEXT:    .save {r7, lr}
+; VFP-NEXT:    push {r7, lr}
+; VFP-NEXT:    .vsave {d8}
+; VFP-NEXT:    vpush {d8}
+; VFP-NEXT:    vmov s16, r0
+; VFP-NEXT:    bl __aeabi_f2ulz
+; VFP-NEXT:    vldr s0, .LCPI6_0
+; VFP-NEXT:    vcmp.f32 s16, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    itt lt
+; VFP-NEXT:    movlt r0, #0
+; VFP-NEXT:    movlt r1, #0
+; VFP-NEXT:    vcmp.f32 s16, s0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    ittt gt
+; VFP-NEXT:    movwgt r1, #65535
+; VFP-NEXT:    movtgt r1, #3
+; VFP-NEXT:    movgt.w r0, #-1
+; VFP-NEXT:    vpop {d8}
+; VFP-NEXT:    pop {r7, pc}
+; VFP-NEXT:    .p2align 2
+; VFP-NEXT:  @ %bb.1:
+; VFP-NEXT:  .LCPI6_0:
+; VFP-NEXT:    .long 0x587fffff @ float 1.12589984E+15
+    %x = call i50 @llvm.fptoui.sat.i50.f32(float %f)
+    ret i50 %x
+}
+
+define i64 @test_signed_i64_f32(float %f) nounwind {
+; SOFT-LABEL: test_signed_i64_f32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #12
+; SOFT-NEXT:    sub sp, #12
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    ldr r1, .LCPI7_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    movs r7, #0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    bl __aeabi_f2ulz
+; SOFT-NEXT:    str r1, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    bne .LBB7_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:  .LBB7_2:
+; SOFT-NEXT:    mvns r5, r7
+; SOFT-NEXT:    cmp r6, #0
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    bne .LBB7_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mov r1, r0
+; SOFT-NEXT:  .LBB7_4:
+; SOFT-NEXT:    str r1, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    ldr r1, .LCPI7_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB7_6
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:  .LBB7_6:
+; SOFT-NEXT:    cmp r6, #0
+; SOFT-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    bne .LBB7_8
+; SOFT-NEXT:  @ %bb.7:
+; SOFT-NEXT:    ldr r5, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:  .LBB7_8:
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    add sp, #12
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.9:
+; SOFT-NEXT:  .LCPI7_0:
+; SOFT-NEXT:    .long 1602224127 @ 0x5f7fffff
+;
+; VFP-LABEL: test_signed_i64_f32:
+; VFP:       @ %bb.0:
+; VFP-NEXT:    .save {r7, lr}
+; VFP-NEXT:    push {r7, lr}
+; VFP-NEXT:    .vsave {d8}
+; VFP-NEXT:    vpush {d8}
+; VFP-NEXT:    vmov s16, r0
+; VFP-NEXT:    bl __aeabi_f2ulz
+; VFP-NEXT:    vldr s0, .LCPI7_0
+; VFP-NEXT:    vcmp.f32 s16, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it lt
+; VFP-NEXT:    movlt r0, #0
+; VFP-NEXT:    vcmp.f32 s16, s0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s16, #0
+; VFP-NEXT:    it gt
+; VFP-NEXT:    movgt.w r0, #-1
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it lt
+; VFP-NEXT:    movlt r1, #0
+; VFP-NEXT:    vcmp.f32 s16, s0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it gt
+; VFP-NEXT:    movgt.w r1, #-1
+; VFP-NEXT:    vpop {d8}
+; VFP-NEXT:    pop {r7, pc}
+; VFP-NEXT:    .p2align 2
+; VFP-NEXT:  @ %bb.1:
+; VFP-NEXT:  .LCPI7_0:
+; VFP-NEXT:    .long 0x5f7fffff @ float 1.8446743E+19
+    %x = call i64 @llvm.fptoui.sat.i64.f32(float %f)
+    ret i64 %x
+}
+
+define i100 @test_signed_i100_f32(float %f) nounwind {
+; SOFT-LABEL: test_signed_i100_f32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #20
+; SOFT-NEXT:    sub sp, #20
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    ldr r1, .LCPI8_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    movs r6, #0
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    mov r1, r6
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    bl __fixunssfti
+; SOFT-NEXT:    str r1, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    str r2, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    bne .LBB8_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:  .LBB8_2:
+; SOFT-NEXT:    str r3, [sp, #16] @ 4-byte Spill
+; SOFT-NEXT:    mvns r7, r6
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    bne .LBB8_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mov r1, r0
+; SOFT-NEXT:  .LBB8_4:
+; SOFT-NEXT:    str r1, [sp, #12] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    ldr r1, .LCPI8_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    mov r1, r6
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB8_6
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:  .LBB8_6:
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    bne .LBB8_8
+; SOFT-NEXT:  @ %bb.7:
+; SOFT-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:  .LBB8_8:
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    ldr r1, .LCPI8_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    mov r1, r6
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB8_10
+; SOFT-NEXT:  @ %bb.9:
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:  .LBB8_10:
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    bne .LBB8_12
+; SOFT-NEXT:  @ %bb.11:
+; SOFT-NEXT:    ldr r7, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:  .LBB8_12:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    ldr r1, .LCPI8_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    mov r1, r6
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    ldr r3, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:    bne .LBB8_14
+; SOFT-NEXT:  @ %bb.13:
+; SOFT-NEXT:    mov r3, r0
+; SOFT-NEXT:  .LBB8_14:
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
+; SOFT-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    beq .LBB8_16
+; SOFT-NEXT:  @ %bb.15:
+; SOFT-NEXT:    movs r3, #15
+; SOFT-NEXT:  .LBB8_16:
+; SOFT-NEXT:    mov r2, r7
+; SOFT-NEXT:    add sp, #20
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.17:
+; SOFT-NEXT:  .LCPI8_0:
+; SOFT-NEXT:    .long 1904214015 @ 0x717fffff
+;
+; VFP-LABEL: test_signed_i100_f32:
+; VFP:       @ %bb.0:
+; VFP-NEXT:    .save {r7, lr}
+; VFP-NEXT:    push {r7, lr}
+; VFP-NEXT:    .vsave {d8}
+; VFP-NEXT:    vpush {d8}
+; VFP-NEXT:    vmov s16, r0
+; VFP-NEXT:    bl __fixunssfti
+; VFP-NEXT:    vldr s0, .LCPI8_0
+; VFP-NEXT:    vcmp.f32 s16, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it lt
+; VFP-NEXT:    movlt r0, #0
+; VFP-NEXT:    vcmp.f32 s16, s0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s16, #0
+; VFP-NEXT:    it gt
+; VFP-NEXT:    movgt.w r0, #-1
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s16, s0
+; VFP-NEXT:    it lt
+; VFP-NEXT:    movlt r1, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s16, #0
+; VFP-NEXT:    it gt
+; VFP-NEXT:    movgt.w r1, #-1
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s16, s0
+; VFP-NEXT:    it lt
+; VFP-NEXT:    movlt r2, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s16, #0
+; VFP-NEXT:    it gt
+; VFP-NEXT:    movgt.w r2, #-1
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it lt
+; VFP-NEXT:    movlt r3, #0
+; VFP-NEXT:    vcmp.f32 s16, s0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it gt
+; VFP-NEXT:    movgt r3, #15
+; VFP-NEXT:    vpop {d8}
+; VFP-NEXT:    pop {r7, pc}
+; VFP-NEXT:    .p2align 2
+; VFP-NEXT:  @ %bb.1:
+; VFP-NEXT:  .LCPI8_0:
+; VFP-NEXT:    .long 0x717fffff @ float 1.26765052E+30
+    %x = call i100 @llvm.fptoui.sat.i100.f32(float %f)
+    ret i100 %x
+}
+
+define i128 @test_signed_i128_f32(float %f) nounwind {
+; SOFT-LABEL: test_signed_i128_f32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #20
+; SOFT-NEXT:    sub sp, #20
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    ldr r1, .LCPI9_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    movs r7, #0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    bl __fixunssfti
+; SOFT-NEXT:    str r1, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    str r2, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    str r3, [sp, #16] @ 4-byte Spill
+; SOFT-NEXT:    cmp r6, #0
+; SOFT-NEXT:    bne .LBB9_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:  .LBB9_2:
+; SOFT-NEXT:    mvns r6, r7
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    mov r1, r6
+; SOFT-NEXT:    bne .LBB9_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mov r1, r0
+; SOFT-NEXT:  .LBB9_4:
+; SOFT-NEXT:    str r1, [sp, #12] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    ldr r1, .LCPI9_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB9_6
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:  .LBB9_6:
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bne .LBB9_8
+; SOFT-NEXT:  @ %bb.7:
+; SOFT-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:  .LBB9_8:
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    ldr r1, .LCPI9_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB9_10
+; SOFT-NEXT:  @ %bb.9:
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:  .LBB9_10:
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bne .LBB9_12
+; SOFT-NEXT:  @ %bb.11:
+; SOFT-NEXT:    ldr r0, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:  .LBB9_12:
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    ldr r1, .LCPI9_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB9_14
+; SOFT-NEXT:  @ %bb.13:
+; SOFT-NEXT:    str r0, [sp, #16] @ 4-byte Spill
+; SOFT-NEXT:  .LBB9_14:
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
+; SOFT-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    ldr r2, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:    bne .LBB9_16
+; SOFT-NEXT:  @ %bb.15:
+; SOFT-NEXT:    ldr r6, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:  .LBB9_16:
+; SOFT-NEXT:    mov r3, r6
+; SOFT-NEXT:    add sp, #20
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.17:
+; SOFT-NEXT:  .LCPI9_0:
+; SOFT-NEXT:    .long 2139095039 @ 0x7f7fffff
+;
+; VFP-LABEL: test_signed_i128_f32:
+; VFP:       @ %bb.0:
+; VFP-NEXT:    .save {r7, lr}
+; VFP-NEXT:    push {r7, lr}
+; VFP-NEXT:    .vsave {d8}
+; VFP-NEXT:    vpush {d8}
+; VFP-NEXT:    vmov s16, r0
+; VFP-NEXT:    bl __fixunssfti
+; VFP-NEXT:    vldr s0, .LCPI9_0
+; VFP-NEXT:    vcmp.f32 s16, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it lt
+; VFP-NEXT:    movlt r0, #0
+; VFP-NEXT:    vcmp.f32 s16, s0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s16, #0
+; VFP-NEXT:    it gt
+; VFP-NEXT:    movgt.w r0, #-1
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s16, s0
+; VFP-NEXT:    it lt
+; VFP-NEXT:    movlt r1, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s16, #0
+; VFP-NEXT:    it gt
+; VFP-NEXT:    movgt.w r1, #-1
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s16, s0
+; VFP-NEXT:    it lt
+; VFP-NEXT:    movlt r2, #0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    vcmp.f32 s16, #0
+; VFP-NEXT:    it gt
+; VFP-NEXT:    movgt.w r2, #-1
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it lt
+; VFP-NEXT:    movlt r3, #0
+; VFP-NEXT:    vcmp.f32 s16, s0
+; VFP-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP-NEXT:    it gt
+; VFP-NEXT:    movgt.w r3, #-1
+; VFP-NEXT:    vpop {d8}
+; VFP-NEXT:    pop {r7, pc}
+; VFP-NEXT:    .p2align 2
+; VFP-NEXT:  @ %bb.1:
+; VFP-NEXT:  .LCPI9_0:
+; VFP-NEXT:    .long 0x7f7fffff @ float 3.40282347E+38
+    %x = call i128 @llvm.fptoui.sat.i128.f32(float %f)
+    ret i128 %x
+}
+
+;
+; 64-bit float to unsigned integer
+;
+
+declare   i1 @llvm.fptoui.sat.i1.f64  (double)
+declare   i8 @llvm.fptoui.sat.i8.f64  (double)
+declare  i13 @llvm.fptoui.sat.i13.f64 (double)
+declare  i16 @llvm.fptoui.sat.i16.f64 (double)
+declare  i19 @llvm.fptoui.sat.i19.f64 (double)
+declare  i32 @llvm.fptoui.sat.i32.f64 (double)
+declare  i50 @llvm.fptoui.sat.i50.f64 (double)
+declare  i64 @llvm.fptoui.sat.i64.f64 (double)
+declare i100 @llvm.fptoui.sat.i100.f64(double)
+declare i128 @llvm.fptoui.sat.i128.f64(double)
+
+define i1 @test_signed_i1_f64(double %f) nounwind {
+; SOFT-LABEL: test_signed_i1_f64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #4
+; SOFT-NEXT:    sub sp, #4
+; SOFT-NEXT:    mov r4, r1
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    movs r7, #0
+; SOFT-NEXT:    ldr r3, .LCPI10_0
+; SOFT-NEXT:    mov r2, r7
+; SOFT-NEXT:    bl __aeabi_dcmpgt
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    mov r2, r7
+; SOFT-NEXT:    mov r3, r7
+; SOFT-NEXT:    bl __aeabi_dcmpge
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    bl __aeabi_d2uiz
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    beq .LBB10_3
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    bne .LBB10_4
+; SOFT-NEXT:  .LBB10_2:
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:  .LBB10_3:
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    beq .LBB10_2
+; SOFT-NEXT:  .LBB10_4:
+; SOFT-NEXT:    movs r0, #1
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:  .LCPI10_0:
+; SOFT-NEXT:    .long 1072693248 @ 0x3ff00000
+;
+; VFP2-LABEL: test_signed_i1_f64:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    vmov d16, r0, r1
+; VFP2-NEXT:    vcmp.f64 d16, #0
+; VFP2-NEXT:    vcvt.u32.f64 s0, d16
+; VFP2-NEXT:    vmov.f64 d17, #1.000000e+00
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vmov r0, s0
+; VFP2-NEXT:    vcmp.f64 d16, d17
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt r0, #1
+; VFP2-NEXT:    bx lr
+;
+; FP16-LABEL: test_signed_i1_f64:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vmov.f64 d0, #1.000000e+00
+; FP16-NEXT:    vldr d1, .LCPI10_0
+; FP16-NEXT:    vmov d2, r0, r1
+; FP16-NEXT:    vmaxnm.f64 d1, d2, d1
+; FP16-NEXT:    vminnm.f64 d0, d1, d0
+; FP16-NEXT:    vcvt.u32.f64 s0, d0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 3
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI10_0:
+; FP16-NEXT:    .long 0 @ double 0
+; FP16-NEXT:    .long 0
+    %x = call i1 @llvm.fptoui.sat.i1.f64(double %f)
+    ret i1 %x
+}
+
+define i8 @test_signed_i8_f64(double %f) nounwind {
+; SOFT-LABEL: test_signed_i8_f64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #4
+; SOFT-NEXT:    sub sp, #4
+; SOFT-NEXT:    mov r4, r1
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    movs r7, #0
+; SOFT-NEXT:    ldr r3, .LCPI11_0
+; SOFT-NEXT:    mov r2, r7
+; SOFT-NEXT:    bl __aeabi_dcmpgt
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    mov r2, r7
+; SOFT-NEXT:    mov r3, r7
+; SOFT-NEXT:    bl __aeabi_dcmpge
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    bl __aeabi_d2uiz
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    beq .LBB11_3
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    bne .LBB11_4
+; SOFT-NEXT:  .LBB11_2:
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:  .LBB11_3:
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    beq .LBB11_2
+; SOFT-NEXT:  .LBB11_4:
+; SOFT-NEXT:    movs r0, #255
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:  .LCPI11_0:
+; SOFT-NEXT:    .long 1081073664 @ 0x406fe000
+;
+; VFP2-LABEL: test_signed_i8_f64:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    vmov d16, r0, r1
+; VFP2-NEXT:    vldr d17, .LCPI11_0
+; VFP2-NEXT:    vcmp.f64 d16, #0
+; VFP2-NEXT:    vcvt.u32.f64 s0, d16
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vmov r0, s0
+; VFP2-NEXT:    vcmp.f64 d16, d17
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt r0, #255
+; VFP2-NEXT:    bx lr
+; VFP2-NEXT:    .p2align 3
+; VFP2-NEXT:  @ %bb.1:
+; VFP2-NEXT:  .LCPI11_0:
+; VFP2-NEXT:    .long 0 @ double 255
+; VFP2-NEXT:    .long 1081073664
+;
+; FP16-LABEL: test_signed_i8_f64:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vldr d0, .LCPI11_0
+; FP16-NEXT:    vmov d1, r0, r1
+; FP16-NEXT:    vldr d2, .LCPI11_1
+; FP16-NEXT:    vmaxnm.f64 d0, d1, d0
+; FP16-NEXT:    vminnm.f64 d0, d0, d2
+; FP16-NEXT:    vcvt.u32.f64 s0, d0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 3
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI11_0:
+; FP16-NEXT:    .long 0 @ double 0
+; FP16-NEXT:    .long 0
+; FP16-NEXT:  .LCPI11_1:
+; FP16-NEXT:    .long 0 @ double 255
+; FP16-NEXT:    .long 1081073664
+    %x = call i8 @llvm.fptoui.sat.i8.f64(double %f)
+    ret i8 %x
+}
+
+define i13 @test_signed_i13_f64(double %f) nounwind {
+; SOFT-LABEL: test_signed_i13_f64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #4
+; SOFT-NEXT:    sub sp, #4
+; SOFT-NEXT:    mov r4, r1
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    movs r7, #0
+; SOFT-NEXT:    ldr r3, .LCPI12_0
+; SOFT-NEXT:    mov r2, r7
+; SOFT-NEXT:    bl __aeabi_dcmpgt
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    mov r2, r7
+; SOFT-NEXT:    mov r3, r7
+; SOFT-NEXT:    bl __aeabi_dcmpge
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    bl __aeabi_d2uiz
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    beq .LBB12_3
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    bne .LBB12_4
+; SOFT-NEXT:  .LBB12_2:
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:  .LBB12_3:
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    beq .LBB12_2
+; SOFT-NEXT:  .LBB12_4:
+; SOFT-NEXT:    ldr r0, .LCPI12_1
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:  .LCPI12_0:
+; SOFT-NEXT:    .long 1086324480 @ 0x40bfff00
+; SOFT-NEXT:  .LCPI12_1:
+; SOFT-NEXT:    .long 8191 @ 0x1fff
+;
+; VFP2-LABEL: test_signed_i13_f64:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    vmov d16, r0, r1
+; VFP2-NEXT:    vldr d17, .LCPI12_0
+; VFP2-NEXT:    vcmp.f64 d16, #0
+; VFP2-NEXT:    vcvt.u32.f64 s0, d16
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vmov r0, s0
+; VFP2-NEXT:    vcmp.f64 d16, d17
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movwgt r0, #8191
+; VFP2-NEXT:    bx lr
+; VFP2-NEXT:    .p2align 3
+; VFP2-NEXT:  @ %bb.1:
+; VFP2-NEXT:  .LCPI12_0:
+; VFP2-NEXT:    .long 0 @ double 8191
+; VFP2-NEXT:    .long 1086324480
+;
+; FP16-LABEL: test_signed_i13_f64:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vldr d0, .LCPI12_0
+; FP16-NEXT:    vmov d1, r0, r1
+; FP16-NEXT:    vldr d2, .LCPI12_1
+; FP16-NEXT:    vmaxnm.f64 d0, d1, d0
+; FP16-NEXT:    vminnm.f64 d0, d0, d2
+; FP16-NEXT:    vcvt.u32.f64 s0, d0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 3
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI12_0:
+; FP16-NEXT:    .long 0 @ double 0
+; FP16-NEXT:    .long 0
+; FP16-NEXT:  .LCPI12_1:
+; FP16-NEXT:    .long 0 @ double 8191
+; FP16-NEXT:    .long 1086324480
+    %x = call i13 @llvm.fptoui.sat.i13.f64(double %f)
+    ret i13 %x
+}
+
+define i16 @test_signed_i16_f64(double %f) nounwind {
+; SOFT-LABEL: test_signed_i16_f64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #4
+; SOFT-NEXT:    sub sp, #4
+; SOFT-NEXT:    mov r4, r1
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    movs r7, #0
+; SOFT-NEXT:    ldr r3, .LCPI13_0
+; SOFT-NEXT:    mov r2, r7
+; SOFT-NEXT:    bl __aeabi_dcmpgt
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    mov r2, r7
+; SOFT-NEXT:    mov r3, r7
+; SOFT-NEXT:    bl __aeabi_dcmpge
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    bl __aeabi_d2uiz
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    beq .LBB13_3
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    bne .LBB13_4
+; SOFT-NEXT:  .LBB13_2:
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:  .LBB13_3:
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    beq .LBB13_2
+; SOFT-NEXT:  .LBB13_4:
+; SOFT-NEXT:    ldr r0, .LCPI13_1
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:  .LCPI13_0:
+; SOFT-NEXT:    .long 1089470432 @ 0x40efffe0
+; SOFT-NEXT:  .LCPI13_1:
+; SOFT-NEXT:    .long 65535 @ 0xffff
+;
+; VFP2-LABEL: test_signed_i16_f64:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    vmov d16, r0, r1
+; VFP2-NEXT:    vldr d17, .LCPI13_0
+; VFP2-NEXT:    vcmp.f64 d16, #0
+; VFP2-NEXT:    vcvt.u32.f64 s0, d16
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vmov r0, s0
+; VFP2-NEXT:    vcmp.f64 d16, d17
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movwgt r0, #65535
+; VFP2-NEXT:    bx lr
+; VFP2-NEXT:    .p2align 3
+; VFP2-NEXT:  @ %bb.1:
+; VFP2-NEXT:  .LCPI13_0:
+; VFP2-NEXT:    .long 0 @ double 65535
+; VFP2-NEXT:    .long 1089470432
+;
+; FP16-LABEL: test_signed_i16_f64:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vldr d0, .LCPI13_0
+; FP16-NEXT:    vmov d1, r0, r1
+; FP16-NEXT:    vldr d2, .LCPI13_1
+; FP16-NEXT:    vmaxnm.f64 d0, d1, d0
+; FP16-NEXT:    vminnm.f64 d0, d0, d2
+; FP16-NEXT:    vcvt.u32.f64 s0, d0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 3
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI13_0:
+; FP16-NEXT:    .long 0 @ double 0
+; FP16-NEXT:    .long 0
+; FP16-NEXT:  .LCPI13_1:
+; FP16-NEXT:    .long 0 @ double 65535
+; FP16-NEXT:    .long 1089470432
+    %x = call i16 @llvm.fptoui.sat.i16.f64(double %f)
+    ret i16 %x
+}
+
+define i19 @test_signed_i19_f64(double %f) nounwind {
+; SOFT-LABEL: test_signed_i19_f64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #4
+; SOFT-NEXT:    sub sp, #4
+; SOFT-NEXT:    mov r4, r1
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    movs r7, #0
+; SOFT-NEXT:    ldr r3, .LCPI14_0
+; SOFT-NEXT:    mov r2, r7
+; SOFT-NEXT:    bl __aeabi_dcmpgt
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    mov r2, r7
+; SOFT-NEXT:    mov r3, r7
+; SOFT-NEXT:    bl __aeabi_dcmpge
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    bl __aeabi_d2uiz
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    beq .LBB14_3
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    bne .LBB14_4
+; SOFT-NEXT:  .LBB14_2:
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:  .LBB14_3:
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    beq .LBB14_2
+; SOFT-NEXT:  .LBB14_4:
+; SOFT-NEXT:    ldr r0, .LCPI14_1
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:  .LCPI14_0:
+; SOFT-NEXT:    .long 1092616188 @ 0x411ffffc
+; SOFT-NEXT:  .LCPI14_1:
+; SOFT-NEXT:    .long 524287 @ 0x7ffff
+;
+; VFP2-LABEL: test_signed_i19_f64:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    vmov d16, r0, r1
+; VFP2-NEXT:    vldr d17, .LCPI14_0
+; VFP2-NEXT:    vcmp.f64 d16, #0
+; VFP2-NEXT:    vcvt.u32.f64 s0, d16
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vmov r0, s0
+; VFP2-NEXT:    vcmp.f64 d16, d17
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    itt gt
+; VFP2-NEXT:    movwgt r0, #65535
+; VFP2-NEXT:    movtgt r0, #7
+; VFP2-NEXT:    bx lr
+; VFP2-NEXT:    .p2align 3
+; VFP2-NEXT:  @ %bb.1:
+; VFP2-NEXT:  .LCPI14_0:
+; VFP2-NEXT:    .long 0 @ double 524287
+; VFP2-NEXT:    .long 1092616188
+;
+; FP16-LABEL: test_signed_i19_f64:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vldr d0, .LCPI14_0
+; FP16-NEXT:    vmov d1, r0, r1
+; FP16-NEXT:    vldr d2, .LCPI14_1
+; FP16-NEXT:    vmaxnm.f64 d0, d1, d0
+; FP16-NEXT:    vminnm.f64 d0, d0, d2
+; FP16-NEXT:    vcvt.u32.f64 s0, d0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 3
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI14_0:
+; FP16-NEXT:    .long 0 @ double 0
+; FP16-NEXT:    .long 0
+; FP16-NEXT:  .LCPI14_1:
+; FP16-NEXT:    .long 0 @ double 524287
+; FP16-NEXT:    .long 1092616188
+    %x = call i19 @llvm.fptoui.sat.i19.f64(double %f)
+    ret i19 %x
+}
+
+define i32 @test_signed_i32_f64(double %f) nounwind {
+; SOFT-LABEL: test_signed_i32_f64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #4
+; SOFT-NEXT:    sub sp, #4
+; SOFT-NEXT:    mov r5, r1
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    ldr r2, .LCPI15_0
+; SOFT-NEXT:    ldr r3, .LCPI15_1
+; SOFT-NEXT:    bl __aeabi_dcmpgt
+; SOFT-NEXT:    str r0, [sp] @ 4-byte Spill
+; SOFT-NEXT:    movs r4, #0
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r4
+; SOFT-NEXT:    mov r3, r4
+; SOFT-NEXT:    bl __aeabi_dcmpge
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    bl __aeabi_d2uiz
+; SOFT-NEXT:    cmp r6, #0
+; SOFT-NEXT:    bne .LBB15_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:  .LBB15_2:
+; SOFT-NEXT:    ldr r1, [sp] @ 4-byte Reload
+; SOFT-NEXT:    cmp r1, #0
+; SOFT-NEXT:    beq .LBB15_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mvns r0, r4
+; SOFT-NEXT:  .LBB15_4:
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:  .LCPI15_0:
+; SOFT-NEXT:    .long 4292870144 @ 0xffe00000
+; SOFT-NEXT:  .LCPI15_1:
+; SOFT-NEXT:    .long 1106247679 @ 0x41efffff
+;
+; VFP2-LABEL: test_signed_i32_f64:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    vmov d16, r0, r1
+; VFP2-NEXT:    vldr d17, .LCPI15_0
+; VFP2-NEXT:    vcmp.f64 d16, #0
+; VFP2-NEXT:    vcvt.u32.f64 s0, d16
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vmov r0, s0
+; VFP2-NEXT:    vcmp.f64 d16, d17
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r0, #-1
+; VFP2-NEXT:    bx lr
+; VFP2-NEXT:    .p2align 3
+; VFP2-NEXT:  @ %bb.1:
+; VFP2-NEXT:  .LCPI15_0:
+; VFP2-NEXT:    .long 4292870144 @ double 4294967295
+; VFP2-NEXT:    .long 1106247679
+;
+; FP16-LABEL: test_signed_i32_f64:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vldr d0, .LCPI15_0
+; FP16-NEXT:    vmov d1, r0, r1
+; FP16-NEXT:    vldr d2, .LCPI15_1
+; FP16-NEXT:    vmaxnm.f64 d0, d1, d0
+; FP16-NEXT:    vminnm.f64 d0, d0, d2
+; FP16-NEXT:    vcvt.u32.f64 s0, d0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 3
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI15_0:
+; FP16-NEXT:    .long 0 @ double 0
+; FP16-NEXT:    .long 0
+; FP16-NEXT:  .LCPI15_1:
+; FP16-NEXT:    .long 4292870144 @ double 4294967295
+; FP16-NEXT:    .long 1106247679
+    %x = call i32 @llvm.fptoui.sat.i32.f64(double %f)
+    ret i32 %x
+}
+
+define i50 @test_signed_i50_f64(double %f) nounwind {
+; SOFT-LABEL: test_signed_i50_f64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #12
+; SOFT-NEXT:    sub sp, #12
+; SOFT-NEXT:    mov r4, r1
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    movs r0, #7
+; SOFT-NEXT:    mvns r2, r0
+; SOFT-NEXT:    ldr r3, .LCPI16_0
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    str r2, [sp] @ 4-byte Spill
+; SOFT-NEXT:    bl __aeabi_dcmpgt
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    movs r7, #0
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    mov r2, r7
+; SOFT-NEXT:    mov r3, r7
+; SOFT-NEXT:    bl __aeabi_dcmpge
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    bl __aeabi_d2ulz
+; SOFT-NEXT:    cmp r6, #0
+; SOFT-NEXT:    bne .LBB16_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:  .LBB16_2:
+; SOFT-NEXT:    ldr r2, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:    cmp r2, #0
+; SOFT-NEXT:    str r1, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    beq .LBB16_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mvns r0, r7
+; SOFT-NEXT:  .LBB16_4:
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    ldr r2, [sp] @ 4-byte Reload
+; SOFT-NEXT:    ldr r3, .LCPI16_0
+; SOFT-NEXT:    bl __aeabi_dcmpgt
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    mov r2, r7
+; SOFT-NEXT:    mov r3, r7
+; SOFT-NEXT:    bl __aeabi_dcmpge
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    bne .LBB16_6
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:    mov r1, r0
+; SOFT-NEXT:  .LBB16_6:
+; SOFT-NEXT:    cmp r6, #0
+; SOFT-NEXT:    beq .LBB16_8
+; SOFT-NEXT:  @ %bb.7:
+; SOFT-NEXT:    ldr r1, .LCPI16_1
+; SOFT-NEXT:  .LBB16_8:
+; SOFT-NEXT:    ldr r0, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:    add sp, #12
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.9:
+; SOFT-NEXT:  .LCPI16_0:
+; SOFT-NEXT:    .long 1125122047 @ 0x430fffff
+; SOFT-NEXT:  .LCPI16_1:
+; SOFT-NEXT:    .long 262143 @ 0x3ffff
+;
+; VFP2-LABEL: test_signed_i50_f64:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    .save {r7, lr}
+; VFP2-NEXT:    push {r7, lr}
+; VFP2-NEXT:    .vsave {d8}
+; VFP2-NEXT:    vpush {d8}
+; VFP2-NEXT:    vmov d8, r0, r1
+; VFP2-NEXT:    bl __aeabi_d2ulz
+; VFP2-NEXT:    vcmp.f64 d8, #0
+; VFP2-NEXT:    vldr d16, .LCPI16_0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    itt lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    movlt r1, #0
+; VFP2-NEXT:    vcmp.f64 d8, d16
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    ittt gt
+; VFP2-NEXT:    movwgt r1, #65535
+; VFP2-NEXT:    movtgt r1, #3
+; VFP2-NEXT:    movgt.w r0, #-1
+; VFP2-NEXT:    vpop {d8}
+; VFP2-NEXT:    pop {r7, pc}
+; VFP2-NEXT:    .p2align 3
+; VFP2-NEXT:  @ %bb.1:
+; VFP2-NEXT:  .LCPI16_0:
+; VFP2-NEXT:    .long 4294967288 @ double 1125899906842623
+; VFP2-NEXT:    .long 1125122047
+;
+; FP16-LABEL: test_signed_i50_f64:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    .save {r7, lr}
+; FP16-NEXT:    push {r7, lr}
+; FP16-NEXT:    vldr d0, .LCPI16_0
+; FP16-NEXT:    vmov d1, r0, r1
+; FP16-NEXT:    vldr d2, .LCPI16_1
+; FP16-NEXT:    vmaxnm.f64 d0, d1, d0
+; FP16-NEXT:    vminnm.f64 d0, d0, d2
+; FP16-NEXT:    vmov r0, r1, d0
+; FP16-NEXT:    bl __aeabi_d2ulz
+; FP16-NEXT:    pop {r7, pc}
+; FP16-NEXT:    .p2align 3
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI16_0:
+; FP16-NEXT:    .long 0 @ double 0
+; FP16-NEXT:    .long 0
+; FP16-NEXT:  .LCPI16_1:
+; FP16-NEXT:    .long 4294967288 @ double 1125899906842623
+; FP16-NEXT:    .long 1125122047
+    %x = call i50 @llvm.fptoui.sat.i50.f64(double %f)
+    ret i50 %x
+}
+
+define i64 @test_signed_i64_f64(double %f) nounwind {
+; SOFT-LABEL: test_signed_i64_f64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #20
+; SOFT-NEXT:    sub sp, #20
+; SOFT-NEXT:    mov r5, r1
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    movs r6, #0
+; SOFT-NEXT:    mvns r2, r6
+; SOFT-NEXT:    ldr r3, .LCPI17_0
+; SOFT-NEXT:    str r2, [sp, #16] @ 4-byte Spill
+; SOFT-NEXT:    bl __aeabi_dcmpgt
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r6
+; SOFT-NEXT:    mov r3, r6
+; SOFT-NEXT:    bl __aeabi_dcmpge
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    bl __aeabi_d2ulz
+; SOFT-NEXT:    str r1, [sp, #12] @ 4-byte Spill
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    bne .LBB17_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:  .LBB17_2:
+; SOFT-NEXT:    ldr r1, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:    cmp r1, #0
+; SOFT-NEXT:    ldr r4, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    bne .LBB17_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mov r1, r0
+; SOFT-NEXT:  .LBB17_4:
+; SOFT-NEXT:    str r1, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r4
+; SOFT-NEXT:    ldr r3, .LCPI17_0
+; SOFT-NEXT:    bl __aeabi_dcmpgt
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r6
+; SOFT-NEXT:    mov r3, r6
+; SOFT-NEXT:    bl __aeabi_dcmpge
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB17_6
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:    str r0, [sp, #12] @ 4-byte Spill
+; SOFT-NEXT:  .LBB17_6:
+; SOFT-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    ldr r0, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:    bne .LBB17_8
+; SOFT-NEXT:  @ %bb.7:
+; SOFT-NEXT:    ldr r4, [sp, #12] @ 4-byte Reload
+; SOFT-NEXT:  .LBB17_8:
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    add sp, #20
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.9:
+; SOFT-NEXT:  .LCPI17_0:
+; SOFT-NEXT:    .long 1139802111 @ 0x43efffff
+;
+; VFP2-LABEL: test_signed_i64_f64:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    .save {r7, lr}
+; VFP2-NEXT:    push {r7, lr}
+; VFP2-NEXT:    .vsave {d8}
+; VFP2-NEXT:    vpush {d8}
+; VFP2-NEXT:    vmov d8, r0, r1
+; VFP2-NEXT:    bl __aeabi_d2ulz
+; VFP2-NEXT:    vcmp.f64 d8, #0
+; VFP2-NEXT:    vldr d16, .LCPI17_0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vcmp.f64 d8, d16
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r0, #-1
+; VFP2-NEXT:    vcmp.f64 d8, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r1, #0
+; VFP2-NEXT:    vcmp.f64 d8, d16
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r1, #-1
+; VFP2-NEXT:    vpop {d8}
+; VFP2-NEXT:    pop {r7, pc}
+; VFP2-NEXT:    .p2align 3
+; VFP2-NEXT:  @ %bb.1:
+; VFP2-NEXT:  .LCPI17_0:
+; VFP2-NEXT:    .long 4294967295 @ double 1.844674407370955E+19
+; VFP2-NEXT:    .long 1139802111
+;
+; FP16-LABEL: test_signed_i64_f64:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    .save {r7, lr}
+; FP16-NEXT:    push {r7, lr}
+; FP16-NEXT:    .vsave {d8}
+; FP16-NEXT:    vpush {d8}
+; FP16-NEXT:    vmov d8, r0, r1
+; FP16-NEXT:    bl __aeabi_d2ulz
+; FP16-NEXT:    vcmp.f64 d8, #0
+; FP16-NEXT:    vldr d0, .LCPI17_0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r0, #0
+; FP16-NEXT:    vcmp.f64 d8, d0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r0, #-1
+; FP16-NEXT:    vcmp.f64 d8, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r1, #0
+; FP16-NEXT:    vcmp.f64 d8, d0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r1, #-1
+; FP16-NEXT:    vpop {d8}
+; FP16-NEXT:    pop {r7, pc}
+; FP16-NEXT:    .p2align 3
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI17_0:
+; FP16-NEXT:    .long 4294967295 @ double 1.844674407370955E+19
+; FP16-NEXT:    .long 1139802111
+    %x = call i64 @llvm.fptoui.sat.i64.f64(double %f)
+    ret i64 %x
+}
+
+define i100 @test_signed_i100_f64(double %f) nounwind {
+; SOFT-LABEL: test_signed_i100_f64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #28
+; SOFT-NEXT:    sub sp, #28
+; SOFT-NEXT:    mov r4, r1
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    movs r5, #0
+; SOFT-NEXT:    mvns r2, r5
+; SOFT-NEXT:    ldr r3, .LCPI18_0
+; SOFT-NEXT:    str r2, [sp, #20] @ 4-byte Spill
+; SOFT-NEXT:    bl __aeabi_dcmpgt
+; SOFT-NEXT:    str r0, [sp, #16] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    mov r2, r5
+; SOFT-NEXT:    mov r3, r5
+; SOFT-NEXT:    bl __aeabi_dcmpge
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    bl __fixunsdfti
+; SOFT-NEXT:    str r1, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    str r2, [sp, #12] @ 4-byte Spill
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    bne .LBB18_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:  .LBB18_2:
+; SOFT-NEXT:    str r3, [sp, #24] @ 4-byte Spill
+; SOFT-NEXT:    ldr r1, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:    cmp r1, #0
+; SOFT-NEXT:    ldr r7, [sp, #20] @ 4-byte Reload
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    bne .LBB18_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mov r1, r0
+; SOFT-NEXT:  .LBB18_4:
+; SOFT-NEXT:    str r1, [sp, #16] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    mov r2, r7
+; SOFT-NEXT:    ldr r3, .LCPI18_0
+; SOFT-NEXT:    bl __aeabi_dcmpgt
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    mov r2, r5
+; SOFT-NEXT:    mov r3, r5
+; SOFT-NEXT:    bl __aeabi_dcmpge
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB18_6
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:  .LBB18_6:
+; SOFT-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    bne .LBB18_8
+; SOFT-NEXT:  @ %bb.7:
+; SOFT-NEXT:    ldr r0, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:  .LBB18_8:
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    mov r2, r7
+; SOFT-NEXT:    ldr r3, .LCPI18_0
+; SOFT-NEXT:    bl __aeabi_dcmpgt
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    mov r2, r5
+; SOFT-NEXT:    mov r3, r5
+; SOFT-NEXT:    bl __aeabi_dcmpge
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB18_10
+; SOFT-NEXT:  @ %bb.9:
+; SOFT-NEXT:    str r0, [sp, #12] @ 4-byte Spill
+; SOFT-NEXT:  .LBB18_10:
+; SOFT-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    bne .LBB18_12
+; SOFT-NEXT:  @ %bb.11:
+; SOFT-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
+; SOFT-NEXT:  .LBB18_12:
+; SOFT-NEXT:    str r0, [sp, #12] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    mov r2, r7
+; SOFT-NEXT:    ldr r3, .LCPI18_0
+; SOFT-NEXT:    bl __aeabi_dcmpgt
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    mov r2, r5
+; SOFT-NEXT:    mov r3, r5
+; SOFT-NEXT:    bl __aeabi_dcmpge
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    ldr r3, [sp, #24] @ 4-byte Reload
+; SOFT-NEXT:    bne .LBB18_14
+; SOFT-NEXT:  @ %bb.13:
+; SOFT-NEXT:    mov r3, r0
+; SOFT-NEXT:  .LBB18_14:
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    ldr r0, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:    ldr r1, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:    beq .LBB18_16
+; SOFT-NEXT:  @ %bb.15:
+; SOFT-NEXT:    movs r3, #15
+; SOFT-NEXT:  .LBB18_16:
+; SOFT-NEXT:    ldr r2, [sp, #12] @ 4-byte Reload
+; SOFT-NEXT:    add sp, #28
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.17:
+; SOFT-NEXT:  .LCPI18_0:
+; SOFT-NEXT:    .long 1177550847 @ 0x462fffff
+;
+; VFP2-LABEL: test_signed_i100_f64:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    .save {r7, lr}
+; VFP2-NEXT:    push {r7, lr}
+; VFP2-NEXT:    .vsave {d8}
+; VFP2-NEXT:    vpush {d8}
+; VFP2-NEXT:    vmov d8, r0, r1
+; VFP2-NEXT:    bl __fixunsdfti
+; VFP2-NEXT:    vcmp.f64 d8, #0
+; VFP2-NEXT:    vldr d16, .LCPI18_0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vcmp.f64 d8, d16
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r0, #-1
+; VFP2-NEXT:    vcmp.f64 d8, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r1, #0
+; VFP2-NEXT:    vcmp.f64 d8, d16
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r1, #-1
+; VFP2-NEXT:    vcmp.f64 d8, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r2, #0
+; VFP2-NEXT:    vcmp.f64 d8, d16
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r2, #-1
+; VFP2-NEXT:    vcmp.f64 d8, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r3, #0
+; VFP2-NEXT:    vcmp.f64 d8, d16
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt r3, #15
+; VFP2-NEXT:    vpop {d8}
+; VFP2-NEXT:    pop {r7, pc}
+; VFP2-NEXT:    .p2align 3
+; VFP2-NEXT:  @ %bb.1:
+; VFP2-NEXT:  .LCPI18_0:
+; VFP2-NEXT:    .long 4294967295 @ double 1.2676506002282293E+30
+; VFP2-NEXT:    .long 1177550847
+;
+; FP16-LABEL: test_signed_i100_f64:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    .save {r7, lr}
+; FP16-NEXT:    push {r7, lr}
+; FP16-NEXT:    .vsave {d8}
+; FP16-NEXT:    vpush {d8}
+; FP16-NEXT:    vmov d8, r0, r1
+; FP16-NEXT:    bl __fixunsdfti
+; FP16-NEXT:    vcmp.f64 d8, #0
+; FP16-NEXT:    vldr d0, .LCPI18_0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r0, #0
+; FP16-NEXT:    vcmp.f64 d8, d0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r0, #-1
+; FP16-NEXT:    vcmp.f64 d8, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r1, #0
+; FP16-NEXT:    vcmp.f64 d8, d0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r1, #-1
+; FP16-NEXT:    vcmp.f64 d8, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r2, #0
+; FP16-NEXT:    vcmp.f64 d8, d0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r2, #-1
+; FP16-NEXT:    vcmp.f64 d8, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r3, #0
+; FP16-NEXT:    vcmp.f64 d8, d0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt r3, #15
+; FP16-NEXT:    vpop {d8}
+; FP16-NEXT:    pop {r7, pc}
+; FP16-NEXT:    .p2align 3
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI18_0:
+; FP16-NEXT:    .long 4294967295 @ double 1.2676506002282293E+30
+; FP16-NEXT:    .long 1177550847
+    %x = call i100 @llvm.fptoui.sat.i100.f64(double %f)
+    ret i100 %x
+}
+
+define i128 @test_signed_i128_f64(double %f) nounwind {
+; SOFT-LABEL: test_signed_i128_f64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #28
+; SOFT-NEXT:    sub sp, #28
+; SOFT-NEXT:    mov r5, r1
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    movs r6, #0
+; SOFT-NEXT:    mvns r2, r6
+; SOFT-NEXT:    ldr r3, .LCPI19_0
+; SOFT-NEXT:    str r2, [sp, #24] @ 4-byte Spill
+; SOFT-NEXT:    bl __aeabi_dcmpgt
+; SOFT-NEXT:    str r0, [sp, #16] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r6
+; SOFT-NEXT:    mov r3, r6
+; SOFT-NEXT:    bl __aeabi_dcmpge
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    bl __fixunsdfti
+; SOFT-NEXT:    str r1, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    str r2, [sp, #12] @ 4-byte Spill
+; SOFT-NEXT:    str r3, [sp, #20] @ 4-byte Spill
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    bne .LBB19_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:  .LBB19_2:
+; SOFT-NEXT:    ldr r1, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:    cmp r1, #0
+; SOFT-NEXT:    ldr r4, [sp, #24] @ 4-byte Reload
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    bne .LBB19_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mov r1, r0
+; SOFT-NEXT:  .LBB19_4:
+; SOFT-NEXT:    str r1, [sp, #16] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r4
+; SOFT-NEXT:    ldr r3, .LCPI19_0
+; SOFT-NEXT:    bl __aeabi_dcmpgt
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r6
+; SOFT-NEXT:    mov r3, r6
+; SOFT-NEXT:    bl __aeabi_dcmpge
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB19_6
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:  .LBB19_6:
+; SOFT-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    bne .LBB19_8
+; SOFT-NEXT:  @ %bb.7:
+; SOFT-NEXT:    ldr r0, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:  .LBB19_8:
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r4
+; SOFT-NEXT:    ldr r3, .LCPI19_0
+; SOFT-NEXT:    bl __aeabi_dcmpgt
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r6
+; SOFT-NEXT:    mov r3, r6
+; SOFT-NEXT:    bl __aeabi_dcmpge
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB19_10
+; SOFT-NEXT:  @ %bb.9:
+; SOFT-NEXT:    str r0, [sp, #12] @ 4-byte Spill
+; SOFT-NEXT:  .LBB19_10:
+; SOFT-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    bne .LBB19_12
+; SOFT-NEXT:  @ %bb.11:
+; SOFT-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
+; SOFT-NEXT:  .LBB19_12:
+; SOFT-NEXT:    str r0, [sp, #12] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r4
+; SOFT-NEXT:    ldr r3, .LCPI19_0
+; SOFT-NEXT:    bl __aeabi_dcmpgt
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    mov r2, r6
+; SOFT-NEXT:    mov r3, r6
+; SOFT-NEXT:    bl __aeabi_dcmpge
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB19_14
+; SOFT-NEXT:  @ %bb.13:
+; SOFT-NEXT:    str r0, [sp, #20] @ 4-byte Spill
+; SOFT-NEXT:  .LBB19_14:
+; SOFT-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    ldr r0, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:    ldr r1, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:    ldr r2, [sp, #12] @ 4-byte Reload
+; SOFT-NEXT:    bne .LBB19_16
+; SOFT-NEXT:  @ %bb.15:
+; SOFT-NEXT:    ldr r4, [sp, #20] @ 4-byte Reload
+; SOFT-NEXT:  .LBB19_16:
+; SOFT-NEXT:    mov r3, r4
+; SOFT-NEXT:    add sp, #28
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.17:
+; SOFT-NEXT:  .LCPI19_0:
+; SOFT-NEXT:    .long 1206910975 @ 0x47efffff
+;
+; VFP2-LABEL: test_signed_i128_f64:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    .save {r7, lr}
+; VFP2-NEXT:    push {r7, lr}
+; VFP2-NEXT:    .vsave {d8}
+; VFP2-NEXT:    vpush {d8}
+; VFP2-NEXT:    vmov d8, r0, r1
+; VFP2-NEXT:    bl __fixunsdfti
+; VFP2-NEXT:    vcmp.f64 d8, #0
+; VFP2-NEXT:    vldr d16, .LCPI19_0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vcmp.f64 d8, d16
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r0, #-1
+; VFP2-NEXT:    vcmp.f64 d8, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r1, #0
+; VFP2-NEXT:    vcmp.f64 d8, d16
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r1, #-1
+; VFP2-NEXT:    vcmp.f64 d8, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r2, #0
+; VFP2-NEXT:    vcmp.f64 d8, d16
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r2, #-1
+; VFP2-NEXT:    vcmp.f64 d8, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r3, #0
+; VFP2-NEXT:    vcmp.f64 d8, d16
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r3, #-1
+; VFP2-NEXT:    vpop {d8}
+; VFP2-NEXT:    pop {r7, pc}
+; VFP2-NEXT:    .p2align 3
+; VFP2-NEXT:  @ %bb.1:
+; VFP2-NEXT:  .LCPI19_0:
+; VFP2-NEXT:    .long 4294967295 @ double 3.4028236692093843E+38
+; VFP2-NEXT:    .long 1206910975
+;
+; FP16-LABEL: test_signed_i128_f64:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    .save {r7, lr}
+; FP16-NEXT:    push {r7, lr}
+; FP16-NEXT:    .vsave {d8}
+; FP16-NEXT:    vpush {d8}
+; FP16-NEXT:    vmov d8, r0, r1
+; FP16-NEXT:    bl __fixunsdfti
+; FP16-NEXT:    vcmp.f64 d8, #0
+; FP16-NEXT:    vldr d0, .LCPI19_0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r0, #0
+; FP16-NEXT:    vcmp.f64 d8, d0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r0, #-1
+; FP16-NEXT:    vcmp.f64 d8, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r1, #0
+; FP16-NEXT:    vcmp.f64 d8, d0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r1, #-1
+; FP16-NEXT:    vcmp.f64 d8, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r2, #0
+; FP16-NEXT:    vcmp.f64 d8, d0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r2, #-1
+; FP16-NEXT:    vcmp.f64 d8, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r3, #0
+; FP16-NEXT:    vcmp.f64 d8, d0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r3, #-1
+; FP16-NEXT:    vpop {d8}
+; FP16-NEXT:    pop {r7, pc}
+; FP16-NEXT:    .p2align 3
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI19_0:
+; FP16-NEXT:    .long 4294967295 @ double 3.4028236692093843E+38
+; FP16-NEXT:    .long 1206910975
+    %x = call i128 @llvm.fptoui.sat.i128.f64(double %f)
+    ret i128 %x
+}
+
+;
+; 16-bit float to unsigned integer
+;
+
+declare   i1 @llvm.fptoui.sat.i1.f16  (half)
+declare   i8 @llvm.fptoui.sat.i8.f16  (half)
+declare  i13 @llvm.fptoui.sat.i13.f16 (half)
+declare  i16 @llvm.fptoui.sat.i16.f16 (half)
+declare  i19 @llvm.fptoui.sat.i19.f16 (half)
+declare  i32 @llvm.fptoui.sat.i32.f16 (half)
+declare  i50 @llvm.fptoui.sat.i50.f16 (half)
+declare  i64 @llvm.fptoui.sat.i64.f16 (half)
+declare i100 @llvm.fptoui.sat.i100.f16(half)
+declare i128 @llvm.fptoui.sat.i128.f16(half)
+
+define i1 @test_signed_i1_f16(half %f) nounwind {
+; SOFT-LABEL: test_signed_i1_f16:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, lr}
+; SOFT-NEXT:    push {r4, r5, r6, lr}
+; SOFT-NEXT:    uxth r0, r0
+; SOFT-NEXT:    bl __aeabi_h2f
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    movs r0, #127
+; SOFT-NEXT:    lsls r1, r0, #23
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    movs r1, #0
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    bl __aeabi_f2uiz
+; SOFT-NEXT:    cmp r6, #0
+; SOFT-NEXT:    beq .LBB20_3
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    bne .LBB20_4
+; SOFT-NEXT:  .LBB20_2:
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
+; SOFT-NEXT:  .LBB20_3:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    beq .LBB20_2
+; SOFT-NEXT:  .LBB20_4:
+; SOFT-NEXT:    movs r0, #1
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
+;
+; VFP2-LABEL: test_signed_i1_f16:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    .save {r7, lr}
+; VFP2-NEXT:    push {r7, lr}
+; VFP2-NEXT:    bl __aeabi_h2f
+; VFP2-NEXT:    vmov s2, r0
+; VFP2-NEXT:    vmov.f32 s0, #1.000000e+00
+; VFP2-NEXT:    vcvt.u32.f32 s4, s2
+; VFP2-NEXT:    vcmp.f32 s2, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vcmp.f32 s2, s0
+; VFP2-NEXT:    vmov r0, s4
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt r0, #1
+; VFP2-NEXT:    pop {r7, pc}
+;
+; FP16-LABEL: test_signed_i1_f16:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vmov.f16 s0, r0
+; FP16-NEXT:    vldr s4, .LCPI20_0
+; FP16-NEXT:    vcvtb.f32.f16 s0, s0
+; FP16-NEXT:    vmov.f32 s2, #1.000000e+00
+; FP16-NEXT:    vmaxnm.f32 s0, s0, s4
+; FP16-NEXT:    vminnm.f32 s0, s0, s2
+; FP16-NEXT:    vcvt.u32.f32 s0, s0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI20_0:
+; FP16-NEXT:    .long 0x00000000 @ float 0
+    %x = call i1 @llvm.fptoui.sat.i1.f16(half %f)
+    ret i1 %x
+}
+
+define i8 @test_signed_i8_f16(half %f) nounwind {
+; SOFT-LABEL: test_signed_i8_f16:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, lr}
+; SOFT-NEXT:    push {r4, r5, r6, lr}
+; SOFT-NEXT:    uxth r0, r0
+; SOFT-NEXT:    bl __aeabi_h2f
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    ldr r1, .LCPI21_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    movs r1, #0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bl __aeabi_f2uiz
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    beq .LBB21_3
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    bne .LBB21_4
+; SOFT-NEXT:  .LBB21_2:
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
+; SOFT-NEXT:  .LBB21_3:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    beq .LBB21_2
+; SOFT-NEXT:  .LBB21_4:
+; SOFT-NEXT:    movs r0, #255
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:  .LCPI21_0:
+; SOFT-NEXT:    .long 1132396544 @ 0x437f0000
+;
+; VFP2-LABEL: test_signed_i8_f16:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    .save {r7, lr}
+; VFP2-NEXT:    push {r7, lr}
+; VFP2-NEXT:    bl __aeabi_h2f
+; VFP2-NEXT:    vmov s0, r0
+; VFP2-NEXT:    vldr s4, .LCPI21_0
+; VFP2-NEXT:    vcvt.u32.f32 s2, s0
+; VFP2-NEXT:    vcmp.f32 s0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vcmp.f32 s0, s4
+; VFP2-NEXT:    vmov r0, s2
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt r0, #255
+; VFP2-NEXT:    pop {r7, pc}
+; VFP2-NEXT:    .p2align 2
+; VFP2-NEXT:  @ %bb.1:
+; VFP2-NEXT:  .LCPI21_0:
+; VFP2-NEXT:    .long 0x437f0000 @ float 255
+;
+; FP16-LABEL: test_signed_i8_f16:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vmov.f16 s0, r0
+; FP16-NEXT:    vldr s4, .LCPI21_1
+; FP16-NEXT:    vcvtb.f32.f16 s0, s0
+; FP16-NEXT:    vldr s2, .LCPI21_0
+; FP16-NEXT:    vmaxnm.f32 s0, s0, s4
+; FP16-NEXT:    vminnm.f32 s0, s0, s2
+; FP16-NEXT:    vcvt.u32.f32 s0, s0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI21_0:
+; FP16-NEXT:    .long 0x437f0000 @ float 255
+; FP16-NEXT:  .LCPI21_1:
+; FP16-NEXT:    .long 0x00000000 @ float 0
+    %x = call i8 @llvm.fptoui.sat.i8.f16(half %f)
+    ret i8 %x
+}
+
+define i13 @test_signed_i13_f16(half %f) nounwind {
+; SOFT-LABEL: test_signed_i13_f16:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, lr}
+; SOFT-NEXT:    push {r4, r5, r6, lr}
+; SOFT-NEXT:    uxth r0, r0
+; SOFT-NEXT:    bl __aeabi_h2f
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    ldr r1, .LCPI22_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    movs r1, #0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bl __aeabi_f2uiz
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    beq .LBB22_3
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    bne .LBB22_4
+; SOFT-NEXT:  .LBB22_2:
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
+; SOFT-NEXT:  .LBB22_3:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    beq .LBB22_2
+; SOFT-NEXT:  .LBB22_4:
+; SOFT-NEXT:    ldr r0, .LCPI22_1
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:  .LCPI22_0:
+; SOFT-NEXT:    .long 1174403072 @ 0x45fff800
+; SOFT-NEXT:  .LCPI22_1:
+; SOFT-NEXT:    .long 8191 @ 0x1fff
+;
+; VFP2-LABEL: test_signed_i13_f16:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    .save {r7, lr}
+; VFP2-NEXT:    push {r7, lr}
+; VFP2-NEXT:    bl __aeabi_h2f
+; VFP2-NEXT:    vmov s0, r0
+; VFP2-NEXT:    vldr s4, .LCPI22_0
+; VFP2-NEXT:    vcvt.u32.f32 s2, s0
+; VFP2-NEXT:    vcmp.f32 s0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vcmp.f32 s0, s4
+; VFP2-NEXT:    vmov r0, s2
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movwgt r0, #8191
+; VFP2-NEXT:    pop {r7, pc}
+; VFP2-NEXT:    .p2align 2
+; VFP2-NEXT:  @ %bb.1:
+; VFP2-NEXT:  .LCPI22_0:
+; VFP2-NEXT:    .long 0x45fff800 @ float 8191
+;
+; FP16-LABEL: test_signed_i13_f16:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vmov.f16 s0, r0
+; FP16-NEXT:    vldr s4, .LCPI22_1
+; FP16-NEXT:    vcvtb.f32.f16 s0, s0
+; FP16-NEXT:    vldr s2, .LCPI22_0
+; FP16-NEXT:    vmaxnm.f32 s0, s0, s4
+; FP16-NEXT:    vminnm.f32 s0, s0, s2
+; FP16-NEXT:    vcvt.u32.f32 s0, s0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI22_0:
+; FP16-NEXT:    .long 0x45fff800 @ float 8191
+; FP16-NEXT:  .LCPI22_1:
+; FP16-NEXT:    .long 0x00000000 @ float 0
+    %x = call i13 @llvm.fptoui.sat.i13.f16(half %f)
+    ret i13 %x
+}
+
+define i16 @test_signed_i16_f16(half %f) nounwind {
+; SOFT-LABEL: test_signed_i16_f16:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, lr}
+; SOFT-NEXT:    push {r4, r5, r6, lr}
+; SOFT-NEXT:    uxth r0, r0
+; SOFT-NEXT:    bl __aeabi_h2f
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    ldr r1, .LCPI23_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    movs r1, #0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bl __aeabi_f2uiz
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    beq .LBB23_3
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    bne .LBB23_4
+; SOFT-NEXT:  .LBB23_2:
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
+; SOFT-NEXT:  .LBB23_3:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    beq .LBB23_2
+; SOFT-NEXT:  .LBB23_4:
+; SOFT-NEXT:    ldr r0, .LCPI23_1
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:  .LCPI23_0:
+; SOFT-NEXT:    .long 1199570688 @ 0x477fff00
+; SOFT-NEXT:  .LCPI23_1:
+; SOFT-NEXT:    .long 65535 @ 0xffff
+;
+; VFP2-LABEL: test_signed_i16_f16:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    .save {r7, lr}
+; VFP2-NEXT:    push {r7, lr}
+; VFP2-NEXT:    bl __aeabi_h2f
+; VFP2-NEXT:    vmov s0, r0
+; VFP2-NEXT:    vldr s4, .LCPI23_0
+; VFP2-NEXT:    vcvt.u32.f32 s2, s0
+; VFP2-NEXT:    vcmp.f32 s0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vcmp.f32 s0, s4
+; VFP2-NEXT:    vmov r0, s2
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movwgt r0, #65535
+; VFP2-NEXT:    pop {r7, pc}
+; VFP2-NEXT:    .p2align 2
+; VFP2-NEXT:  @ %bb.1:
+; VFP2-NEXT:  .LCPI23_0:
+; VFP2-NEXT:    .long 0x477fff00 @ float 65535
+;
+; FP16-LABEL: test_signed_i16_f16:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vmov.f16 s0, r0
+; FP16-NEXT:    vldr s4, .LCPI23_1
+; FP16-NEXT:    vcvtb.f32.f16 s0, s0
+; FP16-NEXT:    vldr s2, .LCPI23_0
+; FP16-NEXT:    vmaxnm.f32 s0, s0, s4
+; FP16-NEXT:    vminnm.f32 s0, s0, s2
+; FP16-NEXT:    vcvt.u32.f32 s0, s0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI23_0:
+; FP16-NEXT:    .long 0x477fff00 @ float 65535
+; FP16-NEXT:  .LCPI23_1:
+; FP16-NEXT:    .long 0x00000000 @ float 0
+    %x = call i16 @llvm.fptoui.sat.i16.f16(half %f)
+    ret i16 %x
+}
+
+define i19 @test_signed_i19_f16(half %f) nounwind {
+; SOFT-LABEL: test_signed_i19_f16:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, lr}
+; SOFT-NEXT:    push {r4, r5, r6, lr}
+; SOFT-NEXT:    uxth r0, r0
+; SOFT-NEXT:    bl __aeabi_h2f
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    ldr r1, .LCPI24_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    movs r1, #0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bl __aeabi_f2uiz
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    beq .LBB24_3
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    bne .LBB24_4
+; SOFT-NEXT:  .LBB24_2:
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
+; SOFT-NEXT:  .LBB24_3:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    beq .LBB24_2
+; SOFT-NEXT:  .LBB24_4:
+; SOFT-NEXT:    ldr r0, .LCPI24_1
+; SOFT-NEXT:    pop {r4, r5, r6, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:  .LCPI24_0:
+; SOFT-NEXT:    .long 1224736736 @ 0x48ffffe0
+; SOFT-NEXT:  .LCPI24_1:
+; SOFT-NEXT:    .long 524287 @ 0x7ffff
+;
+; VFP2-LABEL: test_signed_i19_f16:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    .save {r7, lr}
+; VFP2-NEXT:    push {r7, lr}
+; VFP2-NEXT:    bl __aeabi_h2f
+; VFP2-NEXT:    vmov s0, r0
+; VFP2-NEXT:    vldr s4, .LCPI24_0
+; VFP2-NEXT:    vcvt.u32.f32 s2, s0
+; VFP2-NEXT:    vcmp.f32 s0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vcmp.f32 s0, s4
+; VFP2-NEXT:    vmov r0, s2
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    itt gt
+; VFP2-NEXT:    movwgt r0, #65535
+; VFP2-NEXT:    movtgt r0, #7
+; VFP2-NEXT:    pop {r7, pc}
+; VFP2-NEXT:    .p2align 2
+; VFP2-NEXT:  @ %bb.1:
+; VFP2-NEXT:  .LCPI24_0:
+; VFP2-NEXT:    .long 0x48ffffe0 @ float 524287
+;
+; FP16-LABEL: test_signed_i19_f16:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vmov.f16 s0, r0
+; FP16-NEXT:    vldr s4, .LCPI24_1
+; FP16-NEXT:    vcvtb.f32.f16 s0, s0
+; FP16-NEXT:    vldr s2, .LCPI24_0
+; FP16-NEXT:    vmaxnm.f32 s0, s0, s4
+; FP16-NEXT:    vminnm.f32 s0, s0, s2
+; FP16-NEXT:    vcvt.u32.f32 s0, s0
+; FP16-NEXT:    vmov r0, s0
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI24_0:
+; FP16-NEXT:    .long 0x48ffffe0 @ float 524287
+; FP16-NEXT:  .LCPI24_1:
+; FP16-NEXT:    .long 0x00000000 @ float 0
+    %x = call i19 @llvm.fptoui.sat.i19.f16(half %f)
+    ret i19 %x
+}
+
+define i32 @test_signed_i32_f16(half %f) nounwind {
+; SOFT-LABEL: test_signed_i32_f16:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #4
+; SOFT-NEXT:    sub sp, #4
+; SOFT-NEXT:    uxth r0, r0
+; SOFT-NEXT:    bl __aeabi_h2f
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    ldr r1, .LCPI25_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    movs r4, #0
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    mov r1, r4
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    bl __aeabi_f2uiz
+; SOFT-NEXT:    cmp r6, #0
+; SOFT-NEXT:    beq .LBB25_3
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    bne .LBB25_4
+; SOFT-NEXT:  .LBB25_2:
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:  .LBB25_3:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    beq .LBB25_2
+; SOFT-NEXT:  .LBB25_4:
+; SOFT-NEXT:    mvns r0, r4
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:  .LCPI25_0:
+; SOFT-NEXT:    .long 1333788671 @ 0x4f7fffff
+;
+; VFP2-LABEL: test_signed_i32_f16:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    .save {r7, lr}
+; VFP2-NEXT:    push {r7, lr}
+; VFP2-NEXT:    bl __aeabi_h2f
+; VFP2-NEXT:    vmov s0, r0
+; VFP2-NEXT:    vldr s4, .LCPI25_0
+; VFP2-NEXT:    vcvt.u32.f32 s2, s0
+; VFP2-NEXT:    vcmp.f32 s0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vcmp.f32 s0, s4
+; VFP2-NEXT:    vmov r0, s2
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r0, #-1
+; VFP2-NEXT:    pop {r7, pc}
+; VFP2-NEXT:    .p2align 2
+; VFP2-NEXT:  @ %bb.1:
+; VFP2-NEXT:  .LCPI25_0:
+; VFP2-NEXT:    .long 0x4f7fffff @ float 4.29496704E+9
+;
+; FP16-LABEL: test_signed_i32_f16:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    vmov.f16 s0, r0
+; FP16-NEXT:    vldr s4, .LCPI25_0
+; FP16-NEXT:    vcvtb.f32.f16 s0, s0
+; FP16-NEXT:    vcvt.u32.f32 s2, s0
+; FP16-NEXT:    vcmp.f32 s0, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s0, s4
+; FP16-NEXT:    vmov r0, s2
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r0, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r0, #-1
+; FP16-NEXT:    bx lr
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI25_0:
+; FP16-NEXT:    .long 0x4f7fffff @ float 4.29496704E+9
+    %x = call i32 @llvm.fptoui.sat.i32.f16(half %f)
+    ret i32 %x
+}
+
+define i50 @test_signed_i50_f16(half %f) nounwind {
+; SOFT-LABEL: test_signed_i50_f16:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #4
+; SOFT-NEXT:    sub sp, #4
+; SOFT-NEXT:    uxth r0, r0
+; SOFT-NEXT:    bl __aeabi_h2f
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    ldr r1, .LCPI26_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    movs r7, #0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bl __aeabi_f2ulz
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    bne .LBB26_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:  .LBB26_2:
+; SOFT-NEXT:    mov r5, r1
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    beq .LBB26_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mvns r0, r7
+; SOFT-NEXT:  .LBB26_4:
+; SOFT-NEXT:    str r0, [sp] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    ldr r1, .LCPI26_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB26_6
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:  .LBB26_6:
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    beq .LBB26_8
+; SOFT-NEXT:  @ %bb.7:
+; SOFT-NEXT:    ldr r5, .LCPI26_1
+; SOFT-NEXT:  .LBB26_8:
+; SOFT-NEXT:    ldr r0, [sp] @ 4-byte Reload
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    add sp, #4
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.9:
+; SOFT-NEXT:  .LCPI26_0:
+; SOFT-NEXT:    .long 1484783615 @ 0x587fffff
+; SOFT-NEXT:  .LCPI26_1:
+; SOFT-NEXT:    .long 262143 @ 0x3ffff
+;
+; VFP2-LABEL: test_signed_i50_f16:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    .save {r7, lr}
+; VFP2-NEXT:    push {r7, lr}
+; VFP2-NEXT:    .vsave {d8}
+; VFP2-NEXT:    vpush {d8}
+; VFP2-NEXT:    bl __aeabi_h2f
+; VFP2-NEXT:    vmov s16, r0
+; VFP2-NEXT:    bl __aeabi_f2ulz
+; VFP2-NEXT:    vldr s0, .LCPI26_0
+; VFP2-NEXT:    vcmp.f32 s16, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    itt lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    movlt r1, #0
+; VFP2-NEXT:    vcmp.f32 s16, s0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    ittt gt
+; VFP2-NEXT:    movwgt r1, #65535
+; VFP2-NEXT:    movtgt r1, #3
+; VFP2-NEXT:    movgt.w r0, #-1
+; VFP2-NEXT:    vpop {d8}
+; VFP2-NEXT:    pop {r7, pc}
+; VFP2-NEXT:    .p2align 2
+; VFP2-NEXT:  @ %bb.1:
+; VFP2-NEXT:  .LCPI26_0:
+; VFP2-NEXT:    .long 0x587fffff @ float 1.12589984E+15
+;
+; FP16-LABEL: test_signed_i50_f16:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    .save {r7, lr}
+; FP16-NEXT:    push {r7, lr}
+; FP16-NEXT:    .vsave {d8}
+; FP16-NEXT:    vpush {d8}
+; FP16-NEXT:    vmov.f16 s0, r0
+; FP16-NEXT:    vcvtb.f32.f16 s16, s0
+; FP16-NEXT:    vmov r0, s16
+; FP16-NEXT:    bl __aeabi_f2ulz
+; FP16-NEXT:    vldr s0, .LCPI26_0
+; FP16-NEXT:    vcmp.f32 s16, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    itt lt
+; FP16-NEXT:    movlt r0, #0
+; FP16-NEXT:    movlt r1, #0
+; FP16-NEXT:    vcmp.f32 s16, s0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    ittt gt
+; FP16-NEXT:    movwgt r1, #65535
+; FP16-NEXT:    movtgt r1, #3
+; FP16-NEXT:    movgt.w r0, #-1
+; FP16-NEXT:    vpop {d8}
+; FP16-NEXT:    pop {r7, pc}
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI26_0:
+; FP16-NEXT:    .long 0x587fffff @ float 1.12589984E+15
+    %x = call i50 @llvm.fptoui.sat.i50.f16(half %f)
+    ret i50 %x
+}
+
+define i64 @test_signed_i64_f16(half %f) nounwind {
+; SOFT-LABEL: test_signed_i64_f16:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #12
+; SOFT-NEXT:    sub sp, #12
+; SOFT-NEXT:    uxth r0, r0
+; SOFT-NEXT:    bl __aeabi_h2f
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    ldr r1, .LCPI27_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    movs r7, #0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    bl __aeabi_f2ulz
+; SOFT-NEXT:    str r1, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    bne .LBB27_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:  .LBB27_2:
+; SOFT-NEXT:    mvns r5, r7
+; SOFT-NEXT:    cmp r6, #0
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    bne .LBB27_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mov r1, r0
+; SOFT-NEXT:  .LBB27_4:
+; SOFT-NEXT:    str r1, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    ldr r1, .LCPI27_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB27_6
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:  .LBB27_6:
+; SOFT-NEXT:    cmp r6, #0
+; SOFT-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    bne .LBB27_8
+; SOFT-NEXT:  @ %bb.7:
+; SOFT-NEXT:    ldr r5, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:  .LBB27_8:
+; SOFT-NEXT:    mov r1, r5
+; SOFT-NEXT:    add sp, #12
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.9:
+; SOFT-NEXT:  .LCPI27_0:
+; SOFT-NEXT:    .long 1602224127 @ 0x5f7fffff
+;
+; VFP2-LABEL: test_signed_i64_f16:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    .save {r7, lr}
+; VFP2-NEXT:    push {r7, lr}
+; VFP2-NEXT:    .vsave {d8}
+; VFP2-NEXT:    vpush {d8}
+; VFP2-NEXT:    bl __aeabi_h2f
+; VFP2-NEXT:    vmov s16, r0
+; VFP2-NEXT:    bl __aeabi_f2ulz
+; VFP2-NEXT:    vldr s0, .LCPI27_0
+; VFP2-NEXT:    vcmp.f32 s16, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vcmp.f32 s16, s0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vcmp.f32 s16, #0
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r0, #-1
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r1, #0
+; VFP2-NEXT:    vcmp.f32 s16, s0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r1, #-1
+; VFP2-NEXT:    vpop {d8}
+; VFP2-NEXT:    pop {r7, pc}
+; VFP2-NEXT:    .p2align 2
+; VFP2-NEXT:  @ %bb.1:
+; VFP2-NEXT:  .LCPI27_0:
+; VFP2-NEXT:    .long 0x5f7fffff @ float 1.8446743E+19
+;
+; FP16-LABEL: test_signed_i64_f16:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    .save {r7, lr}
+; FP16-NEXT:    push {r7, lr}
+; FP16-NEXT:    .vsave {d8}
+; FP16-NEXT:    vpush {d8}
+; FP16-NEXT:    vmov.f16 s0, r0
+; FP16-NEXT:    vcvtb.f32.f16 s16, s0
+; FP16-NEXT:    vmov r0, s16
+; FP16-NEXT:    bl __aeabi_f2ulz
+; FP16-NEXT:    vldr s0, .LCPI27_0
+; FP16-NEXT:    vcmp.f32 s16, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r0, #0
+; FP16-NEXT:    vcmp.f32 s16, s0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, #0
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r0, #-1
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r1, #0
+; FP16-NEXT:    vcmp.f32 s16, s0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r1, #-1
+; FP16-NEXT:    vpop {d8}
+; FP16-NEXT:    pop {r7, pc}
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI27_0:
+; FP16-NEXT:    .long 0x5f7fffff @ float 1.8446743E+19
+    %x = call i64 @llvm.fptoui.sat.i64.f16(half %f)
+    ret i64 %x
+}
+
+define i100 @test_signed_i100_f16(half %f) nounwind {
+; SOFT-LABEL: test_signed_i100_f16:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #20
+; SOFT-NEXT:    sub sp, #20
+; SOFT-NEXT:    uxth r0, r0
+; SOFT-NEXT:    bl __aeabi_h2f
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    ldr r1, .LCPI28_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    movs r6, #0
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    mov r1, r6
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    mov r7, r0
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    bl __fixunssfti
+; SOFT-NEXT:    str r1, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    str r2, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    cmp r7, #0
+; SOFT-NEXT:    bne .LBB28_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:  .LBB28_2:
+; SOFT-NEXT:    str r3, [sp, #16] @ 4-byte Spill
+; SOFT-NEXT:    mvns r7, r6
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    bne .LBB28_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mov r1, r0
+; SOFT-NEXT:  .LBB28_4:
+; SOFT-NEXT:    str r1, [sp, #12] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    ldr r1, .LCPI28_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    mov r1, r6
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB28_6
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:  .LBB28_6:
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    mov r0, r7
+; SOFT-NEXT:    bne .LBB28_8
+; SOFT-NEXT:  @ %bb.7:
+; SOFT-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:  .LBB28_8:
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    ldr r1, .LCPI28_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    mov r1, r6
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB28_10
+; SOFT-NEXT:  @ %bb.9:
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:  .LBB28_10:
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    bne .LBB28_12
+; SOFT-NEXT:  @ %bb.11:
+; SOFT-NEXT:    ldr r7, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:  .LBB28_12:
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    ldr r1, .LCPI28_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    mov r0, r5
+; SOFT-NEXT:    mov r1, r6
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    ldr r3, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:    bne .LBB28_14
+; SOFT-NEXT:  @ %bb.13:
+; SOFT-NEXT:    mov r3, r0
+; SOFT-NEXT:  .LBB28_14:
+; SOFT-NEXT:    cmp r4, #0
+; SOFT-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
+; SOFT-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    beq .LBB28_16
+; SOFT-NEXT:  @ %bb.15:
+; SOFT-NEXT:    movs r3, #15
+; SOFT-NEXT:  .LBB28_16:
+; SOFT-NEXT:    mov r2, r7
+; SOFT-NEXT:    add sp, #20
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.17:
+; SOFT-NEXT:  .LCPI28_0:
+; SOFT-NEXT:    .long 1904214015 @ 0x717fffff
+;
+; VFP2-LABEL: test_signed_i100_f16:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    .save {r7, lr}
+; VFP2-NEXT:    push {r7, lr}
+; VFP2-NEXT:    .vsave {d8}
+; VFP2-NEXT:    vpush {d8}
+; VFP2-NEXT:    bl __aeabi_h2f
+; VFP2-NEXT:    vmov s16, r0
+; VFP2-NEXT:    bl __fixunssfti
+; VFP2-NEXT:    vldr s0, .LCPI28_0
+; VFP2-NEXT:    vcmp.f32 s16, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vcmp.f32 s16, s0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vcmp.f32 s16, #0
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r0, #-1
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vcmp.f32 s16, s0
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r1, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vcmp.f32 s16, #0
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r1, #-1
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vcmp.f32 s16, s0
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r2, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vcmp.f32 s16, #0
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r2, #-1
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r3, #0
+; VFP2-NEXT:    vcmp.f32 s16, s0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt r3, #15
+; VFP2-NEXT:    vpop {d8}
+; VFP2-NEXT:    pop {r7, pc}
+; VFP2-NEXT:    .p2align 2
+; VFP2-NEXT:  @ %bb.1:
+; VFP2-NEXT:  .LCPI28_0:
+; VFP2-NEXT:    .long 0x717fffff @ float 1.26765052E+30
+;
+; FP16-LABEL: test_signed_i100_f16:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    .save {r7, lr}
+; FP16-NEXT:    push {r7, lr}
+; FP16-NEXT:    .vsave {d8}
+; FP16-NEXT:    vpush {d8}
+; FP16-NEXT:    vmov.f16 s0, r0
+; FP16-NEXT:    vcvtb.f32.f16 s16, s0
+; FP16-NEXT:    vmov r0, s16
+; FP16-NEXT:    bl __fixunssfti
+; FP16-NEXT:    vldr s0, .LCPI28_0
+; FP16-NEXT:    vcmp.f32 s16, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r0, #0
+; FP16-NEXT:    vcmp.f32 s16, s0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, #0
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r0, #-1
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s0
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r1, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, #0
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r1, #-1
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s0
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r2, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, #0
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r2, #-1
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r3, #0
+; FP16-NEXT:    vcmp.f32 s16, s0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt r3, #15
+; FP16-NEXT:    vpop {d8}
+; FP16-NEXT:    pop {r7, pc}
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI28_0:
+; FP16-NEXT:    .long 0x717fffff @ float 1.26765052E+30
+    %x = call i100 @llvm.fptoui.sat.i100.f16(half %f)
+    ret i100 %x
+}
+
+define i128 @test_signed_i128_f16(half %f) nounwind {
+; SOFT-LABEL: test_signed_i128_f16:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    push {r4, r5, r6, r7, lr}
+; SOFT-NEXT:    .pad #20
+; SOFT-NEXT:    sub sp, #20
+; SOFT-NEXT:    uxth r0, r0
+; SOFT-NEXT:    bl __aeabi_h2f
+; SOFT-NEXT:    mov r4, r0
+; SOFT-NEXT:    ldr r1, .LCPI29_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    movs r7, #0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    mov r6, r0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    bl __fixunssfti
+; SOFT-NEXT:    str r1, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    str r2, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    str r3, [sp, #16] @ 4-byte Spill
+; SOFT-NEXT:    cmp r6, #0
+; SOFT-NEXT:    bne .LBB29_2
+; SOFT-NEXT:  @ %bb.1:
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:  .LBB29_2:
+; SOFT-NEXT:    mvns r6, r7
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    mov r1, r6
+; SOFT-NEXT:    bne .LBB29_4
+; SOFT-NEXT:  @ %bb.3:
+; SOFT-NEXT:    mov r1, r0
+; SOFT-NEXT:  .LBB29_4:
+; SOFT-NEXT:    str r1, [sp, #12] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    ldr r1, .LCPI29_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB29_6
+; SOFT-NEXT:  @ %bb.5:
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:  .LBB29_6:
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bne .LBB29_8
+; SOFT-NEXT:  @ %bb.7:
+; SOFT-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:  .LBB29_8:
+; SOFT-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    ldr r1, .LCPI29_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB29_10
+; SOFT-NEXT:  @ %bb.9:
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:  .LBB29_10:
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    mov r0, r6
+; SOFT-NEXT:    bne .LBB29_12
+; SOFT-NEXT:  @ %bb.11:
+; SOFT-NEXT:    ldr r0, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:  .LBB29_12:
+; SOFT-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    ldr r1, .LCPI29_0
+; SOFT-NEXT:    bl __aeabi_fcmpgt
+; SOFT-NEXT:    mov r5, r0
+; SOFT-NEXT:    mov r0, r4
+; SOFT-NEXT:    mov r1, r7
+; SOFT-NEXT:    bl __aeabi_fcmpge
+; SOFT-NEXT:    cmp r0, #0
+; SOFT-NEXT:    bne .LBB29_14
+; SOFT-NEXT:  @ %bb.13:
+; SOFT-NEXT:    str r0, [sp, #16] @ 4-byte Spill
+; SOFT-NEXT:  .LBB29_14:
+; SOFT-NEXT:    cmp r5, #0
+; SOFT-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
+; SOFT-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
+; SOFT-NEXT:    ldr r2, [sp, #8] @ 4-byte Reload
+; SOFT-NEXT:    bne .LBB29_16
+; SOFT-NEXT:  @ %bb.15:
+; SOFT-NEXT:    ldr r6, [sp, #16] @ 4-byte Reload
+; SOFT-NEXT:  .LBB29_16:
+; SOFT-NEXT:    mov r3, r6
+; SOFT-NEXT:    add sp, #20
+; SOFT-NEXT:    pop {r4, r5, r6, r7, pc}
+; SOFT-NEXT:    .p2align 2
+; SOFT-NEXT:  @ %bb.17:
+; SOFT-NEXT:  .LCPI29_0:
+; SOFT-NEXT:    .long 2139095039 @ 0x7f7fffff
+;
+; VFP2-LABEL: test_signed_i128_f16:
+; VFP2:       @ %bb.0:
+; VFP2-NEXT:    .save {r7, lr}
+; VFP2-NEXT:    push {r7, lr}
+; VFP2-NEXT:    .vsave {d8}
+; VFP2-NEXT:    vpush {d8}
+; VFP2-NEXT:    bl __aeabi_h2f
+; VFP2-NEXT:    vmov s16, r0
+; VFP2-NEXT:    bl __fixunssfti
+; VFP2-NEXT:    vldr s0, .LCPI29_0
+; VFP2-NEXT:    vcmp.f32 s16, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r0, #0
+; VFP2-NEXT:    vcmp.f32 s16, s0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vcmp.f32 s16, #0
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r0, #-1
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vcmp.f32 s16, s0
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r1, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vcmp.f32 s16, #0
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r1, #-1
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vcmp.f32 s16, s0
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r2, #0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    vcmp.f32 s16, #0
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r2, #-1
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it lt
+; VFP2-NEXT:    movlt r3, #0
+; VFP2-NEXT:    vcmp.f32 s16, s0
+; VFP2-NEXT:    vmrs APSR_nzcv, fpscr
+; VFP2-NEXT:    it gt
+; VFP2-NEXT:    movgt.w r3, #-1
+; VFP2-NEXT:    vpop {d8}
+; VFP2-NEXT:    pop {r7, pc}
+; VFP2-NEXT:    .p2align 2
+; VFP2-NEXT:  @ %bb.1:
+; VFP2-NEXT:  .LCPI29_0:
+; VFP2-NEXT:    .long 0x7f7fffff @ float 3.40282347E+38
+;
+; FP16-LABEL: test_signed_i128_f16:
+; FP16:       @ %bb.0:
+; FP16-NEXT:    .save {r7, lr}
+; FP16-NEXT:    push {r7, lr}
+; FP16-NEXT:    .vsave {d8}
+; FP16-NEXT:    vpush {d8}
+; FP16-NEXT:    vmov.f16 s0, r0
+; FP16-NEXT:    vcvtb.f32.f16 s16, s0
+; FP16-NEXT:    vmov r0, s16
+; FP16-NEXT:    bl __fixunssfti
+; FP16-NEXT:    vldr s0, .LCPI29_0
+; FP16-NEXT:    vcmp.f32 s16, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r0, #0
+; FP16-NEXT:    vcmp.f32 s16, s0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, #0
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r0, #-1
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s0
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r1, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, #0
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r1, #-1
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, s0
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r2, #0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    vcmp.f32 s16, #0
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r2, #-1
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it lt
+; FP16-NEXT:    movlt r3, #0
+; FP16-NEXT:    vcmp.f32 s16, s0
+; FP16-NEXT:    vmrs APSR_nzcv, fpscr
+; FP16-NEXT:    it gt
+; FP16-NEXT:    movgt.w r3, #-1
+; FP16-NEXT:    vpop {d8}
+; FP16-NEXT:    pop {r7, pc}
+; FP16-NEXT:    .p2align 2
+; FP16-NEXT:  @ %bb.1:
+; FP16-NEXT:  .LCPI29_0:
+; FP16-NEXT:    .long 0x7f7fffff @ float 3.40282347E+38
+    %x = call i128 @llvm.fptoui.sat.i128.f16(half %f)
+    ret i128 %x
+}

diff  --git a/llvm/test/CodeGen/Thumb2/mve-fptosi-sat-vector.ll b/llvm/test/CodeGen/Thumb2/mve-fptosi-sat-vector.ll
new file mode 100644
index 0000000000000..24e4f289c8386
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-fptosi-sat-vector.ll
@@ -0,0 +1,7690 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK
+
+;
+; Float to signed 32-bit -- Vector size variation
+;
+
+declare <1 x i32> @llvm.fptosi.sat.v1i32.v1f32 (<1 x float>)
+declare <2 x i32> @llvm.fptosi.sat.v2i32.v2f32 (<2 x float>)
+declare <3 x i32> @llvm.fptosi.sat.v3i32.v3f32 (<3 x float>)
+declare <4 x i32> @llvm.fptosi.sat.v4i32.v4f32 (<4 x float>)
+declare <5 x i32> @llvm.fptosi.sat.v5i32.v5f32 (<5 x float>)
+declare <6 x i32> @llvm.fptosi.sat.v6i32.v6f32 (<6 x float>)
+declare <7 x i32> @llvm.fptosi.sat.v7i32.v7f32 (<7 x float>)
+declare <8 x i32> @llvm.fptosi.sat.v8i32.v8f32 (<8 x float>)
+
+define arm_aapcs_vfpcc <1 x i32> @test_signed_v1f32_v1i32(<1 x float> %f) {
+; CHECK-LABEL: test_signed_v1f32_v1i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 s4, s0
+; CHECK-NEXT:    vldr s2, .LCPI0_0
+; CHECK-NEXT:    vldr s6, .LCPI0_1
+; CHECK-NEXT:    vcmp.f32 s0, s2
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s6
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r0, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r0, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI0_0:
+; CHECK-NEXT:    .long 0xcf000000 @ float -2.14748365E+9
+; CHECK-NEXT:  .LCPI0_1:
+; CHECK-NEXT:    .long 0x4effffff @ float 2.14748352E+9
+    %x = call <1 x i32> @llvm.fptosi.sat.v1i32.v1f32(<1 x float> %f)
+    ret <1 x i32> %x
+}
+
+define arm_aapcs_vfpcc <2 x i32> @test_signed_v2f32_v2i32(<2 x float> %f) {
+; CHECK-LABEL: test_signed_v2f32_v2i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10}
+; CHECK-NEXT:    vpush {d8, d9, d10}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vmov r0, s17
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vldr s18, .LCPI1_0
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    vldr s20, .LCPI1_1
+; CHECK-NEXT:    vcmp.f32 s17, s18
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r5, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r5, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s17, s17
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r5, #0
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    vcmp.f32 s16, s18
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r0, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r0, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s18
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r4, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s17
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r4, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s18
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r4, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-1
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    vmov q0[2], q0[0], r0, r5
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r1, #0
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r4
+; CHECK-NEXT:    vpop {d8, d9, d10}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI1_0:
+; CHECK-NEXT:    .long 0xcf000000 @ float -2.14748365E+9
+; CHECK-NEXT:  .LCPI1_1:
+; CHECK-NEXT:    .long 0x4effffff @ float 2.14748352E+9
+    %x = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f32(<2 x float> %f)
+    ret <2 x i32> %x
+}
+
+define arm_aapcs_vfpcc <3 x i32> @test_signed_v3f32_v3i32(<3 x float> %f) {
+; CHECK-LABEL: test_signed_v3f32_v3i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 s12, s2
+; CHECK-NEXT:    vldr s6, .LCPI2_0
+; CHECK-NEXT:    vcvt.s32.f32 s14, s0
+; CHECK-NEXT:    vldr s10, .LCPI2_1
+; CHECK-NEXT:    vcvt.s32.f32 s8, s3
+; CHECK-NEXT:    vcmp.f32 s2, s6
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s10
+; CHECK-NEXT:    vcvt.s32.f32 s4, s1
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r0, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s2
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r0, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s6
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s14
+; CHECK-NEXT:    vcmp.f32 s0, s10
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, s6
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    vcmp.f32 s3, s10
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, s3
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vcmp.f32 s1, s6
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r3, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s1, s10
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, s1
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r3, #-2147483648
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmov q0[3], q0[1], r3, r2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI2_0:
+; CHECK-NEXT:    .long 0xcf000000 @ float -2.14748365E+9
+; CHECK-NEXT:  .LCPI2_1:
+; CHECK-NEXT:    .long 0x4effffff @ float 2.14748352E+9
+    %x = call <3 x i32> @llvm.fptosi.sat.v3i32.v3f32(<3 x float> %f)
+    ret <3 x i32> %x
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_signed_v4f32_v4i32(<4 x float> %f) {
+; CHECK-LABEL: test_signed_v4f32_v4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 s12, s2
+; CHECK-NEXT:    vldr s6, .LCPI3_0
+; CHECK-NEXT:    vcvt.s32.f32 s14, s0
+; CHECK-NEXT:    vldr s10, .LCPI3_1
+; CHECK-NEXT:    vcvt.s32.f32 s8, s3
+; CHECK-NEXT:    vcmp.f32 s2, s6
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s10
+; CHECK-NEXT:    vcvt.s32.f32 s4, s1
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r0, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s2
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r0, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s6
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s14
+; CHECK-NEXT:    vcmp.f32 s0, s10
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, s6
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    vcmp.f32 s3, s10
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, s3
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vcmp.f32 s1, s6
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r3, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s1, s10
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, s1
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r3, #-2147483648
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmov q0[3], q0[1], r3, r2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI3_0:
+; CHECK-NEXT:    .long 0xcf000000 @ float -2.14748365E+9
+; CHECK-NEXT:  .LCPI3_1:
+; CHECK-NEXT:    .long 0x4effffff @ float 2.14748352E+9
+    %x = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f32(<4 x float> %f)
+    ret <4 x i32> %x
+}
+
+define arm_aapcs_vfpcc <5 x i32> @test_signed_v5f32_v5i32(<5 x float> %f) {
+; CHECK-LABEL: test_signed_v5f32_v5i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 s5, s4
+; CHECK-NEXT:    vldr s10, .LCPI4_0
+; CHECK-NEXT:    vcvt.s32.f32 s7, s3
+; CHECK-NEXT:    vldr s14, .LCPI4_1
+; CHECK-NEXT:    vcvt.s32.f32 s12, s1
+; CHECK-NEXT:    vcmp.f32 s4, s10
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s4, s14
+; CHECK-NEXT:    vcvt.s32.f32 s8, s2
+; CHECK-NEXT:    vcvt.s32.f32 s6, s0
+; CHECK-NEXT:    vmov r1, s5
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s4, s4
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, s10
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r12, s7
+; CHECK-NEXT:    str r1, [r0, #16]
+; CHECK-NEXT:    vcmp.f32 s3, s14
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r12, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, s3
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r12, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, s10
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w r12, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcmp.f32 s1, s14
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, s1
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s10
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s8
+; CHECK-NEXT:    vcmp.f32 s2, s14
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s2
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s6
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vcmp.f32 s0, s10
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s0, s14
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r3
+; CHECK-NEXT:    vmov q0[3], q0[1], r2, r12
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI4_0:
+; CHECK-NEXT:    .long 0xcf000000 @ float -2.14748365E+9
+; CHECK-NEXT:  .LCPI4_1:
+; CHECK-NEXT:    .long 0x4effffff @ float 2.14748352E+9
+    %x = call <5 x i32> @llvm.fptosi.sat.v5i32.v5f32(<5 x float> %f)
+    ret <5 x i32> %x
+}
+
+define arm_aapcs_vfpcc <6 x i32> @test_signed_v6f32_v6i32(<6 x float> %f) {
+; CHECK-LABEL: test_signed_v6f32_v6i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 s9, s5
+; CHECK-NEXT:    vldr s10, .LCPI5_0
+; CHECK-NEXT:    vcvt.s32.f32 s11, s4
+; CHECK-NEXT:    vldr s6, .LCPI5_1
+; CHECK-NEXT:    vcvt.s32.f32 s7, s3
+; CHECK-NEXT:    vcmp.f32 s5, s10
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s5, s6
+; CHECK-NEXT:    vcvt.s32.f32 s14, s1
+; CHECK-NEXT:    vcvt.s32.f32 s12, s2
+; CHECK-NEXT:    vmov r1, s9
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s5, s5
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vcmp.f32 s4, s10
+; CHECK-NEXT:    str r1, [r0, #20]
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s11
+; CHECK-NEXT:    vcmp.f32 s4, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s4, s4
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, s10
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r12, s7
+; CHECK-NEXT:    vcmp.f32 s3, s6
+; CHECK-NEXT:    str r1, [r0, #16]
+; CHECK-NEXT:    vcvt.s32.f32 s8, s0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r12, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, s3
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r12, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, s10
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w r12, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s14
+; CHECK-NEXT:    vcmp.f32 s1, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, s1
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s10
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vcmp.f32 s2, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s2
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vcmp.f32 s0, s10
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s0, s6
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r3
+; CHECK-NEXT:    vmov q0[3], q0[1], r2, r12
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI5_0:
+; CHECK-NEXT:    .long 0xcf000000 @ float -2.14748365E+9
+; CHECK-NEXT:  .LCPI5_1:
+; CHECK-NEXT:    .long 0x4effffff @ float 2.14748352E+9
+    %x = call <6 x i32> @llvm.fptosi.sat.v6i32.v6f32(<6 x float> %f)
+    ret <6 x i32> %x
+}
+
+define arm_aapcs_vfpcc <7 x i32> @test_signed_v7f32_v7i32(<7 x float> %f) {
+; CHECK-LABEL: test_signed_v7f32_v7i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 s13, s5
+; CHECK-NEXT:    vldr s12, .LCPI6_0
+; CHECK-NEXT:    vcvt.s32.f32 s15, s4
+; CHECK-NEXT:    vldr s8, .LCPI6_1
+; CHECK-NEXT:    vcmp.f32 s5, s12
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcvt.s32.f32 s11, s6
+; CHECK-NEXT:    vcmp.f32 s5, s8
+; CHECK-NEXT:    vcvt.s32.f32 s9, s3
+; CHECK-NEXT:    vcvt.s32.f32 s7, s1
+; CHECK-NEXT:    vmov r1, s13
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s5, s5
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    str r1, [r0, #20]
+; CHECK-NEXT:    vcmp.f32 s4, s12
+; CHECK-NEXT:    vmov r1, s15
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s4, s8
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s4, s4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vcmp.f32 s6, s12
+; CHECK-NEXT:    str r1, [r0, #16]
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s11
+; CHECK-NEXT:    vcmp.f32 s6, s8
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s6, s6
+; CHECK-NEXT:    vcvt.s32.f32 s14, s2
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, s12
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r12, s9
+; CHECK-NEXT:    str r1, [r0, #24]
+; CHECK-NEXT:    vcmp.f32 s3, s8
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r12, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcvt.s32.f32 s10, s0
+; CHECK-NEXT:    vcmp.f32 s3, s3
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r12, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, s12
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w r12, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s7
+; CHECK-NEXT:    vcmp.f32 s1, s8
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, s1
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s12
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s14
+; CHECK-NEXT:    vcmp.f32 s2, s8
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s2
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s10
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vcmp.f32 s0, s12
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s0, s8
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r3
+; CHECK-NEXT:    vmov q0[3], q0[1], r2, r12
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI6_0:
+; CHECK-NEXT:    .long 0xcf000000 @ float -2.14748365E+9
+; CHECK-NEXT:  .LCPI6_1:
+; CHECK-NEXT:    .long 0x4effffff @ float 2.14748352E+9
+    %x = call <7 x i32> @llvm.fptosi.sat.v7i32.v7f32(<7 x float> %f)
+    ret <7 x i32> %x
+}
+
+define arm_aapcs_vfpcc <8 x i32> @test_signed_v8f32_v8i32(<8 x float> %f) {
+; CHECK-LABEL: test_signed_v8f32_v8i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vcvt.s32.f32 s16, s6
+; CHECK-NEXT:    vldr s12, .LCPI7_0
+; CHECK-NEXT:    vcvt.s32.f32 s18, s4
+; CHECK-NEXT:    vldr s10, .LCPI7_1
+; CHECK-NEXT:    vcvt.s32.f32 s15, s7
+; CHECK-NEXT:    vcmp.f32 s6, s12
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s6, s10
+; CHECK-NEXT:    vcvt.s32.f32 s13, s5
+; CHECK-NEXT:    vcvt.s32.f32 s11, s2
+; CHECK-NEXT:    vmov r12, s16
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r12, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s6, s6
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r12, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s4, s12
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w r12, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov lr, s18
+; CHECK-NEXT:    vcmp.f32 s4, s10
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w lr, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s4, s4
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt lr, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s7, s12
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w lr, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s15
+; CHECK-NEXT:    vcmp.f32 s7, s10
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s7, s7
+; CHECK-NEXT:    vcvt.s32.f32 s9, s0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s5, s12
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s13
+; CHECK-NEXT:    vcmp.f32 s5, s10
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s5, s5
+; CHECK-NEXT:    vcvt.s32.f32 s14, s3
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s12
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r0, s11
+; CHECK-NEXT:    vmov q1[2], q1[0], lr, r12
+; CHECK-NEXT:    vcmp.f32 s2, s10
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r0, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcvt.s32.f32 s8, s1
+; CHECK-NEXT:    vcmp.f32 s2, s2
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r0, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s12
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s9
+; CHECK-NEXT:    vmov q1[3], q1[1], r3, r2
+; CHECK-NEXT:    vcmp.f32 s0, s10
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, s12
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r4, s14
+; CHECK-NEXT:    vcmp.f32 s3, s10
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r4, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, s3
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r4, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r5, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r4, #0
+; CHECK-NEXT:    vcmp.f32 s1, s12
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r5, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s1, s10
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, s1
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r5, #-2147483648
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r5, #0
+; CHECK-NEXT:    vmov q0[3], q0[1], r5, r4
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI7_0:
+; CHECK-NEXT:    .long 0xcf000000 @ float -2.14748365E+9
+; CHECK-NEXT:  .LCPI7_1:
+; CHECK-NEXT:    .long 0x4effffff @ float 2.14748352E+9
+    %x = call <8 x i32> @llvm.fptosi.sat.v8i32.v8f32(<8 x float> %f)
+    ret <8 x i32> %x
+}
+
+;
+; Double to signed 32-bit -- Vector size variation
+;
+
+declare <1 x i32> @llvm.fptosi.sat.v1i32.v1f64 (<1 x double>)
+declare <2 x i32> @llvm.fptosi.sat.v2i32.v2f64 (<2 x double>)
+declare <3 x i32> @llvm.fptosi.sat.v3i32.v3f64 (<3 x double>)
+declare <4 x i32> @llvm.fptosi.sat.v4i32.v4f64 (<4 x double>)
+declare <5 x i32> @llvm.fptosi.sat.v5i32.v5f64 (<5 x double>)
+declare <6 x i32> @llvm.fptosi.sat.v6i32.v6f64 (<6 x double>)
+
+define arm_aapcs_vfpcc <1 x i32> @test_signed_v1f64_v1i32(<1 x double> %f) {
+; CHECK-LABEL: test_signed_v1f64_v1i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
+; CHECK-NEXT:    vldr d1, .LCPI8_0
+; CHECK-NEXT:    vmov r5, r4, d0
+; CHECK-NEXT:    vmov r2, r3, d1
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI8_1
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    vmov r2, r3, d0
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_d2iz
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    cmp r7, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r6, #-2147483648
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r6, #-2147483648
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r6, #0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI8_0:
+; CHECK-NEXT:    .long 4290772992 @ double 2147483647
+; CHECK-NEXT:    .long 1105199103
+; CHECK-NEXT:  .LCPI8_1:
+; CHECK-NEXT:    .long 0 @ double -2147483648
+; CHECK-NEXT:    .long 3252682752
+    %x = call <1 x i32> @llvm.fptosi.sat.v1i32.v1f64(<1 x double> %f)
+    ret <1 x i32> %x
+}
+
+define arm_aapcs_vfpcc <2 x i32> @test_signed_v2f64_v2i32(<2 x double> %f) {
+; CHECK-LABEL: test_signed_v2f64_v2i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI9_0
+; CHECK-NEXT:    vmov r9, r8, d9
+; CHECK-NEXT:    vmov r11, r10, d0
+; CHECK-NEXT:    str.w r11, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    str.w r10, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI9_1
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    str r0, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    vmov r5, r3, d0
+; CHECK-NEXT:    str r3, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT:    str r5, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    str r1, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r0, #-2147483648
+; CHECK-NEXT:    ldr r1, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r0, #-2147483648
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    vmov r7, r6, d8
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r4, #0
+; CHECK-NEXT:    str r4, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    ldr r5, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r11, #-2147483648
+; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    mov r10, r1
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r11, #-2147483648
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r11, #0
+; CHECK-NEXT:    ldrd r2, r3, [sp, #20] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r5, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r5, #-1
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r5, #0
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r5, #0
+; CHECK-NEXT:    ldrd r2, r3, [sp, #20] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    ldr r3, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r10, #-1
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r10, #0
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r10, #0
+; CHECK-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    vmov q0[2], q0[0], r11, r0
+; CHECK-NEXT:    vmov q0[3], q0[1], r10, r5
+; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI9_0:
+; CHECK-NEXT:    .long 4290772992 @ double 2147483647
+; CHECK-NEXT:    .long 1105199103
+; CHECK-NEXT:  .LCPI9_1:
+; CHECK-NEXT:    .long 0 @ double -2147483648
+; CHECK-NEXT:    .long 3252682752
+    %x = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f64(<2 x double> %f)
+    ret <2 x i32> %x
+}
+
+define arm_aapcs_vfpcc <3 x i32> @test_signed_v3f64_v3i32(<3 x double> %f) {
+; CHECK-LABEL: test_signed_v3f64_v3i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #24
+; CHECK-NEXT:    sub sp, #24
+; CHECK-NEXT:    vmov.f32 s16, s0
+; CHECK-NEXT:    vmov.f32 s17, s1
+; CHECK-NEXT:    vldr d0, .LCPI10_0
+; CHECK-NEXT:    vmov r4, r6, d1
+; CHECK-NEXT:    vmov r2, r11, d0
+; CHECK-NEXT:    vmov.f32 s18, s4
+; CHECK-NEXT:    vmov.f32 s19, s5
+; CHECK-NEXT:    str r2, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    str.w r11, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI10_1
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    vmov r2, r8, d0
+; CHECK-NEXT:    str r2, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT:    str.w r8, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r10, r0
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r10, #-2147483648
+; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    vmov r5, r7, d9
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r10, #-2147483648
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r10, #0
+; CHECK-NEXT:    ldr r2, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r2, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    cmp.w r11, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r6, #-2147483648
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    vmov r9, r8, d8
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r6, #-2147483648
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r6, #0
+; CHECK-NEXT:    ldr r2, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    ldr r3, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r2, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    ldr r3, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    cmp r5, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r7, #-2147483648
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r7, #-2147483648
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    vmov.32 q0[1], r10
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r7, #0
+; CHECK-NEXT:    vmov q0[2], q0[0], r7, r6
+; CHECK-NEXT:    add sp, #24
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI10_0:
+; CHECK-NEXT:    .long 4290772992 @ double 2147483647
+; CHECK-NEXT:    .long 1105199103
+; CHECK-NEXT:  .LCPI10_1:
+; CHECK-NEXT:    .long 0 @ double -2147483648
+; CHECK-NEXT:    .long 3252682752
+    %x = call <3 x i32> @llvm.fptosi.sat.v3f64.v3i32(<3 x double> %f)
+    ret <3 x i32> %x
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_signed_v4f64_v4i32(<4 x double> %f) {
+; CHECK-LABEL: test_signed_v4f64_v4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI11_0
+; CHECK-NEXT:    vmov q5, q1
+; CHECK-NEXT:    vmov r5, r6, d10
+; CHECK-NEXT:    vmov r9, r3, d0
+; CHECK-NEXT:    str r3, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI11_1
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    vmov r2, r3, d0
+; CHECK-NEXT:    str r3, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT:    str r2, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    vmov r11, r1, d11
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    vmov r7, r10, d8
+; CHECK-NEXT:    str r1, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r0, #-2147483648
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r0, #-2147483648
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r4, #0
+; CHECK-NEXT:    ldr.w r8, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r10
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    str r4, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    str.w r9, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r4, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r10
+; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r10
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    cmp r5, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r6, #-2147483648
+; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r10
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r6, #-2147483648
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r6, #0
+; CHECK-NEXT:    ldr r5, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r11
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr.w r9, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r10, r0
+; CHECK-NEXT:    mov r0, r11
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r11
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r8, #-2147483648
+; CHECK-NEXT:    mov r0, r11
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    cmp.w r10, #0
+; CHECK-NEXT:    vmov r7, r4, d9
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r8, #-2147483648
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r8, #0
+; CHECK-NEXT:    ldr r2, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    ldr r3, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r3, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    mov r10, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    cmp.w r11, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r5, #-2147483648
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    cmp.w r10, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r5, #-2147483648
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r5, #0
+; CHECK-NEXT:    ldr r0, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    vmov q0[2], q0[0], r6, r0
+; CHECK-NEXT:    vmov q0[3], q0[1], r5, r8
+; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI11_0:
+; CHECK-NEXT:    .long 4290772992 @ double 2147483647
+; CHECK-NEXT:    .long 1105199103
+; CHECK-NEXT:  .LCPI11_1:
+; CHECK-NEXT:    .long 0 @ double -2147483648
+; CHECK-NEXT:    .long 3252682752
+    %x = call <4 x i32> @llvm.fptosi.sat.v4f64.v4i32(<4 x double> %f)
+    ret <4 x i32> %x
+}
+
+define arm_aapcs_vfpcc <5 x i32> @test_signed_v5f64_v5i32(<5 x double> %f) {
+; CHECK-LABEL: test_signed_v5f64_v5i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    vmov.f32 s16, s0
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    vmov.f32 s17, s1
+; CHECK-NEXT:    vldr d0, .LCPI12_0
+; CHECK-NEXT:    vmov r5, r4, d4
+; CHECK-NEXT:    str r0, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT:    vmov r2, r3, d0
+; CHECK-NEXT:    vmov.f32 s20, s6
+; CHECK-NEXT:    vmov.f32 s18, s4
+; CHECK-NEXT:    vmov.f32 s22, s2
+; CHECK-NEXT:    vmov.f32 s21, s7
+; CHECK-NEXT:    vmov.f32 s19, s5
+; CHECK-NEXT:    vmov.f32 s23, s3
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    strd r2, r3, [sp, #20] @ 8-byte Folded Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI12_1
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    str r0, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    vmov r2, r3, d0
+; CHECK-NEXT:    str r3, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    str r2, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r10, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    vmov r8, r0, d11
+; CHECK-NEXT:    cmp.w r10, #0
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    vmov r9, r6, d10
+; CHECK-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r11, #-2147483648
+; CHECK-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r11, #-2147483648
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r11, #0
+; CHECK-NEXT:    str.w r11, [r7, #16]
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    ldr.w r10, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    ldr r7, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r4, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    ldr.w r11, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    str r0, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    cmp r5, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r0, #-2147483648
+; CHECK-NEXT:    ldr r1, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r0, #-2147483648
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r5, #0
+; CHECK-NEXT:    str r5, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    ldr r5, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    mov r6, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r10, r0
+; CHECK-NEXT:    cmp r7, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r10, #-2147483648
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r8
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    vmov r11, r4, d9
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r10, #-2147483648
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r10, #0
+; CHECK-NEXT:    ldrd r2, r3, [sp, #20] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r11
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    mov r0, r11
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r11
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r7, #-2147483648
+; CHECK-NEXT:    mov r0, r11
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r7, #-2147483648
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    vmov r5, r4, d8
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r7, #0
+; CHECK-NEXT:    ldrd r2, r3, [sp, #20] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r6, #-2147483648
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r6, #-2147483648
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r6, #0
+; CHECK-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    vmov q0[2], q0[0], r6, r7
+; CHECK-NEXT:    vmov q0[3], q0[1], r10, r0
+; CHECK-NEXT:    ldr r0, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI12_0:
+; CHECK-NEXT:    .long 4290772992 @ double 2147483647
+; CHECK-NEXT:    .long 1105199103
+; CHECK-NEXT:  .LCPI12_1:
+; CHECK-NEXT:    .long 0 @ double -2147483648
+; CHECK-NEXT:    .long 3252682752
+    %x = call <5 x i32> @llvm.fptosi.sat.v5f64.v5i32(<5 x double> %f)
+    ret <5 x i32> %x
+}
+
+define arm_aapcs_vfpcc <6 x i32> @test_signed_v6f64_v6i32(<6 x double> %f) {
+; CHECK-LABEL: test_signed_v6f64_v6i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12}
+; CHECK-NEXT:    .pad #40
+; CHECK-NEXT:    sub sp, #40
+; CHECK-NEXT:    vmov.f32 s16, s0
+; CHECK-NEXT:    str r0, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT:    vmov.f32 s17, s1
+; CHECK-NEXT:    vldr d0, .LCPI13_0
+; CHECK-NEXT:    vmov r9, r4, d5
+; CHECK-NEXT:    vmov r2, r6, d0
+; CHECK-NEXT:    vmov.f32 s20, s8
+; CHECK-NEXT:    vmov.f32 s22, s6
+; CHECK-NEXT:    vmov.f32 s18, s4
+; CHECK-NEXT:    vmov.f32 s24, s2
+; CHECK-NEXT:    vmov.f32 s21, s9
+; CHECK-NEXT:    vmov.f32 s23, s7
+; CHECK-NEXT:    vmov.f32 s19, s5
+; CHECK-NEXT:    vmov.f32 s25, s3
+; CHECK-NEXT:    str r2, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    str r6, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI13_1
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    vmov r2, r3, d0
+; CHECK-NEXT:    strd r2, r3, [sp, #32] @ 8-byte Folded Spill
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r10, r0
+; CHECK-NEXT:    vmov r8, r0, d11
+; CHECK-NEXT:    cmp.w r11, #0
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    vmov r7, r5, d10
+; CHECK-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    vmov r1, r0, d12
+; CHECK-NEXT:    strd r1, r0, [sp, #12] @ 8-byte Folded Spill
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r10, #-2147483648
+; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r10, #-2147483648
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r10, #0
+; CHECK-NEXT:    ldr.w r11, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    str.w r10, [r11, #20]
+; CHECK-NEXT:    ldr.w r10, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldrd r2, r3, [sp, #32] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r6, #-2147483648
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r6, #-2147483648
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r6, #0
+; CHECK-NEXT:    str.w r6, [r11, #16]
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    ldr r4, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    ldr.w r11, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r7, [sp, #32] @ 4-byte Reload
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    ldr r5, [sp, #36] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r10, r0
+; CHECK-NEXT:    cmp r6, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r10, #-2147483648
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r8
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r10, #-2147483648
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r10, #0
+; CHECK-NEXT:    ldr r4, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    ldr r6, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    ldr r2, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r5, r6
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    cmp.w r11, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r8, #-2147483648
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    vmov r7, r6, d9
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r8, #-2147483648
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r8, #0
+; CHECK-NEXT:    ldr.w r11, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    ldr r3, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldrd r2, r3, [sp, #32] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    cmp r5, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r4, #-2147483648
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r4, #-2147483648
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    vmov r7, r6, d8
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r4, #0
+; CHECK-NEXT:    ldr r3, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldrd r2, r3, [sp, #32] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    cmp.w r11, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r5, #-2147483648
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r5, #-2147483648
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r5, #0
+; CHECK-NEXT:    vmov q0[2], q0[0], r5, r4
+; CHECK-NEXT:    ldr r0, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    vmov q0[3], q0[1], r8, r10
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    add sp, #40
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI13_0:
+; CHECK-NEXT:    .long 4290772992 @ double 2147483647
+; CHECK-NEXT:    .long 1105199103
+; CHECK-NEXT:  .LCPI13_1:
+; CHECK-NEXT:    .long 0 @ double -2147483648
+; CHECK-NEXT:    .long 3252682752
+    %x = call <6 x i32> @llvm.fptosi.sat.v6f64.v6i32(<6 x double> %f)
+    ret <6 x i32> %x
+}
+
+;
+; FP16 to signed 32-bit -- Vector size variation
+;
+
+declare <1 x i32> @llvm.fptosi.sat.v1f16.v1i32 (<1 x half>)
+declare <2 x i32> @llvm.fptosi.sat.v2f16.v2i32 (<2 x half>)
+declare <3 x i32> @llvm.fptosi.sat.v3f16.v3i32 (<3 x half>)
+declare <4 x i32> @llvm.fptosi.sat.v4f16.v4i32 (<4 x half>)
+declare <5 x i32> @llvm.fptosi.sat.v5f16.v5i32 (<5 x half>)
+declare <6 x i32> @llvm.fptosi.sat.v6f16.v6i32 (<6 x half>)
+declare <7 x i32> @llvm.fptosi.sat.v7f16.v7i32 (<7 x half>)
+declare <8 x i32> @llvm.fptosi.sat.v8f16.v8i32 (<8 x half>)
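+;
+; The lowerings below widen each f16 lane to f32 with vcvtb/vcvtt before
+; converting, so the clamp bounds are the float constants 0xcf000000 and
+; 0x4effffff (just inside the signed 32-bit range) seen in the constant
+; pools, with a final NaN self-compare forcing unordered lanes to 0.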
+
+define arm_aapcs_vfpcc <1 x i32> @test_signed_v1f16_v1i32(<1 x half> %f) {
+; CHECK-LABEL: test_signed_v1f16_v1i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
+; CHECK-NEXT:    vldr s2, .LCPI14_0
+; CHECK-NEXT:    vcvt.s32.f32 s4, s0
+; CHECK-NEXT:    vldr s6, .LCPI14_1
+; CHECK-NEXT:    vcmp.f32 s0, s2
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s6
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r0, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r0, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI14_0:
+; CHECK-NEXT:    .long 0xcf000000 @ float -2.14748365E+9
+; CHECK-NEXT:  .LCPI14_1:
+; CHECK-NEXT:    .long 0x4effffff @ float 2.14748352E+9
+    %x = call <1 x i32> @llvm.fptosi.sat.v1f16.v1i32(<1 x half> %f)
+    ret <1 x i32> %x
+}
+
+define arm_aapcs_vfpcc <2 x i32> @test_signed_v2f16_v2i32(<2 x half> %f) {
+; CHECK-LABEL: test_signed_v2f16_v2i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vcvtt.f32.f16 s18, s16
+; CHECK-NEXT:    vmov r0, s18
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    vcvtb.f32.f16 s16, s16
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vldr s20, .LCPI15_0
+; CHECK-NEXT:    vldr s22, .LCPI15_1
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r5, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s18, s22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r5, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r5, #0
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s22
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r0, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r0, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s22
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r4, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r4, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r4, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-1
+; CHECK-NEXT:    vcmp.f32 s16, s22
+; CHECK-NEXT:    vmov q0[2], q0[0], r0, r5
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r1, #0
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r4
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI15_0:
+; CHECK-NEXT:    .long 0xcf000000 @ float -2.14748365E+9
+; CHECK-NEXT:  .LCPI15_1:
+; CHECK-NEXT:    .long 0x4effffff @ float 2.14748352E+9
+    %x = call <2 x i32> @llvm.fptosi.sat.v2f16.v2i32(<2 x half> %f)
+    ret <2 x i32> %x
+}
+
+define arm_aapcs_vfpcc <3 x i32> @test_signed_v3f16_v3i32(<3 x half> %f) {
+; CHECK-LABEL: test_signed_v3f16_v3i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvtb.f32.f16 s10, s1
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s2
+; CHECK-NEXT:    vcvt.s32.f32 s12, s10
+; CHECK-NEXT:    vldr s6, .LCPI16_1
+; CHECK-NEXT:    vcvt.s32.f32 s14, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
+; CHECK-NEXT:    vcvt.s32.f32 s8, s0
+; CHECK-NEXT:    vldr s4, .LCPI16_0
+; CHECK-NEXT:    vcmp.f32 s10, s6
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s10, s4
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r0, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s10, s10
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r0, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s6
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s14
+; CHECK-NEXT:    vcmp.f32 s2, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s2
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vcmp.f32 s0, s6
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r2, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s0, s4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r2, #-2147483648
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmov q0[2], q0[0], r2, r1
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI16_0:
+; CHECK-NEXT:    .long 0x4effffff @ float 2.14748352E+9
+; CHECK-NEXT:  .LCPI16_1:
+; CHECK-NEXT:    .long 0xcf000000 @ float -2.14748365E+9
+    %x = call <3 x i32> @llvm.fptosi.sat.v3f16.v3i32(<3 x half> %f)
+    ret <3 x i32> %x
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_signed_v4f16_v4i32(<4 x half> %f) {
+; CHECK-LABEL: test_signed_v4f16_v4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvtb.f32.f16 s14, s1
+; CHECK-NEXT:    vcvtt.f32.f16 s10, s1
+; CHECK-NEXT:    vcvt.s32.f32 s1, s14
+; CHECK-NEXT:    vcvtt.f32.f16 s6, s0
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
+; CHECK-NEXT:    vldr s4, .LCPI17_1
+; CHECK-NEXT:    vcvt.s32.f32 s3, s0
+; CHECK-NEXT:    vldr s2, .LCPI17_0
+; CHECK-NEXT:    vcvt.s32.f32 s12, s10
+; CHECK-NEXT:    vcmp.f32 s14, s4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s14, s2
+; CHECK-NEXT:    vcvt.s32.f32 s8, s6
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r0, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s14, s14
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r0, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s4
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    vcmp.f32 s0, s2
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s10, s4
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcmp.f32 s10, s2
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s10, s10
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vcmp.f32 s6, s4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s6, s2
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r3, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s6, s6
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmov q0[3], q0[1], r3, r2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI17_0:
+; CHECK-NEXT:    .long 0x4effffff @ float 2.14748352E+9
+; CHECK-NEXT:  .LCPI17_1:
+; CHECK-NEXT:    .long 0xcf000000 @ float -2.14748365E+9
+    %x = call <4 x i32> @llvm.fptosi.sat.v4f16.v4i32(<4 x half> %f)
+    ret <4 x i32> %x
+}
+
+define arm_aapcs_vfpcc <5 x i32> @test_signed_v5f16_v5i32(<5 x half> %f) {
+; CHECK-LABEL: test_signed_v5f16_v5i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s12, s1
+; CHECK-NEXT:    vcvt.s32.f32 s5, s2
+; CHECK-NEXT:    vcvtt.f32.f16 s1, s1
+; CHECK-NEXT:    vcvt.s32.f32 s7, s1
+; CHECK-NEXT:    vldr s8, .LCPI18_1
+; CHECK-NEXT:    vcvtb.f32.f16 s4, s0
+; CHECK-NEXT:    vcvtt.f32.f16 s0, s0
+; CHECK-NEXT:    vldr s6, .LCPI18_0
+; CHECK-NEXT:    vcvt.s32.f32 s3, s0
+; CHECK-NEXT:    vcmp.f32 s2, s8
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s6
+; CHECK-NEXT:    vcvt.s32.f32 s14, s12
+; CHECK-NEXT:    vmov r1, s5
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s2
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r12, s7
+; CHECK-NEXT:    str r1, [r0, #16]
+; CHECK-NEXT:    vcmp.f32 s1, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r12, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcvt.s32.f32 s10, s4
+; CHECK-NEXT:    vcmp.f32 s1, s1
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r12, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w r12, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    vcmp.f32 s0, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s12, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s14
+; CHECK-NEXT:    vcmp.f32 s12, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s12, s12
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s10
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vcmp.f32 s4, s8
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s4, s6
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s4, s4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r3
+; CHECK-NEXT:    vmov q0[3], q0[1], r2, r12
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI18_0:
+; CHECK-NEXT:    .long 0x4effffff @ float 2.14748352E+9
+; CHECK-NEXT:  .LCPI18_1:
+; CHECK-NEXT:    .long 0xcf000000 @ float -2.14748365E+9
+    %x = call <5 x i32> @llvm.fptosi.sat.v5f16.v5i32(<5 x half> %f)
+    ret <5 x i32> %x
+}
+
+define arm_aapcs_vfpcc <6 x i32> @test_signed_v6f16_v6i32(<6 x half> %f) {
+; CHECK-LABEL: test_signed_v6f16_v6i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvtt.f32.f16 s7, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s2
+; CHECK-NEXT:    vcvt.s32.f32 s9, s7
+; CHECK-NEXT:    vldr s8, .LCPI19_1
+; CHECK-NEXT:    vcvt.s32.f32 s11, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s12, s1
+; CHECK-NEXT:    vcvtt.f32.f16 s1, s1
+; CHECK-NEXT:    vldr s6, .LCPI19_0
+; CHECK-NEXT:    vcvt.s32.f32 s5, s1
+; CHECK-NEXT:    vcvtb.f32.f16 s4, s0
+; CHECK-NEXT:    vcmp.f32 s7, s8
+; CHECK-NEXT:    vcvtt.f32.f16 s0, s0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s7, s6
+; CHECK-NEXT:    vcvt.s32.f32 s3, s0
+; CHECK-NEXT:    vmov r1, s9
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s7, s7
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vcmp.f32 s2, s8
+; CHECK-NEXT:    str r1, [r0, #20]
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s11
+; CHECK-NEXT:    vcmp.f32 s2, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s2
+; CHECK-NEXT:    vcvt.s32.f32 s14, s12
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r12, s5
+; CHECK-NEXT:    str r1, [r0, #16]
+; CHECK-NEXT:    vcmp.f32 s1, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r12, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcvt.s32.f32 s10, s4
+; CHECK-NEXT:    vcmp.f32 s1, s1
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r12, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w r12, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    vcmp.f32 s0, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s12, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s14
+; CHECK-NEXT:    vcmp.f32 s12, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s12, s12
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s10
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vcmp.f32 s4, s8
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s4, s6
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s4, s4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r3
+; CHECK-NEXT:    vmov q0[3], q0[1], r2, r12
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI19_0:
+; CHECK-NEXT:    .long 0x4effffff @ float 2.14748352E+9
+; CHECK-NEXT:  .LCPI19_1:
+; CHECK-NEXT:    .long 0xcf000000 @ float -2.14748365E+9
+    %x = call <6 x i32> @llvm.fptosi.sat.v6f16.v6i32(<6 x half> %f)
+    ret <6 x i32> %x
+}
+
+define arm_aapcs_vfpcc <7 x i32> @test_signed_v7f16_v7i32(<7 x half> %f) {
+; CHECK-LABEL: test_signed_v7f16_v7i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvtt.f32.f16 s11, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s2
+; CHECK-NEXT:    vcvt.s32.f32 s13, s11
+; CHECK-NEXT:    vldr s8, .LCPI20_1
+; CHECK-NEXT:    vcvt.s32.f32 s15, s2
+; CHECK-NEXT:    vldr s6, .LCPI20_0
+; CHECK-NEXT:    vcvtb.f32.f16 s3, s3
+; CHECK-NEXT:    vcmp.f32 s11, s8
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcvt.s32.f32 s9, s3
+; CHECK-NEXT:    vcmp.f32 s11, s6
+; CHECK-NEXT:    vcvtb.f32.f16 s12, s1
+; CHECK-NEXT:    vcvtt.f32.f16 s1, s1
+; CHECK-NEXT:    vcvtb.f32.f16 s4, s0
+; CHECK-NEXT:    vcvt.s32.f32 s7, s1
+; CHECK-NEXT:    vcvtt.f32.f16 s0, s0
+; CHECK-NEXT:    vmov r1, s13
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s11, s11
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    str r1, [r0, #20]
+; CHECK-NEXT:    vcmp.f32 s2, s8
+; CHECK-NEXT:    vmov r1, s15
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s2, s6
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s2, s2
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vcvt.s32.f32 s5, s0
+; CHECK-NEXT:    str r1, [r0, #16]
+; CHECK-NEXT:    vcmp.f32 s3, s8
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s9
+; CHECK-NEXT:    vcmp.f32 s3, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, s3
+; CHECK-NEXT:    vcvt.s32.f32 s14, s12
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r12, s7
+; CHECK-NEXT:    str r1, [r0, #24]
+; CHECK-NEXT:    vcmp.f32 s1, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r12, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcvt.s32.f32 s10, s4
+; CHECK-NEXT:    vcmp.f32 s1, s1
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r12, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w r12, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s5
+; CHECK-NEXT:    vcmp.f32 s0, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s12, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s14
+; CHECK-NEXT:    vcmp.f32 s12, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s12, s12
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s10
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vcmp.f32 s4, s8
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s4, s6
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s4, s4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r3
+; CHECK-NEXT:    vmov q0[3], q0[1], r2, r12
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI20_0:
+; CHECK-NEXT:    .long 0x4effffff @ float 2.14748352E+9
+; CHECK-NEXT:  .LCPI20_1:
+; CHECK-NEXT:    .long 0xcf000000 @ float -2.14748365E+9
+    %x = call <7 x i32> @llvm.fptosi.sat.v7f16.v7i32(<7 x half> %f)
+    ret <7 x i32> %x
+}
+
+define arm_aapcs_vfpcc <8 x i32> @test_signed_v8f16_v8i32(<8 x half> %f) {
+; CHECK-LABEL: test_signed_v8f16_v8i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vcvtt.f32.f16 s13, s3
+; CHECK-NEXT:    vcvtb.f32.f16 s3, s3
+; CHECK-NEXT:    vcvt.s32.f32 s16, s3
+; CHECK-NEXT:    vcvtt.f32.f16 s9, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s2
+; CHECK-NEXT:    vldr s8, .LCPI21_1
+; CHECK-NEXT:    vcvt.s32.f32 s18, s2
+; CHECK-NEXT:    vldr s6, .LCPI21_0
+; CHECK-NEXT:    vcvt.s32.f32 s15, s13
+; CHECK-NEXT:    vcvtt.f32.f16 s12, s1
+; CHECK-NEXT:    vcmp.f32 s3, s8
+; CHECK-NEXT:    vcvtb.f32.f16 s1, s1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, s6
+; CHECK-NEXT:    vcvt.s32.f32 s11, s9
+; CHECK-NEXT:    vcvtt.f32.f16 s4, s0
+; CHECK-NEXT:    vmov r12, s16
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r12, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, s3
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r12, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w r12, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov lr, s18
+; CHECK-NEXT:    vcmp.f32 s2, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w lr, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s2
+; CHECK-NEXT:    vcvt.s32.f32 s7, s1
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt lr, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s13, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w lr, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s15
+; CHECK-NEXT:    vcmp.f32 s13, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s13, s13
+; CHECK-NEXT:    vcvt.s32.f32 s5, s0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s9, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s11
+; CHECK-NEXT:    vcmp.f32 s9, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s9, s9
+; CHECK-NEXT:    vcvt.s32.f32 s14, s12
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r0, s7
+; CHECK-NEXT:    vcmp.f32 s1, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r0, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcvt.s32.f32 s10, s4
+; CHECK-NEXT:    vcmp.f32 s1, s1
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r0, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s5
+; CHECK-NEXT:    vcmp.f32 s0, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s12, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r4, s14
+; CHECK-NEXT:    vcmp.f32 s12, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r4, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s12, s12
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r4, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r5, s10
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r4, #0
+; CHECK-NEXT:    vcmp.f32 s4, s8
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r5, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s4, s6
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s4, s4
+; CHECK-NEXT:    vmov q1[2], q1[0], lr, r12
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r5, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r5, #0
+; CHECK-NEXT:    vmov q0[3], q0[1], r5, r4
+; CHECK-NEXT:    vmov q1[3], q1[1], r3, r2
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI21_0:
+; CHECK-NEXT:    .long 0x4effffff @ float 2.14748352E+9
+; CHECK-NEXT:  .LCPI21_1:
+; CHECK-NEXT:    .long 0xcf000000 @ float -2.14748365E+9
+    %x = call <8 x i32> @llvm.fptosi.sat.v8f16.v8i32(<8 x half> %f)
+    ret <8 x i32> %x
+}
+
+;
+; 4-Vector float to signed integer -- result size variation
+;
+
+declare <4 x   i1> @llvm.fptosi.sat.v4f32.v4i1  (<4 x float>)
+declare <4 x   i8> @llvm.fptosi.sat.v4f32.v4i8  (<4 x float>)
+declare <4 x  i13> @llvm.fptosi.sat.v4f32.v4i13 (<4 x float>)
+declare <4 x  i16> @llvm.fptosi.sat.v4f32.v4i16 (<4 x float>)
+declare <4 x  i19> @llvm.fptosi.sat.v4f32.v4i19 (<4 x float>)
+declare <4 x  i50> @llvm.fptosi.sat.v4f32.v4i50 (<4 x float>)
+declare <4 x  i64> @llvm.fptosi.sat.v4f32.v4i64 (<4 x float>)
+declare <4 x i100> @llvm.fptosi.sat.v4f32.v4i100(<4 x float>)
+declare <4 x i128> @llvm.fptosi.sat.v4f32.v4i128(<4 x float>)
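+;
+; Per the llvm.fptosi.sat LangRef semantics, a result element type iN clamps
+; each lane to [-2^(N-1), 2^(N-1)-1] and maps NaN to 0; for v4i13 that gives
+; the bounds -4096.0 and 4095.0 in .LCPI24_0/.LCPI24_1 below. An illustrative
+; (non-test) call:
+;
+;   %s = call <4 x i13> @llvm.fptosi.sat.v4f32.v4i13(
+;            <4 x float> <float -1.0e9, float 1.0e9,
+;                         float 0x7FF8000000000000, float 42.0>)
+;   ; saturates to <i13 -4096, i13 4095, i13 0, i13 42>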
+
+define arm_aapcs_vfpcc <4 x i1> @test_signed_v4f32_v4i1(<4 x float> %f) {
+; CHECK-LABEL: test_signed_v4f32_v4i1:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.f32 s4, #-1.000000e+00
+; CHECK-NEXT:    vldr s6, .LCPI22_0
+; CHECK-NEXT:    vmaxnm.f32 s12, s0, s4
+; CHECK-NEXT:    vmaxnm.f32 s8, s3, s4
+; CHECK-NEXT:    vminnm.f32 s12, s12, s6
+; CHECK-NEXT:    vmaxnm.f32 s10, s2, s4
+; CHECK-NEXT:    vcvt.s32.f32 s12, s12
+; CHECK-NEXT:    vmaxnm.f32 s4, s1, s4
+; CHECK-NEXT:    vminnm.f32 s4, s4, s6
+; CHECK-NEXT:    vminnm.f32 s10, s10, s6
+; CHECK-NEXT:    vcvt.s32.f32 s4, s4
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    vminnm.f32 s8, s8, s6
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcvt.s32.f32 s10, s10
+; CHECK-NEXT:    vcmp.f32 s1, s1
+; CHECK-NEXT:    vcvt.s32.f32 s8, s8
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    and r2, r2, #1
+; CHECK-NEXT:    vcmp.f32 s2, s2
+; CHECK-NEXT:    rsb.w r2, r2, #0
+; CHECK-NEXT:    bfi r1, r2, #0, #1
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    and r2, r2, #1
+; CHECK-NEXT:    vcmp.f32 s3, s3
+; CHECK-NEXT:    rsb.w r2, r2, #0
+; CHECK-NEXT:    bfi r1, r2, #1, #1
+; CHECK-NEXT:    vmov r2, s10
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    and r2, r2, #1
+; CHECK-NEXT:    rsb.w r2, r2, #0
+; CHECK-NEXT:    bfi r1, r2, #2, #1
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    and r2, r2, #1
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    bfi r1, r2, #3, #1
+; CHECK-NEXT:    strb r1, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI22_0:
+; CHECK-NEXT:    .long 0x00000000 @ float 0
+    %x = call <4 x i1> @llvm.fptosi.sat.v4f32.v4i1(<4 x float> %f)
+    ret <4 x i1> %x
+}
+
+define arm_aapcs_vfpcc <4 x i8> @test_signed_v4f32_v4i8(<4 x float> %f) {
+; CHECK-LABEL: test_signed_v4f32_v4i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s4, .LCPI23_0
+; CHECK-NEXT:    vcmp.f32 s2, s2
+; CHECK-NEXT:    vldr s6, .LCPI23_1
+; CHECK-NEXT:    vmaxnm.f32 s12, s2, s4
+; CHECK-NEXT:    vmaxnm.f32 s10, s0, s4
+; CHECK-NEXT:    vminnm.f32 s12, s12, s6
+; CHECK-NEXT:    vmaxnm.f32 s8, s1, s4
+; CHECK-NEXT:    vminnm.f32 s10, s10, s6
+; CHECK-NEXT:    vmaxnm.f32 s4, s3, s4
+; CHECK-NEXT:    vcvt.s32.f32 s12, s12
+; CHECK-NEXT:    vminnm.f32 s8, s8, s6
+; CHECK-NEXT:    vminnm.f32 s4, s4, s6
+; CHECK-NEXT:    vcvt.s32.f32 s10, s10
+; CHECK-NEXT:    vcvt.s32.f32 s8, s8
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcvt.s32.f32 s4, s4
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s10
+; CHECK-NEXT:    vcmp.f32 s3, s3
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vcmp.f32 s1, s1
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    vmov r3, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmov q0[3], q0[1], r3, r2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI23_0:
+; CHECK-NEXT:    .long 0xc3000000 @ float -128
+; CHECK-NEXT:  .LCPI23_1:
+; CHECK-NEXT:    .long 0x42fe0000 @ float 127
+    %x = call <4 x i8> @llvm.fptosi.sat.v4f32.v4i8(<4 x float> %f)
+    ret <4 x i8> %x
+}
+
+define arm_aapcs_vfpcc <4 x i13> @test_signed_v4f32_v4i13(<4 x float> %f) {
+; CHECK-LABEL: test_signed_v4f32_v4i13:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s4, .LCPI24_0
+; CHECK-NEXT:    vcmp.f32 s2, s2
+; CHECK-NEXT:    vldr s6, .LCPI24_1
+; CHECK-NEXT:    vmaxnm.f32 s12, s2, s4
+; CHECK-NEXT:    vmaxnm.f32 s10, s0, s4
+; CHECK-NEXT:    vminnm.f32 s12, s12, s6
+; CHECK-NEXT:    vmaxnm.f32 s8, s1, s4
+; CHECK-NEXT:    vminnm.f32 s10, s10, s6
+; CHECK-NEXT:    vmaxnm.f32 s4, s3, s4
+; CHECK-NEXT:    vcvt.s32.f32 s12, s12
+; CHECK-NEXT:    vminnm.f32 s8, s8, s6
+; CHECK-NEXT:    vminnm.f32 s4, s4, s6
+; CHECK-NEXT:    vcvt.s32.f32 s10, s10
+; CHECK-NEXT:    vcvt.s32.f32 s8, s8
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcvt.s32.f32 s4, s4
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s10
+; CHECK-NEXT:    vcmp.f32 s3, s3
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vcmp.f32 s1, s1
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    vmov r3, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmov q0[3], q0[1], r3, r2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI24_0:
+; CHECK-NEXT:    .long 0xc5800000 @ float -4096
+; CHECK-NEXT:  .LCPI24_1:
+; CHECK-NEXT:    .long 0x457ff000 @ float 4095
+    %x = call <4 x i13> @llvm.fptosi.sat.v4f32.v4i13(<4 x float> %f)
+    ret <4 x i13> %x
+}
+
+define arm_aapcs_vfpcc <4 x i16> @test_signed_v4f32_v4i16(<4 x float> %f) {
+; CHECK-LABEL: test_signed_v4f32_v4i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s4, .LCPI25_0
+; CHECK-NEXT:    vcmp.f32 s2, s2
+; CHECK-NEXT:    vldr s6, .LCPI25_1
+; CHECK-NEXT:    vmaxnm.f32 s12, s2, s4
+; CHECK-NEXT:    vmaxnm.f32 s10, s0, s4
+; CHECK-NEXT:    vminnm.f32 s12, s12, s6
+; CHECK-NEXT:    vmaxnm.f32 s8, s1, s4
+; CHECK-NEXT:    vminnm.f32 s10, s10, s6
+; CHECK-NEXT:    vmaxnm.f32 s4, s3, s4
+; CHECK-NEXT:    vcvt.s32.f32 s12, s12
+; CHECK-NEXT:    vminnm.f32 s8, s8, s6
+; CHECK-NEXT:    vminnm.f32 s4, s4, s6
+; CHECK-NEXT:    vcvt.s32.f32 s10, s10
+; CHECK-NEXT:    vcvt.s32.f32 s8, s8
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcvt.s32.f32 s4, s4
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s10
+; CHECK-NEXT:    vcmp.f32 s3, s3
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vcmp.f32 s1, s1
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    vmov r3, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmov q0[3], q0[1], r3, r2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI25_0:
+; CHECK-NEXT:    .long 0xc7000000 @ float -32768
+; CHECK-NEXT:  .LCPI25_1:
+; CHECK-NEXT:    .long 0x46fffe00 @ float 32767
+    %x = call <4 x i16> @llvm.fptosi.sat.v4f32.v4i16(<4 x float> %f)
+    ret <4 x i16> %x
+}
+
+define arm_aapcs_vfpcc <4 x i19> @test_signed_v4f32_v4i19(<4 x float> %f) {
+; CHECK-LABEL: test_signed_v4f32_v4i19:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s4, .LCPI26_0
+; CHECK-NEXT:    vcmp.f32 s2, s2
+; CHECK-NEXT:    vldr s6, .LCPI26_1
+; CHECK-NEXT:    vmaxnm.f32 s12, s2, s4
+; CHECK-NEXT:    vmaxnm.f32 s10, s0, s4
+; CHECK-NEXT:    vminnm.f32 s12, s12, s6
+; CHECK-NEXT:    vmaxnm.f32 s8, s1, s4
+; CHECK-NEXT:    vminnm.f32 s10, s10, s6
+; CHECK-NEXT:    vmaxnm.f32 s4, s3, s4
+; CHECK-NEXT:    vcvt.s32.f32 s12, s12
+; CHECK-NEXT:    vminnm.f32 s8, s8, s6
+; CHECK-NEXT:    vminnm.f32 s4, s4, s6
+; CHECK-NEXT:    vcvt.s32.f32 s10, s10
+; CHECK-NEXT:    vcvt.s32.f32 s8, s8
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcvt.s32.f32 s4, s4
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s10
+; CHECK-NEXT:    vcmp.f32 s3, s3
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vcmp.f32 s1, s1
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    vmov r3, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmov q0[3], q0[1], r3, r2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI26_0:
+; CHECK-NEXT:    .long 0xc8800000 @ float -262144
+; CHECK-NEXT:  .LCPI26_1:
+; CHECK-NEXT:    .long 0x487fffc0 @ float 262143
+    %x = call <4 x i19> @llvm.fptosi.sat.v4f32.v4i19(<4 x float> %f)
+    ret <4 x i19> %x
+}
+
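+; At the full i32 width the lanes are converted first and then fixed up with
+; explicit vcmp against the bounds: movlt/mvngt select INT32_MIN/INT32_MAX
+; and movvs zeroes NaN lanes.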
+define arm_aapcs_vfpcc <4 x i32> @test_signed_v4f32_v4i32_duplicate(<4 x float> %f) {
+; CHECK-LABEL: test_signed_v4f32_v4i32_duplicate:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.s32.f32 s12, s2
+; CHECK-NEXT:    vldr s6, .LCPI27_0
+; CHECK-NEXT:    vcvt.s32.f32 s14, s0
+; CHECK-NEXT:    vldr s10, .LCPI27_1
+; CHECK-NEXT:    vcvt.s32.f32 s8, s3
+; CHECK-NEXT:    vcmp.f32 s2, s6
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s10
+; CHECK-NEXT:    vcvt.s32.f32 s4, s1
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r0, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s2
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r0, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s6
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s14
+; CHECK-NEXT:    vcmp.f32 s0, s10
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, s6
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    vcmp.f32 s3, s10
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, s3
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vcmp.f32 s1, s6
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r3, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s1, s10
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, s1
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r3, #-2147483648
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmov q0[3], q0[1], r3, r2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI27_0:
+; CHECK-NEXT:    .long 0xcf000000 @ float -2.14748365E+9
+; CHECK-NEXT:  .LCPI27_1:
+; CHECK-NEXT:    .long 0x4effffff @ float 2.14748352E+9
+    %x = call <4 x i32> @llvm.fptosi.sat.v4f32.v4i32(<4 x float> %f)
+    ret <4 x i32> %x
+}
+
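+; Lanes wider than 32 bits are converted one at a time through the
+; __aeabi_f2lz libcall, saturated with predicated moves, and packed into the
+; returned memory with bfc/lsl/orr bitfield stores.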
+define arm_aapcs_vfpcc <4 x i50> @test_signed_v4f32_v4i50(<4 x float> %f) {
+; CHECK-LABEL: test_signed_v4f32_v4i50:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    vmov r0, s18
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    vmov r0, s19
+; CHECK-NEXT:    vldr s20, .LCPI28_0
+; CHECK-NEXT:    mov r5, r1
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    itt lt
+; CHECK-NEXT:    movlt r5, #0
+; CHECK-NEXT:    movtlt r5, #65534
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vldr s22, .LCPI28_1
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov r6, r1
+; CHECK-NEXT:    vcmp.f32 s18, s22
+; CHECK-NEXT:    itt lt
+; CHECK-NEXT:    movlt r6, #0
+; CHECK-NEXT:    movtlt r6, #65534
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    itt gt
+; CHECK-NEXT:    movwgt r5, #65535
+; CHECK-NEXT:    movtgt r5, #1
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s22
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s22
+; CHECK-NEXT:    itt gt
+; CHECK-NEXT:    movwgt r6, #65535
+; CHECK-NEXT:    movtgt r6, #1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str.w r0, [r8]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r7, #0
+; CHECK-NEXT:    vcmp.f32 s19, s22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r7, #-1
+; CHECK-NEXT:    vcmp.f32 s19, s19
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    itt vs
+; CHECK-NEXT:    movvs r7, #0
+; CHECK-NEXT:    movvs r6, #0
+; CHECK-NEXT:    lsls r0, r6, #22
+; CHECK-NEXT:    orr.w r1, r0, r7, lsr #10
+; CHECK-NEXT:    vmov r0, s17
+; CHECK-NEXT:    str.w r1, [r8, #20]
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    lsrs r2, r6, #10
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s22
+; CHECK-NEXT:    itt lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    movtlt r1, #65534
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    itt gt
+; CHECK-NEXT:    movwgt r1, #65535
+; CHECK-NEXT:    movtgt r1, #1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s22
+; CHECK-NEXT:    itt lt
+; CHECK-NEXT:    movlt r4, #0
+; CHECK-NEXT:    movtlt r4, #65534
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    itt gt
+; CHECK-NEXT:    movwgt r4, #65535
+; CHECK-NEXT:    movtgt r4, #1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s22
+; CHECK-NEXT:    strb.w r2, [r8, #24]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s17
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r4, #0
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    bfc r4, #18, #14
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s22
+; CHECK-NEXT:    orr.w r2, r4, r0, lsl #18
+; CHECK-NEXT:    str.w r2, [r8, #4]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r9, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r9, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s17
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w r9, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    bfc r1, #18, #14
+; CHECK-NEXT:    lsrs r0, r0, #14
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    lsr.w r2, r1, #14
+; CHECK-NEXT:    orr.w r0, r0, r1, lsl #18
+; CHECK-NEXT:    orr.w r2, r2, r9, lsl #4
+; CHECK-NEXT:    str.w r2, [r8, #12]
+; CHECK-NEXT:    str.w r0, [r8, #8]
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r5, #0
+; CHECK-NEXT:    bfc r5, #18, #14
+; CHECK-NEXT:    lsr.w r0, r9, #28
+; CHECK-NEXT:    orr.w r0, r0, r5, lsl #4
+; CHECK-NEXT:    orr.w r0, r0, r7, lsl #22
+; CHECK-NEXT:    str.w r0, [r8, #16]
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI28_0:
+; CHECK-NEXT:    .long 0xd8000000 @ float -5.62949953E+14
+; CHECK-NEXT:  .LCPI28_1:
+; CHECK-NEXT:    .long 0x57ffffff @ float 5.6294992E+14
+    %x = call <4 x i50> @llvm.fptosi.sat.v4f32.v4i50(<4 x float> %f)
+    ret <4 x i50> %x
+}
+
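+; The i64 lanes also go through __aeabi_f2lz, saturating the low word to
+; 0/-1 and the high word to 0x80000000/0x7fffffff before rebuilding q0/q1.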
+define arm_aapcs_vfpcc <4 x i64> @test_signed_v4f32_v4i64(<4 x float> %f) {
+; CHECK-LABEL: test_signed_v4f32_v4i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vmov r0, s19
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    mov r10, r0
+; CHECK-NEXT:    vmov r0, s18
+; CHECK-NEXT:    vldr s22, .LCPI29_0
+; CHECK-NEXT:    mov r9, r1
+; CHECK-NEXT:    vldr s20, .LCPI29_1
+; CHECK-NEXT:    vmov r8, s16
+; CHECK-NEXT:    vcmp.f32 s19, s22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r10, #0
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r10, #-1
+; CHECK-NEXT:    vcmp.f32 s19, s19
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r4, s17
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w r10, #0
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    vcmp.f32 s18, s22
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r7, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r7, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r7, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r9, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s19
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r9, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov r6, r1
+; CHECK-NEXT:    vcmp.f32 s18, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w r9, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r6, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r6, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r6, #0
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    vcmp.f32 s17, s22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r5, #0
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r5, #-1
+; CHECK-NEXT:    vcmp.f32 s17, s17
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r5, #0
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    vcmp.f32 s16, s22
+; CHECK-NEXT:    vmov q1[2], q1[0], r7, r10
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r4, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s17
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r4, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r4, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    vmov q0[2], q0[0], r0, r5
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    vmov q1[3], q1[1], r6, r9
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r4
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI29_0:
+; CHECK-NEXT:    .long 0xdf000000 @ float -9.22337203E+18
+; CHECK-NEXT:  .LCPI29_1:
+; CHECK-NEXT:    .long 0x5effffff @ float 9.22337149E+18
+    %x = call <4 x i64> @llvm.fptosi.sat.v4f32.v4i64(<4 x float> %f)
+    ret <4 x i64> %x
+}
+
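+; i100 results come back from the __fixsfti libcall in r0-r3; the saturated
+; words are stored at unaligned byte offsets and the top four bits of each
+; lane (mvnlt/movgt with #7) are merged in with shift/orr sequences.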
+define arm_aapcs_vfpcc <4 x i100> @test_signed_v4f32_v4i100(<4 x float> %f) {
+; CHECK-LABEL: test_signed_v4f32_v4i100:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    vmov r0, s18
+; CHECK-NEXT:    vldr s20, .LCPI30_0
+; CHECK-NEXT:    vmov r7, s19
+; CHECK-NEXT:    vmov r5, s16
+; CHECK-NEXT:    bl __fixsfti
+; CHECK-NEXT:    vldr s22, .LCPI30_1
+; CHECK-NEXT:    mov r6, r3
+; CHECK-NEXT:    vcmp.f32 s18, s22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    str.w r2, [r4, #33]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str.w r1, [r4, #29]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    str.w r0, [r4, #25]
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    bl __fixsfti
+; CHECK-NEXT:    vcmp.f32 s16, s22
+; CHECK-NEXT:    mov r5, r3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    str r2, [r4, #8]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #4]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    str r0, [r4]
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    bl __fixsfti
+; CHECK-NEXT:    vcmp.f32 s19, s22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s19
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s19
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    lsr.w r7, r1, #28
+; CHECK-NEXT:    vcmp.f32 s19, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    orr.w r7, r7, r2, lsl #4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str.w r7, [r4, #45]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcmp.f32 s19, s19
+; CHECK-NEXT:    lsrs r2, r2, #28
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    lsrs r7, r0, #28
+; CHECK-NEXT:    vcmp.f32 s19, s22
+; CHECK-NEXT:    orr.w r7, r7, r1, lsl #4
+; CHECK-NEXT:    vmov r1, s17
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    str.w r7, [r4, #41]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    mvnlt r3, #7
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s19
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r3, #7
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    orr.w r2, r2, r3, lsl #4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    strb.w r2, [r4, #49]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    mvnlt r6, #7
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r6, #7
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r6, #0
+; CHECK-NEXT:    and r2, r6, #15
+; CHECK-NEXT:    orr.w r0, r2, r0, lsl #4
+; CHECK-NEXT:    str.w r0, [r4, #37]
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bl __fixsfti
+; CHECK-NEXT:    vcmp.f32 s17, s22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s17
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s17
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    lsrs r7, r1, #28
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    orr.w r7, r7, r2, lsl #4
+; CHECK-NEXT:    str r7, [r4, #20]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s17
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    lsr.w r7, r0, #28
+; CHECK-NEXT:    orr.w r1, r7, r1, lsl #4
+; CHECK-NEXT:    str r1, [r4, #16]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    mvnlt r3, #7
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s17
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r3, #7
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    lsr.w r1, r2, #28
+; CHECK-NEXT:    vcmp.f32 s16, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    orr.w r1, r1, r3, lsl #4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    strb r1, [r4, #24]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    mvnlt r5, #7
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r5, #7
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r5, #0
+; CHECK-NEXT:    and r1, r5, #15
+; CHECK-NEXT:    orr.w r0, r1, r0, lsl #4
+; CHECK-NEXT:    str r0, [r4, #12]
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI30_0:
+; CHECK-NEXT:    .long 0x70ffffff @ float 6.33825262E+29
+; CHECK-NEXT:  .LCPI30_1:
+; CHECK-NEXT:    .long 0xf1000000 @ float -6.338253E+29
+    %x = call <4 x i100> @llvm.fptosi.sat.v4f32.v4i100(<4 x float> %f)
+    ret <4 x i100> %x
+}
+
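+; For i128 each __fixsfti result is saturated 32 bits at a time and stored to
+; consecutive words of the returned memory.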
+define arm_aapcs_vfpcc <4 x i128> @test_signed_v4f32_v4i128(<4 x float> %f) {
+; CHECK-LABEL: test_signed_v4f32_v4i128:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    vmov r0, s19
+; CHECK-NEXT:    bl __fixsfti
+; CHECK-NEXT:    vmov r5, s18
+; CHECK-NEXT:    vldr s22, .LCPI31_0
+; CHECK-NEXT:    vldr s20, .LCPI31_1
+; CHECK-NEXT:    vmov r7, s16
+; CHECK-NEXT:    vcmp.f32 s19, s22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s19
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    str r3, [r4, #60]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s19
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    str r2, [r4, #56]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s19
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #52]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcmp.f32 s19, s19
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    str r0, [r4, #48]
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    vmov r6, s17
+; CHECK-NEXT:    bl __fixsfti
+; CHECK-NEXT:    vcmp.f32 s18, s22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    str r3, [r4, #44]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    str r2, [r4, #40]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #36]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    str r0, [r4, #32]
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    bl __fixsfti
+; CHECK-NEXT:    vcmp.f32 s17, s22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s17
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    str r3, [r4, #28]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s17
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    str r2, [r4, #24]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s17
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #20]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcmp.f32 s17, s17
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    str r0, [r4, #16]
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    bl __fixsfti
+; CHECK-NEXT:    vcmp.f32 s16, s22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    str r3, [r4, #12]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    str r2, [r4, #8]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #4]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    str r0, [r4]
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI31_0:
+; CHECK-NEXT:    .long 0xff000000 @ float -1.70141183E+38
+; CHECK-NEXT:  .LCPI31_1:
+; CHECK-NEXT:    .long 0x7effffff @ float 1.70141173E+38
+    %x = call <4 x i128> @llvm.fptosi.sat.v4f32.v4i128(<4 x float> %f)
+    ret <4 x i128> %x
+}
+
+;
+; 2-Vector double to signed integer -- result size variation
+;
+
+declare <2 x   i1> @llvm.fptosi.sat.v2f64.v2i1  (<2 x double>)
+declare <2 x   i8> @llvm.fptosi.sat.v2f64.v2i8  (<2 x double>)
+declare <2 x  i13> @llvm.fptosi.sat.v2f64.v2i13 (<2 x double>)
+declare <2 x  i16> @llvm.fptosi.sat.v2f64.v2i16 (<2 x double>)
+declare <2 x  i19> @llvm.fptosi.sat.v2f64.v2i19 (<2 x double>)
+declare <2 x  i50> @llvm.fptosi.sat.v2f64.v2i50 (<2 x double>)
+declare <2 x  i64> @llvm.fptosi.sat.v2f64.v2i64 (<2 x double>)
+declare <2 x i100> @llvm.fptosi.sat.v2f64.v2i100(<2 x double>)
+declare <2 x i128> @llvm.fptosi.sat.v2f64.v2i128(<2 x double>)
+
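+; The double-precision comparisons here are all soft-float libcalls:
+; __aeabi_dcmpgt/__aeabi_dcmpge bracket the value, __aeabi_d2lz converts, and
+; __aeabi_dcmpun catches NaN before the predicated selects.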
+define arm_aapcs_vfpcc <2 x i1> @test_signed_v2f64_v2i1(<2 x double> %f) {
+; CHECK-LABEL: test_signed_v2f64_v2i1:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI32_0
+; CHECK-NEXT:    vmov r9, r8, d9
+; CHECK-NEXT:    vmov r11, r10, d0
+; CHECK-NEXT:    str.w r11, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    str.w r10, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI32_1
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    str r0, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    vmov r5, r3, d0
+; CHECK-NEXT:    str r3, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT:    str r5, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    str r1, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r0, #-1
+; CHECK-NEXT:    ldr r1, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #0
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    vmov r7, r6, d8
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r4, #0
+; CHECK-NEXT:    str r4, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    ldr r5, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r11, #-1
+; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    mov r10, r1
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r11, #0
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r11, #0
+; CHECK-NEXT:    ldrd r2, r3, [sp, #20] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r5, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r5, #-1
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r5, #0
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r5, #0
+; CHECK-NEXT:    ldrd r2, r3, [sp, #20] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    ldr r3, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r10, #-1
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r10, #0
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r10, #0
+; CHECK-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    vmov q0[2], q0[0], r11, r0
+; CHECK-NEXT:    vmov q0[3], q0[1], r10, r5
+; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI32_0:
+; CHECK-NEXT:    .long 0 @ double 0
+; CHECK-NEXT:    .long 0
+; CHECK-NEXT:  .LCPI32_1:
+; CHECK-NEXT:    .long 0 @ double -1
+; CHECK-NEXT:    .long 3220176896
+    %x = call <2 x i1> @llvm.fptosi.sat.v2f64.v2i1(<2 x double> %f)
+    ret <2 x i1> %x
+}
+
+define arm_aapcs_vfpcc <2 x i8> @test_signed_v2f64_v2i8(<2 x double> %f) {
+; CHECK-LABEL: test_signed_v2f64_v2i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI33_0
+; CHECK-NEXT:    vmov r9, r8, d9
+; CHECK-NEXT:    vmov r11, r10, d0
+; CHECK-NEXT:    str.w r11, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    str.w r10, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI33_1
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    str r0, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    vmov r5, r3, d0
+; CHECK-NEXT:    str r3, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT:    str r5, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    str r1, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    mvneq r0, #127
+; CHECK-NEXT:    ldr r1, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #127
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    vmov r7, r6, d8
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r4, #0
+; CHECK-NEXT:    str r4, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    ldr r5, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    mvneq r11, #127
+; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    mov r10, r1
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r11, #127
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r11, #0
+; CHECK-NEXT:    ldrd r2, r3, [sp, #20] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r5, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r5, #-1
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r5, #0
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r5, #0
+; CHECK-NEXT:    ldrd r2, r3, [sp, #20] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    ldr r3, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r10, #-1
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r10, #0
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r10, #0
+; CHECK-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    vmov q0[2], q0[0], r11, r0
+; CHECK-NEXT:    vmov q0[3], q0[1], r10, r5
+; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI33_0:
+; CHECK-NEXT:    .long 0 @ double 127
+; CHECK-NEXT:    .long 1080016896
+; CHECK-NEXT:  .LCPI33_1:
+; CHECK-NEXT:    .long 0 @ double -128
+; CHECK-NEXT:    .long 3227516928
+    %x = call <2 x i8> @llvm.fptosi.sat.v2f64.v2i8(<2 x double> %f)
+    ret <2 x i8> %x
+}
+
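+; Out-of-range lane values are materialized with movw/movt pairs (e.g.
+; 0xfffff000 for the i13 minimum) rather than loaded from the constant pool.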
+define arm_aapcs_vfpcc <2 x i13> @test_signed_v2f64_v2i13(<2 x double> %f) {
+; CHECK-LABEL: test_signed_v2f64_v2i13:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI34_0
+; CHECK-NEXT:    vmov r10, r11, d9
+; CHECK-NEXT:    vmov r8, r5, d0
+; CHECK-NEXT:    str.w r8, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r10
+; CHECK-NEXT:    mov r1, r11
+; CHECK-NEXT:    mov r2, r8
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    str r5, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI34_1
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r10
+; CHECK-NEXT:    mov r1, r11
+; CHECK-NEXT:    vmov r2, r3, d0
+; CHECK-NEXT:    strd r3, r2, [sp, #16] @ 8-byte Folded Spill
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r10
+; CHECK-NEXT:    mov r1, r11
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r1, #-1
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r1, #0
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    mov r0, r10
+; CHECK-NEXT:    mov r1, r11
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    vmov r7, r6, d8
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r8
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r4, #0
+; CHECK-NEXT:    str r4, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldrd r5, r2, [sp, #16] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r8, r1
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r8, #-1
+; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r8, #0
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r8, #0
+; CHECK-NEXT:    ldr r4, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    itt eq
+; CHECK-NEXT:    movweq r9, #61440
+; CHECK-NEXT:    movteq r9, #65535
+; CHECK-NEXT:    ldrd r2, r3, [sp, #24] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r10
+; CHECK-NEXT:    mov r1, r11
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r3, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r10
+; CHECK-NEXT:    mov r1, r11
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    ldr r0, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    itt eq
+; CHECK-NEXT:    movweq r0, #61440
+; CHECK-NEXT:    movteq r0, #65535
+; CHECK-NEXT:    cmp r5, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movwne r0, #4095
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r10
+; CHECK-NEXT:    mov r1, r11
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r5, #0
+; CHECK-NEXT:    ldrd r2, r3, [sp, #24] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movwne r9, #4095
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r9, #0
+; CHECK-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    vmov q0[2], q0[0], r9, r5
+; CHECK-NEXT:    vmov q0[3], q0[1], r8, r0
+; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI34_0:
+; CHECK-NEXT:    .long 0 @ double 4095
+; CHECK-NEXT:    .long 1085275648
+; CHECK-NEXT:  .LCPI34_1:
+; CHECK-NEXT:    .long 0 @ double -4096
+; CHECK-NEXT:    .long 3232759808
+    %x = call <2 x i13> @llvm.fptosi.sat.v2f64.v2i13(<2 x double> %f)
+    ret <2 x i13> %x
+}
+
+define arm_aapcs_vfpcc <2 x i16> @test_signed_v2f64_v2i16(<2 x double> %f) {
+; CHECK-LABEL: test_signed_v2f64_v2i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI35_0
+; CHECK-NEXT:    vmov r10, r11, d9
+; CHECK-NEXT:    vmov r8, r5, d0
+; CHECK-NEXT:    str.w r8, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r10
+; CHECK-NEXT:    mov r1, r11
+; CHECK-NEXT:    mov r2, r8
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    str r5, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI35_1
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r10
+; CHECK-NEXT:    mov r1, r11
+; CHECK-NEXT:    vmov r2, r3, d0
+; CHECK-NEXT:    strd r3, r2, [sp, #16] @ 8-byte Folded Spill
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r10
+; CHECK-NEXT:    mov r1, r11
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r1, #-1
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r1, #0
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    mov r0, r10
+; CHECK-NEXT:    mov r1, r11
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    vmov r7, r6, d8
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r8
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r4, #0
+; CHECK-NEXT:    str r4, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldrd r5, r2, [sp, #16] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r8, r1
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r8, #-1
+; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r8, #0
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r8, #0
+; CHECK-NEXT:    ldr r4, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    itt eq
+; CHECK-NEXT:    movweq r9, #32768
+; CHECK-NEXT:    movteq r9, #65535
+; CHECK-NEXT:    ldrd r2, r3, [sp, #24] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r10
+; CHECK-NEXT:    mov r1, r11
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r3, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r10
+; CHECK-NEXT:    mov r1, r11
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    ldr r0, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    itt eq
+; CHECK-NEXT:    movweq r0, #32768
+; CHECK-NEXT:    movteq r0, #65535
+; CHECK-NEXT:    cmp r5, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movwne r0, #32767
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r10
+; CHECK-NEXT:    mov r1, r11
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r5, #0
+; CHECK-NEXT:    ldrd r2, r3, [sp, #24] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movwne r9, #32767
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r9, #0
+; CHECK-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    vmov q0[2], q0[0], r9, r5
+; CHECK-NEXT:    vmov q0[3], q0[1], r8, r0
+; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI35_0:
+; CHECK-NEXT:    .long 0 @ double 32767
+; CHECK-NEXT:    .long 1088421824
+; CHECK-NEXT:  .LCPI35_1:
+; CHECK-NEXT:    .long 0 @ double -32768
+; CHECK-NEXT:    .long 3235905536
+    %x = call <2 x i16> @llvm.fptosi.sat.v2f64.v2i16(<2 x double> %f)
+    ret <2 x i16> %x
+}
+
+define arm_aapcs_vfpcc <2 x i19> @test_signed_v2f64_v2i19(<2 x double> %f) {
+; CHECK-LABEL: test_signed_v2f64_v2i19:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #48
+; CHECK-NEXT:    sub sp, #48
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI36_0
+; CHECK-NEXT:    vmov r6, r5, d9
+; CHECK-NEXT:    vmov r8, r3, d0
+; CHECK-NEXT:    str r3, [sp, #36] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r8
+; CHECK-NEXT:    str.w r8, [sp, #44] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI36_1
+; CHECK-NEXT:    mov r10, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    vmov r2, r3, d0
+; CHECK-NEXT:    strd r2, r3, [sp, #16] @ 8-byte Folded Spill
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    str r0, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    cmp.w r11, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r1, #-1
+; CHECK-NEXT:    cmp.w r10, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r1, #0
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r1
+; CHECK-NEXT:    str r5, [sp, #40] @ 4-byte Spill
+; CHECK-NEXT:    vmov r7, r9, d8
+; CHECK-NEXT:    mov r5, r6
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r4, #0
+; CHECK-NEXT:    str r4, [sp, #32] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    ldr r4, [sp, #36] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r9
+; CHECK-NEXT:    mov r2, r8
+; CHECK-NEXT:    mov r6, r9
+; CHECK-NEXT:    str.w r9, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r1, r9
+; CHECK-NEXT:    ldr.w r9, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    ldr.w r8, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    mov r6, r5
+; CHECK-NEXT:    mov r10, r1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    itt eq
+; CHECK-NEXT:    movweq r11, #0
+; CHECK-NEXT:    movteq r11, #65532
+; CHECK-NEXT:    ldr r0, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    movwne r11, #65535
+; CHECK-NEXT:    movtne r11, #3
+; CHECK-NEXT:    str r5, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    ldr r5, [sp, #40] @ 4-byte Reload
+; CHECK-NEXT:    ldr r2, [sp, #44] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    mov r6, r8
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r4, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    itt eq
+; CHECK-NEXT:    moveq r4, #0
+; CHECK-NEXT:    movteq r4, #65532
+; CHECK-NEXT:    ldr r0, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    movwne r4, #65535
+; CHECK-NEXT:    movtne r4, #3
+; CHECK-NEXT:    ldr r5, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    ldr r2, [sp, #44] @ 4-byte Reload
+; CHECK-NEXT:    ldr r3, [sp, #36] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r10, #-1
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r10, #0
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r10, #0
+; CHECK-NEXT:    ldr r0, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    ldr r1, [sp, #40] @ 4-byte Reload
+; CHECK-NEXT:    mov r2, r0
+; CHECK-NEXT:    mov r3, r1
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r4, #0
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r11, #0
+; CHECK-NEXT:    ldr r0, [sp, #32] @ 4-byte Reload
+; CHECK-NEXT:    vmov q0[2], q0[0], r11, r4
+; CHECK-NEXT:    vmov q0[3], q0[1], r10, r0
+; CHECK-NEXT:    add sp, #48
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI36_0:
+; CHECK-NEXT:    .long 0 @ double 262143
+; CHECK-NEXT:    .long 1091567608
+; CHECK-NEXT:  .LCPI36_1:
+; CHECK-NEXT:    .long 0 @ double -262144
+; CHECK-NEXT:    .long 3239051264
+    %x = call <2 x i19> @llvm.fptosi.sat.v2f64.v2i19(<2 x double> %f)
+    ret <2 x i19> %x
+}
+
+define arm_aapcs_vfpcc <2 x i32> @test_signed_v2f64_v2i32_duplicate(<2 x double> %f) {
+; CHECK-LABEL: test_signed_v2f64_v2i32_duplicate:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI37_0
+; CHECK-NEXT:    vmov r9, r8, d9
+; CHECK-NEXT:    vmov r11, r10, d0
+; CHECK-NEXT:    str.w r11, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    str.w r10, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI37_1
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    str r0, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    vmov r5, r3, d0
+; CHECK-NEXT:    str r3, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT:    str r5, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    str r1, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r0, #-2147483648
+; CHECK-NEXT:    ldr r1, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r0, #-2147483648
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    vmov r7, r6, d8
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r4, #0
+; CHECK-NEXT:    str r4, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    ldr r5, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r11, #-2147483648
+; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    mov r10, r1
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r11, #-2147483648
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r11, #0
+; CHECK-NEXT:    ldrd r2, r3, [sp, #20] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r5, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r5, #-1
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r5, #0
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r5, #0
+; CHECK-NEXT:    ldrd r2, r3, [sp, #20] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    ldr r3, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r10, #-1
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r10, #0
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r10, #0
+; CHECK-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    vmov q0[2], q0[0], r11, r0
+; CHECK-NEXT:    vmov q0[3], q0[1], r10, r5
+; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI37_0:
+; CHECK-NEXT:    .long 4290772992 @ double 2147483647
+; CHECK-NEXT:    .long 1105199103
+; CHECK-NEXT:  .LCPI37_1:
+; CHECK-NEXT:    .long 0 @ double -2147483648
+; CHECK-NEXT:    .long 3252682752
+    %x = call <2 x i32> @llvm.fptosi.sat.v2f64.v2i32(<2 x double> %f)
+    ret <2 x i32> %x
+}
+
+define arm_aapcs_vfpcc <2 x i50> @test_signed_v2f64_v2i50(<2 x double> %f) {
+; CHECK-LABEL: test_signed_v2f64_v2i50:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI38_0
+; CHECK-NEXT:    vmov r8, r5, d9
+; CHECK-NEXT:    vmov r2, r3, d0
+; CHECK-NEXT:    str r2, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    str r3, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI38_1
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    vmov r10, r9, d0
+; CHECK-NEXT:    str.w r10, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    str.w r9, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    cmp.w r11, #0
+; CHECK-NEXT:    str r1, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    csel r0, r0, r11, ne
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r8
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    vmov r6, r7, d8
+; CHECK-NEXT:    mov r11, r5
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r4, #0
+; CHECK-NEXT:    str r4, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    ldr r5, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    ldr r4, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    str r0, [sp] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r10, r1
+; CHECK-NEXT:    str r0, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    itt eq
+; CHECK-NEXT:    movweq r10, #0
+; CHECK-NEXT:    movteq r10, #65534
+; CHECK-NEXT:    ldr r0, [sp] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r11
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    movwne r10, #65535
+; CHECK-NEXT:    movtne r10, #1
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr.w r9, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r11
+; CHECK-NEXT:    mov r5, r11
+; CHECK-NEXT:    ldr.w r11, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    str r0, [sp] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    ldr r0, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    itt eq
+; CHECK-NEXT:    moveq r0, #0
+; CHECK-NEXT:    movteq r0, #65534
+; CHECK-NEXT:    ldr r1, [sp] @ 4-byte Reload
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    movwne r0, #65535
+; CHECK-NEXT:    movtne r0, #1
+; CHECK-NEXT:    ldr r2, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    str r0, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    str r0, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    csel r4, r1, r0, ne
+; CHECK-NEXT:    ldr r0, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r4, #-1
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r8
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r4, #0
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    ldr r5, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r5, #0
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r10, #0
+; CHECK-NEXT:    ldr r0, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    vmov q0[2], q0[0], r4, r0
+; CHECK-NEXT:    vmov q0[3], q0[1], r10, r5
+; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI38_0:
+; CHECK-NEXT:    .long 4294967280 @ double 562949953421311
+; CHECK-NEXT:    .long 1124073471
+; CHECK-NEXT:  .LCPI38_1:
+; CHECK-NEXT:    .long 0 @ double -562949953421312
+; CHECK-NEXT:    .long 3271557120
+    %x = call <2 x i50> @llvm.fptosi.sat.v2f64.v2i50(<2 x double> %f)
+    ret <2 x i50> %x
+}
+
+define arm_aapcs_vfpcc <2 x i64> @test_signed_v2f64_v2i64(<2 x double> %f) {
+; CHECK-LABEL: test_signed_v2f64_v2i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI39_0
+; CHECK-NEXT:    vmov r8, r5, d9
+; CHECK-NEXT:    vmov r11, r3, d0
+; CHECK-NEXT:    str r3, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    str.w r11, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI39_1
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    vmov r2, r3, d0
+; CHECK-NEXT:    str r2, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    str r3, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r10, r0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    cmp.w r10, #0
+; CHECK-NEXT:    str r1, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    csel r0, r0, r10, ne
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r8
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    vmov r7, r6, d8
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r4, #0
+; CHECK-NEXT:    ldr r3, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    str r4, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r4, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    ldr.w r10, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    cmp.w r11, #0
+; CHECK-NEXT:    mov r9, r1
+; CHECK-NEXT:    csel r11, r0, r11, ne
+; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r11, #-1
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r11, #0
+; CHECK-NEXT:    ldrd r2, r3, [sp, #24] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r0, #-2147483648
+; CHECK-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    mov r2, r8
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r0, #-2147483648
+; CHECK-NEXT:    mov r10, r0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r10, #0
+; CHECK-NEXT:    ldrd r2, r3, [sp, #24] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r3, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r9, #-2147483648
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    cmp r5, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r9, #-2147483648
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r9, #0
+; CHECK-NEXT:    ldr r0, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    vmov q0[2], q0[0], r11, r0
+; CHECK-NEXT:    vmov q0[3], q0[1], r9, r10
+; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI39_0:
+; CHECK-NEXT:    .long 4294967295 @ double 9.2233720368547748E+18
+; CHECK-NEXT:    .long 1138753535
+; CHECK-NEXT:  .LCPI39_1:
+; CHECK-NEXT:    .long 0 @ double -9.2233720368547758E+18
+; CHECK-NEXT:    .long 3286237184
+    %x = call <2 x i64> @llvm.fptosi.sat.v2f64.v2i64(<2 x double> %f)
+    ret <2 x i64> %x
+}
+
+define arm_aapcs_vfpcc <2 x i100> @test_signed_v2f64_v2i100(<2 x double> %f) {
+; CHECK-LABEL: test_signed_v2f64_v2i100:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #48
+; CHECK-NEXT:    sub sp, #48
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI40_0
+; CHECK-NEXT:    vmov r6, r5, d8
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    vmov r9, r8, d0
+; CHECK-NEXT:    str.w r8, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI40_1
+; CHECK-NEXT:    mov r10, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    vmov r7, r3, d0
+; CHECK-NEXT:    str r3, [sp, #32] @ 4-byte Spill
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __fixdfti
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    strd r1, r0, [sp, #8] @ 8-byte Folded Spill
+; CHECK-NEXT:    csel r4, r2, r4, ne
+; CHECK-NEXT:    str r3, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    cmp.w r10, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r4, #-1
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r4, #0
+; CHECK-NEXT:    str.w r11, [sp, #44] @ 4-byte Spill
+; CHECK-NEXT:    str.w r4, [r11, #8]
+; CHECK-NEXT:    str.w r9, [sp, #40] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r4, [sp, #32] @ 4-byte Reload
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r10, r7
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    csel r7, r1, r0, ne
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r7, #-1
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r7, #0
+; CHECK-NEXT:    str.w r7, [r11, #4]
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    ldr.w r11, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    str.w r10, [sp, #36] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    csel r7, r1, r0, ne
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r7, #-1
+; CHECK-NEXT:    str r6, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT:    str r5, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    vmov r9, r8, d9
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r7, #0
+; CHECK-NEXT:    ldr r0, [sp, #44] @ 4-byte Reload
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    mov r5, r11
+; CHECK-NEXT:    str r7, [r0]
+; CHECK-NEXT:    ldr r7, [sp, #40] @ 4-byte Reload
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r4, [sp, #32] @ 4-byte Reload
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    bl __fixdfti
+; CHECK-NEXT:    cmp.w r11, #0
+; CHECK-NEXT:    strd r2, r0, [sp, #4] @ 8-byte Folded Spill
+; CHECK-NEXT:    csel r10, r1, r11, ne
+; CHECK-NEXT:    str r3, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    cmp r6, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r10, #-1
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r10, #0
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r6, [sp, #36] @ 4-byte Reload
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    csel r4, r1, r0, ne
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    cmp.w r11, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r4, #-1
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r4, #0
+; CHECK-NEXT:    ldr r1, [sp, #44] @ 4-byte Reload
+; CHECK-NEXT:    lsr.w r0, r10, #28
+; CHECK-NEXT:    orr.w r0, r0, r4, lsl #4
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    mov r7, r5
+; CHECK-NEXT:    str r0, [r1, #20]
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    ldr r6, [sp, #32] @ 4-byte Reload
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    csel r11, r1, r0, ne
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    cmp r5, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r11, #-1
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r11, #0
+; CHECK-NEXT:    ldr r5, [sp, #44] @ 4-byte Reload
+; CHECK-NEXT:    lsr.w r0, r11, #28
+; CHECK-NEXT:    orr.w r0, r0, r10, lsl #4
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    str r0, [r5, #16]
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    ldr r2, [sp, #40] @ 4-byte Reload
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r2, [sp, #36] @ 4-byte Reload
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    mov r10, r6
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    ldr r0, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    mvneq r0, #7
+; CHECK-NEXT:    cmp r7, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #7
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    lsr.w r0, r4, #28
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r6, #0
+; CHECK-NEXT:    orr.w r0, r0, r6, lsl #4
+; CHECK-NEXT:    strb r0, [r5, #24]
+; CHECK-NEXT:    ldr r7, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    ldr r4, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    ldr r2, [sp, #40] @ 4-byte Reload
+; CHECK-NEXT:    ldr r3, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r2, [sp, #36] @ 4-byte Reload
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    mov r6, r4
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    ldr r0, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    mvneq r0, #7
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #7
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r4, #0
+; CHECK-NEXT:    and r0, r4, #15
+; CHECK-NEXT:    orr.w r0, r0, r11, lsl #4
+; CHECK-NEXT:    str r0, [r5, #12]
+; CHECK-NEXT:    add sp, #48
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI40_0:
+; CHECK-NEXT:    .long 4294967295 @ double 6.3382530011411463E+29
+; CHECK-NEXT:    .long 1176502271
+; CHECK-NEXT:  .LCPI40_1:
+; CHECK-NEXT:    .long 0 @ double -6.338253001141147E+29
+; CHECK-NEXT:    .long 3323985920
+    %x = call <2 x i100> @llvm.fptosi.sat.v2f64.v2i100(<2 x double> %f)
+    ret <2 x i100> %x
+}
+
+define arm_aapcs_vfpcc <2 x i128> @test_signed_v2f64_v2i128(<2 x double> %f) {
+; CHECK-LABEL: test_signed_v2f64_v2i128:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI41_0
+; CHECK-NEXT:    vmov r8, r7, d9
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    vmov r2, r3, d0
+; CHECK-NEXT:    str r0, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    str r2, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r11, r3
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI41_1
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    vmov r4, r3, d0
+; CHECK-NEXT:    str r3, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    bl __fixdfti
+; CHECK-NEXT:    mov r10, r3
+; CHECK-NEXT:    strd r2, r1, [sp] @ 8-byte Folded Spill
+; CHECK-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r10, #-2147483648
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r8
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    cmp r5, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r10, #-2147483648
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r10, #0
+; CHECK-NEXT:    str.w r10, [r6, #28]
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    ldr.w r9, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    mov r5, r11
+; CHECK-NEXT:    str.w r11, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr.w r10, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    mov r11, r4
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    str r4, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r2, r8
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    csel r4, r1, r0, ne
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    cmp r6, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r4, #-1
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r4, #0
+; CHECK-NEXT:    ldr r6, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    str r4, [r6, #24]
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r2, r8
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    csel r4, r1, r0, ne
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    cmp r5, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r4, #-1
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r4, #0
+; CHECK-NEXT:    str r4, [r6, #20]
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    ldr.w r10, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r11, r6
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldrd r2, r3, [sp, #20] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r2, r8
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    csel r4, r1, r0, ne
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    vmov r6, r5, d8
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r4, #-1
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r4, #0
+; CHECK-NEXT:    str.w r4, [r11, #16]
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    ldr r7, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr.w r9, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    mov r10, r0
+; CHECK-NEXT:    ldr.w r8, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __fixdfti
+; CHECK-NEXT:    mov r4, r3
+; CHECK-NEXT:    strd r2, r1, [sp] @ 8-byte Folded Spill
+; CHECK-NEXT:    str r0, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    cmp.w r11, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    moveq.w r4, #-2147483648
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    cmp.w r10, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    mvnne r4, #-2147483648
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r4, #0
+; CHECK-NEXT:    ldr.w r10, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    str.w r4, [r10, #12]
+; CHECK-NEXT:    ldr.w r11, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    csel r7, r1, r0, ne
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r7, #-1
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r7, #0
+; CHECK-NEXT:    str.w r7, [r10, #8]
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    csel r7, r1, r0, ne
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r7, #-1
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r7, #0
+; CHECK-NEXT:    str.w r7, [r10, #4]
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    csel r7, r1, r0, ne
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r7, #-1
+; CHECK-NEXT:    bl __aeabi_dcmpun
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r7, #0
+; CHECK-NEXT:    str.w r7, [r10]
+; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI41_0:
+; CHECK-NEXT:    .long 4294967295 @ double 1.7014118346046921E+38
+; CHECK-NEXT:    .long 1205862399
+; CHECK-NEXT:  .LCPI41_1:
+; CHECK-NEXT:    .long 0 @ double -1.7014118346046923E+38
+; CHECK-NEXT:    .long 3353346048
+    %x = call <2 x i128> @llvm.fptosi.sat.v2f64.v2i128(<2 x double> %f)
+    ret <2 x i128> %x
+}
+
+;
+; 8-Vector half to signed integer -- result size variation
+;
+
+declare <8 x   i1> @llvm.fptosi.sat.v8f16.v8i1  (<8 x half>)
+declare <8 x   i8> @llvm.fptosi.sat.v8f16.v8i8  (<8 x half>)
+declare <8 x  i13> @llvm.fptosi.sat.v8f16.v8i13 (<8 x half>)
+declare <8 x  i16> @llvm.fptosi.sat.v8f16.v8i16 (<8 x half>)
+declare <8 x  i19> @llvm.fptosi.sat.v8f16.v8i19 (<8 x half>)
+declare <8 x  i50> @llvm.fptosi.sat.v8f16.v8i50 (<8 x half>)
+declare <8 x  i64> @llvm.fptosi.sat.v8f16.v8i64 (<8 x half>)
+declare <8 x i100> @llvm.fptosi.sat.v8f16.v8i100(<8 x half>)
+declare <8 x i128> @llvm.fptosi.sat.v8f16.v8i128(<8 x half>)
+
+define arm_aapcs_vfpcc <8 x i1> @test_signed_v8f16_v8i1(<8 x half> %f) {
+; CHECK-LABEL: test_signed_v8f16_v8i1:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .vsave {d8}
+; CHECK-NEXT:    vpush {d8}
+; CHECK-NEXT:    vcvtb.f32.f16 s15, s0
+; CHECK-NEXT:    vmov.f32 s5, #-1.000000e+00
+; CHECK-NEXT:    vldr s7, .LCPI42_0
+; CHECK-NEXT:    vmaxnm.f32 s16, s15, s5
+; CHECK-NEXT:    vcvtt.f32.f16 s12, s2
+; CHECK-NEXT:    vcvtt.f32.f16 s9, s1
+; CHECK-NEXT:    vminnm.f32 s16, s16, s7
+; CHECK-NEXT:    vcvtt.f32.f16 s4, s3
+; CHECK-NEXT:    vcvt.s32.f32 s16, s16
+; CHECK-NEXT:    vcvtb.f32.f16 s8, s3
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s1, s1
+; CHECK-NEXT:    vcvtt.f32.f16 s0, s0
+; CHECK-NEXT:    vmaxnm.f32 s6, s4, s5
+; CHECK-NEXT:    vmaxnm.f32 s10, s8, s5
+; CHECK-NEXT:    vmaxnm.f32 s14, s12, s5
+; CHECK-NEXT:    vmaxnm.f32 s3, s2, s5
+; CHECK-NEXT:    vmaxnm.f32 s11, s9, s5
+; CHECK-NEXT:    vmaxnm.f32 s13, s1, s5
+; CHECK-NEXT:    vmaxnm.f32 s5, s0, s5
+; CHECK-NEXT:    vminnm.f32 s5, s5, s7
+; CHECK-NEXT:    vminnm.f32 s13, s13, s7
+; CHECK-NEXT:    vcvt.s32.f32 s5, s5
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    vcmp.f32 s15, s15
+; CHECK-NEXT:    vminnm.f32 s11, s11, s7
+; CHECK-NEXT:    vmov r2, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vcvt.s32.f32 s13, s13
+; CHECK-NEXT:    and r2, r2, #1
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    bfi r1, r2, #0, #1
+; CHECK-NEXT:    vcvt.s32.f32 s11, s11
+; CHECK-NEXT:    vmov r2, s5
+; CHECK-NEXT:    vminnm.f32 s3, s3, s7
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vcmp.f32 s1, s1
+; CHECK-NEXT:    and r2, r2, #1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    rsb.w r2, r2, #0
+; CHECK-NEXT:    vcvt.s32.f32 s3, s3
+; CHECK-NEXT:    bfi r1, r2, #1, #1
+; CHECK-NEXT:    vmov r2, s13
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vminnm.f32 s14, s14, s7
+; CHECK-NEXT:    and r2, r2, #1
+; CHECK-NEXT:    vcmp.f32 s9, s9
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    bfi r1, r2, #2, #1
+; CHECK-NEXT:    vmov r2, s11
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vcvt.s32.f32 s14, s14
+; CHECK-NEXT:    and r2, r2, #1
+; CHECK-NEXT:    vminnm.f32 s10, s10, s7
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vcmp.f32 s2, s2
+; CHECK-NEXT:    bfi r1, r2, #3, #1
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vcvt.s32.f32 s10, s10
+; CHECK-NEXT:    and r2, r2, #1
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vminnm.f32 s6, s6, s7
+; CHECK-NEXT:    bfi r1, r2, #4, #1
+; CHECK-NEXT:    vcmp.f32 s12, s12
+; CHECK-NEXT:    vmov r2, s14
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vcvt.s32.f32 s6, s6
+; CHECK-NEXT:    and r2, r2, #1
+; CHECK-NEXT:    vcmp.f32 s8, s8
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    bfi r1, r2, #5, #1
+; CHECK-NEXT:    vmov r2, s10
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vcmp.f32 s4, s4
+; CHECK-NEXT:    and r2, r2, #1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    rsb.w r2, r2, #0
+; CHECK-NEXT:    bfi r1, r2, #6, #1
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    and r2, r2, #1
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    bfi r1, r2, #7, #1
+; CHECK-NEXT:    strb r1, [r0]
+; CHECK-NEXT:    vpop {d8}
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI42_0:
+; CHECK-NEXT:    .long 0x00000000 @ float 0
+    %x = call <8 x i1> @llvm.fptosi.sat.v8f16.v8i1(<8 x half> %f)
+    ret <8 x i1> %x
+}
+
+define arm_aapcs_vfpcc <8 x i8> @test_signed_v8f16_v8i8(<8 x half> %f) {
+; CHECK-LABEL: test_signed_v8f16_v8i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    .vsave {d8}
+; CHECK-NEXT:    vpush {d8}
+; CHECK-NEXT:    vldr s8, .LCPI43_1
+; CHECK-NEXT:    vcvtt.f32.f16 s13, s3
+; CHECK-NEXT:    vcvtb.f32.f16 s3, s3
+; CHECK-NEXT:    vldr s6, .LCPI43_0
+; CHECK-NEXT:    vmaxnm.f32 s16, s3, s8
+; CHECK-NEXT:    vcvtt.f32.f16 s4, s0
+; CHECK-NEXT:    vcvtt.f32.f16 s12, s1
+; CHECK-NEXT:    vcvtt.f32.f16 s7, s2
+; CHECK-NEXT:    vmaxnm.f32 s15, s13, s8
+; CHECK-NEXT:    vminnm.f32 s16, s16, s6
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
+; CHECK-NEXT:    vcvtb.f32.f16 s1, s1
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s2
+; CHECK-NEXT:    vmaxnm.f32 s10, s4, s8
+; CHECK-NEXT:    vmaxnm.f32 s14, s12, s8
+; CHECK-NEXT:    vmaxnm.f32 s5, s0, s8
+; CHECK-NEXT:    vmaxnm.f32 s9, s7, s8
+; CHECK-NEXT:    vmaxnm.f32 s11, s1, s8
+; CHECK-NEXT:    vminnm.f32 s15, s15, s6
+; CHECK-NEXT:    vcvt.s32.f32 s16, s16
+; CHECK-NEXT:    vmaxnm.f32 s8, s2, s8
+; CHECK-NEXT:    vminnm.f32 s10, s10, s6
+; CHECK-NEXT:    vminnm.f32 s14, s14, s6
+; CHECK-NEXT:    vminnm.f32 s5, s5, s6
+; CHECK-NEXT:    vminnm.f32 s9, s9, s6
+; CHECK-NEXT:    vminnm.f32 s11, s11, s6
+; CHECK-NEXT:    vminnm.f32 s6, s8, s6
+; CHECK-NEXT:    vcvt.s32.f32 s15, s15
+; CHECK-NEXT:    vcvt.s32.f32 s6, s6
+; CHECK-NEXT:    vcvt.s32.f32 s9, s9
+; CHECK-NEXT:    vcvt.s32.f32 s11, s11
+; CHECK-NEXT:    vcvt.s32.f32 s14, s14
+; CHECK-NEXT:    vcvt.s32.f32 s5, s5
+; CHECK-NEXT:    vcvt.s32.f32 s10, s10
+; CHECK-NEXT:    vcmp.f32 s3, s3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r12, s16
+; CHECK-NEXT:    vcmp.f32 s13, s13
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w r12, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov lr, s15
+; CHECK-NEXT:    vcmp.f32 s2, s2
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w lr, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    vcmp.f32 s7, s7
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s9
+; CHECK-NEXT:    vcmp.f32 s1, s1
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r0, s11
+; CHECK-NEXT:    vcmp.f32 s12, s12
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s14
+; CHECK-NEXT:    vmov r4, s5
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r4, #0
+; CHECK-NEXT:    vcmp.f32 s4, s4
+; CHECK-NEXT:    vmov.16 q0[0], r4
+; CHECK-NEXT:    vmov r5, s10
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r5, #0
+; CHECK-NEXT:    vmov.16 q0[1], r5
+; CHECK-NEXT:    vmov.16 q0[2], r0
+; CHECK-NEXT:    vmov.16 q0[3], r1
+; CHECK-NEXT:    vmov.16 q0[4], r2
+; CHECK-NEXT:    vmov.16 q0[5], r3
+; CHECK-NEXT:    vmov.16 q0[6], r12
+; CHECK-NEXT:    vmov.16 q0[7], lr
+; CHECK-NEXT:    vpop {d8}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI43_0:
+; CHECK-NEXT:    .long 0x42fe0000 @ float 127
+; CHECK-NEXT:  .LCPI43_1:
+; CHECK-NEXT:    .long 0xc3000000 @ float -128
+    %x = call <8 x i8> @llvm.fptosi.sat.v8f16.v8i8(<8 x half> %f)
+    ret <8 x i8> %x
+}
+
+define arm_aapcs_vfpcc <8 x i13> @test_signed_v8f16_v8i13(<8 x half> %f) {
+; CHECK-LABEL: test_signed_v8f16_v8i13:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    .vsave {d8}
+; CHECK-NEXT:    vpush {d8}
+; CHECK-NEXT:    vldr s8, .LCPI44_1
+; CHECK-NEXT:    vcvtt.f32.f16 s13, s3
+; CHECK-NEXT:    vcvtb.f32.f16 s3, s3
+; CHECK-NEXT:    vldr s6, .LCPI44_0
+; CHECK-NEXT:    vmaxnm.f32 s16, s3, s8
+; CHECK-NEXT:    vcvtt.f32.f16 s4, s0
+; CHECK-NEXT:    vcvtt.f32.f16 s12, s1
+; CHECK-NEXT:    vcvtt.f32.f16 s7, s2
+; CHECK-NEXT:    vmaxnm.f32 s15, s13, s8
+; CHECK-NEXT:    vminnm.f32 s16, s16, s6
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
+; CHECK-NEXT:    vcvtb.f32.f16 s1, s1
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s2
+; CHECK-NEXT:    vmaxnm.f32 s10, s4, s8
+; CHECK-NEXT:    vmaxnm.f32 s14, s12, s8
+; CHECK-NEXT:    vmaxnm.f32 s5, s0, s8
+; CHECK-NEXT:    vmaxnm.f32 s9, s7, s8
+; CHECK-NEXT:    vmaxnm.f32 s11, s1, s8
+; CHECK-NEXT:    vminnm.f32 s15, s15, s6
+; CHECK-NEXT:    vcvt.s32.f32 s16, s16
+; CHECK-NEXT:    vmaxnm.f32 s8, s2, s8
+; CHECK-NEXT:    vminnm.f32 s10, s10, s6
+; CHECK-NEXT:    vminnm.f32 s14, s14, s6
+; CHECK-NEXT:    vminnm.f32 s5, s5, s6
+; CHECK-NEXT:    vminnm.f32 s9, s9, s6
+; CHECK-NEXT:    vminnm.f32 s11, s11, s6
+; CHECK-NEXT:    vminnm.f32 s6, s8, s6
+; CHECK-NEXT:    vcvt.s32.f32 s15, s15
+; CHECK-NEXT:    vcvt.s32.f32 s6, s6
+; CHECK-NEXT:    vcvt.s32.f32 s9, s9
+; CHECK-NEXT:    vcvt.s32.f32 s11, s11
+; CHECK-NEXT:    vcvt.s32.f32 s14, s14
+; CHECK-NEXT:    vcvt.s32.f32 s5, s5
+; CHECK-NEXT:    vcvt.s32.f32 s10, s10
+; CHECK-NEXT:    vcmp.f32 s3, s3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r12, s16
+; CHECK-NEXT:    vcmp.f32 s13, s13
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w r12, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov lr, s15
+; CHECK-NEXT:    vcmp.f32 s2, s2
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w lr, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    vcmp.f32 s7, s7
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s9
+; CHECK-NEXT:    vcmp.f32 s1, s1
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r0, s11
+; CHECK-NEXT:    vcmp.f32 s12, s12
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s14
+; CHECK-NEXT:    vmov r4, s5
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r4, #0
+; CHECK-NEXT:    vcmp.f32 s4, s4
+; CHECK-NEXT:    vmov.16 q0[0], r4
+; CHECK-NEXT:    vmov r5, s10
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r5, #0
+; CHECK-NEXT:    vmov.16 q0[1], r5
+; CHECK-NEXT:    vmov.16 q0[2], r0
+; CHECK-NEXT:    vmov.16 q0[3], r1
+; CHECK-NEXT:    vmov.16 q0[4], r2
+; CHECK-NEXT:    vmov.16 q0[5], r3
+; CHECK-NEXT:    vmov.16 q0[6], r12
+; CHECK-NEXT:    vmov.16 q0[7], lr
+; CHECK-NEXT:    vpop {d8}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI44_0:
+; CHECK-NEXT:    .long 0x457ff000 @ float 4095
+; CHECK-NEXT:  .LCPI44_1:
+; CHECK-NEXT:    .long 0xc5800000 @ float -4096
+    %x = call <8 x i13> @llvm.fptosi.sat.v8f16.v8i13(<8 x half> %f)
+    ret <8 x i13> %x
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_signed_v8f16_v8i16(<8 x half> %f) {
+; CHECK-LABEL: test_signed_v8f16_v8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    .vsave {d8}
+; CHECK-NEXT:    vpush {d8}
+; CHECK-NEXT:    vldr s8, .LCPI45_1
+; CHECK-NEXT:    vcvtt.f32.f16 s13, s3
+; CHECK-NEXT:    vcvtb.f32.f16 s3, s3
+; CHECK-NEXT:    vldr s6, .LCPI45_0
+; CHECK-NEXT:    vmaxnm.f32 s16, s3, s8
+; CHECK-NEXT:    vcvtt.f32.f16 s4, s0
+; CHECK-NEXT:    vcvtt.f32.f16 s12, s1
+; CHECK-NEXT:    vcvtt.f32.f16 s7, s2
+; CHECK-NEXT:    vmaxnm.f32 s15, s13, s8
+; CHECK-NEXT:    vminnm.f32 s16, s16, s6
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
+; CHECK-NEXT:    vcvtb.f32.f16 s1, s1
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s2
+; CHECK-NEXT:    vmaxnm.f32 s10, s4, s8
+; CHECK-NEXT:    vmaxnm.f32 s14, s12, s8
+; CHECK-NEXT:    vmaxnm.f32 s5, s0, s8
+; CHECK-NEXT:    vmaxnm.f32 s9, s7, s8
+; CHECK-NEXT:    vmaxnm.f32 s11, s1, s8
+; CHECK-NEXT:    vminnm.f32 s15, s15, s6
+; CHECK-NEXT:    vcvt.s32.f32 s16, s16
+; CHECK-NEXT:    vmaxnm.f32 s8, s2, s8
+; CHECK-NEXT:    vminnm.f32 s10, s10, s6
+; CHECK-NEXT:    vminnm.f32 s14, s14, s6
+; CHECK-NEXT:    vminnm.f32 s5, s5, s6
+; CHECK-NEXT:    vminnm.f32 s9, s9, s6
+; CHECK-NEXT:    vminnm.f32 s11, s11, s6
+; CHECK-NEXT:    vminnm.f32 s6, s8, s6
+; CHECK-NEXT:    vcvt.s32.f32 s15, s15
+; CHECK-NEXT:    vcvt.s32.f32 s6, s6
+; CHECK-NEXT:    vcvt.s32.f32 s9, s9
+; CHECK-NEXT:    vcvt.s32.f32 s11, s11
+; CHECK-NEXT:    vcvt.s32.f32 s14, s14
+; CHECK-NEXT:    vcvt.s32.f32 s5, s5
+; CHECK-NEXT:    vcvt.s32.f32 s10, s10
+; CHECK-NEXT:    vcmp.f32 s3, s3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r12, s16
+; CHECK-NEXT:    vcmp.f32 s13, s13
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w r12, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov lr, s15
+; CHECK-NEXT:    vcmp.f32 s2, s2
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w lr, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    vcmp.f32 s7, s7
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s9
+; CHECK-NEXT:    vcmp.f32 s1, s1
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r0, s11
+; CHECK-NEXT:    vcmp.f32 s12, s12
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s14
+; CHECK-NEXT:    vmov r4, s5
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r4, #0
+; CHECK-NEXT:    vcmp.f32 s4, s4
+; CHECK-NEXT:    vmov.16 q0[0], r4
+; CHECK-NEXT:    vmov r5, s10
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r5, #0
+; CHECK-NEXT:    vmov.16 q0[1], r5
+; CHECK-NEXT:    vmov.16 q0[2], r0
+; CHECK-NEXT:    vmov.16 q0[3], r1
+; CHECK-NEXT:    vmov.16 q0[4], r2
+; CHECK-NEXT:    vmov.16 q0[5], r3
+; CHECK-NEXT:    vmov.16 q0[6], r12
+; CHECK-NEXT:    vmov.16 q0[7], lr
+; CHECK-NEXT:    vpop {d8}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI45_0:
+; CHECK-NEXT:    .long 0x46fffe00 @ float 32767
+; CHECK-NEXT:  .LCPI45_1:
+; CHECK-NEXT:    .long 0xc7000000 @ float -32768
+    %x = call <8 x i16> @llvm.fptosi.sat.v8f16.v8i16(<8 x half> %f)
+    ret <8 x i16> %x
+}
+
+define arm_aapcs_vfpcc <8 x i19> @test_signed_v8f16_v8i19(<8 x half> %f) {
+; CHECK-LABEL: test_signed_v8f16_v8i19:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8}
+; CHECK-NEXT:    vpush {d8}
+; CHECK-NEXT:    vldr s5, .LCPI46_0
+; CHECK-NEXT:    vcvtt.f32.f16 s15, s3
+; CHECK-NEXT:    vldr s7, .LCPI46_1
+; CHECK-NEXT:    vcvtb.f32.f16 s8, s2
+; CHECK-NEXT:    vmaxnm.f32 s16, s15, s5
+; CHECK-NEXT:    vcvtb.f32.f16 s4, s1
+; CHECK-NEXT:    vcvtt.f32.f16 s12, s1
+; CHECK-NEXT:    vcvtb.f32.f16 s1, s0
+; CHECK-NEXT:    vminnm.f32 s16, s16, s7
+; CHECK-NEXT:    vcvtt.f32.f16 s0, s0
+; CHECK-NEXT:    vcvtt.f32.f16 s2, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s3, s3
+; CHECK-NEXT:    vmaxnm.f32 s6, s4, s5
+; CHECK-NEXT:    vmaxnm.f32 s10, s8, s5
+; CHECK-NEXT:    vmaxnm.f32 s14, s12, s5
+; CHECK-NEXT:    vmaxnm.f32 s9, s1, s5
+; CHECK-NEXT:    vmaxnm.f32 s11, s0, s5
+; CHECK-NEXT:    vmaxnm.f32 s13, s2, s5
+; CHECK-NEXT:    vmaxnm.f32 s5, s3, s5
+; CHECK-NEXT:    vcvt.s32.f32 s16, s16
+; CHECK-NEXT:    vminnm.f32 s5, s5, s7
+; CHECK-NEXT:    vminnm.f32 s13, s13, s7
+; CHECK-NEXT:    vcvt.s32.f32 s5, s5
+; CHECK-NEXT:    vminnm.f32 s11, s11, s7
+; CHECK-NEXT:    vcvt.s32.f32 s13, s13
+; CHECK-NEXT:    vminnm.f32 s9, s9, s7
+; CHECK-NEXT:    vcmp.f32 s15, s15
+; CHECK-NEXT:    vminnm.f32 s10, s10, s7
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcvt.s32.f32 s11, s11
+; CHECK-NEXT:    vcmp.f32 s3, s3
+; CHECK-NEXT:    vminnm.f32 s14, s14, s7
+; CHECK-NEXT:    vmov r1, s16
+; CHECK-NEXT:    vminnm.f32 s6, s6, s7
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    lsrs r2, r1, #11
+; CHECK-NEXT:    strb r2, [r0, #18]
+; CHECK-NEXT:    vmov r2, s5
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vcvt.s32.f32 s9, s9
+; CHECK-NEXT:    bfc r2, #19, #13
+; CHECK-NEXT:    vcmp.f32 s2, s2
+; CHECK-NEXT:    vmov r12, s13
+; CHECK-NEXT:    lsrs r3, r2, #14
+; CHECK-NEXT:    orr.w r1, r3, r1, lsl #5
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    strh r1, [r0, #16]
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w r12, #0
+; CHECK-NEXT:    vcvt.s32.f32 s10, s10
+; CHECK-NEXT:    bfc r12, #19, #13
+; CHECK-NEXT:    vcvt.s32.f32 s14, s14
+; CHECK-NEXT:    lsr.w r3, r12, #1
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    vmov lr, s11
+; CHECK-NEXT:    orr.w r2, r3, r2, lsl #18
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r2, [r0, #12]
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w lr, #0
+; CHECK-NEXT:    vcmp.f32 s1, s1
+; CHECK-NEXT:    vmov r3, s9
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    bfc lr, #19, #13
+; CHECK-NEXT:    bfc r3, #19, #13
+; CHECK-NEXT:    vcmp.f32 s12, s12
+; CHECK-NEXT:    orr.w r3, r3, lr, lsl #19
+; CHECK-NEXT:    vcvt.s32.f32 s6, s6
+; CHECK-NEXT:    str r3, [r0]
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s14
+; CHECK-NEXT:    vmov r1, s10
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vcmp.f32 s8, s8
+; CHECK-NEXT:    bfc r3, #19, #13
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    bfc r1, #19, #13
+; CHECK-NEXT:    lsrs r2, r3, #7
+; CHECK-NEXT:    vcmp.f32 s4, s4
+; CHECK-NEXT:    orr.w r1, r2, r1, lsl #12
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    orr.w r1, r1, r12, lsl #31
+; CHECK-NEXT:    str r1, [r0, #8]
+; CHECK-NEXT:    vmov r1, s6
+; CHECK-NEXT:    lsr.w r2, lr, #13
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    bfc r1, #19, #13
+; CHECK-NEXT:    orr.w r1, r2, r1, lsl #6
+; CHECK-NEXT:    orr.w r1, r1, r3, lsl #25
+; CHECK-NEXT:    str r1, [r0, #4]
+; CHECK-NEXT:    vpop {d8}
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI46_0:
+; CHECK-NEXT:    .long 0xc8800000 @ float -262144
+; CHECK-NEXT:  .LCPI46_1:
+; CHECK-NEXT:    .long 0x487fffc0 @ float 262143
+    %x = call <8 x i19> @llvm.fptosi.sat.v8f16.v8i19(<8 x half> %f)
+    ret <8 x i19> %x
+}
+
+define arm_aapcs_vfpcc <8 x i32> @test_signed_v8f16_v8i32_duplicate(<8 x half> %f) {
+; CHECK-LABEL: test_signed_v8f16_v8i32_duplicate:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vcvtt.f32.f16 s13, s3
+; CHECK-NEXT:    vcvtb.f32.f16 s3, s3
+; CHECK-NEXT:    vcvt.s32.f32 s16, s3
+; CHECK-NEXT:    vcvtt.f32.f16 s9, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s2
+; CHECK-NEXT:    vldr s8, .LCPI47_1
+; CHECK-NEXT:    vcvt.s32.f32 s18, s2
+; CHECK-NEXT:    vldr s6, .LCPI47_0
+; CHECK-NEXT:    vcvt.s32.f32 s15, s13
+; CHECK-NEXT:    vcvtt.f32.f16 s12, s1
+; CHECK-NEXT:    vcmp.f32 s3, s8
+; CHECK-NEXT:    vcvtb.f32.f16 s1, s1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, s6
+; CHECK-NEXT:    vcvt.s32.f32 s11, s9
+; CHECK-NEXT:    vcvtt.f32.f16 s4, s0
+; CHECK-NEXT:    vmov r12, s16
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r12, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, s3
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r12, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w r12, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov lr, s18
+; CHECK-NEXT:    vcmp.f32 s2, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w lr, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s2
+; CHECK-NEXT:    vcvt.s32.f32 s7, s1
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt lr, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s13, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w lr, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s15
+; CHECK-NEXT:    vcmp.f32 s13, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s13, s13
+; CHECK-NEXT:    vcvt.s32.f32 s5, s0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r2, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s9, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s11
+; CHECK-NEXT:    vcmp.f32 s9, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s9, s9
+; CHECK-NEXT:    vcvt.s32.f32 s14, s12
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r0, s7
+; CHECK-NEXT:    vcmp.f32 s1, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r0, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcvt.s32.f32 s10, s4
+; CHECK-NEXT:    vcmp.f32 s1, s1
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r0, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s5
+; CHECK-NEXT:    vcmp.f32 s0, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s12, s8
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r4, s14
+; CHECK-NEXT:    vcmp.f32 s12, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r4, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s12, s12
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r4, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r5, s10
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r4, #0
+; CHECK-NEXT:    vcmp.f32 s4, s8
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r5, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s4, s6
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s4, s4
+; CHECK-NEXT:    vmov q1[2], q1[0], lr, r12
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r5, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r5, #0
+; CHECK-NEXT:    vmov q0[3], q0[1], r5, r4
+; CHECK-NEXT:    vmov q1[3], q1[1], r3, r2
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI47_0:
+; CHECK-NEXT:    .long 0x4effffff @ float 2.14748352E+9
+; CHECK-NEXT:  .LCPI47_1:
+; CHECK-NEXT:    .long 0xcf000000 @ float -2.14748365E+9
+    %x = call <8 x i32> @llvm.fptosi.sat.v8f16.v8i32(<8 x half> %f)
+    ret <8 x i32> %x
+}
+
+define arm_aapcs_vfpcc <8 x i50> @test_signed_v8f16_v8i50(<8 x half> %f) {
+; CHECK-LABEL: test_signed_v8f16_v8i50:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #8
+; CHECK-NEXT:    sub sp, #8
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    mov r10, r0
+; CHECK-NEXT:    vcvtt.f32.f16 s30, s19
+; CHECK-NEXT:    vmov r0, s30
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    vcvtb.f32.f16 s26, s18
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    vmov r0, s26
+; CHECK-NEXT:    vldr s22, .LCPI48_1
+; CHECK-NEXT:    vcvtb.f32.f16 s24, s16
+; CHECK-NEXT:    vcvtt.f32.f16 s28, s17
+; CHECK-NEXT:    vcmp.f32 s30, s22
+; CHECK-NEXT:    mov r6, r1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r7, s24
+; CHECK-NEXT:    vldr s20, .LCPI48_0
+; CHECK-NEXT:    vmov r8, s28
+; CHECK-NEXT:    itt lt
+; CHECK-NEXT:    movlt r6, #0
+; CHECK-NEXT:    movtlt r6, #65534
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    vcmp.f32 s26, s22
+; CHECK-NEXT:    mov r5, r1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s20
+; CHECK-NEXT:    itt gt
+; CHECK-NEXT:    movwgt r6, #65535
+; CHECK-NEXT:    movtgt r6, #1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcmp.f32 s26, s26
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    str.w r0, [r10, #25]
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    vcmp.f32 s24, s22
+; CHECK-NEXT:    mov r11, r1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, s24
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str.w r0, [r10]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r4, #0
+; CHECK-NEXT:    vcmp.f32 s30, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r4, #-1
+; CHECK-NEXT:    vcmp.f32 s30, s30
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r4, #0
+; CHECK-NEXT:    str r4, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r6, #0
+; CHECK-NEXT:    lsls r0, r6, #22
+; CHECK-NEXT:    orr.w r7, r0, r4, lsr #10
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    vcmp.f32 s28, s22
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s20
+; CHECK-NEXT:    itt lt
+; CHECK-NEXT:    movlt r4, #0
+; CHECK-NEXT:    movtlt r4, #65534
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s22
+; CHECK-NEXT:    mov r1, r0
+; CHECK-NEXT:    itt gt
+; CHECK-NEXT:    movwgt r4, #65535
+; CHECK-NEXT:    movtgt r4, #1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str.w r7, [r10, #45]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vcmp.f32 s28, s20
+; CHECK-NEXT:    vcvtt.f32.f16 s18, s18
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vcmp.f32 s28, s28
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    str r1, [sp] @ 4-byte Spill
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r4, #0
+; CHECK-NEXT:    lsls r0, r4, #22
+; CHECK-NEXT:    orr.w r0, r0, r1, lsr #10
+; CHECK-NEXT:    str.w r0, [r10, #20]
+; CHECK-NEXT:    vmov r0, s18
+; CHECK-NEXT:    lsrs r1, r6, #10
+; CHECK-NEXT:    strb.w r1, [r10, #49]
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    vcmp.f32 s18, s22
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r9, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r9, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w r9, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    itt lt
+; CHECK-NEXT:    movlt r5, #0
+; CHECK-NEXT:    movtlt r5, #65534
+; CHECK-NEXT:    vcmp.f32 s26, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    itt gt
+; CHECK-NEXT:    movwgt r5, #65535
+; CHECK-NEXT:    movtgt r5, #1
+; CHECK-NEXT:    vcmp.f32 s26, s26
+; CHECK-NEXT:    vcvtt.f32.f16 s16, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r5, #0
+; CHECK-NEXT:    bfc r5, #18, #14
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    orr.w r0, r5, r9, lsl #18
+; CHECK-NEXT:    str.w r0, [r10, #29]
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    lsrs r1, r4, #10
+; CHECK-NEXT:    strb.w r1, [r10, #24]
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    vcmp.f32 s16, s22
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r8, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r8, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w r8, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    itt lt
+; CHECK-NEXT:    movwlt r11, #0
+; CHECK-NEXT:    movtlt r11, #65534
+; CHECK-NEXT:    vcmp.f32 s24, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, s24
+; CHECK-NEXT:    itt gt
+; CHECK-NEXT:    movwgt r11, #65535
+; CHECK-NEXT:    movtgt r11, #1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w r11, #0
+; CHECK-NEXT:    vcvtb.f32.f16 s24, s19
+; CHECK-NEXT:    bfc r11, #18, #14
+; CHECK-NEXT:    mov r6, r1
+; CHECK-NEXT:    orr.w r0, r11, r8, lsl #18
+; CHECK-NEXT:    str.w r0, [r10, #4]
+; CHECK-NEXT:    vmov r0, s24
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    vcmp.f32 s24, s22
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r5, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, s24
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r5, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r5, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    itt lt
+; CHECK-NEXT:    movlt r7, #0
+; CHECK-NEXT:    movtlt r7, #65534
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    itt gt
+; CHECK-NEXT:    movwgt r7, #65535
+; CHECK-NEXT:    movtgt r7, #1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r7, #0
+; CHECK-NEXT:    vcvtb.f32.f16 s18, s17
+; CHECK-NEXT:    bfc r7, #18, #14
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    lsrs r0, r7, #14
+; CHECK-NEXT:    orr.w r0, r0, r5, lsl #4
+; CHECK-NEXT:    str.w r0, [r10, #37]
+; CHECK-NEXT:    lsr.w r0, r9, #14
+; CHECK-NEXT:    orr.w r0, r0, r7, lsl #18
+; CHECK-NEXT:    str.w r0, [r10, #33]
+; CHECK-NEXT:    vmov r0, s18
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    vcmp.f32 s18, s22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s22
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    itt lt
+; CHECK-NEXT:    movlt r6, #0
+; CHECK-NEXT:    movtlt r6, #65534
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    itt gt
+; CHECK-NEXT:    movwgt r6, #65535
+; CHECK-NEXT:    movtgt r6, #1
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r6, #0
+; CHECK-NEXT:    bfc r6, #18, #14
+; CHECK-NEXT:    vcmp.f32 s18, s22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    lsr.w r2, r6, #14
+; CHECK-NEXT:    orr.w r2, r2, r0, lsl #4
+; CHECK-NEXT:    str.w r2, [r10, #12]
+; CHECK-NEXT:    itt lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    movtlt r1, #65534
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, s22
+; CHECK-NEXT:    itt gt
+; CHECK-NEXT:    movwgt r1, #65535
+; CHECK-NEXT:    movtgt r1, #1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    itt lt
+; CHECK-NEXT:    movlt r4, #0
+; CHECK-NEXT:    movtlt r4, #65534
+; CHECK-NEXT:    vcmp.f32 s24, s20
+; CHECK-NEXT:    lsr.w r2, r8, #14
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    itt gt
+; CHECK-NEXT:    movwgt r4, #65535
+; CHECK-NEXT:    movtgt r4, #1
+; CHECK-NEXT:    vcmp.f32 s24, s24
+; CHECK-NEXT:    orr.w r2, r2, r6, lsl #18
+; CHECK-NEXT:    str.w r2, [r10, #8]
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r4, #0
+; CHECK-NEXT:    ldr r3, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    bfc r4, #18, #14
+; CHECK-NEXT:    lsrs r2, r5, #28
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    lsrs r0, r0, #28
+; CHECK-NEXT:    orr.w r2, r2, r4, lsl #4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    orr.w r2, r2, r3, lsl #22
+; CHECK-NEXT:    str.w r2, [r10, #41]
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    bfc r1, #18, #14
+; CHECK-NEXT:    orr.w r0, r0, r1, lsl #4
+; CHECK-NEXT:    ldr r1, [sp] @ 4-byte Reload
+; CHECK-NEXT:    orr.w r0, r0, r1, lsl #22
+; CHECK-NEXT:    str.w r0, [r10, #16]
+; CHECK-NEXT:    add sp, #8
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI48_0:
+; CHECK-NEXT:    .long 0x57ffffff @ float 5.6294992E+14
+; CHECK-NEXT:  .LCPI48_1:
+; CHECK-NEXT:    .long 0xd8000000 @ float -5.62949953E+14
+    %x = call <8 x i50> @llvm.fptosi.sat.v8f16.v8i50(<8 x half> %f)
+    ret <8 x i50> %x
+}
+
+define arm_aapcs_vfpcc <8 x i64> @test_signed_v8f16_v8i64(<8 x half> %f) {
+; CHECK-LABEL: test_signed_v8f16_v8i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vcvtt.f32.f16 s20, s19
+; CHECK-NEXT:    vmov r0, s20
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    vcvtb.f32.f16 s22, s19
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    vmov r0, s22
+; CHECK-NEXT:    vldr s30, .LCPI49_1
+; CHECK-NEXT:    vldr s28, .LCPI49_0
+; CHECK-NEXT:    vcvtb.f32.f16 s24, s16
+; CHECK-NEXT:    vcmp.f32 s20, s30
+; CHECK-NEXT:    vcvtt.f32.f16 s16, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r9, #0
+; CHECK-NEXT:    vcmp.f32 s20, s28
+; CHECK-NEXT:    mov r8, r1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r9, #-1
+; CHECK-NEXT:    vcmp.f32 s20, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r4, s24
+; CHECK-NEXT:    vmov r5, s16
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w r9, #0
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    vcmp.f32 s22, s30
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s22, s28
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r11, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s22, s22
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r11, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s20, s30
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w r11, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s20, s28
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r8, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s20, s20
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r8, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov r10, r1
+; CHECK-NEXT:    vcmp.f32 s22, s30
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w r8, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r10, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s22, s28
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r10, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s22, s22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w r10, #0
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    vcmp.f32 s16, s30
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r6, #0
+; CHECK-NEXT:    vcmp.f32 s16, s28
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r6, #-1
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    mov r5, r1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r6, #0
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    vcvtt.f32.f16 s19, s17
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    vmov r1, s19
+; CHECK-NEXT:    vcmp.f32 s24, s30
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s24, s28
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcmp.f32 s24, s24
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmov q5[2], q5[0], r0, r6
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    vcvtb.f32.f16 s17, s17
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    vmov r0, s17
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    vcmp.f32 s19, s30
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s28
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r6, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s19
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r6, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s30
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r6, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s28
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r5, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r5, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, s30
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r5, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r7, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s24, s28
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r7, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s24, s24
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r7, #0
+; CHECK-NEXT:    vmov q5[3], q5[1], r7, r5
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    vcvtt.f32.f16 s16, s18
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    vmov r1, s16
+; CHECK-NEXT:    vcmp.f32 s17, s30
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s17, s28
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcmp.f32 s17, s17
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmov q6[2], q6[0], r0, r6
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    vcvtb.f32.f16 s18, s18
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    vmov r0, s18
+; CHECK-NEXT:    mov r5, r1
+; CHECK-NEXT:    vcmp.f32 s16, s30
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s28
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r6, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r6, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s30
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r6, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s28
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r4, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s19
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r4, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s30
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r4, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r7, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s17, s28
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r7, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s17, s17
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r7, #0
+; CHECK-NEXT:    vmov q6[3], q6[1], r7, r4
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    vcmp.f32 s18, s30
+; CHECK-NEXT:    vmov q3[2], q3[0], r11, r9
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s28
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s30
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s28
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r5, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r5, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s30
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r5, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r1, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s18, s28
+; CHECK-NEXT:    vmov q2[2], q2[0], r0, r6
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r1, #-2147483648
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    vmov q3[3], q3[1], r10, r8
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmov q2[3], q2[1], r1, r5
+; CHECK-NEXT:    vmov q0, q5
+; CHECK-NEXT:    vmov q1, q6
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI49_0:
+; CHECK-NEXT:    .long 0x5effffff @ float 9.22337149E+18
+; CHECK-NEXT:  .LCPI49_1:
+; CHECK-NEXT:    .long 0xdf000000 @ float -9.22337203E+18
+    %x = call <8 x i64> @llvm.fptosi.sat.v8f16.v8i64(<8 x half> %f)
+    ret <8 x i64> %x
+}
+
+define arm_aapcs_vfpcc <8 x i100> @test_signed_v8f16_v8i100(<8 x half> %f) {
+; CHECK-LABEL: test_signed_v8f16_v8i100:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    vcvtb.f32.f16 s30, s19
+; CHECK-NEXT:    vmov r0, s30
+; CHECK-NEXT:    bl __fixsfti
+; CHECK-NEXT:    vcvtb.f32.f16 s28, s18
+; CHECK-NEXT:    mov r5, r3
+; CHECK-NEXT:    vmov r3, s28
+; CHECK-NEXT:    vldr s24, .LCPI50_2
+; CHECK-NEXT:    vldr s20, .LCPI50_3
+; CHECK-NEXT:    vcvtt.f32.f16 s19, s19
+; CHECK-NEXT:    vcmp.f32 s30, s24
+; CHECK-NEXT:    vcvtb.f32.f16 s22, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s30
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s24
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s20
+; CHECK-NEXT:    str.w r2, [r4, #83]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s30
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s24
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str.w r1, [r4, #79]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s30, s20
+; CHECK-NEXT:    vcvtb.f32.f16 s26, s17
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcmp.f32 s30, s30
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    str.w r0, [r4, #75]
+; CHECK-NEXT:    vmov r9, s19
+; CHECK-NEXT:    vmov r8, s22
+; CHECK-NEXT:    mov r0, r3
+; CHECK-NEXT:    vmov r6, s26
+; CHECK-NEXT:    bl __fixsfti
+; CHECK-NEXT:    vcmp.f32 s28, s24
+; CHECK-NEXT:    mov r7, r3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s28
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s24
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s20
+; CHECK-NEXT:    str.w r2, [r4, #58]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s28
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s24
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str.w r1, [r4, #54]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s28, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcmp.f32 s28, s28
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    str.w r0, [r4, #50]
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    bl __fixsfti
+; CHECK-NEXT:    vcmp.f32 s26, s24
+; CHECK-NEXT:    mov r10, r3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s26
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s24
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s20
+; CHECK-NEXT:    str.w r2, [r4, #33]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s26
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s24
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str.w r1, [r4, #29]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s26, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcmp.f32 s26, s26
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    str.w r0, [r4, #25]
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    bl __fixsfti
+; CHECK-NEXT:    vcmp.f32 s22, s24
+; CHECK-NEXT:    mov r8, r3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s22, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s22, s22
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s22, s24
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s22, s20
+; CHECK-NEXT:    str r2, [r4, #8]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s22, s22
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s22, s24
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #4]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s22, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcmp.f32 s22, s22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    str r0, [r4]
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    bl __fixsfti
+; CHECK-NEXT:    vcmp.f32 s19, s24
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s19
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s24
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s19
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s24
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    lsrs r6, r1, #28
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    orr.w r6, r6, r2, lsl #4
+; CHECK-NEXT:    str.w r6, [r4, #95]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s19
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s24
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    lsrs r6, r0, #28
+; CHECK-NEXT:    orr.w r1, r6, r1, lsl #4
+; CHECK-NEXT:    str.w r1, [r4, #91]
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    mvnlt r3, #7
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s19
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r3, #7
+; CHECK-NEXT:    lsrs r1, r2, #28
+; CHECK-NEXT:    vcvtt.f32.f16 s19, s18
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    orr.w r2, r1, r3, lsl #4
+; CHECK-NEXT:    vmov r1, s19
+; CHECK-NEXT:    strb.w r2, [r4, #99]
+; CHECK-NEXT:    vcmp.f32 s30, s24
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    mvnlt r5, #7
+; CHECK-NEXT:    vcmp.f32 s30, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r5, #7
+; CHECK-NEXT:    vcmp.f32 s30, s30
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r5, #0
+; CHECK-NEXT:    and r2, r5, #15
+; CHECK-NEXT:    orr.w r0, r2, r0, lsl #4
+; CHECK-NEXT:    str.w r0, [r4, #87]
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bl __fixsfti
+; CHECK-NEXT:    vcmp.f32 s19, s24
+; CHECK-NEXT:    vcvtt.f32.f16 s18, s17
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s19
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s24
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s19
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    lsr.w r6, r1, #28
+; CHECK-NEXT:    vcmp.f32 s19, s24
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    orr.w r6, r6, r2, lsl #4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str.w r6, [r4, #70]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcmp.f32 s19, s19
+; CHECK-NEXT:    lsrs r2, r2, #28
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    lsrs r6, r0, #28
+; CHECK-NEXT:    orr.w r1, r6, r1, lsl #4
+; CHECK-NEXT:    str.w r1, [r4, #66]
+; CHECK-NEXT:    vmov r1, s18
+; CHECK-NEXT:    vcmp.f32 s19, s24
+; CHECK-NEXT:    vcvtt.f32.f16 s16, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    mvnlt r3, #7
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s19
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r3, #7
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s24
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    orr.w r2, r2, r3, lsl #4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    b.w .LBB50_3
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI50_2:
+; CHECK-NEXT:    .long 0xf1000000 @ float -6.338253E+29
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.2:
+; CHECK-NEXT:  .LCPI50_3:
+; CHECK-NEXT:    .long 0x70ffffff @ float 6.33825262E+29
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  .LBB50_3:
+; CHECK-NEXT:    strb.w r2, [r4, #74]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    mvnlt r7, #7
+; CHECK-NEXT:    vcmp.f32 s28, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r7, #7
+; CHECK-NEXT:    vcmp.f32 s28, s28
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r7, #0
+; CHECK-NEXT:    and r2, r7, #15
+; CHECK-NEXT:    orr.w r0, r2, r0, lsl #4
+; CHECK-NEXT:    str.w r0, [r4, #62]
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bl __fixsfti
+; CHECK-NEXT:    vcmp.f32 s18, s24
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s24
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    lsr.w r7, r1, #28
+; CHECK-NEXT:    vcmp.f32 s18, s24
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    orr.w r7, r7, r2, lsl #4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str.w r7, [r4, #45]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    lsrs r2, r2, #28
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    lsrs r7, r0, #28
+; CHECK-NEXT:    vcmp.f32 s18, s24
+; CHECK-NEXT:    orr.w r7, r7, r1, lsl #4
+; CHECK-NEXT:    vmov r1, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    str.w r7, [r4, #41]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    mvnlt r3, #7
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r3, #7
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s24
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    orr.w r2, r2, r3, lsl #4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    strb.w r2, [r4, #49]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    mvnlt r10, #7
+; CHECK-NEXT:    vcmp.f32 s26, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r10, #7
+; CHECK-NEXT:    vcmp.f32 s26, s26
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w r10, #0
+; CHECK-NEXT:    and r2, r10, #15
+; CHECK-NEXT:    orr.w r0, r2, r0, lsl #4
+; CHECK-NEXT:    str.w r0, [r4, #37]
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bl __fixsfti
+; CHECK-NEXT:    vcmp.f32 s16, s24
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s24
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s24
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    lsrs r7, r1, #28
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    orr.w r7, r7, r2, lsl #4
+; CHECK-NEXT:    str r7, [r4, #20]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s24
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    lsr.w r7, r0, #28
+; CHECK-NEXT:    orr.w r1, r7, r1, lsl #4
+; CHECK-NEXT:    str r1, [r4, #16]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    mvnlt r3, #7
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s16
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r3, #7
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    lsr.w r1, r2, #28
+; CHECK-NEXT:    vcmp.f32 s22, s24
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    orr.w r1, r1, r3, lsl #4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    strb r1, [r4, #24]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    mvnlt r8, #7
+; CHECK-NEXT:    vcmp.f32 s22, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r8, #7
+; CHECK-NEXT:    vcmp.f32 s22, s22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs.w r8, #0
+; CHECK-NEXT:    and r1, r8, #15
+; CHECK-NEXT:    orr.w r0, r1, r0, lsl #4
+; CHECK-NEXT:    str r0, [r4, #12]
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, pc}
+; CHECK-NEXT:  @ %bb.4:
+    %x = call <8 x i100> @llvm.fptosi.sat.v8f16.v8i100(<8 x half> %f)
+    ret <8 x i100> %x
+}
+
+define arm_aapcs_vfpcc <8 x i128> @test_signed_v8f16_v8i128(<8 x half> %f) {
+; CHECK-LABEL: test_signed_v8f16_v8i128:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    vcvtt.f32.f16 s28, s19
+; CHECK-NEXT:    vcvtb.f32.f16 s20, s16
+; CHECK-NEXT:    vmov r0, s28
+; CHECK-NEXT:    vcvtt.f32.f16 s24, s16
+; CHECK-NEXT:    vcvtb.f32.f16 s26, s17
+; CHECK-NEXT:    vcvtb.f32.f16 s19, s19
+; CHECK-NEXT:    vldr s22, .LCPI51_2
+; CHECK-NEXT:    vmov r8, s20
+; CHECK-NEXT:    vmov r9, s24
+; CHECK-NEXT:    vcvtt.f32.f16 s30, s18
+; CHECK-NEXT:    vmov r7, s26
+; CHECK-NEXT:    vmov r6, s19
+; CHECK-NEXT:    bl __fixsfti
+; CHECK-NEXT:    vldr s16, .LCPI51_3
+; CHECK-NEXT:    vmov r5, s30
+; CHECK-NEXT:    vcvtb.f32.f16 s18, s18
+; CHECK-NEXT:    vcmp.f32 s28, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s22
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s28
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s16
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s22
+; CHECK-NEXT:    str r3, [r4, #124]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s28
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s16
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s22
+; CHECK-NEXT:    str r2, [r4, #120]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s28
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s16
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #116]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s28, s22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcmp.f32 s28, s28
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    str r0, [r4, #112]
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    bl __fixsfti
+; CHECK-NEXT:    vcmp.f32 s19, s16
+; CHECK-NEXT:    vcvtt.f32.f16 s28, s17
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s22
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s19
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s16
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s22
+; CHECK-NEXT:    str r3, [r4, #108]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s19
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s16
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s22
+; CHECK-NEXT:    str r2, [r4, #104]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s19
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s16
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #100]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s19, s22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcmp.f32 s19, s19
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    str r0, [r4, #96]
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    vmov r6, s18
+; CHECK-NEXT:    bl __fixsfti
+; CHECK-NEXT:    vcmp.f32 s30, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s22
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s30
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s16
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s22
+; CHECK-NEXT:    str r3, [r4, #92]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s30
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s16
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s22
+; CHECK-NEXT:    str r2, [r4, #88]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s30
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s16
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #84]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s30, s22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcmp.f32 s30, s30
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    str r0, [r4, #80]
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    vmov r5, s28
+; CHECK-NEXT:    bl __fixsfti
+; CHECK-NEXT:    vcmp.f32 s18, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s22
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s16
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s22
+; CHECK-NEXT:    str r3, [r4, #76]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s16
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s22
+; CHECK-NEXT:    str r2, [r4, #72]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s16
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #68]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s18, s22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcmp.f32 s18, s18
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    str r0, [r4, #64]
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    bl __fixsfti
+; CHECK-NEXT:    vcmp.f32 s28, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s22
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s28
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s16
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s22
+; CHECK-NEXT:    str r3, [r4, #60]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s28
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s16
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s22
+; CHECK-NEXT:    str r2, [r4, #56]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s28
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s16
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #52]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s28, s22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcmp.f32 s28, s28
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    str r0, [r4, #48]
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    bl __fixsfti
+; CHECK-NEXT:    vcmp.f32 s26, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s22
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s26
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s16
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s22
+; CHECK-NEXT:    str r3, [r4, #44]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s26
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s16
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s22
+; CHECK-NEXT:    str r2, [r4, #40]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s26
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s16
+; CHECK-NEXT:    b.w .LBB51_3
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI51_2:
+; CHECK-NEXT:    .long 0x7effffff @ float 1.70141173E+38
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.2:
+; CHECK-NEXT:  .LCPI51_3:
+; CHECK-NEXT:    .long 0xff000000 @ float -1.70141183E+38
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  .LBB51_3:
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #36]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s26, s22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcmp.f32 s26, s26
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    str r0, [r4, #32]
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    bl __fixsfti
+; CHECK-NEXT:    vcmp.f32 s24, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, s22
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, s24
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, s16
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, s22
+; CHECK-NEXT:    str r3, [r4, #28]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, s24
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, s16
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, s22
+; CHECK-NEXT:    str r2, [r4, #24]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, s24
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, s16
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #20]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s24, s22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcmp.f32 s24, s24
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    str r0, [r4, #16]
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    bl __fixsfti
+; CHECK-NEXT:    vcmp.f32 s20, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s20, s22
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s20, s20
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    mvngt r3, #-2147483648
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s20, s16
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s20, s22
+; CHECK-NEXT:    str r3, [r4, #12]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s20, s20
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s20, s16
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s20, s22
+; CHECK-NEXT:    str r2, [r4, #8]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s20, s20
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s20, s16
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #4]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s20, s22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcmp.f32 s20, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it vs
+; CHECK-NEXT:    movvs r0, #0
+; CHECK-NEXT:    str r0, [r4]
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, pc}
+; CHECK-NEXT:  @ %bb.4:
+    %x = call <8 x i128> @llvm.fptosi.sat.v8f16.v8i128(<8 x half> %f)
+    ret <8 x i128> %x
+}
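+; Each half lane is widened to f32 and converted with the __fixsfti (float to
+; i128) libcall; the four result words are then saturated based on compares
+; against the float bounds loaded above (0x7effffff, the largest float below
+; 2^127, and 0xff000000, exactly -2^127) and zeroed for NaN via the vs
+; condition.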
+

diff  --git a/llvm/test/CodeGen/Thumb2/mve-fptoui-sat-vector.ll b/llvm/test/CodeGen/Thumb2/mve-fptoui-sat-vector.ll
new file mode 100644
index 0000000000000..10e9f2e063dca
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-fptoui-sat-vector.ll
@@ -0,0 +1,5783 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK
+
+;
+; Float to unsigned 32-bit -- Vector size variation
+;
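+; The fptoui.sat intrinsics clamp instead of producing poison: inputs below
+; zero saturate to 0, inputs too large for the result type saturate to
+; all-ones, and NaN becomes 0. The vcmp/vmrs/it sequences in the checks below
+; implement those clamps around the plain vcvt.u32.f32 conversions.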
+
+declare <1 x i32> @llvm.fptoui.sat.v1f32.v1i32 (<1 x float>)
+declare <2 x i32> @llvm.fptoui.sat.v2f32.v2i32 (<2 x float>)
+declare <3 x i32> @llvm.fptoui.sat.v3f32.v3i32 (<3 x float>)
+declare <4 x i32> @llvm.fptoui.sat.v4f32.v4i32 (<4 x float>)
+declare <5 x i32> @llvm.fptoui.sat.v5f32.v5i32 (<5 x float>)
+declare <6 x i32> @llvm.fptoui.sat.v6f32.v6i32 (<6 x float>)
+declare <7 x i32> @llvm.fptoui.sat.v7f32.v7i32 (<7 x float>)
+declare <8 x i32> @llvm.fptoui.sat.v8f32.v8i32 (<8 x float>)
+
+define arm_aapcs_vfpcc <1 x i32> @test_unsigned_v1f32_v1i32(<1 x float> %f) {
+; CHECK-LABEL: test_unsigned_v1f32_v1i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 s2, s0
+; CHECK-NEXT:    vldr s4, .LCPI0_0
+; CHECK-NEXT:    vcmp.f32 s0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s4
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI0_0:
+; CHECK-NEXT:    .long 0x4f7fffff @ float 4.29496704E+9
+    %x = call <1 x i32> @llvm.fptoui.sat.v1f32.v1i32(<1 x float> %f)
+    ret <1 x i32> %x
+}
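+; The bound 0x4f7fffff (4294967040.0) used above is the largest float strictly
+; below 2^32, so any input that compares greater cannot fit in a u32 and is
+; forced to all-ones; the compare against zero handles the negative side.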
+
+define arm_aapcs_vfpcc <2 x i32> @test_unsigned_v2f32_v2i32(<2 x float> %f) {
+; CHECK-LABEL: test_unsigned_v2f32_v2i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vmov r0, s17
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vldr s18, .LCPI1_0
+; CHECK-NEXT:    vcmp.f32 s17, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r5, #0
+; CHECK-NEXT:    vcmp.f32 s17, s18
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r5, #-1
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s18
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s18
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r4, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r4, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vcmp.f32 s16, s18
+; CHECK-NEXT:    vmov q0[2], q0[0], r0, r5
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r1, #0
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r4
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI1_0:
+; CHECK-NEXT:    .long 0x4f7fffff @ float 4.29496704E+9
+    %x = call <2 x i32> @llvm.fptoui.sat.v2f32.v2i32(<2 x float> %f)
+    ret <2 x i32> %x
+}
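+; Note: <2 x i32> results are widened to 64-bit lanes here, so each element
+; goes through the __aeabi_f2ulz (float to u64) libcall and the high word of
+; each lane is forced to zero on both out-of-range sides, since the saturated
+; u32 value fits in the low half.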
+
+define arm_aapcs_vfpcc <3 x i32> @test_unsigned_v3f32_v3i32(<3 x float> %f) {
+; CHECK-LABEL: test_unsigned_v3f32_v3i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 s10, s2
+; CHECK-NEXT:    vldr s8, .LCPI2_0
+; CHECK-NEXT:    vcvt.u32.f32 s12, s0
+; CHECK-NEXT:    vcvt.u32.f32 s6, s3
+; CHECK-NEXT:    vcvt.u32.f32 s4, s1
+; CHECK-NEXT:    vcmp.f32 s2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s8
+; CHECK-NEXT:    vmov r0, s10
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s12
+; CHECK-NEXT:    vcmp.f32 s0, s8
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    vcmp.f32 s3, s8
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vcmp.f32 s1, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, s8
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r3, #-1
+; CHECK-NEXT:    vmov q0[3], q0[1], r3, r2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI2_0:
+; CHECK-NEXT:    .long 0x4f7fffff @ float 4.29496704E+9
+    %x = call <3 x i32> @llvm.fptoui.sat.v3f32.v3i32(<3 x float> %f)
+    ret <3 x i32> %x
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_unsigned_v4f32_v4i32(<4 x float> %f) {
+; CHECK-LABEL: test_unsigned_v4f32_v4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 s10, s2
+; CHECK-NEXT:    vldr s8, .LCPI3_0
+; CHECK-NEXT:    vcvt.u32.f32 s12, s0
+; CHECK-NEXT:    vcvt.u32.f32 s6, s3
+; CHECK-NEXT:    vcvt.u32.f32 s4, s1
+; CHECK-NEXT:    vcmp.f32 s2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s8
+; CHECK-NEXT:    vmov r0, s10
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s12
+; CHECK-NEXT:    vcmp.f32 s0, s8
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    vcmp.f32 s3, s8
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vcmp.f32 s1, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, s8
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r3, #-1
+; CHECK-NEXT:    vmov q0[3], q0[1], r3, r2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI3_0:
+; CHECK-NEXT:    .long 0x4f7fffff @ float 4.29496704E+9
+    %x = call <4 x i32> @llvm.fptoui.sat.v4f32.v4i32(<4 x float> %f)
+    ret <4 x i32> %x
+}
+
+define arm_aapcs_vfpcc <5 x i32> @test_unsigned_v5f32_v5i32(<5 x float> %f) {
+; CHECK-LABEL: test_unsigned_v5f32_v5i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 s14, s4
+; CHECK-NEXT:    vldr s12, .LCPI4_0
+; CHECK-NEXT:    vcvt.u32.f32 s5, s3
+; CHECK-NEXT:    vcvt.u32.f32 s10, s1
+; CHECK-NEXT:    vcvt.u32.f32 s6, s2
+; CHECK-NEXT:    vcmp.f32 s4, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcvt.u32.f32 s8, s0
+; CHECK-NEXT:    vcmp.f32 s4, s12
+; CHECK-NEXT:    vmov r1, s14
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r12, s5
+; CHECK-NEXT:    vcmp.f32 s3, s12
+; CHECK-NEXT:    str r1, [r0, #16]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r12, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r12, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s10
+; CHECK-NEXT:    vcmp.f32 s1, s12
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    vcmp.f32 s2, s12
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vcmp.f32 s0, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r3, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vcmp.f32 s0, s12
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r3
+; CHECK-NEXT:    vmov q0[3], q0[1], r2, r12
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI4_0:
+; CHECK-NEXT:    .long 0x4f7fffff @ float 4.29496704E+9
+    %x = call <5 x i32> @llvm.fptoui.sat.v5f32.v5i32(<5 x float> %f)
+    ret <5 x i32> %x
+}
+
+define arm_aapcs_vfpcc <6 x i32> @test_unsigned_v6f32_v6i32(<6 x float> %f) {
+; CHECK-LABEL: test_unsigned_v6f32_v6i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 s7, s5
+; CHECK-NEXT:    vldr s14, .LCPI5_0
+; CHECK-NEXT:    vcvt.u32.f32 s9, s4
+; CHECK-NEXT:    vcvt.u32.f32 s12, s3
+; CHECK-NEXT:    vcmp.f32 s5, #0
+; CHECK-NEXT:    vcvt.u32.f32 s6, s1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s5, s14
+; CHECK-NEXT:    vcvt.u32.f32 s8, s2
+; CHECK-NEXT:    vmov r1, s7
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s4, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s9
+; CHECK-NEXT:    vcvt.u32.f32 s10, s0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vcmp.f32 s4, s14
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r12, s12
+; CHECK-NEXT:    strd r2, r1, [r0, #16]
+; CHECK-NEXT:    vcmp.f32 s3, s14
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r12, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r12, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    vcmp.f32 s1, s14
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s8
+; CHECK-NEXT:    vcmp.f32 s2, s14
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s10
+; CHECK-NEXT:    vcmp.f32 s0, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r3, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vcmp.f32 s0, s14
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r3
+; CHECK-NEXT:    vmov q0[3], q0[1], r2, r12
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI5_0:
+; CHECK-NEXT:    .long 0x4f7fffff @ float 4.29496704E+9
+    %x = call <6 x i32> @llvm.fptoui.sat.v6f32.v6i32(<6 x float> %f)
+    ret <6 x i32> %x
+}
+
+define arm_aapcs_vfpcc <7 x i32> @test_unsigned_v7f32_v7i32(<7 x float> %f) {
+; CHECK-LABEL: test_unsigned_v7f32_v7i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 s11, s5
+; CHECK-NEXT:    vldr s8, .LCPI6_0
+; CHECK-NEXT:    vcvt.u32.f32 s13, s4
+; CHECK-NEXT:    vcvt.u32.f32 s9, s6
+; CHECK-NEXT:    vcmp.f32 s5, #0
+; CHECK-NEXT:    vcvt.u32.f32 s10, s3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s5, s8
+; CHECK-NEXT:    vcvt.u32.f32 s12, s1
+; CHECK-NEXT:    vmov r1, s11
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    str r1, [r0, #20]
+; CHECK-NEXT:    vcmp.f32 s4, #0
+; CHECK-NEXT:    vmov r1, s13
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vcmp.f32 s4, s8
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vcvt.u32.f32 s14, s2
+; CHECK-NEXT:    str r1, [r0, #16]
+; CHECK-NEXT:    vcmp.f32 s6, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s9
+; CHECK-NEXT:    vcvt.u32.f32 s7, s0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vcmp.f32 s6, s8
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r12, s10
+; CHECK-NEXT:    str r1, [r0, #24]
+; CHECK-NEXT:    vcmp.f32 s3, s8
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r12, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r12, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcmp.f32 s1, s8
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s14
+; CHECK-NEXT:    vcmp.f32 s2, s8
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s7
+; CHECK-NEXT:    vcmp.f32 s0, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r3, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vcmp.f32 s0, s8
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r3
+; CHECK-NEXT:    vmov q0[3], q0[1], r2, r12
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI6_0:
+; CHECK-NEXT:    .long 0x4f7fffff @ float 4.29496704E+9
+    %x = call <7 x i32> @llvm.fptoui.sat.v7f32.v7i32(<7 x float> %f)
+    ret <7 x i32> %x
+}
+
+define arm_aapcs_vfpcc <8 x i32> @test_unsigned_v8f32_v8i32(<8 x float> %f) {
+; CHECK-LABEL: test_unsigned_v8f32_v8i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    .vsave {d8}
+; CHECK-NEXT:    vpush {d8}
+; CHECK-NEXT:    vcvt.u32.f32 s15, s6
+; CHECK-NEXT:    vldr s8, .LCPI7_0
+; CHECK-NEXT:    vcvt.u32.f32 s16, s4
+; CHECK-NEXT:    vcvt.u32.f32 s13, s7
+; CHECK-NEXT:    vcvt.u32.f32 s12, s5
+; CHECK-NEXT:    vcmp.f32 s6, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s6, s8
+; CHECK-NEXT:    vcvt.u32.f32 s14, s2
+; CHECK-NEXT:    vmov r12, s15
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r12, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s4, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r12, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov lr, s16
+; CHECK-NEXT:    vcmp.f32 s4, s8
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w lr, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcvt.u32.f32 s9, s0
+; CHECK-NEXT:    vcmp.f32 s7, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w lr, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s13
+; CHECK-NEXT:    vcmp.f32 s7, s8
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcvt.u32.f32 s11, s3
+; CHECK-NEXT:    vcmp.f32 s5, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vcvt.u32.f32 s10, s1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vcmp.f32 s5, s8
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r3, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r0, s14
+; CHECK-NEXT:    vmov q1[2], q1[0], lr, r12
+; CHECK-NEXT:    vcmp.f32 s2, s8
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s9
+; CHECK-NEXT:    vmov q1[3], q1[1], r3, r2
+; CHECK-NEXT:    vcmp.f32 s0, s8
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r4, s11
+; CHECK-NEXT:    vcmp.f32 s3, s8
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r4, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r5, s10
+; CHECK-NEXT:    vcmp.f32 s1, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r4, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, s8
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r5, #0
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r5, #-1
+; CHECK-NEXT:    vmov q0[3], q0[1], r5, r4
+; CHECK-NEXT:    vpop {d8}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI7_0:
+; CHECK-NEXT:    .long 0x4f7fffff @ float 4.29496704E+9
+    %x = call <8 x i32> @llvm.fptoui.sat.v8f32.v8i32(<8 x float> %f)
+    ret <8 x i32> %x
+}
+
+;
+; Double to unsigned 32-bit -- Vector size variation
+;
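+; These configurations have no double-precision hardware, so each element is
+; range-checked with the __aeabi_dcmpge/__aeabi_dcmpgt libcalls against 0.0
+; and 4294967295.0 and converted with __aeabi_d2ulz; csel/it sequences then
+; pick the clamped result from the compare outcomes.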
+
+declare <1 x i32> @llvm.fptoui.sat.v1f64.v1i32 (<1 x double>)
+declare <2 x i32> @llvm.fptoui.sat.v2f64.v2i32 (<2 x double>)
+declare <3 x i32> @llvm.fptoui.sat.v3f64.v3i32 (<3 x double>)
+declare <4 x i32> @llvm.fptoui.sat.v4f64.v4i32 (<4 x double>)
+declare <5 x i32> @llvm.fptoui.sat.v5f64.v5i32 (<5 x double>)
+declare <6 x i32> @llvm.fptoui.sat.v6f64.v6i32 (<6 x double>)
+
+define arm_aapcs_vfpcc <1 x i32> @test_unsigned_v1f64_v1i32(<1 x double> %f) {
+; CHECK-LABEL: test_unsigned_v1f64_v1i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    vldr d1, .LCPI8_0
+; CHECK-NEXT:    vmov r4, r5, d0
+; CHECK-NEXT:    vmov r2, r3, d1
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI8_1
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    vmov r2, r3, d0
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_d2uiz
+; CHECK-NEXT:    cmp r7, #0
+; CHECK-NEXT:    csel r0, r0, r7, ne
+; CHECK-NEXT:    cmp r6, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI8_0:
+; CHECK-NEXT:    .long 4292870144 @ double 4294967295
+; CHECK-NEXT:    .long 1106247679
+; CHECK-NEXT:  .LCPI8_1:
+; CHECK-NEXT:    .long 0 @ double 0
+; CHECK-NEXT:    .long 0
+    %x = call <1 x i32> @llvm.fptoui.sat.v1f64.v1i32(<1 x double> %f)
+    ret <1 x i32> %x
+}
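+; In the pattern above, a zero __aeabi_dcmpge result (also the case for NaN,
+; since unordered compares return false) selects 0, and a nonzero
+; __aeabi_dcmpgt result overrides the conversion with all-ones.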
+
+define arm_aapcs_vfpcc <2 x i32> @test_unsigned_v2f64_v2i32(<2 x double> %f) {
+; CHECK-LABEL: test_unsigned_v2f64_v2i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI9_0
+; CHECK-NEXT:    vmov r5, r4, d9
+; CHECK-NEXT:    vmov r10, r9, d0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI9_1
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    str r0, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    vmov r2, r11, d0
+; CHECK-NEXT:    str r2, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    str.w r11, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    vmov r7, r6, d8
+; CHECK-NEXT:    str r1, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    ldr r1, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    csel r0, r0, r8, ne
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    str r0, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    str.w r10, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    mov r8, r9
+; CHECK-NEXT:    str.w r9, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    str r0, [sp] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    str r1, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT:    csel r9, r0, r9, ne
+; CHECK-NEXT:    ldr r0, [sp] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r9, #-1
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr.w r11, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    ldr.w r10, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csel r5, r1, r0, ne
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r5, #0
+; CHECK-NEXT:    ldrd r3, r2, [sp, #4] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csel r0, r1, r0, ne
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #0
+; CHECK-NEXT:    ldr r1, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    vmov q0[2], q0[0], r9, r1
+; CHECK-NEXT:    vmov q0[3], q0[1], r0, r5
+; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI9_0:
+; CHECK-NEXT:    .long 4292870144 @ double 4294967295
+; CHECK-NEXT:    .long 1106247679
+; CHECK-NEXT:  .LCPI9_1:
+; CHECK-NEXT:    .long 0 @ double 0
+; CHECK-NEXT:    .long 0
+    %x = call <2 x i32> @llvm.fptoui.sat.v2f64.v2i32(<2 x double> %f)
+    ret <2 x i32> %x
+}
+
+define arm_aapcs_vfpcc <3 x i32> @test_unsigned_v3f64_v3i32(<3 x double> %f) {
+; CHECK-LABEL: test_unsigned_v3f64_v3i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #24
+; CHECK-NEXT:    sub sp, #24
+; CHECK-NEXT:    vmov.f32 s18, s0
+; CHECK-NEXT:    vmov.f32 s19, s1
+; CHECK-NEXT:    vldr d0, .LCPI10_0
+; CHECK-NEXT:    vmov r4, r5, d1
+; CHECK-NEXT:    vmov r9, r7, d0
+; CHECK-NEXT:    vmov.f32 s16, s4
+; CHECK-NEXT:    vmov.f32 s17, s5
+; CHECK-NEXT:    str.w r9, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    str r7, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI10_1
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    str r0, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    vmov r11, r3, d0
+; CHECK-NEXT:    str r3, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    vmov r10, r8, d8
+; CHECK-NEXT:    cmp r6, #0
+; CHECK-NEXT:    ldr r1, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    csel r0, r0, r6, ne
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    str r0, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT:    vmov r5, r4, d9
+; CHECK-NEXT:    mov r0, r10
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r7, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r10
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r10
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    csel r6, r0, r9, ne
+; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r6, #-1
+; CHECK-NEXT:    ldrd r2, r3, [sp, #8] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    cmp r7, #0
+; CHECK-NEXT:    csel r0, r0, r7, ne
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    ldr r1, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    vmov.32 q0[1], r1
+; CHECK-NEXT:    vmov q0[2], q0[0], r0, r6
+; CHECK-NEXT:    add sp, #24
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI10_0:
+; CHECK-NEXT:    .long 4292870144 @ double 4294967295
+; CHECK-NEXT:    .long 1106247679
+; CHECK-NEXT:  .LCPI10_1:
+; CHECK-NEXT:    .long 0 @ double 0
+; CHECK-NEXT:    .long 0
+    %x = call <3 x i32> @llvm.fptoui.sat.v3f64.v3i32(<3 x double> %f)
+    ret <3 x i32> %x
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_unsigned_v4f64_v4i32(<4 x double> %f) {
+; CHECK-LABEL: test_unsigned_v4f64_v4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    .pad #24
+; CHECK-NEXT:    sub sp, #24
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI11_0
+; CHECK-NEXT:    vmov q5, q1
+; CHECK-NEXT:    vmov r7, r9, d0
+; CHECK-NEXT:    vmov r4, r5, d10
+; CHECK-NEXT:    str.w r9, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI11_1
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    str r0, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    vmov r2, r3, d0
+; CHECK-NEXT:    strd r2, r3, [sp, #16] @ 8-byte Folded Spill
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    vmov r10, r8, d8
+; CHECK-NEXT:    cmp r6, #0
+; CHECK-NEXT:    ldr r1, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    csel r0, r0, r6, ne
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    str r0, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    vmov r11, r5, d11
+; CHECK-NEXT:    mov r4, r7
+; CHECK-NEXT:    str r7, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r10
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r6, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    ldr r7, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    str r0, [sp] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r10
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r10
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    csel r8, r0, r9, ne
+; CHECK-NEXT:    ldr r0, [sp] @ 4-byte Reload
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r8, #-1
+; CHECK-NEXT:    ldr.w r10, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r11
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r11
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    vmov r4, r5, d9
+; CHECK-NEXT:    cmp r7, #0
+; CHECK-NEXT:    csel r6, r0, r7, ne
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r6, #-1
+; CHECK-NEXT:    ldr r2, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldrd r2, r3, [sp, #16] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    cmp r7, #0
+; CHECK-NEXT:    csel r0, r0, r7, ne
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    ldr r1, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    vmov q0[2], q0[0], r8, r1
+; CHECK-NEXT:    vmov q0[3], q0[1], r0, r6
+; CHECK-NEXT:    add sp, #24
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI11_0:
+; CHECK-NEXT:    .long 4292870144 @ double 4294967295
+; CHECK-NEXT:    .long 1106247679
+; CHECK-NEXT:  .LCPI11_1:
+; CHECK-NEXT:    .long 0 @ double 0
+; CHECK-NEXT:    .long 0
+    %x = call <4 x i32> @llvm.fptoui.sat.v4f64.v4i32(<4 x double> %f)
+    ret <4 x i32> %x
+}
+
+define arm_aapcs_vfpcc <5 x i32> @test_unsigned_v5f64_v5i32(<5 x double> %f) {
+; CHECK-LABEL: test_unsigned_v5f64_v5i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    .pad #40
+; CHECK-NEXT:    sub sp, #40
+; CHECK-NEXT:    vmov.f32 s16, s0
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    vmov.f32 s17, s1
+; CHECK-NEXT:    vldr d0, .LCPI12_0
+; CHECK-NEXT:    vmov r5, r6, d4
+; CHECK-NEXT:    str r0, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    vmov r2, r3, d0
+; CHECK-NEXT:    vmov.f32 s18, s6
+; CHECK-NEXT:    vmov.f32 s20, s4
+; CHECK-NEXT:    vmov.f32 s22, s2
+; CHECK-NEXT:    vmov.f32 s19, s7
+; CHECK-NEXT:    vmov.f32 s21, s5
+; CHECK-NEXT:    vmov.f32 s23, s3
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    strd r2, r3, [sp, #32] @ 8-byte Folded Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI12_1
+; CHECK-NEXT:    mov r10, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    vmov r7, r3, d0
+; CHECK-NEXT:    str r3, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    str r7, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    vmov r8, r1, d11
+; CHECK-NEXT:    cmp.w r11, #0
+; CHECK-NEXT:    vmov r6, r9, d9
+; CHECK-NEXT:    csel r0, r0, r11, ne
+; CHECK-NEXT:    cmp.w r10, #0
+; CHECK-NEXT:    str r1, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    vmov r2, r1, d10
+; CHECK-NEXT:    strd r2, r1, [sp, #16] @ 8-byte Folded Spill
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    str r0, [r4, #16]
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    ldr r5, [sp, #32] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r9
+; CHECK-NEXT:    ldr.w r10, [sp, #36] @ 4-byte Reload
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    ldr r7, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r9
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r9
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    csel r0, r0, r4, ne
+; CHECK-NEXT:    cmp.w r11, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    ldr r6, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    str r0, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    mov r11, r10
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr.w r10, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    mov r5, r6
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r9, r7
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    cmp r6, #0
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    csel r0, r0, r6, ne
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    ldr r4, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    ldr.w r8, [sp, #32] @ 4-byte Reload
+; CHECK-NEXT:    ldr r6, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    str r0, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r8
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    mov r11, r10
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    cmp r5, #0
+; CHECK-NEXT:    mov r2, r8
+; CHECK-NEXT:    csel r4, r0, r5, ne
+; CHECK-NEXT:    vmov r5, r6, d8
+; CHECK-NEXT:    cmp r7, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r4, #-1
+; CHECK-NEXT:    ldr r3, [sp, #36] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r10, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    cmp r7, #0
+; CHECK-NEXT:    csel r0, r0, r7, ne
+; CHECK-NEXT:    cmp.w r10, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    vmov q0[2], q0[0], r0, r4
+; CHECK-NEXT:    ldr r0, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    ldr r1, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r0
+; CHECK-NEXT:    ldr r0, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    add sp, #40
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI12_0:
+; CHECK-NEXT:    .long 4292870144 @ double 4294967295
+; CHECK-NEXT:    .long 1106247679
+; CHECK-NEXT:  .LCPI12_1:
+; CHECK-NEXT:    .long 0 @ double 0
+; CHECK-NEXT:    .long 0
+    %x = call <5 x i32> @llvm.fptoui.sat.v5f64.v5i32(<5 x double> %f)
+    ret <5 x i32> %x
+}
+
+define arm_aapcs_vfpcc <6 x i32> @test_unsigned_v6f64_v6i32(<6 x double> %f) {
+; CHECK-LABEL: test_unsigned_v6f64_v6i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12}
+; CHECK-NEXT:    .pad #40
+; CHECK-NEXT:    sub sp, #40
+; CHECK-NEXT:    vmov.f32 s16, s0
+; CHECK-NEXT:    str r0, [sp, #32] @ 4-byte Spill
+; CHECK-NEXT:    vmov.f32 s17, s1
+; CHECK-NEXT:    vldr d0, .LCPI13_0
+; CHECK-NEXT:    vmov r5, r6, d5
+; CHECK-NEXT:    vmov r11, r3, d0
+; CHECK-NEXT:    vmov.f32 s18, s8
+; CHECK-NEXT:    vmov.f32 s20, s6
+; CHECK-NEXT:    vmov.f32 s22, s4
+; CHECK-NEXT:    vmov.f32 s24, s2
+; CHECK-NEXT:    vmov.f32 s19, s9
+; CHECK-NEXT:    vmov.f32 s21, s7
+; CHECK-NEXT:    vmov.f32 s23, s5
+; CHECK-NEXT:    vmov.f32 s25, s3
+; CHECK-NEXT:    str r3, [sp, #36] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    str.w r11, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI13_1
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    vmov r4, r9, d0
+; CHECK-NEXT:    str r4, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    vmov r10, r1, d10
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    vmov r5, r6, d9
+; CHECK-NEXT:    csel r0, r0, r8, ne
+; CHECK-NEXT:    cmp r7, #0
+; CHECK-NEXT:    str r1, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT:    vmov r2, r1, d12
+; CHECK-NEXT:    strd r2, r1, [sp, #12] @ 8-byte Folded Spill
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    ldr r7, [sp, #32] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    str r0, [r7, #20]
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    ldr.w r8, [sp, #36] @ 4-byte Reload
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    vmov r2, r1, d11
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    csel r0, r0, r4, ne
+; CHECK-NEXT:    cmp.w r11, #0
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    strd r2, r1, [sp, #4] @ 8-byte Folded Spill
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    str r0, [r7, #16]
+; CHECK-NEXT:    mov r0, r10
+; CHECK-NEXT:    ldr r6, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    ldr.w r11, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r5, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r10
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    mov r8, r9
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r10
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    cmp r7, #0
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    csel r0, r0, r7, ne
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    ldr r7, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    ldr r4, [sp, #36] @ 4-byte Reload
+; CHECK-NEXT:    ldr.w r9, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    str r0, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    str r0, [sp] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    mov r6, r7
+; CHECK-NEXT:    mov r10, r5
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    cmp r7, #0
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    csel r9, r0, r7, ne
+; CHECK-NEXT:    ldr r0, [sp] @ 4-byte Reload
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r9, #-1
+; CHECK-NEXT:    ldr r6, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    ldr r5, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    vmov r5, r6, d8
+; CHECK-NEXT:    cmp r7, #0
+; CHECK-NEXT:    csel r4, r0, r7, ne
+; CHECK-NEXT:    cmp.w r11, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r4, #-1
+; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    ldr r3, [sp, #36] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r2, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    mov r10, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    cmp r7, #0
+; CHECK-NEXT:    csel r0, r0, r7, ne
+; CHECK-NEXT:    cmp.w r10, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    vmov q0[2], q0[0], r0, r4
+; CHECK-NEXT:    ldr r0, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    vmov q0[3], q0[1], r9, r0
+; CHECK-NEXT:    ldr r0, [sp, #32] @ 4-byte Reload
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    add sp, #40
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI13_0:
+; CHECK-NEXT:    .long 4292870144 @ double 4294967295
+; CHECK-NEXT:    .long 1106247679
+; CHECK-NEXT:  .LCPI13_1:
+; CHECK-NEXT:    .long 0 @ double 0
+; CHECK-NEXT:    .long 0
+    %x = call <6 x i32> @llvm.fptoui.sat.v6f64.v6i32(<6 x double> %f)
+    ret <6 x i32> %x
+}
+
+;
+; FP16 to unsigned 32-bit -- Vector size variation
+;
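+; Half-precision inputs are widened to single precision with vcvtb.f32.f16 /
+; vcvtt.f32.f16 (bottom and top halves of each 32-bit lane) and then
+; saturated as in the f32 cases above.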
+
+declare <1 x i32> @llvm.fptoui.sat.v1f16.v1i32 (<1 x half>)
+declare <2 x i32> @llvm.fptoui.sat.v2f16.v2i32 (<2 x half>)
+declare <3 x i32> @llvm.fptoui.sat.v3f16.v3i32 (<3 x half>)
+declare <4 x i32> @llvm.fptoui.sat.v4f16.v4i32 (<4 x half>)
+declare <5 x i32> @llvm.fptoui.sat.v5f16.v5i32 (<5 x half>)
+declare <6 x i32> @llvm.fptoui.sat.v6f16.v6i32 (<6 x half>)
+declare <7 x i32> @llvm.fptoui.sat.v7f16.v7i32 (<7 x half>)
+declare <8 x i32> @llvm.fptoui.sat.v8f16.v8i32 (<8 x half>)
+
+define arm_aapcs_vfpcc <1 x i32> @test_unsigned_v1f16_v1i32(<1 x half> %f) {
+; CHECK-LABEL: test_unsigned_v1f16_v1i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
+; CHECK-NEXT:    vldr s4, .LCPI14_0
+; CHECK-NEXT:    vcvt.u32.f32 s2, s0
+; CHECK-NEXT:    vcmp.f32 s0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s4
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI14_0:
+; CHECK-NEXT:    .long 0x4f7fffff @ float 4.29496704E+9
+    %x = call <1 x i32> @llvm.fptoui.sat.v1f16.v1i32(<1 x half> %f)
+    ret <1 x i32> %x
+}
+
+define arm_aapcs_vfpcc <2 x i32> @test_unsigned_v2f16_v2i32(<2 x half> %f) {
+; CHECK-LABEL: test_unsigned_v2f16_v2i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10}
+; CHECK-NEXT:    vpush {d8, d9, d10}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vcvtt.f32.f16 s18, s16
+; CHECK-NEXT:    vmov r0, s18
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vcvtb.f32.f16 s16, s16
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vldr s20, .LCPI15_0
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r5, #0
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r5, #-1
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r4, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r4, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    vmov q0[2], q0[0], r0, r5
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r1, #0
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r4
+; CHECK-NEXT:    vpop {d8, d9, d10}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI15_0:
+; CHECK-NEXT:    .long 0x4f7fffff @ float 4.29496704E+9
+    %x = call <2 x i32> @llvm.fptoui.sat.v2f16.v2i32(<2 x half> %f)
+    ret <2 x i32> %x
+}
+
+define arm_aapcs_vfpcc <3 x i32> @test_unsigned_v3f16_v3i32(<3 x half> %f) {
+; CHECK-LABEL: test_unsigned_v3f16_v3i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvtb.f32.f16 s8, s1
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s2
+; CHECK-NEXT:    vcvt.u32.f32 s10, s8
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
+; CHECK-NEXT:    vcvt.u32.f32 s12, s2
+; CHECK-NEXT:    vldr s4, .LCPI16_0
+; CHECK-NEXT:    vcvt.u32.f32 s6, s0
+; CHECK-NEXT:    vcmp.f32 s8, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s8, s4
+; CHECK-NEXT:    vmov r0, s10
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s12
+; CHECK-NEXT:    vcmp.f32 s2, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    vcmp.f32 s0, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmov q0[2], q0[0], r2, r1
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI16_0:
+; CHECK-NEXT:    .long 0x4f7fffff @ float 4.29496704E+9
+    %x = call <3 x i32> @llvm.fptoui.sat.v3f16.v3i32(<3 x half> %f)
+    ret <3 x i32> %x
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_unsigned_v4f16_v4i32(<4 x half> %f) {
+; CHECK-LABEL: test_unsigned_v4f16_v4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvtb.f32.f16 s12, s1
+; CHECK-NEXT:    vcvtt.f32.f16 s4, s0
+; CHECK-NEXT:    vcvt.u32.f32 s14, s12
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
+; CHECK-NEXT:    vcvtt.f32.f16 s8, s1
+; CHECK-NEXT:    vcvt.u32.f32 s1, s0
+; CHECK-NEXT:    vcvt.u32.f32 s10, s8
+; CHECK-NEXT:    vldr s2, .LCPI17_0
+; CHECK-NEXT:    vcvt.u32.f32 s6, s4
+; CHECK-NEXT:    vcmp.f32 s12, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s12, s2
+; CHECK-NEXT:    vmov r0, s14
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    vcmp.f32 s0, s2
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s8, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s10
+; CHECK-NEXT:    vcmp.f32 s8, s2
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    vcmp.f32 s4, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s4, s2
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r3, #-1
+; CHECK-NEXT:    vmov q0[3], q0[1], r3, r2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI17_0:
+; CHECK-NEXT:    .long 0x4f7fffff @ float 4.29496704E+9
+    %x = call <4 x i32> @llvm.fptoui.sat.v4f16.v4i32(<4 x half> %f)
+    ret <4 x i32> %x
+}
+
+define arm_aapcs_vfpcc <5 x i32> @test_unsigned_v5f16_v5i32(<5 x half> %f) {
+; CHECK-LABEL: test_unsigned_v5f16_v5i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s10, s1
+; CHECK-NEXT:    vcvt.u32.f32 s3, s2
+; CHECK-NEXT:    vcvtt.f32.f16 s1, s1
+; CHECK-NEXT:    vcvt.u32.f32 s5, s1
+; CHECK-NEXT:    vcvtb.f32.f16 s6, s0
+; CHECK-NEXT:    vcvtt.f32.f16 s0, s0
+; CHECK-NEXT:    vldr s4, .LCPI18_0
+; CHECK-NEXT:    vcvt.u32.f32 s14, s0
+; CHECK-NEXT:    vcvt.u32.f32 s12, s10
+; CHECK-NEXT:    vcmp.f32 s2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcvt.u32.f32 s8, s6
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    vcmp.f32 s2, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r12, s5
+; CHECK-NEXT:    str r1, [r0, #16]
+; CHECK-NEXT:    vcmp.f32 s1, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r12, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r12, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s14
+; CHECK-NEXT:    vcmp.f32 s0, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s10, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vcmp.f32 s10, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vcmp.f32 s6, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r3, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vcmp.f32 s6, s4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r3
+; CHECK-NEXT:    vmov q0[3], q0[1], r2, r12
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI18_0:
+; CHECK-NEXT:    .long 0x4f7fffff @ float 4.29496704E+9
+    %x = call <5 x i32> @llvm.fptoui.sat.v5f16.v5i32(<5 x half> %f)
+    ret <5 x i32> %x
+}
+
+define arm_aapcs_vfpcc <6 x i32> @test_unsigned_v6f16_v6i32(<6 x half> %f) {
+; CHECK-LABEL: test_unsigned_v6f16_v6i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvtt.f32.f16 s5, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s2
+; CHECK-NEXT:    vcvt.u32.f32 s7, s5
+; CHECK-NEXT:    vcvtb.f32.f16 s10, s1
+; CHECK-NEXT:    vcvt.u32.f32 s9, s2
+; CHECK-NEXT:    vcvtt.f32.f16 s1, s1
+; CHECK-NEXT:    vcvt.u32.f32 s3, s1
+; CHECK-NEXT:    vcvtb.f32.f16 s6, s0
+; CHECK-NEXT:    vcvtt.f32.f16 s0, s0
+; CHECK-NEXT:    vldr s4, .LCPI19_0
+; CHECK-NEXT:    vcmp.f32 s5, #0
+; CHECK-NEXT:    vcvt.u32.f32 s14, s0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s5, s4
+; CHECK-NEXT:    vmov r1, s7
+; CHECK-NEXT:    vcvt.u32.f32 s12, s10
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s9
+; CHECK-NEXT:    vcvt.u32.f32 s8, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vcmp.f32 s2, s4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r12, s3
+; CHECK-NEXT:    strd r2, r1, [r0, #16]
+; CHECK-NEXT:    vcmp.f32 s1, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r12, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r12, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s14
+; CHECK-NEXT:    vcmp.f32 s0, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s10, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vcmp.f32 s10, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vcmp.f32 s6, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r3, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vcmp.f32 s6, s4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r3
+; CHECK-NEXT:    vmov q0[3], q0[1], r2, r12
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI19_0:
+; CHECK-NEXT:    .long 0x4f7fffff @ float 4.29496704E+9
+    %x = call <6 x i32> @llvm.fptoui.sat.v6f16.v6i32(<6 x half> %f)
+    ret <6 x i32> %x
+}
+
+define arm_aapcs_vfpcc <7 x i32> @test_unsigned_v7f16_v7i32(<7 x half> %f) {
+; CHECK-LABEL: test_unsigned_v7f16_v7i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvtt.f32.f16 s9, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s2
+; CHECK-NEXT:    vcvt.u32.f32 s11, s9
+; CHECK-NEXT:    vcvtb.f32.f16 s3, s3
+; CHECK-NEXT:    vcvt.u32.f32 s13, s2
+; CHECK-NEXT:    vldr s4, .LCPI20_0
+; CHECK-NEXT:    vcvt.u32.f32 s7, s3
+; CHECK-NEXT:    vcvtb.f32.f16 s10, s1
+; CHECK-NEXT:    vcvtt.f32.f16 s1, s1
+; CHECK-NEXT:    vcmp.f32 s9, #0
+; CHECK-NEXT:    vcvt.u32.f32 s5, s1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s9, s4
+; CHECK-NEXT:    vcvtb.f32.f16 s6, s0
+; CHECK-NEXT:    vcvtt.f32.f16 s0, s0
+; CHECK-NEXT:    vcvt.u32.f32 s12, s10
+; CHECK-NEXT:    vmov r1, s11
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vcvt.u32.f32 s14, s0
+; CHECK-NEXT:    str r1, [r0, #20]
+; CHECK-NEXT:    vcmp.f32 s2, #0
+; CHECK-NEXT:    vmov r1, s13
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vcmp.f32 s2, s4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vcmp.f32 s3, #0
+; CHECK-NEXT:    str r1, [r0, #16]
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s7
+; CHECK-NEXT:    vcvt.u32.f32 s8, s6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vcmp.f32 s3, s4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r12, s5
+; CHECK-NEXT:    str r1, [r0, #24]
+; CHECK-NEXT:    vcmp.f32 s1, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r12, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r12, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s14
+; CHECK-NEXT:    vcmp.f32 s0, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s10, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vcmp.f32 s10, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vcmp.f32 s6, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r3, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vcmp.f32 s6, s4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r3
+; CHECK-NEXT:    vmov q0[3], q0[1], r2, r12
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI20_0:
+; CHECK-NEXT:    .long 0x4f7fffff @ float 4.29496704E+9
+    %x = call <7 x i32> @llvm.fptoui.sat.v7f16.v7i32(<7 x half> %f)
+    ret <7 x i32> %x
+}
+
+define arm_aapcs_vfpcc <8 x i32> @test_unsigned_v8f16_v8i32(<8 x half> %f) {
+; CHECK-LABEL: test_unsigned_v8f16_v8i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    .vsave {d8}
+; CHECK-NEXT:    vpush {d8}
+; CHECK-NEXT:    vcvtt.f32.f16 s11, s3
+; CHECK-NEXT:    vcvtb.f32.f16 s3, s3
+; CHECK-NEXT:    vcvt.u32.f32 s15, s3
+; CHECK-NEXT:    vcvtt.f32.f16 s7, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s2
+; CHECK-NEXT:    vcvt.u32.f32 s13, s11
+; CHECK-NEXT:    vcvt.u32.f32 s16, s2
+; CHECK-NEXT:    vldr s4, .LCPI21_0
+; CHECK-NEXT:    vcvt.u32.f32 s9, s7
+; CHECK-NEXT:    vcvtt.f32.f16 s10, s1
+; CHECK-NEXT:    vcmp.f32 s3, #0
+; CHECK-NEXT:    vcvtb.f32.f16 s1, s1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, s4
+; CHECK-NEXT:    vcvt.u32.f32 s5, s1
+; CHECK-NEXT:    vcvtt.f32.f16 s6, s0
+; CHECK-NEXT:    vmov r12, s15
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r12, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r12, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov lr, s16
+; CHECK-NEXT:    vcmp.f32 s2, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w lr, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcvt.u32.f32 s14, s0
+; CHECK-NEXT:    vcmp.f32 s11, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w lr, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s13
+; CHECK-NEXT:    vcmp.f32 s11, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcvt.u32.f32 s12, s10
+; CHECK-NEXT:    vcmp.f32 s7, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vcvt.u32.f32 s8, s6
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s9
+; CHECK-NEXT:    vcmp.f32 s7, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r3, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r0, s5
+; CHECK-NEXT:    vcmp.f32 s1, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s14
+; CHECK-NEXT:    vcmp.f32 s0, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s10, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r4, s12
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    vcmp.f32 s10, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r4, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r5, s8
+; CHECK-NEXT:    vcmp.f32 s6, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r4, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s6, s4
+; CHECK-NEXT:    vmov q1[2], q1[0], lr, r12
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r5, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r5, #-1
+; CHECK-NEXT:    vmov q0[3], q0[1], r5, r4
+; CHECK-NEXT:    vmov q1[3], q1[1], r3, r2
+; CHECK-NEXT:    vpop {d8}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI21_0:
+; CHECK-NEXT:    .long 0x4f7fffff @ float 4.29496704E+9
+    %x = call <8 x i32> @llvm.fptoui.sat.v8f16.v8i32(<8 x half> %f)
+    ret <8 x i32> %x
+}
+
+;
+; 4-Vector float to unsigned integer -- result size variation
+;
+
+declare <4 x   i1> @llvm.fptoui.sat.v4f32.v4i1  (<4 x float>)
+declare <4 x   i8> @llvm.fptoui.sat.v4f32.v4i8  (<4 x float>)
+declare <4 x  i13> @llvm.fptoui.sat.v4f32.v4i13 (<4 x float>)
+declare <4 x  i16> @llvm.fptoui.sat.v4f32.v4i16 (<4 x float>)
+declare <4 x  i19> @llvm.fptoui.sat.v4f32.v4i19 (<4 x float>)
+declare <4 x  i50> @llvm.fptoui.sat.v4f32.v4i50 (<4 x float>)
+declare <4 x  i64> @llvm.fptoui.sat.v4f32.v4i64 (<4 x float>)
+declare <4 x i100> @llvm.fptoui.sat.v4f32.v4i100(<4 x float>)
+declare <4 x i128> @llvm.fptoui.sat.v4f32.v4i128(<4 x float>)
+
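+; Note: llvm.fptoui.sat.*.iN clamps each lane to [0, 2^N - 1] and maps NaN
+; to 0. For the narrow widths below both bounds are exactly representable
+; in f32, so the expected code clamps in the FP domain with vmaxnm/vminnm
+; before a single vcvt.u32.f32 per lane.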
+define arm_aapcs_vfpcc <4 x i1> @test_unsigned_v4f32_v4i1(<4 x float> %f) {
+; CHECK-LABEL: test_unsigned_v4f32_v4i1:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s4, .LCPI22_0
+; CHECK-NEXT:    vmov.f32 s6, #1.000000e+00
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s4
+; CHECK-NEXT:    vmaxnm.f32 s8, s3, s4
+; CHECK-NEXT:    vminnm.f32 s0, s0, s6
+; CHECK-NEXT:    vmaxnm.f32 s2, s2, s4
+; CHECK-NEXT:    vcvt.u32.f32 s0, s0
+; CHECK-NEXT:    vmaxnm.f32 s4, s1, s4
+; CHECK-NEXT:    vminnm.f32 s4, s4, s6
+; CHECK-NEXT:    vminnm.f32 s2, s2, s6
+; CHECK-NEXT:    vcvt.u32.f32 s4, s4
+; CHECK-NEXT:    vminnm.f32 s8, s8, s6
+; CHECK-NEXT:    vcvt.u32.f32 s2, s2
+; CHECK-NEXT:    vcvt.u32.f32 s8, s8
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    and r2, r2, #1
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    bfi r1, r2, #0, #1
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    and r2, r2, #1
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    bfi r1, r2, #1, #1
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    and r2, r2, #1
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    bfi r1, r2, #2, #1
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    and r2, r2, #1
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    bfi r1, r2, #3, #1
+; CHECK-NEXT:    strb r1, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI22_0:
+; CHECK-NEXT:    .long 0x00000000 @ float 0
+    %x = call <4 x i1> @llvm.fptoui.sat.v4f32.v4i1(<4 x float> %f)
+    ret <4 x i1> %x
+}
+
+define arm_aapcs_vfpcc <4 x i8> @test_unsigned_v4f32_v4i8(<4 x float> %f) {
+; CHECK-LABEL: test_unsigned_v4f32_v4i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s4, .LCPI23_0
+; CHECK-NEXT:    vldr s6, .LCPI23_1
+; CHECK-NEXT:    vmaxnm.f32 s2, s2, s4
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s4
+; CHECK-NEXT:    vmaxnm.f32 s8, s3, s4
+; CHECK-NEXT:    vminnm.f32 s2, s2, s6
+; CHECK-NEXT:    vminnm.f32 s0, s0, s6
+; CHECK-NEXT:    vmaxnm.f32 s4, s1, s4
+; CHECK-NEXT:    vminnm.f32 s8, s8, s6
+; CHECK-NEXT:    vminnm.f32 s4, s4, s6
+; CHECK-NEXT:    vcvt.u32.f32 s2, s2
+; CHECK-NEXT:    vcvt.u32.f32 s0, s0
+; CHECK-NEXT:    vcvt.u32.f32 s8, s8
+; CHECK-NEXT:    vcvt.u32.f32 s4, s4
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov r1, s4
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r0
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI23_0:
+; CHECK-NEXT:    .long 0x00000000 @ float 0
+; CHECK-NEXT:  .LCPI23_1:
+; CHECK-NEXT:    .long 0x437f0000 @ float 255
+    %x = call <4 x i8> @llvm.fptoui.sat.v4f32.v4i8(<4 x float> %f)
+    ret <4 x i8> %x
+}
+
+define arm_aapcs_vfpcc <4 x i13> @test_unsigned_v4f32_v4i13(<4 x float> %f) {
+; CHECK-LABEL: test_unsigned_v4f32_v4i13:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s4, .LCPI24_0
+; CHECK-NEXT:    vldr s6, .LCPI24_1
+; CHECK-NEXT:    vmaxnm.f32 s2, s2, s4
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s4
+; CHECK-NEXT:    vmaxnm.f32 s8, s3, s4
+; CHECK-NEXT:    vminnm.f32 s2, s2, s6
+; CHECK-NEXT:    vminnm.f32 s0, s0, s6
+; CHECK-NEXT:    vmaxnm.f32 s4, s1, s4
+; CHECK-NEXT:    vminnm.f32 s8, s8, s6
+; CHECK-NEXT:    vminnm.f32 s4, s4, s6
+; CHECK-NEXT:    vcvt.u32.f32 s2, s2
+; CHECK-NEXT:    vcvt.u32.f32 s0, s0
+; CHECK-NEXT:    vcvt.u32.f32 s8, s8
+; CHECK-NEXT:    vcvt.u32.f32 s4, s4
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov r1, s4
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r0
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI24_0:
+; CHECK-NEXT:    .long 0x00000000 @ float 0
+; CHECK-NEXT:  .LCPI24_1:
+; CHECK-NEXT:    .long 0x45fff800 @ float 8191
+    %x = call <4 x i13> @llvm.fptoui.sat.v4f32.v4i13(<4 x float> %f)
+    ret <4 x i13> %x
+}
+
+define arm_aapcs_vfpcc <4 x i16> @test_unsigned_v4f32_v4i16(<4 x float> %f) {
+; CHECK-LABEL: test_unsigned_v4f32_v4i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s4, .LCPI25_0
+; CHECK-NEXT:    vldr s6, .LCPI25_1
+; CHECK-NEXT:    vmaxnm.f32 s2, s2, s4
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s4
+; CHECK-NEXT:    vmaxnm.f32 s8, s3, s4
+; CHECK-NEXT:    vminnm.f32 s2, s2, s6
+; CHECK-NEXT:    vminnm.f32 s0, s0, s6
+; CHECK-NEXT:    vmaxnm.f32 s4, s1, s4
+; CHECK-NEXT:    vminnm.f32 s8, s8, s6
+; CHECK-NEXT:    vminnm.f32 s4, s4, s6
+; CHECK-NEXT:    vcvt.u32.f32 s2, s2
+; CHECK-NEXT:    vcvt.u32.f32 s0, s0
+; CHECK-NEXT:    vcvt.u32.f32 s8, s8
+; CHECK-NEXT:    vcvt.u32.f32 s4, s4
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov r1, s4
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r0
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI25_0:
+; CHECK-NEXT:    .long 0x00000000 @ float 0
+; CHECK-NEXT:  .LCPI25_1:
+; CHECK-NEXT:    .long 0x477fff00 @ float 65535
+    %x = call <4 x i16> @llvm.fptoui.sat.v4f32.v4i16(<4 x float> %f)
+    ret <4 x i16> %x
+}
+
+define arm_aapcs_vfpcc <4 x i19> @test_unsigned_v4f32_v4i19(<4 x float> %f) {
+; CHECK-LABEL: test_unsigned_v4f32_v4i19:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s4, .LCPI26_0
+; CHECK-NEXT:    vldr s6, .LCPI26_1
+; CHECK-NEXT:    vmaxnm.f32 s2, s2, s4
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s4
+; CHECK-NEXT:    vmaxnm.f32 s8, s3, s4
+; CHECK-NEXT:    vminnm.f32 s2, s2, s6
+; CHECK-NEXT:    vminnm.f32 s0, s0, s6
+; CHECK-NEXT:    vmaxnm.f32 s4, s1, s4
+; CHECK-NEXT:    vminnm.f32 s8, s8, s6
+; CHECK-NEXT:    vminnm.f32 s4, s4, s6
+; CHECK-NEXT:    vcvt.u32.f32 s2, s2
+; CHECK-NEXT:    vcvt.u32.f32 s0, s0
+; CHECK-NEXT:    vcvt.u32.f32 s8, s8
+; CHECK-NEXT:    vcvt.u32.f32 s4, s4
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov r1, s4
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r0
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI26_0:
+; CHECK-NEXT:    .long 0x00000000 @ float 0
+; CHECK-NEXT:  .LCPI26_1:
+; CHECK-NEXT:    .long 0x48ffffe0 @ float 524287
+    %x = call <4 x i19> @llvm.fptoui.sat.v4f32.v4i19(<4 x float> %f)
+    ret <4 x i19> %x
+}
+
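+; 2^32 - 1 is not exactly representable as an f32 (the largest float below
+; it is 0x4f7fffff), so from i32 upward the clamp cannot be done in the FP
+; domain: the tests instead expect vcmp against the bound plus predicated
+; moves selecting 0 or all-ones.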
+define arm_aapcs_vfpcc <4 x i32> @test_unsigned_v4f32_v4i32_duplicate(<4 x float> %f) {
+; CHECK-LABEL: test_unsigned_v4f32_v4i32_duplicate:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcvt.u32.f32 s10, s2
+; CHECK-NEXT:    vldr s8, .LCPI27_0
+; CHECK-NEXT:    vcvt.u32.f32 s12, s0
+; CHECK-NEXT:    vcvt.u32.f32 s6, s3
+; CHECK-NEXT:    vcvt.u32.f32 s4, s1
+; CHECK-NEXT:    vcmp.f32 s2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s8
+; CHECK-NEXT:    vmov r0, s10
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s12
+; CHECK-NEXT:    vcmp.f32 s0, s8
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    vcmp.f32 s3, s8
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vcmp.f32 s1, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, s8
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r3, #-1
+; CHECK-NEXT:    vmov q0[3], q0[1], r3, r2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI27_0:
+; CHECK-NEXT:    .long 0x4f7fffff @ float 4.29496704E+9
+    %x = call <4 x i32> @llvm.fptoui.sat.v4f32.v4i32(<4 x float> %f)
+    ret <4 x i32> %x
+}
+
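+; Results wider than 32 bits are converted with the AEABI runtime helper
+; __aeabi_f2ulz (f32 -> u64); saturation is applied afterwards with vcmp
+; and predicated moves on each 32-bit half of the result.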
+define arm_aapcs_vfpcc <4 x i50> @test_unsigned_v4f32_v4i50(<4 x float> %f) {
+; CHECK-LABEL: test_unsigned_v4f32_v4i50:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10}
+; CHECK-NEXT:    vpush {d8, d9, d10}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vldr s20, .LCPI28_0
+; CHECK-NEXT:    vmov r5, s18
+; CHECK-NEXT:    vmov r6, s19
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r4, #0
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    mov r5, r1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r5, #0
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    itt gt
+; CHECK-NEXT:    movwgt r5, #65535
+; CHECK-NEXT:    movtgt r5, #3
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vcmp.f32 s19, #0
+; CHECK-NEXT:    mov r6, r1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r6, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    itt gt
+; CHECK-NEXT:    movwgt r6, #65535
+; CHECK-NEXT:    movtgt r6, #3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov r10, r0
+; CHECK-NEXT:    vcmp.f32 s19, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r4, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str.w r4, [r8]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r10, #0
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    lsls r0, r6, #22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r10, #-1
+; CHECK-NEXT:    orr.w r1, r0, r10, lsr #10
+; CHECK-NEXT:    vmov r0, s17
+; CHECK-NEXT:    str.w r1, [r8, #20]
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vcmp.f32 s17, #0
+; CHECK-NEXT:    lsrs r2, r6, #10
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    itt gt
+; CHECK-NEXT:    movwgt r1, #65535
+; CHECK-NEXT:    movtgt r1, #3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r7, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, #0
+; CHECK-NEXT:    itt gt
+; CHECK-NEXT:    movwgt r7, #65535
+; CHECK-NEXT:    movtgt r7, #3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    strb.w r2, [r8, #24]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    bfc r7, #18, #14
+; CHECK-NEXT:    bfc r1, #18, #14
+; CHECK-NEXT:    orr.w r2, r7, r0, lsl #18
+; CHECK-NEXT:    lsrs r0, r0, #14
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    str.w r2, [r8, #4]
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r9, #0
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    lsrs r2, r1, #14
+; CHECK-NEXT:    orr.w r0, r0, r1, lsl #18
+; CHECK-NEXT:    bfc r5, #18, #14
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r9, #-1
+; CHECK-NEXT:    orr.w r2, r2, r9, lsl #4
+; CHECK-NEXT:    str.w r2, [r8, #12]
+; CHECK-NEXT:    str.w r0, [r8, #8]
+; CHECK-NEXT:    lsr.w r0, r9, #28
+; CHECK-NEXT:    orr.w r0, r0, r5, lsl #4
+; CHECK-NEXT:    orr.w r0, r0, r10, lsl #22
+; CHECK-NEXT:    str.w r0, [r8, #16]
+; CHECK-NEXT:    vpop {d8, d9, d10}
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI28_0:
+; CHECK-NEXT:    .long 0x587fffff @ float 1.12589984E+15
+    %x = call <4 x i50> @llvm.fptoui.sat.v4f32.v4i50(<4 x float> %f)
+    ret <4 x i50> %x
+}
+
+define arm_aapcs_vfpcc <4 x i64> @test_unsigned_v4f32_v4i64(<4 x float> %f) {
+; CHECK-LABEL: test_unsigned_v4f32_v4i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9, d10}
+; CHECK-NEXT:    vpush {d8, d9, d10}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vmov r0, s19
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    vmov r0, s18
+; CHECK-NEXT:    vldr s20, .LCPI29_0
+; CHECK-NEXT:    vcmp.f32 s19, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r11, #0
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    mov r10, r1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r9, s17
+; CHECK-NEXT:    vmov r8, s16
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r11, #-1
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r7, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r7, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r10, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov r6, r1
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r10, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r6, #0
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r6, #-1
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    vcmp.f32 s17, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r5, #0
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r5, #-1
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    vmov q1[2], q1[0], r7, r11
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r4, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r4, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    vmov q0[2], q0[0], r0, r5
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r4
+; CHECK-NEXT:    vmov q1[3], q1[1], r6, r10
+; CHECK-NEXT:    vpop {d8, d9, d10}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI29_0:
+; CHECK-NEXT:    .long 0x5f7fffff @ float 1.8446743E+19
+    %x = call <4 x i64> @llvm.fptoui.sat.v4f32.v4i64(<4 x float> %f)
+    ret <4 x i64> %x
+}
+
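+; For i100 and i128 results each lane is converted with compiler-rt's
+; __fixunssfti (f32 -> u128), and each 32-bit chunk of the result is then
+; saturated with vcmp against zero and the type maximum.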
+define arm_aapcs_vfpcc <4 x i100> @test_unsigned_v4f32_v4i100(<4 x float> %f) {
+; CHECK-LABEL: test_unsigned_v4f32_v4i100:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9, d10}
+; CHECK-NEXT:    vpush {d8, d9, d10}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    vmov r0, s18
+; CHECK-NEXT:    vldr s20, .LCPI30_0
+; CHECK-NEXT:    vmov r5, s16
+; CHECK-NEXT:    vmov r7, s19
+; CHECK-NEXT:    bl __fixunssfti
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    mov r6, r3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    str.w r2, [r4, #33]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str.w r1, [r4, #29]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    str.w r0, [r4, #25]
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    bl __fixunssfti
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    mov r5, r3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    str r2, [r4, #8]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #4]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    str r0, [r4]
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    bl __fixunssfti
+; CHECK-NEXT:    vcmp.f32 s19, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    lsr.w r7, r1, #28
+; CHECK-NEXT:    vcmp.f32 s19, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    orr.w r7, r7, r2, lsl #4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str.w r7, [r4, #45]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    lsrs r7, r0, #28
+; CHECK-NEXT:    vcmp.f32 s19, #0
+; CHECK-NEXT:    orr.w r7, r7, r1, lsl #4
+; CHECK-NEXT:    vmov r1, s17
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    str.w r7, [r4, #41]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    lsr.w r2, r2, #28
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r3, #15
+; CHECK-NEXT:    orr.w r2, r2, r3, lsl #4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    strb.w r2, [r4, #49]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r6, #0
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r6, #15
+; CHECK-NEXT:    and r2, r6, #15
+; CHECK-NEXT:    orr.w r0, r2, r0, lsl #4
+; CHECK-NEXT:    str.w r0, [r4, #37]
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bl __fixunssfti
+; CHECK-NEXT:    vcmp.f32 s17, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    lsrs r7, r1, #28
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    orr.w r7, r7, r2, lsl #4
+; CHECK-NEXT:    str r7, [r4, #20]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    lsrs r7, r0, #28
+; CHECK-NEXT:    orr.w r1, r7, r1, lsl #4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    str r1, [r4, #16]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    lsr.w r1, r2, #28
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r3, #15
+; CHECK-NEXT:    orr.w r1, r1, r3, lsl #4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    strb r1, [r4, #24]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r5, #0
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r5, #15
+; CHECK-NEXT:    and r1, r5, #15
+; CHECK-NEXT:    orr.w r0, r1, r0, lsl #4
+; CHECK-NEXT:    str r0, [r4, #12]
+; CHECK-NEXT:    vpop {d8, d9, d10}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI30_0:
+; CHECK-NEXT:    .long 0x717fffff @ float 1.26765052E+30
+    %x = call <4 x i100> @llvm.fptoui.sat.v4f32.v4i100(<4 x float> %f)
+    ret <4 x i100> %x
+}
+
+define arm_aapcs_vfpcc <4 x i128> @test_unsigned_v4f32_v4i128(<4 x float> %f) {
+; CHECK-LABEL: test_unsigned_v4f32_v4i128:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9, d10}
+; CHECK-NEXT:    vpush {d8, d9, d10}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    vmov r0, s19
+; CHECK-NEXT:    bl __fixunssfti
+; CHECK-NEXT:    vmov r5, s18
+; CHECK-NEXT:    vldr s20, .LCPI31_0
+; CHECK-NEXT:    vcmp.f32 s19, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r3, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    str r3, [r4, #60]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    str r2, [r4, #56]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s19, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #52]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s19, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    str r0, [r4, #48]
+; CHECK-NEXT:    vmov r7, s16
+; CHECK-NEXT:    vmov r6, s17
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    bl __fixunssfti
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r3, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    str r3, [r4, #44]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    str r2, [r4, #40]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #36]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    str r0, [r4, #32]
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    bl __fixunssfti
+; CHECK-NEXT:    vcmp.f32 s17, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r3, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    str r3, [r4, #28]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    str r2, [r4, #24]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #20]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s17, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    str r0, [r4, #16]
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    bl __fixunssfti
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r3, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    str r3, [r4, #12]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    str r2, [r4, #8]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #4]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    str r0, [r4]
+; CHECK-NEXT:    vpop {d8, d9, d10}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI31_0:
+; CHECK-NEXT:    .long 0x7f7fffff @ float 3.40282347E+38
+    %x = call <4 x i128> @llvm.fptoui.sat.v4f32.v4i128(<4 x float> %f)
+    ret <4 x i128> %x
+}
+
+;
+; 2-Vector double to unsigned integer -- result size variation
+;
+
+declare <2 x   i1> @llvm.fptoui.sat.v2f64.v2i1  (<2 x double>)
+declare <2 x   i8> @llvm.fptoui.sat.v2f64.v2i8  (<2 x double>)
+declare <2 x  i13> @llvm.fptoui.sat.v2f64.v2i13 (<2 x double>)
+declare <2 x  i16> @llvm.fptoui.sat.v2f64.v2i16 (<2 x double>)
+declare <2 x  i19> @llvm.fptoui.sat.v2f64.v2i19 (<2 x double>)
+declare <2 x  i50> @llvm.fptoui.sat.v2f64.v2i50 (<2 x double>)
+declare <2 x  i64> @llvm.fptoui.sat.v2f64.v2i64 (<2 x double>)
+declare <2 x i100> @llvm.fptoui.sat.v2f64.v2i100(<2 x double>)
+declare <2 x i128> @llvm.fptoui.sat.v2f64.v2i128(<2 x double>)
+
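+; The f64 tests lower entirely through libcalls: __aeabi_dcmpgt and
+; __aeabi_dcmpge implement the range checks, __aeabi_d2ulz performs the
+; conversion, and csel plus predicated moves select the saturated value.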
+define arm_aapcs_vfpcc <2 x i1> @test_unsigned_v2f64_v2i1(<2 x double> %f) {
+; CHECK-LABEL: test_unsigned_v2f64_v2i1:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI32_0
+; CHECK-NEXT:    vmov r5, r4, d9
+; CHECK-NEXT:    vmov r10, r9, d0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI32_1
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    str r0, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    vmov r2, r11, d0
+; CHECK-NEXT:    str r2, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    str.w r11, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    vmov r7, r6, d8
+; CHECK-NEXT:    str r1, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    ldr r1, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    csel r0, r0, r8, ne
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #1
+; CHECK-NEXT:    str r0, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    str.w r10, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    mov r8, r9
+; CHECK-NEXT:    str.w r9, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    str r0, [sp] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    str r1, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT:    csel r9, r0, r9, ne
+; CHECK-NEXT:    ldr r0, [sp] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r9, #1
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr.w r11, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    ldr.w r10, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csel r5, r1, r0, ne
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r5, #0
+; CHECK-NEXT:    ldrd r3, r2, [sp, #4] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csel r0, r1, r0, ne
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #0
+; CHECK-NEXT:    ldr r1, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    vmov q0[2], q0[0], r9, r1
+; CHECK-NEXT:    vmov q0[3], q0[1], r0, r5
+; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI32_0:
+; CHECK-NEXT:    .long 0 @ double 1
+; CHECK-NEXT:    .long 1072693248
+; CHECK-NEXT:  .LCPI32_1:
+; CHECK-NEXT:    .long 0 @ double 0
+; CHECK-NEXT:    .long 0
+    %x = call <2 x i1> @llvm.fptoui.sat.v2f64.v2i1(<2 x double> %f)
+    ret <2 x i1> %x
+}
+
+define arm_aapcs_vfpcc <2 x i8> @test_unsigned_v2f64_v2i8(<2 x double> %f) {
+; CHECK-LABEL: test_unsigned_v2f64_v2i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI33_0
+; CHECK-NEXT:    vmov r5, r4, d9
+; CHECK-NEXT:    vmov r10, r9, d0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI33_1
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    str r0, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    vmov r2, r11, d0
+; CHECK-NEXT:    str r2, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    str.w r11, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    vmov r7, r6, d8
+; CHECK-NEXT:    str r1, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    ldr r1, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    csel r0, r0, r8, ne
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #255
+; CHECK-NEXT:    str r0, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    str.w r10, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    mov r8, r9
+; CHECK-NEXT:    str.w r9, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    str r0, [sp] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    str r1, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT:    csel r9, r0, r9, ne
+; CHECK-NEXT:    ldr r0, [sp] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r9, #255
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr.w r11, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    ldr.w r10, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csel r5, r1, r0, ne
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r5, #0
+; CHECK-NEXT:    ldrd r3, r2, [sp, #4] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csel r0, r1, r0, ne
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #0
+; CHECK-NEXT:    ldr r1, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    vmov q0[2], q0[0], r9, r1
+; CHECK-NEXT:    vmov q0[3], q0[1], r0, r5
+; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI33_0:
+; CHECK-NEXT:    .long 0 @ double 255
+; CHECK-NEXT:    .long 1081073664
+; CHECK-NEXT:  .LCPI33_1:
+; CHECK-NEXT:    .long 0 @ double 0
+; CHECK-NEXT:    .long 0
+    %x = call <2 x i8> @llvm.fptoui.sat.v2f64.v2i8(<2 x double> %f)
+    ret <2 x i8> %x
+}
+
+define arm_aapcs_vfpcc <2 x i13> @test_unsigned_v2f64_v2i13(<2 x double> %f) {
+; CHECK-LABEL: test_unsigned_v2f64_v2i13:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI34_0
+; CHECK-NEXT:    vmov r5, r4, d9
+; CHECK-NEXT:    vmov r10, r9, d0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI34_1
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    str r0, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    vmov r2, r11, d0
+; CHECK-NEXT:    str r2, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    str.w r11, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    vmov r7, r6, d8
+; CHECK-NEXT:    str r1, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    ldr r1, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    csel r0, r0, r8, ne
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movwne r0, #8191
+; CHECK-NEXT:    str r0, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    str.w r10, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    mov r8, r9
+; CHECK-NEXT:    str.w r9, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    str r0, [sp] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    str r1, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT:    csel r9, r0, r9, ne
+; CHECK-NEXT:    ldr r0, [sp] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movwne r9, #8191
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr.w r11, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    ldr.w r10, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csel r5, r1, r0, ne
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r5, #0
+; CHECK-NEXT:    ldrd r3, r2, [sp, #4] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csel r0, r1, r0, ne
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #0
+; CHECK-NEXT:    ldr r1, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    vmov q0[2], q0[0], r9, r1
+; CHECK-NEXT:    vmov q0[3], q0[1], r0, r5
+; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI34_0:
+; CHECK-NEXT:    .long 0 @ double 8191
+; CHECK-NEXT:    .long 1086324480
+; CHECK-NEXT:  .LCPI34_1:
+; CHECK-NEXT:    .long 0 @ double 0
+; CHECK-NEXT:    .long 0
+    %x = call <2 x i13> @llvm.fptoui.sat.v2f64.v2i13(<2 x double> %f)
+    ret <2 x i13> %x
+}
+
+define arm_aapcs_vfpcc <2 x i16> @test_unsigned_v2f64_v2i16(<2 x double> %f) {
+; CHECK-LABEL: test_unsigned_v2f64_v2i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI35_0
+; CHECK-NEXT:    vmov r5, r4, d9
+; CHECK-NEXT:    vmov r10, r9, d0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI35_1
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    str r0, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    vmov r2, r11, d0
+; CHECK-NEXT:    str r2, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    str.w r11, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    vmov r7, r6, d8
+; CHECK-NEXT:    str r1, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    ldr r1, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    csel r0, r0, r8, ne
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movwne r0, #65535
+; CHECK-NEXT:    str r0, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    str.w r10, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    mov r8, r9
+; CHECK-NEXT:    str.w r9, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    str r0, [sp] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    str r1, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT:    csel r9, r0, r9, ne
+; CHECK-NEXT:    ldr r0, [sp] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movwne r9, #65535
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr.w r11, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    ldr.w r10, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csel r5, r1, r0, ne
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r5, #0
+; CHECK-NEXT:    ldrd r3, r2, [sp, #4] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csel r0, r1, r0, ne
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #0
+; CHECK-NEXT:    ldr r1, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    vmov q0[2], q0[0], r9, r1
+; CHECK-NEXT:    vmov q0[3], q0[1], r0, r5
+; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI35_0:
+; CHECK-NEXT:    .long 0 @ double 65535
+; CHECK-NEXT:    .long 1089470432
+; CHECK-NEXT:  .LCPI35_1:
+; CHECK-NEXT:    .long 0 @ double 0
+; CHECK-NEXT:    .long 0
+    %x = call <2 x i16> @llvm.fptoui.sat.v2f64.v2i16(<2 x double> %f)
+    ret <2 x i16> %x
+}
+
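Each f64 lane here is lowered through soft-float libcalls: __aeabi_dcmpge and __aeabi_dcmpgt bracket the lane against the saturation bounds, __aeabi_d2ulz performs the raw conversion, and the csel/movne pairs force out-of-range lanes to 0 or to the type maximum. A rough per-lane model in IR (an illustrative sketch, not the backend's literal output):

    define i16 @sat_lane_u16(double %d) {
      %ge = fcmp oge double %d, 0.0       ; __aeabi_dcmpge: false for NaN too
      %gt = fcmp ogt double %d, 65535.0   ; __aeabi_dcmpgt: above the i16 range?
      %lo = select i1 %ge, double %d, double 0.0
      %cl = select i1 %gt, double 65535.0, double %lo
      %cv = fptoui double %cl to i16      ; __aeabi_d2ulz does the conversion
      ret i16 %cv
    }

NaN fails the oge compare and is clamped to 0.0 first, which is how the sequence satisfies the fptoui.sat rule that NaN lanes yield zero.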
+define arm_aapcs_vfpcc <2 x i19> @test_unsigned_v2f64_v2i19(<2 x double> %f) {
+; CHECK-LABEL: test_unsigned_v2f64_v2i19:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI36_0
+; CHECK-NEXT:    vmov r5, r4, d9
+; CHECK-NEXT:    vmov r10, r9, d0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI36_1
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    str r0, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    vmov r2, r11, d0
+; CHECK-NEXT:    str r2, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    str.w r11, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    vmov r7, r6, d8
+; CHECK-NEXT:    str r1, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    ldr r1, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    csel r0, r0, r8, ne
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    movwne r0, #65535
+; CHECK-NEXT:    movtne r0, #7
+; CHECK-NEXT:    str r0, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    str.w r10, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    mov r8, r9
+; CHECK-NEXT:    str.w r9, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    str r0, [sp] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    str r1, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT:    csel r9, r0, r9, ne
+; CHECK-NEXT:    ldr r0, [sp] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    movwne r9, #65535
+; CHECK-NEXT:    movtne r9, #7
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr.w r11, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    ldr.w r10, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csel r5, r1, r0, ne
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r5, #0
+; CHECK-NEXT:    ldrd r3, r2, [sp, #4] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csel r0, r1, r0, ne
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #0
+; CHECK-NEXT:    ldr r1, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    vmov q0[2], q0[0], r9, r1
+; CHECK-NEXT:    vmov q0[3], q0[1], r0, r5
+; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI36_0:
+; CHECK-NEXT:    .long 0 @ double 524287
+; CHECK-NEXT:    .long 1092616188
+; CHECK-NEXT:  .LCPI36_1:
+; CHECK-NEXT:    .long 0 @ double 0
+; CHECK-NEXT:    .long 0
+    %x = call <2 x i19> @llvm.fptoui.sat.v2i19.v2f64(<2 x double> %f)
+    ret <2 x i19> %x
+}
+
+define arm_aapcs_vfpcc <2 x i32> @test_unsigned_v2f64_v2i32_duplicate(<2 x double> %f) {
+; CHECK-LABEL: test_unsigned_v2f64_v2i32_duplicate:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI37_0
+; CHECK-NEXT:    vmov r5, r4, d9
+; CHECK-NEXT:    vmov r10, r9, d0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI37_1
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    str r0, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    vmov r2, r11, d0
+; CHECK-NEXT:    str r2, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    str.w r11, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    vmov r7, r6, d8
+; CHECK-NEXT:    str r1, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    ldr r1, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    csel r0, r0, r8, ne
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    str r0, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    str.w r10, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    mov r8, r9
+; CHECK-NEXT:    str.w r9, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    str r0, [sp] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    str r1, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT:    csel r9, r0, r9, ne
+; CHECK-NEXT:    ldr r0, [sp] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r9, #-1
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr.w r11, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    ldr.w r10, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csel r5, r1, r0, ne
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r5, #0
+; CHECK-NEXT:    ldrd r3, r2, [sp, #4] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csel r0, r1, r0, ne
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #0
+; CHECK-NEXT:    ldr r1, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    vmov q0[2], q0[0], r9, r1
+; CHECK-NEXT:    vmov q0[3], q0[1], r0, r5
+; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI37_0:
+; CHECK-NEXT:    .long 4292870144 @ double 4294967295
+; CHECK-NEXT:    .long 1106247679
+; CHECK-NEXT:  .LCPI37_1:
+; CHECK-NEXT:    .long 0 @ double 0
+; CHECK-NEXT:    .long 0
+    %x = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f64(<2 x double> %f)
+    ret <2 x i32> %x
+}
+
+define arm_aapcs_vfpcc <2 x i50> @test_unsigned_v2f64_v2i50(<2 x double> %f) {
+; CHECK-LABEL: test_unsigned_v2f64_v2i50:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI38_0
+; CHECK-NEXT:    vmov r5, r4, d9
+; CHECK-NEXT:    vmov r10, r9, d0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI38_1
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    str r0, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    vmov r2, r11, d0
+; CHECK-NEXT:    str r2, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    str.w r11, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    vmov r7, r6, d8
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    str r0, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT:    csel r0, r1, r8, ne
+; CHECK-NEXT:    ldr r1, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    mov r8, r9
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    movwne r0, #65535
+; CHECK-NEXT:    movtne r0, #3
+; CHECK-NEXT:    str r0, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    str.w r10, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    str.w r9, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    str r0, [sp] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    str r0, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    ldr r0, [sp] @ 4-byte Reload
+; CHECK-NEXT:    csel r9, r1, r9, ne
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    movwne r9, #65535
+; CHECK-NEXT:    movtne r9, #3
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr.w r11, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    ldr.w r10, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csel r5, r1, r0, ne
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r5, #-1
+; CHECK-NEXT:    ldrd r3, r2, [sp, #4] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csel r0, r1, r0, ne
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    vmov q0[2], q0[0], r0, r5
+; CHECK-NEXT:    ldr r0, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    vmov q0[3], q0[1], r9, r0
+; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI38_0:
+; CHECK-NEXT:    .long 4294967288 @ double 1125899906842623
+; CHECK-NEXT:    .long 1125122047
+; CHECK-NEXT:  .LCPI38_1:
+; CHECK-NEXT:    .long 0 @ double 0
+; CHECK-NEXT:    .long 0
+    %x = call <2 x i50> @llvm.fptoui.sat.v2i50.v2f64(<2 x double> %f)
+    ret <2 x i50> %x
+}
+
+define arm_aapcs_vfpcc <2 x i64> @test_unsigned_v2f64_v2i64(<2 x double> %f) {
+; CHECK-LABEL: test_unsigned_v2f64_v2i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI39_0
+; CHECK-NEXT:    vmov r5, r4, d9
+; CHECK-NEXT:    vmov r10, r9, d0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI39_1
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    str r0, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    vmov r2, r11, d0
+; CHECK-NEXT:    str r2, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    str.w r11, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    vmov r7, r6, d8
+; CHECK-NEXT:    str r1, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    ldr r1, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    csel r0, r0, r8, ne
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    str r0, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    str.w r10, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    mov r8, r9
+; CHECK-NEXT:    str.w r9, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    str r0, [sp] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    str r1, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT:    csel r9, r0, r9, ne
+; CHECK-NEXT:    ldr r0, [sp] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r3, r8
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r9, #-1
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr.w r11, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    ldr.w r10, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csel r5, r1, r0, ne
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r5, #-1
+; CHECK-NEXT:    ldrd r3, r2, [sp, #4] @ 8-byte Folded Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csel r0, r1, r0, ne
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    ldr r1, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    vmov q0[2], q0[0], r9, r1
+; CHECK-NEXT:    vmov q0[3], q0[1], r0, r5
+; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI39_0:
+; CHECK-NEXT:    .long 4294967295 @ double 1.844674407370955E+19
+; CHECK-NEXT:    .long 1139802111
+; CHECK-NEXT:  .LCPI39_1:
+; CHECK-NEXT:    .long 0 @ double 0
+; CHECK-NEXT:    .long 0
+    %x = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f64(<2 x double> %f)
+    ret <2 x i64> %x
+}
+
+define arm_aapcs_vfpcc <2 x i100> @test_unsigned_v2f64_v2i100(<2 x double> %f) {
+; CHECK-LABEL: test_unsigned_v2f64_v2i100:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #48
+; CHECK-NEXT:    sub sp, #48
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI40_0
+; CHECK-NEXT:    vmov r9, r5, d8
+; CHECK-NEXT:    str r0, [sp, #44] @ 4-byte Spill
+; CHECK-NEXT:    vmov r2, r3, d0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r7, r2
+; CHECK-NEXT:    mov r6, r3
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI40_1
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    vmov r2, r3, d0
+; CHECK-NEXT:    str r2, [sp, #40] @ 4-byte Spill
+; CHECK-NEXT:    mov r10, r3
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __fixunsdfti
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    strd r1, r0, [sp, #8] @ 8-byte Folded Spill
+; CHECK-NEXT:    csel r0, r2, r8, ne
+; CHECK-NEXT:    str r3, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    cmp.w r11, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    ldr r4, [sp, #44] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    mov r11, r7
+; CHECK-NEXT:    str r0, [r4, #8]
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    str r5, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT:    str r7, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    str r6, [sp, #32] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r7, [sp, #40] @ 4-byte Reload
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    str.w r9, [sp, #16] @ 4-byte Spill
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    csel r0, r1, r0, ne
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    str r0, [r4, #4]
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    mov r9, r7
+; CHECK-NEXT:    str.w r10, [sp, #36] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    vmov r8, r11, d9
+; CHECK-NEXT:    ldr r1, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csel r0, r1, r0, ne
+; CHECK-NEXT:    cmp r6, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    str r0, [r4]
+; CHECK-NEXT:    ldr r5, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    ldr r6, [sp, #32] @ 4-byte Reload
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r11
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r11
+; CHECK-NEXT:    mov r2, r7
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r10, r0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r11
+; CHECK-NEXT:    bl __fixunsdfti
+; CHECK-NEXT:    cmp.w r10, #0
+; CHECK-NEXT:    strd r2, r0, [sp, #4] @ 8-byte Folded Spill
+; CHECK-NEXT:    csel r7, r1, r10, ne
+; CHECK-NEXT:    str r3, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r11
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r7, #-1
+; CHECK-NEXT:    mov r4, r6
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr.w r10, [sp, #36] @ 4-byte Reload
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r11
+; CHECK-NEXT:    mov r2, r9
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    csel r9, r1, r0, ne
+; CHECK-NEXT:    cmp r6, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r9, #-1
+; CHECK-NEXT:    ldr r6, [sp, #44] @ 4-byte Reload
+; CHECK-NEXT:    lsrs r0, r7, #28
+; CHECK-NEXT:    mov r1, r11
+; CHECK-NEXT:    orr.w r0, r0, r9, lsl #4
+; CHECK-NEXT:    str r0, [r6, #20]
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r2, [sp, #40] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r11
+; CHECK-NEXT:    str r0, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    mov r5, r10
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csel r4, r1, r0, ne
+; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r11
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r4, #-1
+; CHECK-NEXT:    lsrs r0, r4, #28
+; CHECK-NEXT:    orr.w r0, r0, r7, lsl #4
+; CHECK-NEXT:    str r0, [r6, #16]
+; CHECK-NEXT:    ldr r6, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    ldr.w r10, [sp, #32] @ 4-byte Reload
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r1, r11
+; CHECK-NEXT:    ldr.w r11, [sp, #40] @ 4-byte Reload
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r10
+; CHECK-NEXT:    csel r0, r1, r0, ne
+; CHECK-NEXT:    cmp r7, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #15
+; CHECK-NEXT:    lsr.w r1, r9, #28
+; CHECK-NEXT:    ldr.w r9, [sp, #44] @ 4-byte Reload
+; CHECK-NEXT:    orr.w r0, r1, r0, lsl #4
+; CHECK-NEXT:    strb.w r0, [r9, #24]
+; CHECK-NEXT:    ldr r7, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    ldr r5, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r3, [sp, #36] @ 4-byte Reload
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csel r0, r1, r0, ne
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #15
+; CHECK-NEXT:    and r0, r0, #15
+; CHECK-NEXT:    orr.w r0, r0, r4, lsl #4
+; CHECK-NEXT:    str.w r0, [r9, #12]
+; CHECK-NEXT:    add sp, #48
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI40_0:
+; CHECK-NEXT:    .long 4294967295 @ double 1.2676506002282293E+30
+; CHECK-NEXT:    .long 1177550847
+; CHECK-NEXT:  .LCPI40_1:
+; CHECK-NEXT:    .long 0 @ double 0
+; CHECK-NEXT:    .long 0
+    %x = call <2 x i100> @llvm.fptoui.sat.v2i100.v2f64(<2 x double> %f)
+    ret <2 x i100> %x
+}
+
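For results wider than 64 bits the raw conversion goes through __fixunsdfti (returning a 128-bit value), the upper bound is the largest double below 2^100, and the saturated 100-bit lanes are packed bytewise through the sret pointer (the str/strb sequence above). Per lane, roughly (an illustrative sketch; the extra select keeps the fptoui in range):

    define i100 @sat_lane_u100(double %d) {
      %ge  = fcmp oge double %d, 0.0                 ; NaN and negatives fail
      %lo  = select i1 %ge, double %d, double 0.0
      %gt  = fcmp ogt double %d, 0x462FFFFFFFFFFFFF  ; largest double < 2^100
      %in  = select i1 %gt, double 0.0, double %lo   ; keep fptoui in range
      %cv  = fptoui double %in to i100               ; __fixunsdfti
      %res = select i1 %gt, i100 -1, i100 %cv        ; overflow -> 2^100-1
      ret i100 %res
    }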
+define arm_aapcs_vfpcc <2 x i128> @test_unsigned_v2f64_v2i128(<2 x double> %f) {
+; CHECK-LABEL: test_unsigned_v2f64_v2i128:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI41_0
+; CHECK-NEXT:    vmov r8, r7, d9
+; CHECK-NEXT:    str r0, [sp, #24] @ 4-byte Spill
+; CHECK-NEXT:    vmov r6, r4, d0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    vldr d0, .LCPI41_1
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    vmov r10, r11, d0
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    bl __fixunsdfti
+; CHECK-NEXT:    cmp r5, #0
+; CHECK-NEXT:    strd r1, r0, [sp, #16] @ 8-byte Folded Spill
+; CHECK-NEXT:    csel r0, r3, r5, ne
+; CHECK-NEXT:    str r2, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    ldr r5, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    str r0, [r5, #28]
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    str r6, [sp, #28] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    str.w r10, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    csel r0, r1, r0, ne
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    str r0, [r5, #24]
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    str r4, [sp] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    vmov r6, r5, d8
+; CHECK-NEXT:    csel r0, r1, r0, ne
+; CHECK-NEXT:    cmp.w r9, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    ldr.w r9, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    str.w r0, [r9, #20]
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    ldr r2, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    ldr r4, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    mov r10, r0
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    mov r3, r11
+; CHECK-NEXT:    mov r7, r11
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    str.w r11, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r11, r9
+; CHECK-NEXT:    csel r0, r1, r0, ne
+; CHECK-NEXT:    cmp.w r10, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    str.w r0, [r9, #16]
+; CHECK-NEXT:    ldr.w r8, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    ldr.w r9, [sp] @ 4-byte Reload
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r8
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r10, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __fixunsdfti
+; CHECK-NEXT:    cmp r7, #0
+; CHECK-NEXT:    strd r1, r0, [sp, #16] @ 8-byte Folded Spill
+; CHECK-NEXT:    csel r0, r3, r7, ne
+; CHECK-NEXT:    str r2, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    cmp.w r10, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    str.w r0, [r11, #12]
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r8
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    mov r7, r11
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    mov r10, r4
+; CHECK-NEXT:    ldr r4, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r2, r8
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    csel r0, r1, r0, ne
+; CHECK-NEXT:    cmp.w r11, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    str r0, [r7, #8]
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mov r2, r8
+; CHECK-NEXT:    mov r3, r9
+; CHECK-NEXT:    csel r0, r1, r0, ne
+; CHECK-NEXT:    cmp.w r11, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    str r0, [r7, #4]
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_dcmpgt
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r10
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    bl __aeabi_dcmpge
+; CHECK-NEXT:    ldr r1, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csel r0, r1, r0, ne
+; CHECK-NEXT:    cmp.w r8, #0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne.w r0, #-1
+; CHECK-NEXT:    str r0, [r7]
+; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI41_0:
+; CHECK-NEXT:    .long 4294967295 @ double 3.4028236692093843E+38
+; CHECK-NEXT:    .long 1206910975
+; CHECK-NEXT:  .LCPI41_1:
+; CHECK-NEXT:    .long 0 @ double 0
+; CHECK-NEXT:    .long 0
+    %x = call <2 x i128> @llvm.fptoui.sat.v2i128.v2f64(<2 x double> %f)
+    ret <2 x i128> %x
+}
+
+;
+; 8-Vector half to unsigned integer -- result size variation
+;
+
+declare <8 x   i1> @llvm.fptoui.sat.v8i1.v8f16  (<8 x half>)
+declare <8 x   i8> @llvm.fptoui.sat.v8i8.v8f16  (<8 x half>)
+declare <8 x  i13> @llvm.fptoui.sat.v8i13.v8f16 (<8 x half>)
+declare <8 x  i16> @llvm.fptoui.sat.v8i16.v8f16 (<8 x half>)
+declare <8 x  i19> @llvm.fptoui.sat.v8i19.v8f16 (<8 x half>)
+declare <8 x  i50> @llvm.fptoui.sat.v8i50.v8f16 (<8 x half>)
+declare <8 x  i64> @llvm.fptoui.sat.v8i64.v8f16 (<8 x half>)
+declare <8 x i100> @llvm.fptoui.sat.v8i100.v8f16(<8 x half>)
+declare <8 x i128> @llvm.fptoui.sat.v8i128.v8f16(<8 x half>)
+
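These declarations exercise result widths on either side of the legal lane sizes. The semantics are uniform regardless of width: each lane clamps to [0, 2^N - 1] and NaN becomes 0. A minimal scalar illustration (a hypothetical example, not part of the test file):

    declare i13 @llvm.fptoui.sat.i13.f16(half)

    define i13 @example(half %h) {
      ; 65504.0 (the largest half) clamps to 8191 = 2^13 - 1;
      ; -1.0 and NaN both yield 0.
      %r = call i13 @llvm.fptoui.sat.i13.f16(half %h)
      ret i13 %r
    }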
+define arm_aapcs_vfpcc <8 x i1> @test_unsigned_v8f16_v8i1(<8 x half> %f) {
+; CHECK-LABEL: test_unsigned_v8f16_v8i1:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s4, .LCPI42_0
+; CHECK-NEXT:    vcvtt.f32.f16 s8, s3
+; CHECK-NEXT:    vcvtb.f32.f16 s10, s3
+; CHECK-NEXT:    vcvtb.f32.f16 s3, s0
+; CHECK-NEXT:    vmov.f32 s6, #1.000000e+00
+; CHECK-NEXT:    vmaxnm.f32 s3, s3, s4
+; CHECK-NEXT:    vminnm.f32 s3, s3, s6
+; CHECK-NEXT:    vcvtt.f32.f16 s0, s0
+; CHECK-NEXT:    vcvt.u32.f32 s3, s3
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s4
+; CHECK-NEXT:    vminnm.f32 s0, s0, s6
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    vcvt.u32.f32 s0, s0
+; CHECK-NEXT:    vcvtt.f32.f16 s14, s1
+; CHECK-NEXT:    vcvtb.f32.f16 s1, s1
+; CHECK-NEXT:    vmaxnm.f32 s14, s14, s4
+; CHECK-NEXT:    vmaxnm.f32 s1, s1, s4
+; CHECK-NEXT:    vminnm.f32 s14, s14, s6
+; CHECK-NEXT:    vminnm.f32 s1, s1, s6
+; CHECK-NEXT:    vcvt.u32.f32 s14, s14
+; CHECK-NEXT:    vcvt.u32.f32 s1, s1
+; CHECK-NEXT:    vcvtt.f32.f16 s12, s2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s2
+; CHECK-NEXT:    vmaxnm.f32 s2, s2, s4
+; CHECK-NEXT:    vmaxnm.f32 s12, s12, s4
+; CHECK-NEXT:    vminnm.f32 s2, s2, s6
+; CHECK-NEXT:    vminnm.f32 s12, s12, s6
+; CHECK-NEXT:    vcvt.u32.f32 s2, s2
+; CHECK-NEXT:    vmaxnm.f32 s10, s10, s4
+; CHECK-NEXT:    vcvt.u32.f32 s12, s12
+; CHECK-NEXT:    vminnm.f32 s10, s10, s6
+; CHECK-NEXT:    vcvt.u32.f32 s10, s10
+; CHECK-NEXT:    vmaxnm.f32 s8, s8, s4
+; CHECK-NEXT:    vminnm.f32 s8, s8, s6
+; CHECK-NEXT:    vcvt.u32.f32 s8, s8
+; CHECK-NEXT:    and r2, r2, #1
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    bfi r1, r2, #0, #1
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    and r2, r2, #1
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    bfi r1, r2, #1, #1
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    and r2, r2, #1
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    bfi r1, r2, #2, #1
+; CHECK-NEXT:    vmov r2, s14
+; CHECK-NEXT:    and r2, r2, #1
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    bfi r1, r2, #3, #1
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    and r2, r2, #1
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    bfi r1, r2, #4, #1
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    and r2, r2, #1
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    bfi r1, r2, #5, #1
+; CHECK-NEXT:    vmov r2, s10
+; CHECK-NEXT:    and r2, r2, #1
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    bfi r1, r2, #6, #1
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    and r2, r2, #1
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    bfi r1, r2, #7, #1
+; CHECK-NEXT:    strb r1, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI42_0:
+; CHECK-NEXT:    .long 0x00000000 @ float 0
+    %x = call <8 x i1> @llvm.fptoui.sat.v8i1.v8f16(<8 x half> %f)
+    ret <8 x i1> %x
+}
+
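In contrast to the f64 cases, the fp16 lanes stay in registers: vcvtb/vcvtt widen each half to f32, vmaxnm/vminnm clamp to the bounds (and, because maxNum returns the non-NaN operand, also fold NaN to the lower bound), and vcvt.u32.f32 converts. A branch-free per-lane model of the i1 case (an illustrative sketch):

    declare float @llvm.maxnum.f32(float, float)
    declare float @llvm.minnum.f32(float, float)

    define i1 @lane_u1(float %x) {
      %a = call float @llvm.maxnum.f32(float %x, float 0.0) ; vmaxnm: NaN -> 0.0
      %b = call float @llvm.minnum.f32(float %a, float 1.0) ; vminnm
      %c = fptoui float %b to i32                           ; vcvt.u32.f32
      %r = trunc i32 %c to i1
      ret i1 %r
    }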
+define arm_aapcs_vfpcc <8 x i8> @test_unsigned_v8f16_v8i8(<8 x half> %f) {
+; CHECK-LABEL: test_unsigned_v8f16_v8i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s6, .LCPI43_1
+; CHECK-NEXT:    vcvtt.f32.f16 s10, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s2
+; CHECK-NEXT:    vldr s4, .LCPI43_0
+; CHECK-NEXT:    vmaxnm.f32 s2, s2, s6
+; CHECK-NEXT:    vcvtt.f32.f16 s8, s3
+; CHECK-NEXT:    vminnm.f32 s2, s2, s4
+; CHECK-NEXT:    vcvtb.f32.f16 s12, s3
+; CHECK-NEXT:    vcvt.u32.f32 s5, s2
+; CHECK-NEXT:    vcvtt.f32.f16 s2, s0
+; CHECK-NEXT:    vmaxnm.f32 s2, s2, s6
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s6
+; CHECK-NEXT:    vminnm.f32 s2, s2, s4
+; CHECK-NEXT:    vminnm.f32 s0, s0, s4
+; CHECK-NEXT:    vcvt.u32.f32 s7, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s1
+; CHECK-NEXT:    vcvtt.f32.f16 s14, s1
+; CHECK-NEXT:    vmaxnm.f32 s2, s2, s6
+; CHECK-NEXT:    vcvt.u32.f32 s0, s0
+; CHECK-NEXT:    vmaxnm.f32 s8, s8, s6
+; CHECK-NEXT:    vmaxnm.f32 s10, s10, s6
+; CHECK-NEXT:    vmaxnm.f32 s12, s12, s6
+; CHECK-NEXT:    vmaxnm.f32 s14, s14, s6
+; CHECK-NEXT:    vminnm.f32 s2, s2, s4
+; CHECK-NEXT:    vminnm.f32 s8, s8, s4
+; CHECK-NEXT:    vminnm.f32 s10, s10, s4
+; CHECK-NEXT:    vminnm.f32 s12, s12, s4
+; CHECK-NEXT:    vminnm.f32 s14, s14, s4
+; CHECK-NEXT:    vcvt.u32.f32 s4, s2
+; CHECK-NEXT:    vcvt.u32.f32 s14, s14
+; CHECK-NEXT:    vcvt.u32.f32 s10, s10
+; CHECK-NEXT:    vcvt.u32.f32 s12, s12
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vcvt.u32.f32 s8, s8
+; CHECK-NEXT:    vmov.16 q0[0], r0
+; CHECK-NEXT:    vmov r0, s7
+; CHECK-NEXT:    vmov.16 q0[1], r0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov.16 q0[2], r0
+; CHECK-NEXT:    vmov r0, s14
+; CHECK-NEXT:    vmov.16 q0[3], r0
+; CHECK-NEXT:    vmov r0, s5
+; CHECK-NEXT:    vmov.16 q0[4], r0
+; CHECK-NEXT:    vmov r0, s10
+; CHECK-NEXT:    vmov.16 q0[5], r0
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vmov.16 q0[6], r0
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov.16 q0[7], r0
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI43_0:
+; CHECK-NEXT:    .long 0x437f0000 @ float 255
+; CHECK-NEXT:  .LCPI43_1:
+; CHECK-NEXT:    .long 0x00000000 @ float 0
+    %x = call <8 x i8> @llvm.fptoui.sat.v8i8.v8f16(<8 x half> %f)
+    ret <8 x i8> %x
+}
+
+define arm_aapcs_vfpcc <8 x i13> @test_unsigned_v8f16_v8i13(<8 x half> %f) {
+; CHECK-LABEL: test_unsigned_v8f16_v8i13:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s6, .LCPI44_1
+; CHECK-NEXT:    vcvtt.f32.f16 s10, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s2
+; CHECK-NEXT:    vldr s4, .LCPI44_0
+; CHECK-NEXT:    vmaxnm.f32 s2, s2, s6
+; CHECK-NEXT:    vcvtt.f32.f16 s8, s3
+; CHECK-NEXT:    vminnm.f32 s2, s2, s4
+; CHECK-NEXT:    vcvtb.f32.f16 s12, s3
+; CHECK-NEXT:    vcvt.u32.f32 s5, s2
+; CHECK-NEXT:    vcvtt.f32.f16 s2, s0
+; CHECK-NEXT:    vmaxnm.f32 s2, s2, s6
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s6
+; CHECK-NEXT:    vminnm.f32 s2, s2, s4
+; CHECK-NEXT:    vminnm.f32 s0, s0, s4
+; CHECK-NEXT:    vcvt.u32.f32 s7, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s1
+; CHECK-NEXT:    vcvtt.f32.f16 s14, s1
+; CHECK-NEXT:    vmaxnm.f32 s2, s2, s6
+; CHECK-NEXT:    vcvt.u32.f32 s0, s0
+; CHECK-NEXT:    vmaxnm.f32 s8, s8, s6
+; CHECK-NEXT:    vmaxnm.f32 s10, s10, s6
+; CHECK-NEXT:    vmaxnm.f32 s12, s12, s6
+; CHECK-NEXT:    vmaxnm.f32 s14, s14, s6
+; CHECK-NEXT:    vminnm.f32 s2, s2, s4
+; CHECK-NEXT:    vminnm.f32 s8, s8, s4
+; CHECK-NEXT:    vminnm.f32 s10, s10, s4
+; CHECK-NEXT:    vminnm.f32 s12, s12, s4
+; CHECK-NEXT:    vminnm.f32 s14, s14, s4
+; CHECK-NEXT:    vcvt.u32.f32 s4, s2
+; CHECK-NEXT:    vcvt.u32.f32 s14, s14
+; CHECK-NEXT:    vcvt.u32.f32 s10, s10
+; CHECK-NEXT:    vcvt.u32.f32 s12, s12
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vcvt.u32.f32 s8, s8
+; CHECK-NEXT:    vmov.16 q0[0], r0
+; CHECK-NEXT:    vmov r0, s7
+; CHECK-NEXT:    vmov.16 q0[1], r0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov.16 q0[2], r0
+; CHECK-NEXT:    vmov r0, s14
+; CHECK-NEXT:    vmov.16 q0[3], r0
+; CHECK-NEXT:    vmov r0, s5
+; CHECK-NEXT:    vmov.16 q0[4], r0
+; CHECK-NEXT:    vmov r0, s10
+; CHECK-NEXT:    vmov.16 q0[5], r0
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vmov.16 q0[6], r0
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov.16 q0[7], r0
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI44_0:
+; CHECK-NEXT:    .long 0x45fff800 @ float 8191
+; CHECK-NEXT:  .LCPI44_1:
+; CHECK-NEXT:    .long 0x00000000 @ float 0
+    %x = call <8 x i13> @llvm.fptoui.sat.v8i13.v8f16(<8 x half> %f)
+    ret <8 x i13> %x
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_unsigned_v8f16_v8i16(<8 x half> %f) {
+; CHECK-LABEL: test_unsigned_v8f16_v8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s6, .LCPI45_1
+; CHECK-NEXT:    vcvtt.f32.f16 s10, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s2
+; CHECK-NEXT:    vldr s4, .LCPI45_0
+; CHECK-NEXT:    vmaxnm.f32 s2, s2, s6
+; CHECK-NEXT:    vcvtt.f32.f16 s8, s3
+; CHECK-NEXT:    vminnm.f32 s2, s2, s4
+; CHECK-NEXT:    vcvtb.f32.f16 s12, s3
+; CHECK-NEXT:    vcvt.u32.f32 s5, s2
+; CHECK-NEXT:    vcvtt.f32.f16 s2, s0
+; CHECK-NEXT:    vmaxnm.f32 s2, s2, s6
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s6
+; CHECK-NEXT:    vminnm.f32 s2, s2, s4
+; CHECK-NEXT:    vminnm.f32 s0, s0, s4
+; CHECK-NEXT:    vcvt.u32.f32 s7, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s1
+; CHECK-NEXT:    vcvtt.f32.f16 s14, s1
+; CHECK-NEXT:    vmaxnm.f32 s2, s2, s6
+; CHECK-NEXT:    vcvt.u32.f32 s0, s0
+; CHECK-NEXT:    vmaxnm.f32 s8, s8, s6
+; CHECK-NEXT:    vmaxnm.f32 s10, s10, s6
+; CHECK-NEXT:    vmaxnm.f32 s12, s12, s6
+; CHECK-NEXT:    vmaxnm.f32 s14, s14, s6
+; CHECK-NEXT:    vminnm.f32 s2, s2, s4
+; CHECK-NEXT:    vminnm.f32 s8, s8, s4
+; CHECK-NEXT:    vminnm.f32 s10, s10, s4
+; CHECK-NEXT:    vminnm.f32 s12, s12, s4
+; CHECK-NEXT:    vminnm.f32 s14, s14, s4
+; CHECK-NEXT:    vcvt.u32.f32 s4, s2
+; CHECK-NEXT:    vcvt.u32.f32 s14, s14
+; CHECK-NEXT:    vcvt.u32.f32 s10, s10
+; CHECK-NEXT:    vcvt.u32.f32 s12, s12
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vcvt.u32.f32 s8, s8
+; CHECK-NEXT:    vmov.16 q0[0], r0
+; CHECK-NEXT:    vmov r0, s7
+; CHECK-NEXT:    vmov.16 q0[1], r0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov.16 q0[2], r0
+; CHECK-NEXT:    vmov r0, s14
+; CHECK-NEXT:    vmov.16 q0[3], r0
+; CHECK-NEXT:    vmov r0, s5
+; CHECK-NEXT:    vmov.16 q0[4], r0
+; CHECK-NEXT:    vmov r0, s10
+; CHECK-NEXT:    vmov.16 q0[5], r0
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vmov.16 q0[6], r0
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov.16 q0[7], r0
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI45_0:
+; CHECK-NEXT:    .long 0x477fff00 @ float 65535
+; CHECK-NEXT:  .LCPI45_1:
+; CHECK-NEXT:    .long 0x00000000 @ float 0
+    %x = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f16(<8 x half> %f)
+    ret <8 x i16> %x
+}
+
+define arm_aapcs_vfpcc <8 x i19> @test_unsigned_v8f16_v8i19(<8 x half> %f) {
+; CHECK-LABEL: test_unsigned_v8f16_v8i19:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    vldr s4, .LCPI46_0
+; CHECK-NEXT:    vcvtb.f32.f16 s8, s1
+; CHECK-NEXT:    vcvtt.f32.f16 s12, s1
+; CHECK-NEXT:    vcvtt.f32.f16 s1, s3
+; CHECK-NEXT:    vldr s6, .LCPI46_1
+; CHECK-NEXT:    vmaxnm.f32 s1, s1, s4
+; CHECK-NEXT:    vcvtb.f32.f16 s10, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s14, s0
+; CHECK-NEXT:    vminnm.f32 s1, s1, s6
+; CHECK-NEXT:    vcvtt.f32.f16 s0, s0
+; CHECK-NEXT:    vcvt.u32.f32 s1, s1
+; CHECK-NEXT:    vcvtt.f32.f16 s2, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s3, s3
+; CHECK-NEXT:    vmaxnm.f32 s8, s8, s4
+; CHECK-NEXT:    vmaxnm.f32 s10, s10, s4
+; CHECK-NEXT:    vmaxnm.f32 s12, s12, s4
+; CHECK-NEXT:    vmaxnm.f32 s14, s14, s4
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s4
+; CHECK-NEXT:    vmaxnm.f32 s2, s2, s4
+; CHECK-NEXT:    vmaxnm.f32 s4, s3, s4
+; CHECK-NEXT:    vminnm.f32 s4, s4, s6
+; CHECK-NEXT:    vminnm.f32 s2, s2, s6
+; CHECK-NEXT:    vcvt.u32.f32 s4, s4
+; CHECK-NEXT:    vminnm.f32 s0, s0, s6
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    vminnm.f32 s14, s14, s6
+; CHECK-NEXT:    vcvt.u32.f32 s2, s2
+; CHECK-NEXT:    vminnm.f32 s10, s10, s6
+; CHECK-NEXT:    vcvt.u32.f32 s0, s0
+; CHECK-NEXT:    vminnm.f32 s12, s12, s6
+; CHECK-NEXT:    vcvt.u32.f32 s14, s14
+; CHECK-NEXT:    vminnm.f32 s8, s8, s6
+; CHECK-NEXT:    vcvt.u32.f32 s10, s10
+; CHECK-NEXT:    vcvt.u32.f32 s12, s12
+; CHECK-NEXT:    vcvt.u32.f32 s8, s8
+; CHECK-NEXT:    vmov r12, s2
+; CHECK-NEXT:    vmov lr, s0
+; CHECK-NEXT:    lsrs r2, r1, #11
+; CHECK-NEXT:    strb r2, [r0, #18]
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    bfc r12, #19, #13
+; CHECK-NEXT:    bfc lr, #19, #13
+; CHECK-NEXT:    bfc r2, #19, #13
+; CHECK-NEXT:    lsrs r3, r2, #14
+; CHECK-NEXT:    orr.w r1, r3, r1, lsl #5
+; CHECK-NEXT:    lsr.w r3, r12, #1
+; CHECK-NEXT:    orr.w r2, r3, r2, lsl #18
+; CHECK-NEXT:    vmov r3, s14
+; CHECK-NEXT:    strh r1, [r0, #16]
+; CHECK-NEXT:    vmov r1, s10
+; CHECK-NEXT:    str r2, [r0, #12]
+; CHECK-NEXT:    bfc r3, #19, #13
+; CHECK-NEXT:    orr.w r3, r3, lr, lsl #19
+; CHECK-NEXT:    str r3, [r0]
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    bfc r1, #19, #13
+; CHECK-NEXT:    bfc r3, #19, #13
+; CHECK-NEXT:    lsrs r2, r3, #7
+; CHECK-NEXT:    orr.w r1, r2, r1, lsl #12
+; CHECK-NEXT:    orr.w r1, r1, r12, lsl #31
+; CHECK-NEXT:    str r1, [r0, #8]
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    lsr.w r2, lr, #13
+; CHECK-NEXT:    bfc r1, #19, #13
+; CHECK-NEXT:    orr.w r1, r2, r1, lsl #6
+; CHECK-NEXT:    orr.w r1, r1, r3, lsl #25
+; CHECK-NEXT:    str r1, [r0, #4]
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI46_0:
+; CHECK-NEXT:    .long 0x00000000 @ float 0
+; CHECK-NEXT:  .LCPI46_1:
+; CHECK-NEXT:    .long 0x48ffffe0 @ float 524287
+    %x = call <8 x i19> @llvm.fptoui.sat.v8i19.v8f16(<8 x half> %f)
+    ret <8 x i19> %x
+}
+
+define arm_aapcs_vfpcc <8 x i32> @test_unsigned_v8f16_v8i32_duplicate(<8 x half> %f) {
+; CHECK-LABEL: test_unsigned_v8f16_v8i32_duplicate:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    .vsave {d8}
+; CHECK-NEXT:    vpush {d8}
+; CHECK-NEXT:    vcvtt.f32.f16 s11, s3
+; CHECK-NEXT:    vcvtb.f32.f16 s3, s3
+; CHECK-NEXT:    vcvt.u32.f32 s15, s3
+; CHECK-NEXT:    vcvtt.f32.f16 s7, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s2
+; CHECK-NEXT:    vcvt.u32.f32 s13, s11
+; CHECK-NEXT:    vcvt.u32.f32 s16, s2
+; CHECK-NEXT:    vldr s4, .LCPI47_0
+; CHECK-NEXT:    vcvt.u32.f32 s9, s7
+; CHECK-NEXT:    vcvtt.f32.f16 s10, s1
+; CHECK-NEXT:    vcmp.f32 s3, #0
+; CHECK-NEXT:    vcvtb.f32.f16 s1, s1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, s4
+; CHECK-NEXT:    vcvt.u32.f32 s5, s1
+; CHECK-NEXT:    vcvtt.f32.f16 s6, s0
+; CHECK-NEXT:    vmov r12, s15
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r12, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r12, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov lr, s16
+; CHECK-NEXT:    vcmp.f32 s2, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w lr, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcvt.u32.f32 s14, s0
+; CHECK-NEXT:    vcmp.f32 s11, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w lr, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r2, s13
+; CHECK-NEXT:    vcmp.f32 s11, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcvt.u32.f32 s12, s10
+; CHECK-NEXT:    vcmp.f32 s7, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vcvt.u32.f32 s8, s6
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r3, s9
+; CHECK-NEXT:    vcmp.f32 s7, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s1, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r3, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r0, s5
+; CHECK-NEXT:    vcmp.f32 s1, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r1, s14
+; CHECK-NEXT:    vcmp.f32 s0, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s10, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r4, s12
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    vcmp.f32 s10, s4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r4, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vmov r5, s8
+; CHECK-NEXT:    vcmp.f32 s6, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r4, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s6, s4
+; CHECK-NEXT:    vmov q1[2], q1[0], lr, r12
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r5, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r5, #-1
+; CHECK-NEXT:    vmov q0[3], q0[1], r5, r4
+; CHECK-NEXT:    vmov q1[3], q1[1], r3, r2
+; CHECK-NEXT:    vpop {d8}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI47_0:
+; CHECK-NEXT:    .long 0x4f7fffff @ float 4.29496704E+9
+    %x = call <8 x i32> @llvm.fptoui.sat.v8i32.v8f16(<8 x half> %f)
+    ret <8 x i32> %x
+}
+
+define arm_aapcs_vfpcc <8 x i50> @test_unsigned_v8f16_v8i50(<8 x half> %f) {
+; CHECK-LABEL: test_unsigned_v8f16_v8i50:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14}
+; CHECK-NEXT:    .pad #8
+; CHECK-NEXT:    sub sp, #8
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    mov r10, r0
+; CHECK-NEXT:    vcvtb.f32.f16 s24, s18
+; CHECK-NEXT:    vmov r0, s24
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vcvtt.f32.f16 s28, s19
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    vmov r0, s28
+; CHECK-NEXT:    vcvtb.f32.f16 s22, s16
+; CHECK-NEXT:    vcvtt.f32.f16 s26, s17
+; CHECK-NEXT:    vcmp.f32 s24, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    vmov r5, s22
+; CHECK-NEXT:    vldr s20, .LCPI48_0
+; CHECK-NEXT:    vmov r8, s26
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r6, #0
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vcmp.f32 s28, #0
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s20
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r4, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    vcmp.f32 s24, s20
+; CHECK-NEXT:    itt gt
+; CHECK-NEXT:    movwgt r4, #65535
+; CHECK-NEXT:    movtgt r4, #3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r6, #-1
+; CHECK-NEXT:    str.w r6, [r10, #25]
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vcmp.f32 s22, #0
+; CHECK-NEXT:    mov r11, r1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s22, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcmp.f32 s28, #0
+; CHECK-NEXT:    str.w r0, [r10]
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r9, #0
+; CHECK-NEXT:    vcmp.f32 s28, s20
+; CHECK-NEXT:    lsls r0, r4, #22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r9, #-1
+; CHECK-NEXT:    orr.w r5, r0, r9, lsr #10
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    str.w r9, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vcmp.f32 s26, #0
+; CHECK-NEXT:    mov r6, r1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r6, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, #0
+; CHECK-NEXT:    mov r1, r0
+; CHECK-NEXT:    itt gt
+; CHECK-NEXT:    movwgt r6, #65535
+; CHECK-NEXT:    movtgt r6, #3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str.w r5, [r10, #45]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vcmp.f32 s26, s20
+; CHECK-NEXT:    lsls r0, r6, #22
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    orr.w r0, r0, r1, lsr #10
+; CHECK-NEXT:    vcvtt.f32.f16 s18, s18
+; CHECK-NEXT:    str r1, [sp] @ 4-byte Spill
+; CHECK-NEXT:    lsrs r1, r4, #10
+; CHECK-NEXT:    str.w r0, [r10, #20]
+; CHECK-NEXT:    vmov r0, s18
+; CHECK-NEXT:    strb.w r1, [r10, #49]
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r9, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r9, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r7, #0
+; CHECK-NEXT:    vcmp.f32 s24, s20
+; CHECK-NEXT:    vcvtt.f32.f16 s16, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    itt gt
+; CHECK-NEXT:    movwgt r7, #65535
+; CHECK-NEXT:    movtgt r7, #3
+; CHECK-NEXT:    bfc r7, #18, #14
+; CHECK-NEXT:    mov r5, r1
+; CHECK-NEXT:    orr.w r0, r7, r9, lsl #18
+; CHECK-NEXT:    str.w r0, [r10, #29]
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    lsrs r1, r6, #10
+; CHECK-NEXT:    strb.w r1, [r10, #24]
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r8, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s22, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r8, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s22, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r11, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    itt gt
+; CHECK-NEXT:    movwgt r11, #65535
+; CHECK-NEXT:    movtgt r11, #3
+; CHECK-NEXT:    vcvtb.f32.f16 s22, s19
+; CHECK-NEXT:    bfc r11, #18, #14
+; CHECK-NEXT:    mov r6, r1
+; CHECK-NEXT:    orr.w r0, r11, r8, lsl #18
+; CHECK-NEXT:    str.w r0, [r10, #4]
+; CHECK-NEXT:    vmov r0, s22
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vcmp.f32 s22, #0
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s22, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r7, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r7, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r5, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    itt gt
+; CHECK-NEXT:    movwgt r5, #65535
+; CHECK-NEXT:    movtgt r5, #3
+; CHECK-NEXT:    vcvtb.f32.f16 s18, s17
+; CHECK-NEXT:    bfc r5, #18, #14
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    lsrs r0, r5, #14
+; CHECK-NEXT:    orr.w r0, r0, r7, lsl #4
+; CHECK-NEXT:    str.w r0, [r10, #37]
+; CHECK-NEXT:    lsr.w r0, r9, #14
+; CHECK-NEXT:    orr.w r0, r0, r5, lsl #18
+; CHECK-NEXT:    str.w r0, [r10, #33]
+; CHECK-NEXT:    vmov r0, s18
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r6, #0
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    itt gt
+; CHECK-NEXT:    movwgt r6, #65535
+; CHECK-NEXT:    movtgt r6, #3
+; CHECK-NEXT:    bfc r6, #18, #14
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    lsr.w r2, r6, #14
+; CHECK-NEXT:    orr.w r2, r2, r0, lsl #4
+; CHECK-NEXT:    str.w r2, [r10, #12]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s22, #0
+; CHECK-NEXT:    itt gt
+; CHECK-NEXT:    movwgt r1, #65535
+; CHECK-NEXT:    movtgt r1, #3
+; CHECK-NEXT:    lsr.w r2, r8, #14
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r4, #0
+; CHECK-NEXT:    vcmp.f32 s22, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    itt gt
+; CHECK-NEXT:    movwgt r4, #65535
+; CHECK-NEXT:    movtgt r4, #3
+; CHECK-NEXT:    orr.w r2, r2, r6, lsl #18
+; CHECK-NEXT:    str.w r2, [r10, #8]
+; CHECK-NEXT:    bfc r4, #18, #14
+; CHECK-NEXT:    ldr r3, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    lsrs r2, r7, #28
+; CHECK-NEXT:    bfc r1, #18, #14
+; CHECK-NEXT:    orr.w r2, r2, r4, lsl #4
+; CHECK-NEXT:    lsrs r0, r0, #28
+; CHECK-NEXT:    orr.w r2, r2, r3, lsl #22
+; CHECK-NEXT:    str.w r2, [r10, #41]
+; CHECK-NEXT:    orr.w r0, r0, r1, lsl #4
+; CHECK-NEXT:    ldr r1, [sp] @ 4-byte Reload
+; CHECK-NEXT:    orr.w r0, r0, r1, lsl #22
+; CHECK-NEXT:    str.w r0, [r10, #16]
+; CHECK-NEXT:    add sp, #8
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI48_0:
+; CHECK-NEXT:    .long 0x587fffff @ float 1.12589984E+15
+    %x = call <8 x i50> @llvm.fptoui.sat.v8f16.v8i50(<8 x half> %f)
+    ret <8 x i50> %x
+}
+
+define arm_aapcs_vfpcc <8 x i64> @test_unsigned_v8f16_v8i64(<8 x half> %f) {
+; CHECK-LABEL: test_unsigned_v8f16_v8i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vcvtt.f32.f16 s20, s19
+; CHECK-NEXT:    vmov r0, s20
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vcvtb.f32.f16 s22, s19
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    vmov r0, s22
+; CHECK-NEXT:    vldr s28, .LCPI49_0
+; CHECK-NEXT:    vcmp.f32 s20, #0
+; CHECK-NEXT:    vcvtt.f32.f16 s24, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcvtb.f32.f16 s16, s16
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r9, #0
+; CHECK-NEXT:    vcmp.f32 s20, s28
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov r8, r1
+; CHECK-NEXT:    vmov r5, s24
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r9, #-1
+; CHECK-NEXT:    vmov r4, s16
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vcmp.f32 s22, #0
+; CHECK-NEXT:    mov r11, r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s22, s28
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r11, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s20, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r11, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s20, s28
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r8, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov r10, r1
+; CHECK-NEXT:    vcmp.f32 s22, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r8, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r10, #0
+; CHECK-NEXT:    vcmp.f32 s22, s28
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r10, #-1
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    vcmp.f32 s24, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r6, #0
+; CHECK-NEXT:    vcmp.f32 s24, s28
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov r5, r1
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r6, #-1
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vcvtt.f32.f16 s30, s17
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    vmov r1, s30
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s16, s28
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vmov q5[2], q5[0], r0, r6
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vcmp.f32 s30, #0
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s28
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r6, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r6, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, s28
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r5, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r5, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s28
+; CHECK-NEXT:    vcvtb.f32.f16 s16, s17
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r7, #0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r7, #-1
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    vmov q5[3], q5[1], r7, r5
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vcvtt.f32.f16 s17, s18
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    vmov r1, s17
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s16, s28
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vmov q6[2], q6[0], r0, r6
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vcmp.f32 s17, #0
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s28
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r6, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r6, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s28
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r4, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r4, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s28
+; CHECK-NEXT:    vcvtb.f32.f16 s16, s18
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r7, #0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r7, #-1
+; CHECK-NEXT:    mov r5, r1
+; CHECK-NEXT:    vmov q6[3], q6[1], r7, r4
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    vmov q3[2], q3[0], r11, r9
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s28
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s17, s28
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r5, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r5, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vcmp.f32 s16, s28
+; CHECK-NEXT:    vmov q2[2], q2[0], r0, r6
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmov q2[3], q2[1], r1, r5
+; CHECK-NEXT:    vmov q3[3], q3[1], r10, r8
+; CHECK-NEXT:    vmov q0, q5
+; CHECK-NEXT:    vmov q1, q6
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI49_0:
+; CHECK-NEXT:    .long 0x5f7fffff @ float 1.8446743E+19
+    %x = call <8 x i64> @llvm.fptoui.sat.v8f16.v8i64(<8 x half> %f)
+    ret <8 x i64> %x
+}
+
+define arm_aapcs_vfpcc <8 x i100> @test_unsigned_v8f16_v8i100(<8 x half> %f) {
+; CHECK-LABEL: test_unsigned_v8f16_v8i100:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    vcvtb.f32.f16 s28, s19
+; CHECK-NEXT:    vmov r0, s28
+; CHECK-NEXT:    bl __fixunssfti
+; CHECK-NEXT:    vcvtb.f32.f16 s26, s18
+; CHECK-NEXT:    mov r5, r3
+; CHECK-NEXT:    vmov r3, s26
+; CHECK-NEXT:    vldr s20, .LCPI50_1
+; CHECK-NEXT:    vcmp.f32 s28, #0
+; CHECK-NEXT:    vcvtt.f32.f16 s30, s19
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s20
+; CHECK-NEXT:    str.w r2, [r4, #83]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str.w r1, [r4, #79]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s28, s20
+; CHECK-NEXT:    vcvtb.f32.f16 s22, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    vcvtb.f32.f16 s24, s17
+; CHECK-NEXT:    str.w r0, [r4, #75]
+; CHECK-NEXT:    vmov r9, s30
+; CHECK-NEXT:    vmov r8, s22
+; CHECK-NEXT:    vmov r6, s24
+; CHECK-NEXT:    mov r0, r3
+; CHECK-NEXT:    bl __fixunssfti
+; CHECK-NEXT:    vcmp.f32 s26, #0
+; CHECK-NEXT:    mov r7, r3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s20
+; CHECK-NEXT:    str.w r2, [r4, #58]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str.w r1, [r4, #54]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s26, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    str.w r0, [r4, #50]
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    bl __fixunssfti
+; CHECK-NEXT:    vcmp.f32 s24, #0
+; CHECK-NEXT:    mov r10, r3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, s20
+; CHECK-NEXT:    str.w r2, [r4, #33]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str.w r1, [r4, #29]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s24, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    str.w r0, [r4, #25]
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    bl __fixunssfti
+; CHECK-NEXT:    vcmp.f32 s22, #0
+; CHECK-NEXT:    mov r8, r3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s22, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s22, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s22, s20
+; CHECK-NEXT:    str r2, [r4, #8]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s22, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #4]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s22, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    str r0, [r4]
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    bl __fixunssfti
+; CHECK-NEXT:    vcmp.f32 s30, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    lsr.w r6, r1, #28
+; CHECK-NEXT:    vcmp.f32 s30, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    orr.w r6, r6, r2, lsl #4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str.w r6, [r4, #95]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s30, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    lsrs r6, r0, #28
+; CHECK-NEXT:    orr.w r1, r6, r1, lsl #4
+; CHECK-NEXT:    vcmp.f32 s30, #0
+; CHECK-NEXT:    str.w r1, [r4, #91]
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    lsrs r1, r2, #28
+; CHECK-NEXT:    vcvtt.f32.f16 s30, s18
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r3, #15
+; CHECK-NEXT:    orr.w r2, r1, r3, lsl #4
+; CHECK-NEXT:    vmov r1, s30
+; CHECK-NEXT:    strb.w r2, [r4, #99]
+; CHECK-NEXT:    vcmp.f32 s28, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r5, #0
+; CHECK-NEXT:    vcmp.f32 s28, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r5, #15
+; CHECK-NEXT:    and r2, r5, #15
+; CHECK-NEXT:    orr.w r0, r2, r0, lsl #4
+; CHECK-NEXT:    str.w r0, [r4, #87]
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bl __fixunssfti
+; CHECK-NEXT:    vcmp.f32 s30, #0
+; CHECK-NEXT:    vcvtt.f32.f16 s18, s17
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    lsr.w r6, r1, #28
+; CHECK-NEXT:    vcmp.f32 s30, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    orr.w r6, r6, r2, lsl #4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str.w r6, [r4, #70]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s30, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    lsrs r6, r0, #28
+; CHECK-NEXT:    orr.w r1, r6, r1, lsl #4
+; CHECK-NEXT:    str.w r1, [r4, #66]
+; CHECK-NEXT:    vmov r1, s18
+; CHECK-NEXT:    vcmp.f32 s30, #0
+; CHECK-NEXT:    lsrs r2, r2, #28
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r3, #15
+; CHECK-NEXT:    orr.w r2, r2, r3, lsl #4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    strb.w r2, [r4, #74]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r7, #0
+; CHECK-NEXT:    vcmp.f32 s26, s20
+; CHECK-NEXT:    vcvtt.f32.f16 s16, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r7, #15
+; CHECK-NEXT:    and r2, r7, #15
+; CHECK-NEXT:    orr.w r0, r2, r0, lsl #4
+; CHECK-NEXT:    str.w r0, [r4, #62]
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bl __fixunssfti
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    lsr.w r7, r1, #28
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    orr.w r7, r7, r2, lsl #4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str.w r7, [r4, #45]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    lsrs r7, r0, #28
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    orr.w r7, r7, r1, lsl #4
+; CHECK-NEXT:    vmov r1, s16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    str.w r7, [r4, #41]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    lsr.w r2, r2, #28
+; CHECK-NEXT:    vcmp.f32 s24, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r3, #15
+; CHECK-NEXT:    orr.w r2, r2, r3, lsl #4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    strb.w r2, [r4, #49]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r10, #0
+; CHECK-NEXT:    vcmp.f32 s24, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r10, #15
+; CHECK-NEXT:    and r2, r10, #15
+; CHECK-NEXT:    orr.w r0, r2, r0, lsl #4
+; CHECK-NEXT:    str.w r0, [r4, #37]
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bl __fixunssfti
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    b.w .LBB50_2
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI50_1:
+; CHECK-NEXT:    .long 0x717fffff @ float 1.26765052E+30
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  .LBB50_2:
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    lsrs r7, r1, #28
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    orr.w r7, r7, r2, lsl #4
+; CHECK-NEXT:    str r7, [r4, #20]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    lsrs r7, r0, #28
+; CHECK-NEXT:    orr.w r1, r7, r1, lsl #4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    str r1, [r4, #16]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    lsr.w r1, r2, #28
+; CHECK-NEXT:    vcmp.f32 s22, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt r3, #15
+; CHECK-NEXT:    orr.w r1, r1, r3, lsl #4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    strb r1, [r4, #24]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt.w r8, #0
+; CHECK-NEXT:    vcmp.f32 s22, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r8, #15
+; CHECK-NEXT:    and r1, r8, #15
+; CHECK-NEXT:    orr.w r0, r1, r0, lsl #4
+; CHECK-NEXT:    str r0, [r4, #12]
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, pc}
+; CHECK-NEXT:  @ %bb.3:
+    %x = call <8 x i100> @llvm.fptoui.sat.v8f16.v8i100(<8 x half> %f)
+    ret <8 x i100> %x
+}
+
+define arm_aapcs_vfpcc <8 x i128> @test_unsigned_v8f16_v8i128(<8 x half> %f) {
+; CHECK-LABEL: test_unsigned_v8f16_v8i128:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    vcvtt.f32.f16 s26, s19
+; CHECK-NEXT:    vcvtb.f32.f16 s22, s16
+; CHECK-NEXT:    vmov r0, s26
+; CHECK-NEXT:    vcvtt.f32.f16 s16, s16
+; CHECK-NEXT:    vcvtb.f32.f16 s24, s17
+; CHECK-NEXT:    vcvtb.f32.f16 s30, s19
+; CHECK-NEXT:    vldr s20, .LCPI51_0
+; CHECK-NEXT:    vmov r8, s22
+; CHECK-NEXT:    vmov r9, s16
+; CHECK-NEXT:    vcvtt.f32.f16 s28, s18
+; CHECK-NEXT:    vmov r7, s24
+; CHECK-NEXT:    vmov r6, s30
+; CHECK-NEXT:    bl __fixunssfti
+; CHECK-NEXT:    vcmp.f32 s26, #0
+; CHECK-NEXT:    vcvtb.f32.f16 s18, s18
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r3, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s20
+; CHECK-NEXT:    str r3, [r4, #124]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s20
+; CHECK-NEXT:    str r2, [r4, #120]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #116]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s26, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    str r0, [r4, #112]
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    vmov r5, s28
+; CHECK-NEXT:    bl __fixunssfti
+; CHECK-NEXT:    vcmp.f32 s30, #0
+; CHECK-NEXT:    vcvtt.f32.f16 s26, s17
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r3, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s20
+; CHECK-NEXT:    str r3, [r4, #108]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, s20
+; CHECK-NEXT:    str r2, [r4, #104]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s30, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #100]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s30, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    str r0, [r4, #96]
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    vmov r6, s18
+; CHECK-NEXT:    bl __fixunssfti
+; CHECK-NEXT:    vcmp.f32 s28, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r3, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s20
+; CHECK-NEXT:    str r3, [r4, #92]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, s20
+; CHECK-NEXT:    str r2, [r4, #88]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s28, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #84]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s28, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    str r0, [r4, #80]
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    vmov r5, s26
+; CHECK-NEXT:    bl __fixunssfti
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r3, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    str r3, [r4, #76]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    str r2, [r4, #72]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s18, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #68]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s18, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    str r0, [r4, #64]
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    bl __fixunssfti
+; CHECK-NEXT:    vcmp.f32 s26, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r3, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s20
+; CHECK-NEXT:    str r3, [r4, #60]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, s20
+; CHECK-NEXT:    str r2, [r4, #56]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s26, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #52]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s26, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    str r0, [r4, #48]
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    bl __fixunssfti
+; CHECK-NEXT:    vcmp.f32 s24, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r3, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, s20
+; CHECK-NEXT:    str r3, [r4, #44]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, s20
+; CHECK-NEXT:    str r2, [r4, #40]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s24, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #36]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s24, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    str r0, [r4, #32]
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    bl __fixunssfti
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r3, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    str r3, [r4, #28]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    str r2, [r4, #24]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s16, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #20]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s16, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    str r0, [r4, #16]
+; CHECK-NEXT:    mov r0, r8
+; CHECK-NEXT:    bl __fixunssfti
+; CHECK-NEXT:    vcmp.f32 s22, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s22, s20
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s22, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r3, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s22, s20
+; CHECK-NEXT:    str r3, [r4, #12]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s22, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r2, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s22, s20
+; CHECK-NEXT:    str r2, [r4, #8]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s22, #0
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r1, #-1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    str r1, [r4, #4]
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r0, #0
+; CHECK-NEXT:    vcmp.f32 s22, s20
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it gt
+; CHECK-NEXT:    movgt.w r0, #-1
+; CHECK-NEXT:    str r0, [r4]
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI51_0:
+; CHECK-NEXT:    .long 0x7f7fffff @ float 3.40282347E+38
+    %x = call <8 x i128> @llvm.fptoui.sat.v8f16.v8i128(<8 x half> %f)
+    ret <8 x i128> %x
+}
+
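For context, the llvm.fptoui.sat.* intrinsics exercised above convert a
floating-point value to an unsigned integer with saturating semantics:
the input is rounded toward zero, out-of-range results are clamped to the
destination type's range, and NaN maps to 0, so the conversion never wraps
and never yields poison. A minimal scalar sketch (illustrative only, not
part of this commit):

declare i32 @llvm.fptoui.sat.i32.f32(float)

define i32 @example(float %f) {
  ; Negative inputs and NaN produce 0; inputs at or above 2^32
  ; produce 4294967295 (the unsigned i32 maximum).
  %r = call i32 @llvm.fptoui.sat.i32.f32(float %f)
  ret i32 %r
}

There is no single instruction for the wide results tested here, so the
generated code calls the __aeabi_f2ulz / __fixunssfti libcalls and then
clamps each result word with vcmp.f32/vmrs followed by predicated
movlt #0 / movgt #-1 (or movgt #15 for the top four bits of the i100
elements), which is the pattern repeated throughout the checks above.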