[llvm] 6a10bc7 - [RISCV] Add i8/i16 fptosi/fptoui and fptosi_sat/fptoui_sat tests. NFC

Craig Topper via llvm-commits <llvm-commits@lists.llvm.org>
Sat Jan 8 14:01:45 PST 2022


Author: Craig Topper
Date: 2022-01-08T14:01:31-08:00
New Revision: 6a10bc705681a5dc56e57afc82fc06575df50cef

URL: https://github.com/llvm/llvm-project/commit/6a10bc705681a5dc56e57afc82fc06575df50cef
DIFF: https://github.com/llvm/llvm-project/commit/6a10bc705681a5dc56e57afc82fc06575df50cef.diff

LOG: [RISCV] Add i8/i16 fptosi/fptoui and fptosi_sat/fptoui_sat tests. NFC

Use signext/zeroext return attributes to expose the unnecessary ands or
shifts in the saturating tests.
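
As background for the checks below, here is a minimal sketch of the pattern
these tests exercise (the function name @sat_example is illustrative, not
from the patch; the intrinsic is the same one declared in the diff): the
zeroext return attribute requires the returned i16 to be zero-extended, so
any and/shift the backend still emits after the saturating clamp shows up
in the generated assembly and hence in the CHECK lines.

  declare i16 @llvm.fptoui.sat.i16.f32(float)

  define zeroext i16 @sat_example(float %a) nounwind {
    %0 = tail call i16 @llvm.fptoui.sat.i16.f32(float %a)
    ret i16 %0
  }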

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/double-convert.ll
    llvm/test/CodeGen/RISCV/float-convert.ll
    llvm/test/CodeGen/RISCV/half-convert.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll
index 0494107430b7d..8cbab1e3a639f 100644
--- a/llvm/test/CodeGen/RISCV/double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert.ll
@@ -1552,3 +1552,702 @@ define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, double* %1) nounwind
   store double %4, double* %1, align 8
   ret i32 %3
 }
+
+define signext i16 @fcvt_w_s_i16(double %a) nounwind {
+; RV32IFD-LABEL: fcvt_w_s_i16:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a0, 8(sp)
+; RV32IFD-NEXT:    sw a1, 12(sp)
+; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_w_s_i16:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcvt_w_s_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __fixdfsi@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcvt_w_s_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __fixdfdi@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = fptosi double %a to i16
+  ret i16 %1
+}
+
+define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
+; RV32IFD-LABEL: fcvt_w_s_sat_i16:
+; RV32IFD:       # %bb.0: # %start
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a0, 8(sp)
+; RV32IFD-NEXT:    sw a1, 12(sp)
+; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    feq.d a0, ft0, ft0
+; RV32IFD-NEXT:    bnez a0, .LBB26_2
+; RV32IFD-NEXT:  # %bb.1: # %start
+; RV32IFD-NEXT:    li a0, 0
+; RV32IFD-NEXT:    j .LBB26_3
+; RV32IFD-NEXT:  .LBB26_2:
+; RV32IFD-NEXT:    lui a0, %hi(.LCPI26_0)
+; RV32IFD-NEXT:    fld ft1, %lo(.LCPI26_0)(a0)
+; RV32IFD-NEXT:    lui a0, %hi(.LCPI26_1)
+; RV32IFD-NEXT:    fld ft2, %lo(.LCPI26_1)(a0)
+; RV32IFD-NEXT:    fmax.d ft0, ft0, ft1
+; RV32IFD-NEXT:    fmin.d ft0, ft0, ft2
+; RV32IFD-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32IFD-NEXT:  .LBB26_3: # %start
+; RV32IFD-NEXT:    slli a0, a0, 16
+; RV32IFD-NEXT:    srai a0, a0, 16
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_w_s_sat_i16:
+; RV64IFD:       # %bb.0: # %start
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    feq.d a0, ft0, ft0
+; RV64IFD-NEXT:    bnez a0, .LBB26_2
+; RV64IFD-NEXT:  # %bb.1: # %start
+; RV64IFD-NEXT:    li a0, 0
+; RV64IFD-NEXT:    j .LBB26_3
+; RV64IFD-NEXT:  .LBB26_2:
+; RV64IFD-NEXT:    lui a0, %hi(.LCPI26_0)
+; RV64IFD-NEXT:    fld ft1, %lo(.LCPI26_0)(a0)
+; RV64IFD-NEXT:    lui a0, %hi(.LCPI26_1)
+; RV64IFD-NEXT:    fld ft2, %lo(.LCPI26_1)(a0)
+; RV64IFD-NEXT:    fmax.d ft0, ft0, ft1
+; RV64IFD-NEXT:    fmin.d ft0, ft0, ft2
+; RV64IFD-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64IFD-NEXT:  .LBB26_3: # %start
+; RV64IFD-NEXT:    slli a0, a0, 48
+; RV64IFD-NEXT:    srai a0, a0, 48
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcvt_w_s_sat_i16:
+; RV32I:       # %bb.0: # %start
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s1, a0
+; RV32I-NEXT:    lui a3, 790016
+; RV32I-NEXT:    li s0, 0
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    call __gedf2@plt
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    mv a0, s1
+; RV32I-NEXT:    mv a1, s2
+; RV32I-NEXT:    call __fixdfsi@plt
+; RV32I-NEXT:    lui s4, 1048568
+; RV32I-NEXT:    blt s3, s0, .LBB26_2
+; RV32I-NEXT:  # %bb.1: # %start
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:  .LBB26_2: # %start
+; RV32I-NEXT:    lui a0, 265728
+; RV32I-NEXT:    addi a3, a0, -64
+; RV32I-NEXT:    mv a0, s1
+; RV32I-NEXT:    mv a1, s2
+; RV32I-NEXT:    mv a2, s0
+; RV32I-NEXT:    call __gtdf2@plt
+; RV32I-NEXT:    bge s0, a0, .LBB26_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    lui a0, 8
+; RV32I-NEXT:    addi s4, a0, -1
+; RV32I-NEXT:  .LBB26_4: # %start
+; RV32I-NEXT:    mv a0, s1
+; RV32I-NEXT:    mv a1, s2
+; RV32I-NEXT:    mv a2, s1
+; RV32I-NEXT:    mv a3, s2
+; RV32I-NEXT:    call __unorddf2@plt
+; RV32I-NEXT:    bne a0, s0, .LBB26_6
+; RV32I-NEXT:  # %bb.5: # %start
+; RV32I-NEXT:    mv s0, s4
+; RV32I-NEXT:  .LBB26_6: # %start
+; RV32I-NEXT:    slli a0, s0, 16
+; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcvt_w_s_sat_i16:
+; RV64I:       # %bb.0: # %start
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a0
+; RV64I-NEXT:    li a0, -505
+; RV64I-NEXT:    slli a1, a0, 53
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __gedf2@plt
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __fixdfdi@plt
+; RV64I-NEXT:    li s1, 0
+; RV64I-NEXT:    lui s3, 1048568
+; RV64I-NEXT:    bltz s2, .LBB26_2
+; RV64I-NEXT:  # %bb.1: # %start
+; RV64I-NEXT:    mv s3, a0
+; RV64I-NEXT:  .LBB26_2: # %start
+; RV64I-NEXT:    lui a0, 4152
+; RV64I-NEXT:    addiw a0, a0, -1
+; RV64I-NEXT:    slli a1, a0, 38
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __gtdf2@plt
+; RV64I-NEXT:    bge s1, a0, .LBB26_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    lui a0, 8
+; RV64I-NEXT:    addiw s3, a0, -1
+; RV64I-NEXT:  .LBB26_4: # %start
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    mv a1, s0
+; RV64I-NEXT:    call __unorddf2@plt
+; RV64I-NEXT:    bne a0, s1, .LBB26_6
+; RV64I-NEXT:  # %bb.5: # %start
+; RV64I-NEXT:    mv s1, s3
+; RV64I-NEXT:  .LBB26_6: # %start
+; RV64I-NEXT:    slli a0, s1, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
+start:
+  %0 = tail call i16 @llvm.fptosi.sat.i16.f64(double %a)
+  ret i16 %0
+}
+declare i16 @llvm.fptosi.sat.i16.f64(double)
+
+define zeroext i16 @fcvt_wu_s_i16(double %a) nounwind {
+; RV32IFD-LABEL: fcvt_wu_s_i16:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a0, 8(sp)
+; RV32IFD-NEXT:    sw a1, 12(sp)
+; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_wu_s_i16:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcvt_wu_s_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __fixunsdfsi@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcvt_wu_s_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __fixunsdfdi@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = fptoui double %a to i16
+  ret i16 %1
+}
+
+define zeroext i16 @fcvt_wu_s_sat_i16(double %a) nounwind {
+; RV32IFD-LABEL: fcvt_wu_s_sat_i16:
+; RV32IFD:       # %bb.0: # %start
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a0, 8(sp)
+; RV32IFD-NEXT:    sw a1, 12(sp)
+; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    lui a0, %hi(.LCPI28_0)
+; RV32IFD-NEXT:    fld ft1, %lo(.LCPI28_0)(a0)
+; RV32IFD-NEXT:    fcvt.d.w ft2, zero
+; RV32IFD-NEXT:    fmax.d ft0, ft0, ft2
+; RV32IFD-NEXT:    fmin.d ft0, ft0, ft1
+; RV32IFD-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV32IFD-NEXT:    lui a1, 16
+; RV32IFD-NEXT:    addi a1, a1, -1
+; RV32IFD-NEXT:    and a0, a0, a1
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_wu_s_sat_i16:
+; RV64IFD:       # %bb.0: # %start
+; RV64IFD-NEXT:    lui a1, %hi(.LCPI28_0)
+; RV64IFD-NEXT:    fld ft0, %lo(.LCPI28_0)(a1)
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fmv.d.x ft2, zero
+; RV64IFD-NEXT:    fmax.d ft1, ft1, ft2
+; RV64IFD-NEXT:    fmin.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64IFD-NEXT:    lui a1, 16
+; RV64IFD-NEXT:    addiw a1, a1, -1
+; RV64IFD-NEXT:    and a0, a0, a1
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcvt_wu_s_sat_i16:
+; RV32I:       # %bb.0: # %start
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    mv s1, a1
+; RV32I-NEXT:    mv s0, a0
+; RV32I-NEXT:    lui a0, 265984
+; RV32I-NEXT:    addi a3, a0, -32
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    call __gtdf2@plt
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    mv a1, s1
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    call __gedf2@plt
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    mv a1, s1
+; RV32I-NEXT:    call __fixunsdfsi@plt
+; RV32I-NEXT:    li a1, 0
+; RV32I-NEXT:    bltz s3, .LBB28_2
+; RV32I-NEXT:  # %bb.1: # %start
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:  .LBB28_2: # %start
+; RV32I-NEXT:    lui a0, 16
+; RV32I-NEXT:    addi a0, a0, -1
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bgtz s2, .LBB28_4
+; RV32I-NEXT:  # %bb.3: # %start
+; RV32I-NEXT:    mv a2, a1
+; RV32I-NEXT:  .LBB28_4: # %start
+; RV32I-NEXT:    and a0, a2, a0
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcvt_wu_s_sat_i16:
+; RV64I:       # %bb.0: # %start
+; RV64I-NEXT:    addi sp, sp, -32
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a0
+; RV64I-NEXT:    li a1, 0
+; RV64I-NEXT:    call __gedf2@plt
+; RV64I-NEXT:    mv s1, a0
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __fixunsdfdi@plt
+; RV64I-NEXT:    li s2, 0
+; RV64I-NEXT:    bltz s1, .LBB28_2
+; RV64I-NEXT:  # %bb.1: # %start
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:  .LBB28_2: # %start
+; RV64I-NEXT:    lui a0, 8312
+; RV64I-NEXT:    addiw a0, a0, -1
+; RV64I-NEXT:    slli a1, a0, 37
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __gtdf2@plt
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    mv a2, a1
+; RV64I-NEXT:    bgtz a0, .LBB28_4
+; RV64I-NEXT:  # %bb.3: # %start
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB28_4: # %start
+; RV64I-NEXT:    and a0, a2, a1
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    ret
+start:
+  %0 = tail call i16 @llvm.fptoui.sat.i16.f64(double %a)
+  ret i16 %0
+}
+declare i16 @llvm.fptoui.sat.i16.f64(double)
+
+define signext i8 @fcvt_w_s_i8(double %a) nounwind {
+; RV32IFD-LABEL: fcvt_w_s_i8:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a0, 8(sp)
+; RV32IFD-NEXT:    sw a1, 12(sp)
+; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_w_s_i8:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcvt_w_s_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __fixdfsi@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcvt_w_s_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __fixdfdi@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = fptosi double %a to i8
+  ret i8 %1
+}
+
+define signext i8 @fcvt_w_s_sat_i8(double %a) nounwind {
+; RV32IFD-LABEL: fcvt_w_s_sat_i8:
+; RV32IFD:       # %bb.0: # %start
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a0, 8(sp)
+; RV32IFD-NEXT:    sw a1, 12(sp)
+; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    feq.d a0, ft0, ft0
+; RV32IFD-NEXT:    bnez a0, .LBB30_2
+; RV32IFD-NEXT:  # %bb.1: # %start
+; RV32IFD-NEXT:    li a0, 0
+; RV32IFD-NEXT:    j .LBB30_3
+; RV32IFD-NEXT:  .LBB30_2:
+; RV32IFD-NEXT:    lui a0, %hi(.LCPI30_0)
+; RV32IFD-NEXT:    fld ft1, %lo(.LCPI30_0)(a0)
+; RV32IFD-NEXT:    lui a0, %hi(.LCPI30_1)
+; RV32IFD-NEXT:    fld ft2, %lo(.LCPI30_1)(a0)
+; RV32IFD-NEXT:    fmax.d ft0, ft0, ft1
+; RV32IFD-NEXT:    fmin.d ft0, ft0, ft2
+; RV32IFD-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32IFD-NEXT:  .LBB30_3: # %start
+; RV32IFD-NEXT:    slli a0, a0, 24
+; RV32IFD-NEXT:    srai a0, a0, 24
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_w_s_sat_i8:
+; RV64IFD:       # %bb.0: # %start
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    feq.d a0, ft0, ft0
+; RV64IFD-NEXT:    bnez a0, .LBB30_2
+; RV64IFD-NEXT:  # %bb.1: # %start
+; RV64IFD-NEXT:    li a0, 0
+; RV64IFD-NEXT:    j .LBB30_3
+; RV64IFD-NEXT:  .LBB30_2:
+; RV64IFD-NEXT:    lui a0, %hi(.LCPI30_0)
+; RV64IFD-NEXT:    fld ft1, %lo(.LCPI30_0)(a0)
+; RV64IFD-NEXT:    lui a0, %hi(.LCPI30_1)
+; RV64IFD-NEXT:    fld ft2, %lo(.LCPI30_1)(a0)
+; RV64IFD-NEXT:    fmax.d ft0, ft0, ft1
+; RV64IFD-NEXT:    fmin.d ft0, ft0, ft2
+; RV64IFD-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64IFD-NEXT:  .LBB30_3: # %start
+; RV64IFD-NEXT:    slli a0, a0, 56
+; RV64IFD-NEXT:    srai a0, a0, 56
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcvt_w_s_sat_i8:
+; RV32I:       # %bb.0: # %start
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s1, a0
+; RV32I-NEXT:    lui a3, 787968
+; RV32I-NEXT:    li s0, 0
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    call __gedf2@plt
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    mv a0, s1
+; RV32I-NEXT:    mv a1, s2
+; RV32I-NEXT:    call __fixdfsi@plt
+; RV32I-NEXT:    li s4, -128
+; RV32I-NEXT:    blt s3, s0, .LBB30_2
+; RV32I-NEXT:  # %bb.1: # %start
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:  .LBB30_2: # %start
+; RV32I-NEXT:    lui a3, 263676
+; RV32I-NEXT:    mv a0, s1
+; RV32I-NEXT:    mv a1, s2
+; RV32I-NEXT:    mv a2, s0
+; RV32I-NEXT:    call __gtdf2@plt
+; RV32I-NEXT:    li s3, 127
+; RV32I-NEXT:    blt s0, a0, .LBB30_4
+; RV32I-NEXT:  # %bb.3: # %start
+; RV32I-NEXT:    mv s3, s4
+; RV32I-NEXT:  .LBB30_4: # %start
+; RV32I-NEXT:    mv a0, s1
+; RV32I-NEXT:    mv a1, s2
+; RV32I-NEXT:    mv a2, s1
+; RV32I-NEXT:    mv a3, s2
+; RV32I-NEXT:    call __unorddf2@plt
+; RV32I-NEXT:    bne a0, s0, .LBB30_6
+; RV32I-NEXT:  # %bb.5: # %start
+; RV32I-NEXT:    mv s0, s3
+; RV32I-NEXT:  .LBB30_6: # %start
+; RV32I-NEXT:    slli a0, s0, 24
+; RV32I-NEXT:    srai a0, a0, 24
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcvt_w_s_sat_i8:
+; RV64I:       # %bb.0: # %start
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a0
+; RV64I-NEXT:    li a0, -509
+; RV64I-NEXT:    slli a1, a0, 53
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __gedf2@plt
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __fixdfdi@plt
+; RV64I-NEXT:    li s1, 0
+; RV64I-NEXT:    li s3, -128
+; RV64I-NEXT:    bltz s2, .LBB30_2
+; RV64I-NEXT:  # %bb.1: # %start
+; RV64I-NEXT:    mv s3, a0
+; RV64I-NEXT:  .LBB30_2: # %start
+; RV64I-NEXT:    lui a0, 65919
+; RV64I-NEXT:    slli a1, a0, 34
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __gtdf2@plt
+; RV64I-NEXT:    li s2, 127
+; RV64I-NEXT:    blt s1, a0, .LBB30_4
+; RV64I-NEXT:  # %bb.3: # %start
+; RV64I-NEXT:    mv s2, s3
+; RV64I-NEXT:  .LBB30_4: # %start
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    mv a1, s0
+; RV64I-NEXT:    call __unorddf2@plt
+; RV64I-NEXT:    bne a0, s1, .LBB30_6
+; RV64I-NEXT:  # %bb.5: # %start
+; RV64I-NEXT:    mv s1, s2
+; RV64I-NEXT:  .LBB30_6: # %start
+; RV64I-NEXT:    slli a0, s1, 56
+; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
+start:
+  %0 = tail call i8 @llvm.fptosi.sat.i8.f64(double %a)
+  ret i8 %0
+}
+declare i8 @llvm.fptosi.sat.i8.f64(double)
+
+define zeroext i8 @fcvt_wu_s_i8(double %a) nounwind {
+;
+;
+; RV32IFD-LABEL: fcvt_wu_s_i8:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a0, 8(sp)
+; RV32IFD-NEXT:    sw a1, 12(sp)
+; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_wu_s_i8:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcvt_wu_s_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __fixunsdfsi@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcvt_wu_s_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __fixunsdfdi@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = fptoui double %a to i8
+  ret i8 %1
+}
+
+define zeroext i8 @fcvt_wu_s_sat_i8(double %a) nounwind {
+;
+;
+; RV32IFD-LABEL: fcvt_wu_s_sat_i8:
+; RV32IFD:       # %bb.0: # %start
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a0, 8(sp)
+; RV32IFD-NEXT:    sw a1, 12(sp)
+; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    lui a0, %hi(.LCPI32_0)
+; RV32IFD-NEXT:    fld ft1, %lo(.LCPI32_0)(a0)
+; RV32IFD-NEXT:    fcvt.d.w ft2, zero
+; RV32IFD-NEXT:    fmax.d ft0, ft0, ft2
+; RV32IFD-NEXT:    fmin.d ft0, ft0, ft1
+; RV32IFD-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV32IFD-NEXT:    andi a0, a0, 255
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_wu_s_sat_i8:
+; RV64IFD:       # %bb.0: # %start
+; RV64IFD-NEXT:    lui a1, %hi(.LCPI32_0)
+; RV64IFD-NEXT:    fld ft0, %lo(.LCPI32_0)(a1)
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fmv.d.x ft2, zero
+; RV64IFD-NEXT:    fmax.d ft1, ft1, ft2
+; RV64IFD-NEXT:    fmin.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64IFD-NEXT:    andi a0, a0, 255
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcvt_wu_s_sat_i8:
+; RV32I:       # %bb.0: # %start
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    mv s1, a1
+; RV32I-NEXT:    mv s0, a0
+; RV32I-NEXT:    lui a3, 263934
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    call __gtdf2@plt
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    mv a1, s1
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    call __gedf2@plt
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    mv a1, s1
+; RV32I-NEXT:    call __fixunsdfsi@plt
+; RV32I-NEXT:    li a1, 0
+; RV32I-NEXT:    bltz s3, .LBB32_2
+; RV32I-NEXT:  # %bb.1: # %start
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:  .LBB32_2: # %start
+; RV32I-NEXT:    li a0, 255
+; RV32I-NEXT:    bgtz s2, .LBB32_4
+; RV32I-NEXT:  # %bb.3: # %start
+; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:  .LBB32_4: # %start
+; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcvt_wu_s_sat_i8:
+; RV64I:       # %bb.0: # %start
+; RV64I-NEXT:    addi sp, sp, -32
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a0
+; RV64I-NEXT:    li a1, 0
+; RV64I-NEXT:    call __gedf2@plt
+; RV64I-NEXT:    mv s1, a0
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __fixunsdfdi@plt
+; RV64I-NEXT:    li s2, 0
+; RV64I-NEXT:    bltz s1, .LBB32_2
+; RV64I-NEXT:  # %bb.1: # %start
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:  .LBB32_2: # %start
+; RV64I-NEXT:    lui a0, 131967
+; RV64I-NEXT:    slli a1, a0, 33
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __gtdf2@plt
+; RV64I-NEXT:    li a1, 255
+; RV64I-NEXT:    bgtz a0, .LBB32_4
+; RV64I-NEXT:  # %bb.3: # %start
+; RV64I-NEXT:    mv a1, s2
+; RV64I-NEXT:  .LBB32_4: # %start
+; RV64I-NEXT:    andi a0, a1, 255
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    ret
+start:
+  %0 = tail call i8 @llvm.fptoui.sat.i8.f64(double %a)
+  ret i8 %0
+}
+declare i8 @llvm.fptoui.sat.i8.f64(double)

diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll
index 1ae03a62a4a77..c820b553757c6 100644
--- a/llvm/test/CodeGen/RISCV/float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert.ll
@@ -1342,3 +1342,623 @@ define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, float* %1) nounwind
   store float %4, float* %1, align 4
   ret i32 %3
 }
+
+define signext i16 @fcvt_w_s_i16(float %a) nounwind {
+; RV32IF-LABEL: fcvt_w_s_i16:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fmv.w.x ft0, a0
+; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcvt_w_s_i16:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fmv.w.x ft0, a0
+; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcvt_w_s_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcvt_w_s_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = fptosi float %a to i16
+  ret i16 %1
+}
+
+define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind {
+; RV32IF-LABEL: fcvt_w_s_sat_i16:
+; RV32IF:       # %bb.0: # %start
+; RV32IF-NEXT:    fmv.w.x ft0, a0
+; RV32IF-NEXT:    feq.s a0, ft0, ft0
+; RV32IF-NEXT:    bnez a0, .LBB24_2
+; RV32IF-NEXT:  # %bb.1: # %start
+; RV32IF-NEXT:    li a0, 0
+; RV32IF-NEXT:    j .LBB24_3
+; RV32IF-NEXT:  .LBB24_2:
+; RV32IF-NEXT:    lui a0, %hi(.LCPI24_0)
+; RV32IF-NEXT:    flw ft1, %lo(.LCPI24_0)(a0)
+; RV32IF-NEXT:    lui a0, %hi(.LCPI24_1)
+; RV32IF-NEXT:    flw ft2, %lo(.LCPI24_1)(a0)
+; RV32IF-NEXT:    fmax.s ft0, ft0, ft1
+; RV32IF-NEXT:    fmin.s ft0, ft0, ft2
+; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IF-NEXT:  .LBB24_3: # %start
+; RV32IF-NEXT:    slli a0, a0, 16
+; RV32IF-NEXT:    srai a0, a0, 16
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcvt_w_s_sat_i16:
+; RV64IF:       # %bb.0: # %start
+; RV64IF-NEXT:    fmv.w.x ft0, a0
+; RV64IF-NEXT:    feq.s a0, ft0, ft0
+; RV64IF-NEXT:    bnez a0, .LBB24_2
+; RV64IF-NEXT:  # %bb.1: # %start
+; RV64IF-NEXT:    li a0, 0
+; RV64IF-NEXT:    j .LBB24_3
+; RV64IF-NEXT:  .LBB24_2:
+; RV64IF-NEXT:    lui a0, %hi(.LCPI24_0)
+; RV64IF-NEXT:    flw ft1, %lo(.LCPI24_0)(a0)
+; RV64IF-NEXT:    lui a0, %hi(.LCPI24_1)
+; RV64IF-NEXT:    flw ft2, %lo(.LCPI24_1)(a0)
+; RV64IF-NEXT:    fmax.s ft0, ft0, ft1
+; RV64IF-NEXT:    fmin.s ft0, ft0, ft2
+; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IF-NEXT:  .LBB24_3: # %start
+; RV64IF-NEXT:    slli a0, a0, 48
+; RV64IF-NEXT:    srai a0, a0, 48
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcvt_w_s_sat_i16:
+; RV32I:       # %bb.0: # %start
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    mv s0, a0
+; RV32I-NEXT:    lui a1, 815104
+; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    li s1, 0
+; RV32I-NEXT:    lui s3, 1048568
+; RV32I-NEXT:    bltz s2, .LBB24_2
+; RV32I-NEXT:  # %bb.1: # %start
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:  .LBB24_2: # %start
+; RV32I-NEXT:    lui a0, 290816
+; RV32I-NEXT:    addi a1, a0, -512
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    bge s1, a0, .LBB24_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    lui a0, 8
+; RV32I-NEXT:    addi s3, a0, -1
+; RV32I-NEXT:  .LBB24_4: # %start
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    mv a1, s0
+; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    bne a0, s1, .LBB24_6
+; RV32I-NEXT:  # %bb.5: # %start
+; RV32I-NEXT:    mv s1, s3
+; RV32I-NEXT:  .LBB24_6: # %start
+; RV32I-NEXT:    slli a0, s1, 16
+; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcvt_w_s_sat_i16:
+; RV64I:       # %bb.0: # %start
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a0
+; RV64I-NEXT:    lui a1, 815104
+; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    li s1, 0
+; RV64I-NEXT:    lui s3, 1048568
+; RV64I-NEXT:    bltz s2, .LBB24_2
+; RV64I-NEXT:  # %bb.1: # %start
+; RV64I-NEXT:    mv s3, a0
+; RV64I-NEXT:  .LBB24_2: # %start
+; RV64I-NEXT:    lui a0, 290816
+; RV64I-NEXT:    addiw a1, a0, -512
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    bge s1, a0, .LBB24_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    lui a0, 8
+; RV64I-NEXT:    addiw s3, a0, -1
+; RV64I-NEXT:  .LBB24_4: # %start
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    mv a1, s0
+; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    bne a0, s1, .LBB24_6
+; RV64I-NEXT:  # %bb.5: # %start
+; RV64I-NEXT:    mv s1, s3
+; RV64I-NEXT:  .LBB24_6: # %start
+; RV64I-NEXT:    slli a0, s1, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
+start:
+  %0 = tail call i16 @llvm.fptosi.sat.i16.f32(float %a)
+  ret i16 %0
+}
+declare i16 @llvm.fptosi.sat.i16.f32(float)
+
+define zeroext i16 @fcvt_wu_s_i16(float %a) nounwind {
+; RV32IF-LABEL: fcvt_wu_s_i16:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fmv.w.x ft0, a0
+; RV32IF-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcvt_wu_s_i16:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fmv.w.x ft0, a0
+; RV64IF-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcvt_wu_s_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcvt_wu_s_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = fptoui float %a to i16
+  ret i16 %1
+}
+
+define zeroext i16 @fcvt_wu_s_sat_i16(float %a) nounwind {
+; RV32IF-LABEL: fcvt_wu_s_sat_i16:
+; RV32IF:       # %bb.0: # %start
+; RV32IF-NEXT:    lui a1, %hi(.LCPI26_0)
+; RV32IF-NEXT:    flw ft0, %lo(.LCPI26_0)(a1)
+; RV32IF-NEXT:    fmv.w.x ft1, a0
+; RV32IF-NEXT:    fmv.w.x ft2, zero
+; RV32IF-NEXT:    fmax.s ft1, ft1, ft2
+; RV32IF-NEXT:    fmin.s ft0, ft1, ft0
+; RV32IF-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IF-NEXT:    lui a1, 16
+; RV32IF-NEXT:    addi a1, a1, -1
+; RV32IF-NEXT:    and a0, a0, a1
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcvt_wu_s_sat_i16:
+; RV64IF:       # %bb.0: # %start
+; RV64IF-NEXT:    lui a1, %hi(.LCPI26_0)
+; RV64IF-NEXT:    flw ft0, %lo(.LCPI26_0)(a1)
+; RV64IF-NEXT:    fmv.w.x ft1, a0
+; RV64IF-NEXT:    fmv.w.x ft2, zero
+; RV64IF-NEXT:    fmax.s ft1, ft1, ft2
+; RV64IF-NEXT:    fmin.s ft0, ft1, ft0
+; RV64IF-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IF-NEXT:    lui a1, 16
+; RV64IF-NEXT:    addiw a1, a1, -1
+; RV64IF-NEXT:    and a0, a0, a1
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcvt_wu_s_sat_i16:
+; RV32I:       # %bb.0: # %start
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    mv s0, a0
+; RV32I-NEXT:    li a1, 0
+; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    mv s1, a0
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    li s2, 0
+; RV32I-NEXT:    bltz s1, .LBB26_2
+; RV32I-NEXT:  # %bb.1: # %start
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:  .LBB26_2: # %start
+; RV32I-NEXT:    lui a0, 292864
+; RV32I-NEXT:    addi a1, a0, -256
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    mv a2, a1
+; RV32I-NEXT:    bgtz a0, .LBB26_4
+; RV32I-NEXT:  # %bb.3: # %start
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB26_4: # %start
+; RV32I-NEXT:    and a0, a2, a1
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcvt_wu_s_sat_i16:
+; RV64I:       # %bb.0: # %start
+; RV64I-NEXT:    addi sp, sp, -32
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a0
+; RV64I-NEXT:    li a1, 0
+; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    mv s1, a0
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    li s2, 0
+; RV64I-NEXT:    bltz s1, .LBB26_2
+; RV64I-NEXT:  # %bb.1: # %start
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:  .LBB26_2: # %start
+; RV64I-NEXT:    lui a0, 292864
+; RV64I-NEXT:    addiw a1, a0, -256
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    mv a2, a1
+; RV64I-NEXT:    bgtz a0, .LBB26_4
+; RV64I-NEXT:  # %bb.3: # %start
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB26_4: # %start
+; RV64I-NEXT:    and a0, a2, a1
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    ret
+start:
+  %0 = tail call i16 @llvm.fptoui.sat.i16.f32(float %a)
+  ret i16 %0
+}
+declare i16 @llvm.fptoui.sat.i16.f32(float)
+
+define signext i8 @fcvt_w_s_i8(float %a) nounwind {
+; RV32IF-LABEL: fcvt_w_s_i8:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fmv.w.x ft0, a0
+; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcvt_w_s_i8:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fmv.w.x ft0, a0
+; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcvt_w_s_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcvt_w_s_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = fptosi float %a to i8
+  ret i8 %1
+}
+
+define signext i8 @fcvt_w_s_sat_i8(float %a) nounwind {
+; RV32IF-LABEL: fcvt_w_s_sat_i8:
+; RV32IF:       # %bb.0: # %start
+; RV32IF-NEXT:    fmv.w.x ft0, a0
+; RV32IF-NEXT:    feq.s a0, ft0, ft0
+; RV32IF-NEXT:    bnez a0, .LBB28_2
+; RV32IF-NEXT:  # %bb.1: # %start
+; RV32IF-NEXT:    li a0, 0
+; RV32IF-NEXT:    j .LBB28_3
+; RV32IF-NEXT:  .LBB28_2:
+; RV32IF-NEXT:    lui a0, %hi(.LCPI28_0)
+; RV32IF-NEXT:    flw ft1, %lo(.LCPI28_0)(a0)
+; RV32IF-NEXT:    lui a0, %hi(.LCPI28_1)
+; RV32IF-NEXT:    flw ft2, %lo(.LCPI28_1)(a0)
+; RV32IF-NEXT:    fmax.s ft0, ft0, ft1
+; RV32IF-NEXT:    fmin.s ft0, ft0, ft2
+; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IF-NEXT:  .LBB28_3: # %start
+; RV32IF-NEXT:    slli a0, a0, 24
+; RV32IF-NEXT:    srai a0, a0, 24
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcvt_w_s_sat_i8:
+; RV64IF:       # %bb.0: # %start
+; RV64IF-NEXT:    fmv.w.x ft0, a0
+; RV64IF-NEXT:    feq.s a0, ft0, ft0
+; RV64IF-NEXT:    bnez a0, .LBB28_2
+; RV64IF-NEXT:  # %bb.1: # %start
+; RV64IF-NEXT:    li a0, 0
+; RV64IF-NEXT:    j .LBB28_3
+; RV64IF-NEXT:  .LBB28_2:
+; RV64IF-NEXT:    lui a0, %hi(.LCPI28_0)
+; RV64IF-NEXT:    flw ft1, %lo(.LCPI28_0)(a0)
+; RV64IF-NEXT:    lui a0, %hi(.LCPI28_1)
+; RV64IF-NEXT:    flw ft2, %lo(.LCPI28_1)(a0)
+; RV64IF-NEXT:    fmax.s ft0, ft0, ft1
+; RV64IF-NEXT:    fmin.s ft0, ft0, ft2
+; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IF-NEXT:  .LBB28_3: # %start
+; RV64IF-NEXT:    slli a0, a0, 56
+; RV64IF-NEXT:    srai a0, a0, 56
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcvt_w_s_sat_i8:
+; RV32I:       # %bb.0: # %start
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    mv s0, a0
+; RV32I-NEXT:    lui a1, 798720
+; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    li s1, 0
+; RV32I-NEXT:    li s3, -128
+; RV32I-NEXT:    bltz s2, .LBB28_2
+; RV32I-NEXT:  # %bb.1: # %start
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:  .LBB28_2: # %start
+; RV32I-NEXT:    lui a1, 274400
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    li s2, 127
+; RV32I-NEXT:    blt s1, a0, .LBB28_4
+; RV32I-NEXT:  # %bb.3: # %start
+; RV32I-NEXT:    mv s2, s3
+; RV32I-NEXT:  .LBB28_4: # %start
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    mv a1, s0
+; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    bne a0, s1, .LBB28_6
+; RV32I-NEXT:  # %bb.5: # %start
+; RV32I-NEXT:    mv s1, s2
+; RV32I-NEXT:  .LBB28_6: # %start
+; RV32I-NEXT:    slli a0, s1, 24
+; RV32I-NEXT:    srai a0, a0, 24
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcvt_w_s_sat_i8:
+; RV64I:       # %bb.0: # %start
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a0
+; RV64I-NEXT:    lui a1, 798720
+; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    li s1, 0
+; RV64I-NEXT:    li s3, -128
+; RV64I-NEXT:    bltz s2, .LBB28_2
+; RV64I-NEXT:  # %bb.1: # %start
+; RV64I-NEXT:    mv s3, a0
+; RV64I-NEXT:  .LBB28_2: # %start
+; RV64I-NEXT:    lui a1, 274400
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    li s2, 127
+; RV64I-NEXT:    blt s1, a0, .LBB28_4
+; RV64I-NEXT:  # %bb.3: # %start
+; RV64I-NEXT:    mv s2, s3
+; RV64I-NEXT:  .LBB28_4: # %start
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    mv a1, s0
+; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    bne a0, s1, .LBB28_6
+; RV64I-NEXT:  # %bb.5: # %start
+; RV64I-NEXT:    mv s1, s2
+; RV64I-NEXT:  .LBB28_6: # %start
+; RV64I-NEXT:    slli a0, s1, 56
+; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
+start:
+  %0 = tail call i8 @llvm.fptosi.sat.i8.f32(float %a)
+  ret i8 %0
+}
+declare i8 @llvm.fptosi.sat.i8.f32(float)
+
+define zeroext i8 @fcvt_wu_s_i8(float %a) nounwind {
+; RV32IF-LABEL: fcvt_wu_s_i8:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fmv.w.x ft0, a0
+; RV32IF-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcvt_wu_s_i8:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fmv.w.x ft0, a0
+; RV64IF-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcvt_wu_s_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcvt_wu_s_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = fptoui float %a to i8
+  ret i8 %1
+}
+
+define zeroext i8 @fcvt_wu_s_sat_i8(float %a) nounwind {
+; RV32IF-LABEL: fcvt_wu_s_sat_i8:
+; RV32IF:       # %bb.0: # %start
+; RV32IF-NEXT:    lui a1, %hi(.LCPI30_0)
+; RV32IF-NEXT:    flw ft0, %lo(.LCPI30_0)(a1)
+; RV32IF-NEXT:    fmv.w.x ft1, a0
+; RV32IF-NEXT:    fmv.w.x ft2, zero
+; RV32IF-NEXT:    fmax.s ft1, ft1, ft2
+; RV32IF-NEXT:    fmin.s ft0, ft1, ft0
+; RV32IF-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IF-NEXT:    andi a0, a0, 255
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcvt_wu_s_sat_i8:
+; RV64IF:       # %bb.0: # %start
+; RV64IF-NEXT:    lui a1, %hi(.LCPI30_0)
+; RV64IF-NEXT:    flw ft0, %lo(.LCPI30_0)(a1)
+; RV64IF-NEXT:    fmv.w.x ft1, a0
+; RV64IF-NEXT:    fmv.w.x ft2, zero
+; RV64IF-NEXT:    fmax.s ft1, ft1, ft2
+; RV64IF-NEXT:    fmin.s ft0, ft1, ft0
+; RV64IF-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IF-NEXT:    andi a0, a0, 255
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcvt_wu_s_sat_i8:
+; RV32I:       # %bb.0: # %start
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    mv s0, a0
+; RV32I-NEXT:    li a1, 0
+; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    mv s1, a0
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    li s2, 0
+; RV32I-NEXT:    bltz s1, .LBB30_2
+; RV32I-NEXT:  # %bb.1: # %start
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:  .LBB30_2: # %start
+; RV32I-NEXT:    lui a1, 276464
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    li a1, 255
+; RV32I-NEXT:    bgtz a0, .LBB30_4
+; RV32I-NEXT:  # %bb.3: # %start
+; RV32I-NEXT:    mv a1, s2
+; RV32I-NEXT:  .LBB30_4: # %start
+; RV32I-NEXT:    andi a0, a1, 255
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcvt_wu_s_sat_i8:
+; RV64I:       # %bb.0: # %start
+; RV64I-NEXT:    addi sp, sp, -32
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a0
+; RV64I-NEXT:    li a1, 0
+; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    mv s1, a0
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    li s2, 0
+; RV64I-NEXT:    bltz s1, .LBB30_2
+; RV64I-NEXT:  # %bb.1: # %start
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:  .LBB30_2: # %start
+; RV64I-NEXT:    lui a1, 276464
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    li a1, 255
+; RV64I-NEXT:    bgtz a0, .LBB30_4
+; RV64I-NEXT:  # %bb.3: # %start
+; RV64I-NEXT:    mv a1, s2
+; RV64I-NEXT:  .LBB30_4: # %start
+; RV64I-NEXT:    andi a0, a1, 255
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    ret
+start:
+  %0 = tail call i8 @llvm.fptoui.sat.i8.f32(float %a)
+  ret i8 %0
+}
+declare i8 @llvm.fptoui.sat.i8.f32(float)

diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll
index 298cc256e8a3c..cd7fa86f9a2fa 100644
--- a/llvm/test/CodeGen/RISCV/half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert.ll
@@ -2405,3 +2405,853 @@ define signext i32 @fcvt_h_wu_demanded_bits(i32 signext %0, half* %1) nounwind {
   store half %4, half* %1, align 2
   ret i32 %3
 }
+
+define signext i16 @fcvt_w_s_i16(half %a) nounwind {
+; RV32IZFH-LABEL: fcvt_w_s_i16:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rtz
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcvt_w_s_i16:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.l.h a0, fa0, rtz
+; RV64IZFH-NEXT:    ret
+;
+; RV32IDZFH-LABEL: fcvt_w_s_i16:
+; RV32IDZFH:       # %bb.0:
+; RV32IDZFH-NEXT:    fcvt.w.h a0, fa0, rtz
+; RV32IDZFH-NEXT:    ret
+;
+; RV64IDZFH-LABEL: fcvt_w_s_i16:
+; RV64IDZFH:       # %bb.0:
+; RV64IDZFH-NEXT:    fcvt.l.h a0, fa0, rtz
+; RV64IDZFH-NEXT:    ret
+;
+; RV32I-LABEL: fcvt_w_s_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    call __gnu_h2f_ieee@plt
+; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcvt_w_s_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    call __gnu_h2f_ieee@plt
+; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = fptosi half %a to i16
+  ret i16 %1
+}
+
+define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
+; RV32IZFH-LABEL: fcvt_w_s_sat_i16:
+; RV32IZFH:       # %bb.0: # %start
+; RV32IZFH-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFH-NEXT:    feq.s a0, ft0, ft0
+; RV32IZFH-NEXT:    bnez a0, .LBB32_2
+; RV32IZFH-NEXT:  # %bb.1: # %start
+; RV32IZFH-NEXT:    li a0, 0
+; RV32IZFH-NEXT:    j .LBB32_3
+; RV32IZFH-NEXT:  .LBB32_2:
+; RV32IZFH-NEXT:    lui a0, %hi(.LCPI32_0)
+; RV32IZFH-NEXT:    flw ft1, %lo(.LCPI32_0)(a0)
+; RV32IZFH-NEXT:    lui a0, %hi(.LCPI32_1)
+; RV32IZFH-NEXT:    flw ft2, %lo(.LCPI32_1)(a0)
+; RV32IZFH-NEXT:    fmax.s ft0, ft0, ft1
+; RV32IZFH-NEXT:    fmin.s ft0, ft0, ft2
+; RV32IZFH-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IZFH-NEXT:  .LBB32_3: # %start
+; RV32IZFH-NEXT:    slli a0, a0, 16
+; RV32IZFH-NEXT:    srai a0, a0, 16
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcvt_w_s_sat_i16:
+; RV64IZFH:       # %bb.0: # %start
+; RV64IZFH-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFH-NEXT:    feq.s a0, ft0, ft0
+; RV64IZFH-NEXT:    bnez a0, .LBB32_2
+; RV64IZFH-NEXT:  # %bb.1: # %start
+; RV64IZFH-NEXT:    li a0, 0
+; RV64IZFH-NEXT:    j .LBB32_3
+; RV64IZFH-NEXT:  .LBB32_2:
+; RV64IZFH-NEXT:    lui a0, %hi(.LCPI32_0)
+; RV64IZFH-NEXT:    flw ft1, %lo(.LCPI32_0)(a0)
+; RV64IZFH-NEXT:    lui a0, %hi(.LCPI32_1)
+; RV64IZFH-NEXT:    flw ft2, %lo(.LCPI32_1)(a0)
+; RV64IZFH-NEXT:    fmax.s ft0, ft0, ft1
+; RV64IZFH-NEXT:    fmin.s ft0, ft0, ft2
+; RV64IZFH-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IZFH-NEXT:  .LBB32_3: # %start
+; RV64IZFH-NEXT:    slli a0, a0, 48
+; RV64IZFH-NEXT:    srai a0, a0, 48
+; RV64IZFH-NEXT:    ret
+;
+; RV32IDZFH-LABEL: fcvt_w_s_sat_i16:
+; RV32IDZFH:       # %bb.0: # %start
+; RV32IDZFH-NEXT:    fcvt.s.h ft0, fa0
+; RV32IDZFH-NEXT:    feq.s a0, ft0, ft0
+; RV32IDZFH-NEXT:    bnez a0, .LBB32_2
+; RV32IDZFH-NEXT:  # %bb.1: # %start
+; RV32IDZFH-NEXT:    li a0, 0
+; RV32IDZFH-NEXT:    j .LBB32_3
+; RV32IDZFH-NEXT:  .LBB32_2:
+; RV32IDZFH-NEXT:    lui a0, %hi(.LCPI32_0)
+; RV32IDZFH-NEXT:    flw ft1, %lo(.LCPI32_0)(a0)
+; RV32IDZFH-NEXT:    lui a0, %hi(.LCPI32_1)
+; RV32IDZFH-NEXT:    flw ft2, %lo(.LCPI32_1)(a0)
+; RV32IDZFH-NEXT:    fmax.s ft0, ft0, ft1
+; RV32IDZFH-NEXT:    fmin.s ft0, ft0, ft2
+; RV32IDZFH-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IDZFH-NEXT:  .LBB32_3: # %start
+; RV32IDZFH-NEXT:    slli a0, a0, 16
+; RV32IDZFH-NEXT:    srai a0, a0, 16
+; RV32IDZFH-NEXT:    ret
+;
+; RV64IDZFH-LABEL: fcvt_w_s_sat_i16:
+; RV64IDZFH:       # %bb.0: # %start
+; RV64IDZFH-NEXT:    fcvt.s.h ft0, fa0
+; RV64IDZFH-NEXT:    feq.s a0, ft0, ft0
+; RV64IDZFH-NEXT:    bnez a0, .LBB32_2
+; RV64IDZFH-NEXT:  # %bb.1: # %start
+; RV64IDZFH-NEXT:    li a0, 0
+; RV64IDZFH-NEXT:    j .LBB32_3
+; RV64IDZFH-NEXT:  .LBB32_2:
+; RV64IDZFH-NEXT:    lui a0, %hi(.LCPI32_0)
+; RV64IDZFH-NEXT:    flw ft1, %lo(.LCPI32_0)(a0)
+; RV64IDZFH-NEXT:    lui a0, %hi(.LCPI32_1)
+; RV64IDZFH-NEXT:    flw ft2, %lo(.LCPI32_1)(a0)
+; RV64IDZFH-NEXT:    fmax.s ft0, ft0, ft1
+; RV64IDZFH-NEXT:    fmin.s ft0, ft0, ft2
+; RV64IDZFH-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IDZFH-NEXT:  .LBB32_3: # %start
+; RV64IDZFH-NEXT:    slli a0, a0, 48
+; RV64IDZFH-NEXT:    srai a0, a0, 48
+; RV64IDZFH-NEXT:    ret
+;
+; RV32I-LABEL: fcvt_w_s_sat_i16:
+; RV32I:       # %bb.0: # %start
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    call __gnu_h2f_ieee@plt
+; RV32I-NEXT:    mv s0, a0
+; RV32I-NEXT:    lui a1, 815104
+; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    li s1, 0
+; RV32I-NEXT:    lui s3, 1048568
+; RV32I-NEXT:    bltz s2, .LBB32_2
+; RV32I-NEXT:  # %bb.1: # %start
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:  .LBB32_2: # %start
+; RV32I-NEXT:    lui a0, 290816
+; RV32I-NEXT:    addi a1, a0, -512
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    bge s1, a0, .LBB32_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    lui a0, 8
+; RV32I-NEXT:    addi s3, a0, -1
+; RV32I-NEXT:  .LBB32_4: # %start
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    mv a1, s0
+; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    bne a0, s1, .LBB32_6
+; RV32I-NEXT:  # %bb.5: # %start
+; RV32I-NEXT:    mv s1, s3
+; RV32I-NEXT:  .LBB32_6: # %start
+; RV32I-NEXT:    slli a0, s1, 16
+; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcvt_w_s_sat_i16:
+; RV64I:       # %bb.0: # %start
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    call __gnu_h2f_ieee@plt
+; RV64I-NEXT:    mv s0, a0
+; RV64I-NEXT:    lui a1, 815104
+; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    li s1, 0
+; RV64I-NEXT:    lui s3, 1048568
+; RV64I-NEXT:    bltz s2, .LBB32_2
+; RV64I-NEXT:  # %bb.1: # %start
+; RV64I-NEXT:    mv s3, a0
+; RV64I-NEXT:  .LBB32_2: # %start
+; RV64I-NEXT:    lui a0, 290816
+; RV64I-NEXT:    addiw a1, a0, -512
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    bge s1, a0, .LBB32_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    lui a0, 8
+; RV64I-NEXT:    addiw s3, a0, -1
+; RV64I-NEXT:  .LBB32_4: # %start
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    mv a1, s0
+; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    bne a0, s1, .LBB32_6
+; RV64I-NEXT:  # %bb.5: # %start
+; RV64I-NEXT:    mv s1, s3
+; RV64I-NEXT:  .LBB32_6: # %start
+; RV64I-NEXT:    slli a0, s1, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
+start:
+  %0 = tail call i16 @llvm.fptosi.sat.i16.f16(half %a)
+  ret i16 %0
+}
+
+define zeroext i16 @fcvt_wu_s_i16(half %a) nounwind {
+; RV32IZFH-LABEL: fcvt_wu_s_i16:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.wu.h a0, fa0, rtz
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcvt_wu_s_i16:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.lu.h a0, fa0, rtz
+; RV64IZFH-NEXT:    ret
+;
+; RV32IDZFH-LABEL: fcvt_wu_s_i16:
+; RV32IDZFH:       # %bb.0:
+; RV32IDZFH-NEXT:    fcvt.wu.h a0, fa0, rtz
+; RV32IDZFH-NEXT:    ret
+;
+; RV64IDZFH-LABEL: fcvt_wu_s_i16:
+; RV64IDZFH:       # %bb.0:
+; RV64IDZFH-NEXT:    fcvt.lu.h a0, fa0, rtz
+; RV64IDZFH-NEXT:    ret
+;
+; RV32I-LABEL: fcvt_wu_s_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    call __gnu_h2f_ieee@plt
+; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcvt_wu_s_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    call __gnu_h2f_ieee@plt
+; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = fptoui half %a to i16
+  ret i16 %1
+}
+
+define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind {
+; RV32IZFH-LABEL: fcvt_wu_s_sat_i16:
+; RV32IZFH:       # %bb.0: # %start
+; RV32IZFH-NEXT:    lui a0, %hi(.LCPI34_0)
+; RV32IZFH-NEXT:    flw ft0, %lo(.LCPI34_0)(a0)
+; RV32IZFH-NEXT:    fcvt.s.h ft1, fa0
+; RV32IZFH-NEXT:    fmv.w.x ft2, zero
+; RV32IZFH-NEXT:    fmax.s ft1, ft1, ft2
+; RV32IZFH-NEXT:    fmin.s ft0, ft1, ft0
+; RV32IZFH-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IZFH-NEXT:    lui a1, 16
+; RV32IZFH-NEXT:    addi a1, a1, -1
+; RV32IZFH-NEXT:    and a0, a0, a1
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcvt_wu_s_sat_i16:
+; RV64IZFH:       # %bb.0: # %start
+; RV64IZFH-NEXT:    lui a0, %hi(.LCPI34_0)
+; RV64IZFH-NEXT:    flw ft0, %lo(.LCPI34_0)(a0)
+; RV64IZFH-NEXT:    fcvt.s.h ft1, fa0
+; RV64IZFH-NEXT:    fmv.w.x ft2, zero
+; RV64IZFH-NEXT:    fmax.s ft1, ft1, ft2
+; RV64IZFH-NEXT:    fmin.s ft0, ft1, ft0
+; RV64IZFH-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IZFH-NEXT:    lui a1, 16
+; RV64IZFH-NEXT:    addiw a1, a1, -1
+; RV64IZFH-NEXT:    and a0, a0, a1
+; RV64IZFH-NEXT:    ret
+;
+; RV32IDZFH-LABEL: fcvt_wu_s_sat_i16:
+; RV32IDZFH:       # %bb.0: # %start
+; RV32IDZFH-NEXT:    lui a0, %hi(.LCPI34_0)
+; RV32IDZFH-NEXT:    flw ft0, %lo(.LCPI34_0)(a0)
+; RV32IDZFH-NEXT:    fcvt.s.h ft1, fa0
+; RV32IDZFH-NEXT:    fmv.w.x ft2, zero
+; RV32IDZFH-NEXT:    fmax.s ft1, ft1, ft2
+; RV32IDZFH-NEXT:    fmin.s ft0, ft1, ft0
+; RV32IDZFH-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IDZFH-NEXT:    lui a1, 16
+; RV32IDZFH-NEXT:    addi a1, a1, -1
+; RV32IDZFH-NEXT:    and a0, a0, a1
+; RV32IDZFH-NEXT:    ret
+;
+; RV64IDZFH-LABEL: fcvt_wu_s_sat_i16:
+; RV64IDZFH:       # %bb.0: # %start
+; RV64IDZFH-NEXT:    lui a0, %hi(.LCPI34_0)
+; RV64IDZFH-NEXT:    flw ft0, %lo(.LCPI34_0)(a0)
+; RV64IDZFH-NEXT:    fcvt.s.h ft1, fa0
+; RV64IDZFH-NEXT:    fmv.w.x ft2, zero
+; RV64IDZFH-NEXT:    fmax.s ft1, ft1, ft2
+; RV64IDZFH-NEXT:    fmin.s ft0, ft1, ft0
+; RV64IDZFH-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IDZFH-NEXT:    lui a1, 16
+; RV64IDZFH-NEXT:    addiw a1, a1, -1
+; RV64IDZFH-NEXT:    and a0, a0, a1
+; RV64IDZFH-NEXT:    ret
+;
+; RV32I-LABEL: fcvt_wu_s_sat_i16:
+; RV32I:       # %bb.0: # %start
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi s2, a1, -1
+; RV32I-NEXT:    and a0, a0, s2
+; RV32I-NEXT:    call __gnu_h2f_ieee@plt
+; RV32I-NEXT:    mv s0, a0
+; RV32I-NEXT:    li a1, 0
+; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    mv s1, a0
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    li s3, 0
+; RV32I-NEXT:    bltz s1, .LBB34_2
+; RV32I-NEXT:  # %bb.1: # %start
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:  .LBB34_2: # %start
+; RV32I-NEXT:    lui a0, 292864
+; RV32I-NEXT:    addi a1, a0, -256
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    mv a1, s2
+; RV32I-NEXT:    bgtz a0, .LBB34_4
+; RV32I-NEXT:  # %bb.3: # %start
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:  .LBB34_4: # %start
+; RV32I-NEXT:    and a0, a1, s2
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcvt_wu_s_sat_i16:
+; RV64I:       # %bb.0: # %start
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw s2, a1, -1
+; RV64I-NEXT:    and a0, a0, s2
+; RV64I-NEXT:    call __gnu_h2f_ieee@plt
+; RV64I-NEXT:    mv s0, a0
+; RV64I-NEXT:    li a1, 0
+; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    mv s1, a0
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    li s3, 0
+; RV64I-NEXT:    bltz s1, .LBB34_2
+; RV64I-NEXT:  # %bb.1: # %start
+; RV64I-NEXT:    mv s3, a0
+; RV64I-NEXT:  .LBB34_2: # %start
+; RV64I-NEXT:    lui a0, 292864
+; RV64I-NEXT:    addiw a1, a0, -256
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    mv a1, s2
+; RV64I-NEXT:    bgtz a0, .LBB34_4
+; RV64I-NEXT:  # %bb.3: # %start
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:  .LBB34_4: # %start
+; RV64I-NEXT:    and a0, a1, s2
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
+start:
+  %0 = tail call i16 @llvm.fptoui.sat.i16.f16(half %a)
+  ret i16 %0
+}
+declare i16 @llvm.fptoui.sat.i16.f16(half)
+
+define signext i8 @fcvt_w_s_i8(half %a) nounwind {
+; RV32IZFH-LABEL: fcvt_w_s_i8:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rtz
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcvt_w_s_i8:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.l.h a0, fa0, rtz
+; RV64IZFH-NEXT:    ret
+;
+; RV32IDZFH-LABEL: fcvt_w_s_i8:
+; RV32IDZFH:       # %bb.0:
+; RV32IDZFH-NEXT:    fcvt.w.h a0, fa0, rtz
+; RV32IDZFH-NEXT:    ret
+;
+; RV64IDZFH-LABEL: fcvt_w_s_i8:
+; RV64IDZFH:       # %bb.0:
+; RV64IDZFH-NEXT:    fcvt.l.h a0, fa0, rtz
+; RV64IDZFH-NEXT:    ret
+;
+; RV32I-LABEL: fcvt_w_s_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    call __gnu_h2f_ieee@plt
+; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcvt_w_s_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    call __gnu_h2f_ieee@plt
+; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = fptosi half %a to i8
+  ret i8 %1
+}
+
+define signext i8 @fcvt_w_s_sat_i8(half %a) nounwind {
+; RV32IZFH-LABEL: fcvt_w_s_sat_i8:
+; RV32IZFH:       # %bb.0: # %start
+; RV32IZFH-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFH-NEXT:    feq.s a0, ft0, ft0
+; RV32IZFH-NEXT:    bnez a0, .LBB36_2
+; RV32IZFH-NEXT:  # %bb.1: # %start
+; RV32IZFH-NEXT:    li a0, 0
+; RV32IZFH-NEXT:    j .LBB36_3
+; RV32IZFH-NEXT:  .LBB36_2:
+; RV32IZFH-NEXT:    lui a0, %hi(.LCPI36_0)
+; RV32IZFH-NEXT:    flw ft1, %lo(.LCPI36_0)(a0)
+; RV32IZFH-NEXT:    lui a0, %hi(.LCPI36_1)
+; RV32IZFH-NEXT:    flw ft2, %lo(.LCPI36_1)(a0)
+; RV32IZFH-NEXT:    fmax.s ft0, ft0, ft1
+; RV32IZFH-NEXT:    fmin.s ft0, ft0, ft2
+; RV32IZFH-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IZFH-NEXT:  .LBB36_3: # %start
+; RV32IZFH-NEXT:    slli a0, a0, 24
+; RV32IZFH-NEXT:    srai a0, a0, 24
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcvt_w_s_sat_i8:
+; RV64IZFH:       # %bb.0: # %start
+; RV64IZFH-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFH-NEXT:    feq.s a0, ft0, ft0
+; RV64IZFH-NEXT:    bnez a0, .LBB36_2
+; RV64IZFH-NEXT:  # %bb.1: # %start
+; RV64IZFH-NEXT:    li a0, 0
+; RV64IZFH-NEXT:    j .LBB36_3
+; RV64IZFH-NEXT:  .LBB36_2:
+; RV64IZFH-NEXT:    lui a0, %hi(.LCPI36_0)
+; RV64IZFH-NEXT:    flw ft1, %lo(.LCPI36_0)(a0)
+; RV64IZFH-NEXT:    lui a0, %hi(.LCPI36_1)
+; RV64IZFH-NEXT:    flw ft2, %lo(.LCPI36_1)(a0)
+; RV64IZFH-NEXT:    fmax.s ft0, ft0, ft1
+; RV64IZFH-NEXT:    fmin.s ft0, ft0, ft2
+; RV64IZFH-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IZFH-NEXT:  .LBB36_3: # %start
+; RV64IZFH-NEXT:    slli a0, a0, 56
+; RV64IZFH-NEXT:    srai a0, a0, 56
+; RV64IZFH-NEXT:    ret
+;
+; RV32IDZFH-LABEL: fcvt_w_s_sat_i8:
+; RV32IDZFH:       # %bb.0: # %start
+; RV32IDZFH-NEXT:    fcvt.s.h ft0, fa0
+; RV32IDZFH-NEXT:    feq.s a0, ft0, ft0
+; RV32IDZFH-NEXT:    bnez a0, .LBB36_2
+; RV32IDZFH-NEXT:  # %bb.1: # %start
+; RV32IDZFH-NEXT:    li a0, 0
+; RV32IDZFH-NEXT:    j .LBB36_3
+; RV32IDZFH-NEXT:  .LBB36_2:
+; RV32IDZFH-NEXT:    lui a0, %hi(.LCPI36_0)
+; RV32IDZFH-NEXT:    flw ft1, %lo(.LCPI36_0)(a0)
+; RV32IDZFH-NEXT:    lui a0, %hi(.LCPI36_1)
+; RV32IDZFH-NEXT:    flw ft2, %lo(.LCPI36_1)(a0)
+; RV32IDZFH-NEXT:    fmax.s ft0, ft0, ft1
+; RV32IDZFH-NEXT:    fmin.s ft0, ft0, ft2
+; RV32IDZFH-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IDZFH-NEXT:  .LBB36_3: # %start
+; RV32IDZFH-NEXT:    slli a0, a0, 24
+; RV32IDZFH-NEXT:    srai a0, a0, 24
+; RV32IDZFH-NEXT:    ret
+;
+; RV64IDZFH-LABEL: fcvt_w_s_sat_i8:
+; RV64IDZFH:       # %bb.0: # %start
+; RV64IDZFH-NEXT:    fcvt.s.h ft0, fa0
+; RV64IDZFH-NEXT:    feq.s a0, ft0, ft0
+; RV64IDZFH-NEXT:    bnez a0, .LBB36_2
+; RV64IDZFH-NEXT:  # %bb.1: # %start
+; RV64IDZFH-NEXT:    li a0, 0
+; RV64IDZFH-NEXT:    j .LBB36_3
+; RV64IDZFH-NEXT:  .LBB36_2:
+; RV64IDZFH-NEXT:    lui a0, %hi(.LCPI36_0)
+; RV64IDZFH-NEXT:    flw ft1, %lo(.LCPI36_0)(a0)
+; RV64IDZFH-NEXT:    lui a0, %hi(.LCPI36_1)
+; RV64IDZFH-NEXT:    flw ft2, %lo(.LCPI36_1)(a0)
+; RV64IDZFH-NEXT:    fmax.s ft0, ft0, ft1
+; RV64IDZFH-NEXT:    fmin.s ft0, ft0, ft2
+; RV64IDZFH-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IDZFH-NEXT:  .LBB36_3: # %start
+; RV64IDZFH-NEXT:    slli a0, a0, 56
+; RV64IDZFH-NEXT:    srai a0, a0, 56
+; RV64IDZFH-NEXT:    ret
+;
+; RV32I-LABEL: fcvt_w_s_sat_i8:
+; RV32I:       # %bb.0: # %start
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    call __gnu_h2f_ieee@plt
+; RV32I-NEXT:    mv s0, a0
+; RV32I-NEXT:    lui a1, 798720
+; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    call __fixsfsi@plt
+; RV32I-NEXT:    li s1, 0
+; RV32I-NEXT:    li s3, -128
+; RV32I-NEXT:    bltz s2, .LBB36_2
+; RV32I-NEXT:  # %bb.1: # %start
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:  .LBB36_2: # %start
+; RV32I-NEXT:    lui a1, 274400
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    li s2, 127
+; RV32I-NEXT:    blt s1, a0, .LBB36_4
+; RV32I-NEXT:  # %bb.3: # %start
+; RV32I-NEXT:    mv s2, s3
+; RV32I-NEXT:  .LBB36_4: # %start
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    mv a1, s0
+; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    bne a0, s1, .LBB36_6
+; RV32I-NEXT:  # %bb.5: # %start
+; RV32I-NEXT:    mv s1, s2
+; RV32I-NEXT:  .LBB36_6: # %start
+; RV32I-NEXT:    slli a0, s1, 24
+; RV32I-NEXT:    srai a0, a0, 24
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcvt_w_s_sat_i8:
+; RV64I:       # %bb.0: # %start
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    call __gnu_h2f_ieee@plt
+; RV64I-NEXT:    mv s0, a0
+; RV64I-NEXT:    lui a1, 798720
+; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __fixsfdi@plt
+; RV64I-NEXT:    li s1, 0
+; RV64I-NEXT:    li s3, -128
+; RV64I-NEXT:    bltz s2, .LBB36_2
+; RV64I-NEXT:  # %bb.1: # %start
+; RV64I-NEXT:    mv s3, a0
+; RV64I-NEXT:  .LBB36_2: # %start
+; RV64I-NEXT:    lui a1, 274400
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    li s2, 127
+; RV64I-NEXT:    blt s1, a0, .LBB36_4
+; RV64I-NEXT:  # %bb.3: # %start
+; RV64I-NEXT:    mv s2, s3
+; RV64I-NEXT:  .LBB36_4: # %start
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    mv a1, s0
+; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    bne a0, s1, .LBB36_6
+; RV64I-NEXT:  # %bb.5: # %start
+; RV64I-NEXT:    mv s1, s2
+; RV64I-NEXT:  .LBB36_6: # %start
+; RV64I-NEXT:    slli a0, s1, 56
+; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
+start:
+  %0 = tail call i8 @llvm.fptosi.sat.i8.f16(half %a)
+  ret i8 %0
+}
+declare i8 @llvm.fptosi.sat.i8.f16(half)
+
+define zeroext i8 @fcvt_wu_s_i8(half %a) nounwind {
+; RV32IZFH-LABEL: fcvt_wu_s_i8:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.wu.h a0, fa0, rtz
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcvt_wu_s_i8:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.lu.h a0, fa0, rtz
+; RV64IZFH-NEXT:    ret
+;
+; RV32IDZFH-LABEL: fcvt_wu_s_i8:
+; RV32IDZFH:       # %bb.0:
+; RV32IDZFH-NEXT:    fcvt.wu.h a0, fa0, rtz
+; RV32IDZFH-NEXT:    ret
+;
+; RV64IDZFH-LABEL: fcvt_wu_s_i8:
+; RV64IDZFH:       # %bb.0:
+; RV64IDZFH-NEXT:    fcvt.lu.h a0, fa0, rtz
+; RV64IDZFH-NEXT:    ret
+;
+; RV32I-LABEL: fcvt_wu_s_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    call __gnu_h2f_ieee@plt
+; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcvt_wu_s_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    call __gnu_h2f_ieee@plt
+; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = fptoui half %a to i8
+  ret i8 %1
+}
+
+define zeroext i8 @fcvt_wu_s_sat_i8(half %a) nounwind {
+; RV32IZFH-LABEL: fcvt_wu_s_sat_i8:
+; RV32IZFH:       # %bb.0: # %start
+; RV32IZFH-NEXT:    lui a0, %hi(.LCPI38_0)
+; RV32IZFH-NEXT:    flw ft0, %lo(.LCPI38_0)(a0)
+; RV32IZFH-NEXT:    fcvt.s.h ft1, fa0
+; RV32IZFH-NEXT:    fmv.w.x ft2, zero
+; RV32IZFH-NEXT:    fmax.s ft1, ft1, ft2
+; RV32IZFH-NEXT:    fmin.s ft0, ft1, ft0
+; RV32IZFH-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IZFH-NEXT:    andi a0, a0, 255
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcvt_wu_s_sat_i8:
+; RV64IZFH:       # %bb.0: # %start
+; RV64IZFH-NEXT:    lui a0, %hi(.LCPI38_0)
+; RV64IZFH-NEXT:    flw ft0, %lo(.LCPI38_0)(a0)
+; RV64IZFH-NEXT:    fcvt.s.h ft1, fa0
+; RV64IZFH-NEXT:    fmv.w.x ft2, zero
+; RV64IZFH-NEXT:    fmax.s ft1, ft1, ft2
+; RV64IZFH-NEXT:    fmin.s ft0, ft1, ft0
+; RV64IZFH-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IZFH-NEXT:    andi a0, a0, 255
+; RV64IZFH-NEXT:    ret
+;
+; RV32IDZFH-LABEL: fcvt_wu_s_sat_i8:
+; RV32IDZFH:       # %bb.0: # %start
+; RV32IDZFH-NEXT:    lui a0, %hi(.LCPI38_0)
+; RV32IDZFH-NEXT:    flw ft0, %lo(.LCPI38_0)(a0)
+; RV32IDZFH-NEXT:    fcvt.s.h ft1, fa0
+; RV32IDZFH-NEXT:    fmv.w.x ft2, zero
+; RV32IDZFH-NEXT:    fmax.s ft1, ft1, ft2
+; RV32IDZFH-NEXT:    fmin.s ft0, ft1, ft0
+; RV32IDZFH-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IDZFH-NEXT:    andi a0, a0, 255
+; RV32IDZFH-NEXT:    ret
+;
+; RV64IDZFH-LABEL: fcvt_wu_s_sat_i8:
+; RV64IDZFH:       # %bb.0: # %start
+; RV64IDZFH-NEXT:    lui a0, %hi(.LCPI38_0)
+; RV64IDZFH-NEXT:    flw ft0, %lo(.LCPI38_0)(a0)
+; RV64IDZFH-NEXT:    fcvt.s.h ft1, fa0
+; RV64IDZFH-NEXT:    fmv.w.x ft2, zero
+; RV64IDZFH-NEXT:    fmax.s ft1, ft1, ft2
+; RV64IDZFH-NEXT:    fmin.s ft0, ft1, ft0
+; RV64IDZFH-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IDZFH-NEXT:    andi a0, a0, 255
+; RV64IDZFH-NEXT:    ret
+;
+; RV32I-LABEL: fcvt_wu_s_sat_i8:
+; RV32I:       # %bb.0: # %start
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    call __gnu_h2f_ieee@plt
+; RV32I-NEXT:    mv s0, a0
+; RV32I-NEXT:    li a1, 0
+; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    mv s1, a0
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    call __fixunssfsi@plt
+; RV32I-NEXT:    li s2, 0
+; RV32I-NEXT:    bltz s1, .LBB38_2
+; RV32I-NEXT:  # %bb.1: # %start
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:  .LBB38_2: # %start
+; RV32I-NEXT:    lui a1, 276464
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    li a1, 255
+; RV32I-NEXT:    bgtz a0, .LBB38_4
+; RV32I-NEXT:  # %bb.3: # %start
+; RV32I-NEXT:    mv a1, s2
+; RV32I-NEXT:  .LBB38_4: # %start
+; RV32I-NEXT:    andi a0, a1, 255
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcvt_wu_s_sat_i8:
+; RV64I:       # %bb.0: # %start
+; RV64I-NEXT:    addi sp, sp, -32
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    call __gnu_h2f_ieee@plt
+; RV64I-NEXT:    mv s0, a0
+; RV64I-NEXT:    li a1, 0
+; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    mv s1, a0
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __fixunssfdi@plt
+; RV64I-NEXT:    li s2, 0
+; RV64I-NEXT:    bltz s1, .LBB38_2
+; RV64I-NEXT:  # %bb.1: # %start
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:  .LBB38_2: # %start
+; RV64I-NEXT:    lui a1, 276464
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    li a1, 255
+; RV64I-NEXT:    bgtz a0, .LBB38_4
+; RV64I-NEXT:  # %bb.3: # %start
+; RV64I-NEXT:    mv a1, s2
+; RV64I-NEXT:  .LBB38_4: # %start
+; RV64I-NEXT:    andi a0, a1, 255
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    ret
+start:
+  %0 = tail call i8 @llvm.fptoui.sat.i8.f16(half %a)
+  ret i8 %0
+}
+declare i8 @llvm.fptoui.sat.i8.f16(half)

More information about the llvm-commits mailing list