[llvm] 768b0b4 - [RISCV] Add test cases for RV64 i128<->half/float/double (#115124)

via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 6 11:36:32 PST 2024


Author: Craig Topper
Date: 2024-11-06T11:36:30-08:00
New Revision: 768b0b4eb83e8ca62cc504ba3f0f9a0c46eea7b6

URL: https://github.com/llvm/llvm-project/commit/768b0b4eb83e8ca62cc504ba3f0f9a0c46eea7b6
DIFF: https://github.com/llvm/llvm-project/commit/768b0b4eb83e8ca62cc504ba3f0f9a0c46eea7b6.diff

LOG: [RISCV] Add test cases for RV64 i128<->half/float/double (#115124)

These emit 'ti' libcalls.
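For context, the 'ti' suffix names compiler-rt's 128-bit ("tetra integer") helpers, which is why the i128 conversions below lower to runtime calls instead of inline code. A minimal sketch of the double<->i128 libcalls these tests exercise, written as C declarations and assuming the usual compiler-rt builtin signatures (the float and half tests use the analogous sf/hf variants such as __fixsfti and __fixhfti):

    __int128 __fixdfti(double a);               /* fptosi double -> i128 */
    unsigned __int128 __fixunsdfti(double a);   /* fptoui double -> i128 */
    double __floattidf(__int128 a);             /* sitofp i128 -> double */
    double __floatuntidf(unsigned __int128 a);  /* uitofp i128 -> double */

The non-strict tests additionally cover llvm.fptosi.sat/llvm.fptoui.sat, which call the same libcalls and then clamp the result with explicit range and NaN checks, as seen in the generated assembly below.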

Added: 
    llvm/test/CodeGen/RISCV/rv64-double-convert-strict.ll
    llvm/test/CodeGen/RISCV/rv64-double-convert.ll
    llvm/test/CodeGen/RISCV/rv64-float-convert-strict.ll
    llvm/test/CodeGen/RISCV/rv64-float-convert.ll
    llvm/test/CodeGen/RISCV/rv64-half-convert-strict.ll
    llvm/test/CodeGen/RISCV/rv64-half-convert.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rv64-double-convert-strict.ll b/llvm/test/CodeGen/RISCV/rv64-double-convert-strict.ll
new file mode 100644
index 00000000000000..b7860e6374e884
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64-double-convert-strict.ll
@@ -0,0 +1,63 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs -target-abi=lp64 \
+; RUN:   -disable-strictnode-mutation < %s | FileCheck %s -check-prefixes=CHECK,RV64I
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs -target-abi=lp64d \
+; RUN:   -disable-strictnode-mutation < %s | FileCheck %s -check-prefixes=CHECK,RV64ID
+; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs -target-abi=lp64 \
+; RUN:   -disable-strictnode-mutation < %s | FileCheck %s -check-prefixes=CHECK,RV64IDINX
+
+define i128 @fptosi_f64_to_i128(double %a) nounwind strictfp {
+; CHECK-LABEL: fptosi_f64_to_i128:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    call __fixdfti
+; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %1 = call i128 @llvm.experimental.constrained.fptosi.i128.f64(double %a, metadata !"fpexcept.strict")
+  ret i128 %1
+}
+
+define i128 @fptoui_f64_to_i128(double %a) nounwind strictfp {
+; CHECK-LABEL: fptoui_f64_to_i128:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    call __fixunsdfti
+; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %1 = call i128 @llvm.experimental.constrained.fptoui.i128.f64(double %a, metadata !"fpexcept.strict")
+  ret i128 %1
+}
+
+define double @sitofp_i128_to_f64(i128 %a) nounwind strictfp {
+; CHECK-LABEL: sitofp_i128_to_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    call __floattidf
+; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %1 = call double @llvm.experimental.constrained.sitofp.f64.i128(i128 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret double %1
+}
+
+define double @uitofp_i128_to_f64(i128 %a) nounwind strictfp {
+; CHECK-LABEL: uitofp_i128_to_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    call __floatuntidf
+; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %1 = call double @llvm.experimental.constrained.uitofp.f64.i128(i128 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret double %1
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV64I: {{.*}}
+; RV64ID: {{.*}}
+; RV64IDINX: {{.*}}

diff --git a/llvm/test/CodeGen/RISCV/rv64-double-convert.ll b/llvm/test/CodeGen/RISCV/rv64-double-convert.ll
new file mode 100644
index 00000000000000..315bf86046dff5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64-double-convert.ll
@@ -0,0 +1,286 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   -target-abi=lp64 | FileCheck %s -check-prefixes=CHECK,RV64I
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   -target-abi=lp64d | FileCheck %s -check-prefixes=CHECK,RV64ID
+; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
+; RUN:   -target-abi=lp64 | FileCheck %s -check-prefixes=CHECK,RV64IDINX
+
+define i128 @fptosi_f64_to_i128(double %a) nounwind {
+; CHECK-LABEL: fptosi_f64_to_i128:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    call __fixdfti
+; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %1 = fptosi double %a to i128
+  ret i128 %1
+}
+
+define i128 @fptoui_f64_to_i128(double %a) nounwind {
+; CHECK-LABEL: fptoui_f64_to_i128:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    call __fixunsdfti
+; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %1 = fptoui double %a to i128
+  ret i128 %1
+}
+
+define double @sitofp_i128_to_f64(i128 %a) nounwind {
+; CHECK-LABEL: sitofp_i128_to_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    call __floattidf
+; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %1 = sitofp i128 %a to double
+  ret double %1
+}
+
+define double @uitofp_i128_to_f64(i128 %a) nounwind {
+; CHECK-LABEL: uitofp_i128_to_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    call __floatuntidf
+; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %1 = uitofp i128 %a to double
+  ret double %1
+}
+
+define i128 @fptosi_sat_f64_to_i128(double %a) nounwind {
+; RV64I-LABEL: fptosi_sat_f64_to_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s4, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s5, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a0
+; RV64I-NEXT:    li a1, -449
+; RV64I-NEXT:    slli a1, a1, 53
+; RV64I-NEXT:    call __gedf2
+; RV64I-NEXT:    mv s1, a0
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __fixdfti
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    mv s3, a1
+; RV64I-NEXT:    li s5, -1
+; RV64I-NEXT:    bgez s1, .LBB4_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    slli s3, s5, 63
+; RV64I-NEXT:  .LBB4_2:
+; RV64I-NEXT:    li a0, 575
+; RV64I-NEXT:    slli a0, a0, 53
+; RV64I-NEXT:    addi a1, a0, -1
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __gtdf2
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    blez a0, .LBB4_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    srli s3, s5, 1
+; RV64I-NEXT:  .LBB4_4:
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    mv a1, s0
+; RV64I-NEXT:    call __unorddf2
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a1, a0, s3
+; RV64I-NEXT:    slti a2, s1, 0
+; RV64I-NEXT:    addi a2, a2, -1
+; RV64I-NEXT:    and a2, a2, s2
+; RV64I-NEXT:    sgtz a3, s4
+; RV64I-NEXT:    neg a3, a3
+; RV64I-NEXT:    or a2, a3, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s3, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s4, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s5, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
+;
+; RV64ID-LABEL: fptosi_sat_f64_to_i128:
+; RV64ID:       # %bb.0:
+; RV64ID-NEXT:    addi sp, sp, -32
+; RV64ID-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64ID-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64ID-NEXT:    fsd fs0, 8(sp) # 8-byte Folded Spill
+; RV64ID-NEXT:    lui a0, %hi(.LCPI4_0)
+; RV64ID-NEXT:    fld fa5, %lo(.LCPI4_0)(a0)
+; RV64ID-NEXT:    fmv.d fs0, fa0
+; RV64ID-NEXT:    fle.d s0, fa5, fa0
+; RV64ID-NEXT:    call __fixdfti
+; RV64ID-NEXT:    li a2, -1
+; RV64ID-NEXT:    bnez s0, .LBB4_2
+; RV64ID-NEXT:  # %bb.1:
+; RV64ID-NEXT:    slli a1, a2, 63
+; RV64ID-NEXT:  .LBB4_2:
+; RV64ID-NEXT:    lui a3, %hi(.LCPI4_1)
+; RV64ID-NEXT:    fld fa5, %lo(.LCPI4_1)(a3)
+; RV64ID-NEXT:    flt.d a3, fa5, fs0
+; RV64ID-NEXT:    beqz a3, .LBB4_4
+; RV64ID-NEXT:  # %bb.3:
+; RV64ID-NEXT:    srli a1, a2, 1
+; RV64ID-NEXT:  .LBB4_4:
+; RV64ID-NEXT:    feq.d a2, fs0, fs0
+; RV64ID-NEXT:    neg a2, a2
+; RV64ID-NEXT:    and a1, a2, a1
+; RV64ID-NEXT:    neg a3, a3
+; RV64ID-NEXT:    neg a4, s0
+; RV64ID-NEXT:    and a0, a4, a0
+; RV64ID-NEXT:    or a0, a3, a0
+; RV64ID-NEXT:    and a0, a2, a0
+; RV64ID-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64ID-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64ID-NEXT:    fld fs0, 8(sp) # 8-byte Folded Reload
+; RV64ID-NEXT:    addi sp, sp, 32
+; RV64ID-NEXT:    ret
+;
+; RV64IDINX-LABEL: fptosi_sat_f64_to_i128:
+; RV64IDINX:       # %bb.0:
+; RV64IDINX-NEXT:    addi sp, sp, -32
+; RV64IDINX-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64IDINX-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64IDINX-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64IDINX-NEXT:    mv s0, a0
+; RV64IDINX-NEXT:    li a0, -449
+; RV64IDINX-NEXT:    slli a0, a0, 53
+; RV64IDINX-NEXT:    fle.d s1, a0, s0
+; RV64IDINX-NEXT:    mv a0, s0
+; RV64IDINX-NEXT:    call __fixdfti
+; RV64IDINX-NEXT:    li a2, -1
+; RV64IDINX-NEXT:    bnez s1, .LBB4_2
+; RV64IDINX-NEXT:  # %bb.1:
+; RV64IDINX-NEXT:    slli a1, a2, 63
+; RV64IDINX-NEXT:  .LBB4_2:
+; RV64IDINX-NEXT:    lui a3, %hi(.LCPI4_0)
+; RV64IDINX-NEXT:    ld a3, %lo(.LCPI4_0)(a3)
+; RV64IDINX-NEXT:    flt.d a3, a3, s0
+; RV64IDINX-NEXT:    beqz a3, .LBB4_4
+; RV64IDINX-NEXT:  # %bb.3:
+; RV64IDINX-NEXT:    srli a1, a2, 1
+; RV64IDINX-NEXT:  .LBB4_4:
+; RV64IDINX-NEXT:    feq.d a2, s0, s0
+; RV64IDINX-NEXT:    neg a2, a2
+; RV64IDINX-NEXT:    and a1, a2, a1
+; RV64IDINX-NEXT:    neg a3, a3
+; RV64IDINX-NEXT:    neg a4, s1
+; RV64IDINX-NEXT:    and a0, a4, a0
+; RV64IDINX-NEXT:    or a0, a3, a0
+; RV64IDINX-NEXT:    and a0, a2, a0
+; RV64IDINX-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64IDINX-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64IDINX-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64IDINX-NEXT:    addi sp, sp, 32
+; RV64IDINX-NEXT:    ret
+  %1 = tail call i128 @llvm.fptosi.sat.i128.f64(double %a)
+  ret i128 %1
+}
+declare i128 @llvm.fptosi.sat.i128.f64(double)
+
+define i128 @fptoui_sat_f64_to_i128(double %a) nounwind {
+; RV64I-LABEL: fptoui_sat_f64_to_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a0
+; RV64I-NEXT:    li a1, 0
+; RV64I-NEXT:    call __gedf2
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    addi s2, a0, -1
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __fixunsdfti
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    and s3, s2, a0
+; RV64I-NEXT:    li a0, 1151
+; RV64I-NEXT:    slli a0, a0, 52
+; RV64I-NEXT:    addi a1, a0, -1
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __gtdf2
+; RV64I-NEXT:    sgtz a0, a0
+; RV64I-NEXT:    neg a1, a0
+; RV64I-NEXT:    or a0, a1, s3
+; RV64I-NEXT:    and a2, s2, s1
+; RV64I-NEXT:    or a1, a1, a2
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
+;
+; RV64ID-LABEL: fptoui_sat_f64_to_i128:
+; RV64ID:       # %bb.0:
+; RV64ID-NEXT:    addi sp, sp, -32
+; RV64ID-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64ID-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64ID-NEXT:    fsd fs0, 8(sp) # 8-byte Folded Spill
+; RV64ID-NEXT:    fmv.d fs0, fa0
+; RV64ID-NEXT:    fmv.d.x fa5, zero
+; RV64ID-NEXT:    fle.d a0, fa5, fa0
+; RV64ID-NEXT:    neg s0, a0
+; RV64ID-NEXT:    call __fixunsdfti
+; RV64ID-NEXT:    lui a2, %hi(.LCPI5_0)
+; RV64ID-NEXT:    fld fa5, %lo(.LCPI5_0)(a2)
+; RV64ID-NEXT:    and a0, s0, a0
+; RV64ID-NEXT:    flt.d a2, fa5, fs0
+; RV64ID-NEXT:    neg a2, a2
+; RV64ID-NEXT:    or a0, a2, a0
+; RV64ID-NEXT:    and a1, s0, a1
+; RV64ID-NEXT:    or a1, a2, a1
+; RV64ID-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64ID-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64ID-NEXT:    fld fs0, 8(sp) # 8-byte Folded Reload
+; RV64ID-NEXT:    addi sp, sp, 32
+; RV64ID-NEXT:    ret
+;
+; RV64IDINX-LABEL: fptoui_sat_f64_to_i128:
+; RV64IDINX:       # %bb.0:
+; RV64IDINX-NEXT:    addi sp, sp, -32
+; RV64IDINX-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64IDINX-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64IDINX-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64IDINX-NEXT:    mv s0, a0
+; RV64IDINX-NEXT:    fle.d a0, zero, a0
+; RV64IDINX-NEXT:    neg s1, a0
+; RV64IDINX-NEXT:    mv a0, s0
+; RV64IDINX-NEXT:    call __fixunsdfti
+; RV64IDINX-NEXT:    lui a2, %hi(.LCPI5_0)
+; RV64IDINX-NEXT:    ld a2, %lo(.LCPI5_0)(a2)
+; RV64IDINX-NEXT:    and a0, s1, a0
+; RV64IDINX-NEXT:    flt.d a2, a2, s0
+; RV64IDINX-NEXT:    neg a2, a2
+; RV64IDINX-NEXT:    or a0, a2, a0
+; RV64IDINX-NEXT:    and a1, s1, a1
+; RV64IDINX-NEXT:    or a1, a2, a1
+; RV64IDINX-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64IDINX-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64IDINX-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64IDINX-NEXT:    addi sp, sp, 32
+; RV64IDINX-NEXT:    ret
+  %1 = tail call i128 @llvm.fptoui.sat.i128.f64(double %a)
+  ret i128 %1
+}
+declare i128 @llvm.fptoui.sat.i128.f64(double)

diff --git a/llvm/test/CodeGen/RISCV/rv64-float-convert-strict.ll b/llvm/test/CodeGen/RISCV/rv64-float-convert-strict.ll
new file mode 100644
index 00000000000000..86d1801928ea1d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64-float-convert-strict.ll
@@ -0,0 +1,97 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -target-abi=lp64 -verify-machineinstrs < %s \
+; RUN:   -disable-strictnode-mutation | FileCheck %s -check-prefixes=CHECK,RV64I
+; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi=lp64f -verify-machineinstrs < %s \
+; RUN:   -disable-strictnode-mutation | FileCheck %s -check-prefixes=CHECK,RV64IF
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx -target-abi=lp64 -verify-machineinstrs < %s \
+; RUN:   -disable-strictnode-mutation | FileCheck %s -check-prefixes=CHECK,RV64IFINX
+
+define i128 @fptosi_f32_to_i128(float %a) nounwind strictfp {
+; RV64I-LABEL: fptosi_f32_to_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    call __fixsfti
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IF-LABEL: fptosi_f32_to_i128:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    addi sp, sp, -16
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call __fixsfti
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    ret
+;
+; RV64IFINX-LABEL: fptosi_f32_to_i128:
+; RV64IFINX:       # %bb.0:
+; RV64IFINX-NEXT:    addi sp, sp, -16
+; RV64IFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFINX-NEXT:    call __fixsfti
+; RV64IFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFINX-NEXT:    addi sp, sp, 16
+; RV64IFINX-NEXT:    ret
+  %1 = call i128 @llvm.experimental.constrained.fptosi.i128.f32(float %a, metadata !"fpexcept.strict")
+  ret i128 %1
+}
+
+define i128 @fptoui_f32_to_i128(float %a) nounwind strictfp {
+; RV64I-LABEL: fptoui_f32_to_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    call __fixunssfti
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IF-LABEL: fptoui_f32_to_i128:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    addi sp, sp, -16
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call __fixunssfti
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    ret
+;
+; RV64IFINX-LABEL: fptoui_f32_to_i128:
+; RV64IFINX:       # %bb.0:
+; RV64IFINX-NEXT:    addi sp, sp, -16
+; RV64IFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFINX-NEXT:    call __fixunssfti
+; RV64IFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFINX-NEXT:    addi sp, sp, 16
+; RV64IFINX-NEXT:    ret
+  %1 = call i128 @llvm.experimental.constrained.fptoui.i128.f32(float %a, metadata !"fpexcept.strict")
+  ret i128 %1
+}
+
+define float @sitofp_i128_to_f32(i128 %a) nounwind strictfp {
+; CHECK-LABEL: sitofp_i128_to_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    call __floattisf
+; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %1 = call float @llvm.experimental.constrained.sitofp.f32.i128(i128 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret float %1
+}
+
+define float @uitofp_i128_to_f32(i128 %a) nounwind strictfp {
+; CHECK-LABEL: uitofp_i128_to_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    call __floatuntisf
+; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %1 = call float @llvm.experimental.constrained.uitofp.f32.i128(i128 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret float %1
+}

diff --git a/llvm/test/CodeGen/RISCV/rv64-float-convert.ll b/llvm/test/CodeGen/RISCV/rv64-float-convert.ll
new file mode 100644
index 00000000000000..8ebb9433bad79a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64-float-convert.ll
@@ -0,0 +1,317 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -target-abi=lp64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64I
+; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi=lp64f -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64IF
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx -target-abi=lp64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64IZFINX
+
+define i128 @fptosi_f32_to_i128(float %a) nounwind {
+; RV64I-LABEL: fptosi_f32_to_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    call __fixsfti
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IF-LABEL: fptosi_f32_to_i128:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    addi sp, sp, -16
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call __fixsfti
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    ret
+;
+; RV64IZFINX-LABEL: fptosi_f32_to_i128:
+; RV64IZFINX:       # %bb.0:
+; RV64IZFINX-NEXT:    addi sp, sp, -16
+; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call __fixsfti
+; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT:    addi sp, sp, 16
+; RV64IZFINX-NEXT:    ret
+  %1 = fptosi float %a to i128
+  ret i128 %1
+}
+
+define i128 @fptoui_f32_to_i128(float %a) nounwind {
+; RV64I-LABEL: fptoui_f32_to_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    call __fixunssfti
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IF-LABEL: fptoui_f32_to_i128:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    addi sp, sp, -16
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call __fixunssfti
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    ret
+;
+; RV64IZFINX-LABEL: fptoui_f32_to_i128:
+; RV64IZFINX:       # %bb.0:
+; RV64IZFINX-NEXT:    addi sp, sp, -16
+; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call __fixunssfti
+; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT:    addi sp, sp, 16
+; RV64IZFINX-NEXT:    ret
+  %1 = fptoui float %a to i128
+  ret i128 %1
+}
+
+define float @sitofp_i128_to_f32(i128 %a) nounwind {
+; CHECK-LABEL: sitofp_i128_to_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    call __floattisf
+; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %1 = sitofp i128 %a to float
+  ret float %1
+}
+
+define float @uitofp_i128_to_f32(i128 %a) nounwind {
+; CHECK-LABEL: uitofp_i128_to_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    call __floatuntisf
+; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %1 = uitofp i128 %a to float
+  ret float %1
+}
+
+define i128 @fptosi_sat_f32_to_i128(float %a) nounwind {
+; RV64I-LABEL: fptosi_sat_f32_to_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s4, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s5, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a0
+; RV64I-NEXT:    lui a1, 1044480
+; RV64I-NEXT:    call __gesf2
+; RV64I-NEXT:    mv s1, a0
+; RV64I-NEXT:    sext.w a0, s0
+; RV64I-NEXT:    call __fixsfti
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    mv s3, a1
+; RV64I-NEXT:    li s5, -1
+; RV64I-NEXT:    bgez s1, .LBB4_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    slli s3, s5, 63
+; RV64I-NEXT:  .LBB4_2:
+; RV64I-NEXT:    lui a1, 520192
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call __gtsf2
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    blez a0, .LBB4_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    srli s3, s5, 1
+; RV64I-NEXT:  .LBB4_4:
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    mv a1, s0
+; RV64I-NEXT:    call __unordsf2
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a1, a0, s3
+; RV64I-NEXT:    slti a2, s1, 0
+; RV64I-NEXT:    addi a2, a2, -1
+; RV64I-NEXT:    and a2, a2, s2
+; RV64I-NEXT:    sgtz a3, s4
+; RV64I-NEXT:    neg a3, a3
+; RV64I-NEXT:    or a2, a3, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s3, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s4, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s5, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
+;
+; RV64IF-LABEL: fptosi_sat_f32_to_i128:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    addi sp, sp, -32
+; RV64IF-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    fsw fs0, 12(sp) # 4-byte Folded Spill
+; RV64IF-NEXT:    fmv.s fs0, fa0
+; RV64IF-NEXT:    lui a0, 1044480
+; RV64IF-NEXT:    fmv.w.x fa5, a0
+; RV64IF-NEXT:    fle.s s0, fa5, fa0
+; RV64IF-NEXT:    call __fixsfti
+; RV64IF-NEXT:    li a3, -1
+; RV64IF-NEXT:    bnez s0, .LBB4_2
+; RV64IF-NEXT:  # %bb.1:
+; RV64IF-NEXT:    slli a1, a3, 63
+; RV64IF-NEXT:  .LBB4_2:
+; RV64IF-NEXT:    lui a2, %hi(.LCPI4_0)
+; RV64IF-NEXT:    flw fa5, %lo(.LCPI4_0)(a2)
+; RV64IF-NEXT:    flt.s a2, fa5, fs0
+; RV64IF-NEXT:    beqz a2, .LBB4_4
+; RV64IF-NEXT:  # %bb.3:
+; RV64IF-NEXT:    srli a1, a3, 1
+; RV64IF-NEXT:  .LBB4_4:
+; RV64IF-NEXT:    feq.s a3, fs0, fs0
+; RV64IF-NEXT:    neg a3, a3
+; RV64IF-NEXT:    and a1, a3, a1
+; RV64IF-NEXT:    neg a4, s0
+; RV64IF-NEXT:    and a0, a4, a0
+; RV64IF-NEXT:    neg a2, a2
+; RV64IF-NEXT:    or a0, a2, a0
+; RV64IF-NEXT:    and a0, a3, a0
+; RV64IF-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    flw fs0, 12(sp) # 4-byte Folded Reload
+; RV64IF-NEXT:    addi sp, sp, 32
+; RV64IF-NEXT:    ret
+;
+; RV64IZFINX-LABEL: fptosi_sat_f32_to_i128:
+; RV64IZFINX:       # %bb.0:
+; RV64IZFINX-NEXT:    addi sp, sp, -32
+; RV64IZFINX-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    mv s0, a0
+; RV64IZFINX-NEXT:    lui a0, 1044480
+; RV64IZFINX-NEXT:    fle.s s1, a0, s0
+; RV64IZFINX-NEXT:    mv a0, s0
+; RV64IZFINX-NEXT:    call __fixsfti
+; RV64IZFINX-NEXT:    li a2, -1
+; RV64IZFINX-NEXT:    bnez s1, .LBB4_2
+; RV64IZFINX-NEXT:  # %bb.1:
+; RV64IZFINX-NEXT:    slli a1, a2, 63
+; RV64IZFINX-NEXT:  .LBB4_2:
+; RV64IZFINX-NEXT:    lui a3, 520192
+; RV64IZFINX-NEXT:    addiw a3, a3, -1
+; RV64IZFINX-NEXT:    flt.s a3, a3, s0
+; RV64IZFINX-NEXT:    beqz a3, .LBB4_4
+; RV64IZFINX-NEXT:  # %bb.3:
+; RV64IZFINX-NEXT:    srli a1, a2, 1
+; RV64IZFINX-NEXT:  .LBB4_4:
+; RV64IZFINX-NEXT:    feq.s a2, s0, s0
+; RV64IZFINX-NEXT:    neg a2, a2
+; RV64IZFINX-NEXT:    and a1, a2, a1
+; RV64IZFINX-NEXT:    neg a4, s1
+; RV64IZFINX-NEXT:    and a0, a4, a0
+; RV64IZFINX-NEXT:    neg a3, a3
+; RV64IZFINX-NEXT:    or a0, a3, a0
+; RV64IZFINX-NEXT:    and a0, a2, a0
+; RV64IZFINX-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT:    addi sp, sp, 32
+; RV64IZFINX-NEXT:    ret
+  %1 = tail call i128 @llvm.fptosi.sat.i128.f32(float %a)
+  ret i128 %1
+}
+declare i128 @llvm.fptosi.sat.i128.f32(float)
+
+define i128 @fptoui_sat_f32_to_i128(float %a) nounwind {
+; RV64I-LABEL: fptoui_sat_f32_to_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -32
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a0
+; RV64I-NEXT:    lui a1, 522240
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    call __gtsf2
+; RV64I-NEXT:    sgtz a0, a0
+; RV64I-NEXT:    neg s1, a0
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    li a1, 0
+; RV64I-NEXT:    call __gesf2
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    addi s2, a0, -1
+; RV64I-NEXT:    sext.w a0, s0
+; RV64I-NEXT:    call __fixunssfti
+; RV64I-NEXT:    and a0, s2, a0
+; RV64I-NEXT:    or a0, s1, a0
+; RV64I-NEXT:    and a1, s2, a1
+; RV64I-NEXT:    or a1, s1, a1
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    ret
+;
+; RV64IF-LABEL: fptoui_sat_f32_to_i128:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    addi sp, sp, -32
+; RV64IF-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    fsw fs0, 12(sp) # 4-byte Folded Spill
+; RV64IF-NEXT:    fmv.s fs0, fa0
+; RV64IF-NEXT:    fmv.w.x fa5, zero
+; RV64IF-NEXT:    fle.s a0, fa5, fa0
+; RV64IF-NEXT:    neg s0, a0
+; RV64IF-NEXT:    call __fixunssfti
+; RV64IF-NEXT:    lui a2, %hi(.LCPI5_0)
+; RV64IF-NEXT:    flw fa5, %lo(.LCPI5_0)(a2)
+; RV64IF-NEXT:    and a0, s0, a0
+; RV64IF-NEXT:    flt.s a2, fa5, fs0
+; RV64IF-NEXT:    neg a2, a2
+; RV64IF-NEXT:    or a0, a2, a0
+; RV64IF-NEXT:    and a1, s0, a1
+; RV64IF-NEXT:    or a1, a2, a1
+; RV64IF-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    flw fs0, 12(sp) # 4-byte Folded Reload
+; RV64IF-NEXT:    addi sp, sp, 32
+; RV64IF-NEXT:    ret
+;
+; RV64IZFINX-LABEL: fptoui_sat_f32_to_i128:
+; RV64IZFINX:       # %bb.0:
+; RV64IZFINX-NEXT:    addi sp, sp, -32
+; RV64IZFINX-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    mv s0, a0
+; RV64IZFINX-NEXT:    fle.s a0, zero, a0
+; RV64IZFINX-NEXT:    neg s1, a0
+; RV64IZFINX-NEXT:    mv a0, s0
+; RV64IZFINX-NEXT:    call __fixunssfti
+; RV64IZFINX-NEXT:    and a0, s1, a0
+; RV64IZFINX-NEXT:    lui a2, 522240
+; RV64IZFINX-NEXT:    addiw a2, a2, -1
+; RV64IZFINX-NEXT:    flt.s a2, a2, s0
+; RV64IZFINX-NEXT:    neg a2, a2
+; RV64IZFINX-NEXT:    or a0, a2, a0
+; RV64IZFINX-NEXT:    and a1, s1, a1
+; RV64IZFINX-NEXT:    or a1, a2, a1
+; RV64IZFINX-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT:    addi sp, sp, 32
+; RV64IZFINX-NEXT:    ret
+  %1 = tail call i128 @llvm.fptoui.sat.i128.f32(float %a)
+  ret i128 %1
+}
+declare i128 @llvm.fptoui.sat.i128.f32(float)

diff --git a/llvm/test/CodeGen/RISCV/rv64-half-convert-strict.ll b/llvm/test/CodeGen/RISCV/rv64-half-convert-strict.ll
new file mode 100644
index 00000000000000..e9554bce1d5012
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64-half-convert-strict.ll
@@ -0,0 +1,142 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs \
+; RUN:   -target-abi lp64 -disable-strictnode-mutation < %s | \
+; RUN:   FileCheck %s -check-prefixes=CHECK,RV64I
+; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs \
+; RUN:   -target-abi lp64f -disable-strictnode-mutation < %s | \
+; RUN:   FileCheck %s -check-prefixes=CHECK,RV64IZFH
+; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs \
+; RUN:   -target-abi lp64 -disable-strictnode-mutation < %s | \
+; RUN:   FileCheck %s -check-prefixes=CHECK,RV64IZHINX
+
+define i128 @fptosi_f16_to_i128(half %a) nounwind strictfp {
+; RV64I-LABEL: fptosi_f16_to_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    call __fixsfti
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IZFH-LABEL: fptosi_f16_to_i128:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    addi sp, sp, -16
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFH-NEXT:    call __fixhfti
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT:    addi sp, sp, 16
+; RV64IZFH-NEXT:    ret
+;
+; RV64IZHINX-LABEL: fptosi_f16_to_i128:
+; RV64IZHINX:       # %bb.0:
+; RV64IZHINX-NEXT:    addi sp, sp, -16
+; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZHINX-NEXT:    call __fixhfti
+; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZHINX-NEXT:    addi sp, sp, 16
+; RV64IZHINX-NEXT:    ret
+  %1 = call i128 @llvm.experimental.constrained.fptosi.i128.f16(half %a, metadata !"fpexcept.strict")
+  ret i128 %1
+}
+
+define i128 @fptoui_f16_to_i128(half %a) nounwind strictfp {
+; RV64I-LABEL: fptoui_f16_to_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    call __fixunssfti
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IZFH-LABEL: fptoui_f16_to_i128:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    addi sp, sp, -16
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFH-NEXT:    call __fixunshfti
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT:    addi sp, sp, 16
+; RV64IZFH-NEXT:    ret
+;
+; RV64IZHINX-LABEL: fptoui_f16_to_i128:
+; RV64IZHINX:       # %bb.0:
+; RV64IZHINX-NEXT:    addi sp, sp, -16
+; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZHINX-NEXT:    call __fixunshfti
+; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZHINX-NEXT:    addi sp, sp, 16
+; RV64IZHINX-NEXT:    ret
+  %1 = call i128 @llvm.experimental.constrained.fptoui.i128.f16(half %a, metadata !"fpexcept.strict")
+  ret i128 %1
+}
+
+define half @sitofp_i128_to_f16(i128 %a) nounwind strictfp {
+; RV64I-LABEL: sitofp_i128_to_f16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __floattisf
+; RV64I-NEXT:    call __truncsfhf2
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IZFH-LABEL: sitofp_i128_to_f16:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    addi sp, sp, -16
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFH-NEXT:    call __floattihf
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT:    addi sp, sp, 16
+; RV64IZFH-NEXT:    ret
+;
+; RV64IZHINX-LABEL: sitofp_i128_to_f16:
+; RV64IZHINX:       # %bb.0:
+; RV64IZHINX-NEXT:    addi sp, sp, -16
+; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZHINX-NEXT:    call __floattihf
+; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZHINX-NEXT:    addi sp, sp, 16
+; RV64IZHINX-NEXT:    ret
+  %1 = call half @llvm.experimental.constrained.sitofp.f16.i128(i128 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret half %1
+}
+
+define half @uitofp_i128_to_f16(i128 %a) nounwind strictfp {
+; RV64I-LABEL: uitofp_i128_to_f16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __floatuntisf
+; RV64I-NEXT:    call __truncsfhf2
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IZFH-LABEL: uitofp_i128_to_f16:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    addi sp, sp, -16
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFH-NEXT:    call __floatuntihf
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT:    addi sp, sp, 16
+; RV64IZFH-NEXT:    ret
+;
+; RV64IZHINX-LABEL: uitofp_i128_to_f16:
+; RV64IZHINX:       # %bb.0:
+; RV64IZHINX-NEXT:    addi sp, sp, -16
+; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZHINX-NEXT:    call __floatuntihf
+; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZHINX-NEXT:    addi sp, sp, 16
+; RV64IZHINX-NEXT:    ret
+  %1 = call half @llvm.experimental.constrained.uitofp.f16.i128(i128 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret half %1
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}

diff --git a/llvm/test/CodeGen/RISCV/rv64-half-convert.ll b/llvm/test/CodeGen/RISCV/rv64-half-convert.ll
new file mode 100644
index 00000000000000..2cb2ecbd57f65c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64-half-convert.ll
@@ -0,0 +1,361 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs \
+; RUN:   -target-abi lp64 < %s | FileCheck %s -check-prefixes=CHECK,RV64I
+; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs \
+; RUN:   -target-abi lp64f < %s | FileCheck %s -check-prefixes=CHECK,RV64IZFH
+; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs \
+; RUN:   -target-abi lp64 < %s | FileCheck %s -check-prefixes=CHECK,RV64IZHINX
+
+define half @sitofp_i128_to_f16(i128 %a) nounwind {
+; RV64I-LABEL: sitofp_i128_to_f16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __floattisf
+; RV64I-NEXT:    call __truncsfhf2
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IZFH-LABEL: sitofp_i128_to_f16:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    addi sp, sp, -16
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFH-NEXT:    call __floattihf
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT:    addi sp, sp, 16
+; RV64IZFH-NEXT:    ret
+;
+; RV64IZHINX-LABEL: sitofp_i128_to_f16:
+; RV64IZHINX:       # %bb.0:
+; RV64IZHINX-NEXT:    addi sp, sp, -16
+; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZHINX-NEXT:    call __floattihf
+; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZHINX-NEXT:    addi sp, sp, 16
+; RV64IZHINX-NEXT:    ret
+  %1 = sitofp i128 %a to half
+  ret half %1
+}
+
+define half @uitofp_i128_to_f16(i128 %a) nounwind {
+; RV64I-LABEL: uitofp_i128_to_f16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __floatuntisf
+; RV64I-NEXT:    call __truncsfhf2
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IZFH-LABEL: uitofp_i128_to_f16:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    addi sp, sp, -16
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFH-NEXT:    call __floatuntihf
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT:    addi sp, sp, 16
+; RV64IZFH-NEXT:    ret
+;
+; RV64IZHINX-LABEL: uitofp_i128_to_f16:
+; RV64IZHINX:       # %bb.0:
+; RV64IZHINX-NEXT:    addi sp, sp, -16
+; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZHINX-NEXT:    call __floatuntihf
+; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZHINX-NEXT:    addi sp, sp, 16
+; RV64IZHINX-NEXT:    ret
+  %1 = uitofp i128 %a to half
+  ret half %1
+}
+
+define i128 @fptosi_f16_to_i128(half %a) nounwind {
+; RV64I-LABEL: fptosi_f16_to_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    call __fixsfti
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IZFH-LABEL: fptosi_f16_to_i128:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    addi sp, sp, -16
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFH-NEXT:    call __fixhfti
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT:    addi sp, sp, 16
+; RV64IZFH-NEXT:    ret
+;
+; RV64IZHINX-LABEL: fptosi_f16_to_i128:
+; RV64IZHINX:       # %bb.0:
+; RV64IZHINX-NEXT:    addi sp, sp, -16
+; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZHINX-NEXT:    call __fixhfti
+; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZHINX-NEXT:    addi sp, sp, 16
+; RV64IZHINX-NEXT:    ret
+  %1 = fptosi half %a to i128
+  ret i128 %1
+}
+
+define i128 @fptoui_f16_to_i128(half %a) nounwind {
+; RV64I-LABEL: fptoui_f16_to_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    call __fixunssfti
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IZFH-LABEL: fptoui_f16_to_i128:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    addi sp, sp, -16
+; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFH-NEXT:    call __fixunshfti
+; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT:    addi sp, sp, 16
+; RV64IZFH-NEXT:    ret
+;
+; RV64IZHINX-LABEL: fptoui_f16_to_i128:
+; RV64IZHINX:       # %bb.0:
+; RV64IZHINX-NEXT:    addi sp, sp, -16
+; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZHINX-NEXT:    call __fixunshfti
+; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZHINX-NEXT:    addi sp, sp, 16
+; RV64IZHINX-NEXT:    ret
+  %1 = fptoui half %a to i128
+  ret i128 %1
+}
+
+define i128 @fptosi_sat_f16_to_i128(half %a) nounwind {
+; RV64I-LABEL: fptosi_sat_f16_to_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s4, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s5, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    mv s1, a0
+; RV64I-NEXT:    lui a1, 1044480
+; RV64I-NEXT:    call __gesf2
+; RV64I-NEXT:    mv s0, a0
+; RV64I-NEXT:    sext.w a0, s1
+; RV64I-NEXT:    call __fixsfti
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    mv s3, a1
+; RV64I-NEXT:    li s5, -1
+; RV64I-NEXT:    bgez s0, .LBB4_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    slli s3, s5, 63
+; RV64I-NEXT:  .LBB4_2:
+; RV64I-NEXT:    lui a1, 520192
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    mv a0, s1
+; RV64I-NEXT:    call __gtsf2
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    blez a0, .LBB4_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    srli s3, s5, 1
+; RV64I-NEXT:  .LBB4_4:
+; RV64I-NEXT:    mv a0, s1
+; RV64I-NEXT:    mv a1, s1
+; RV64I-NEXT:    call __unordsf2
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a1, a0, s3
+; RV64I-NEXT:    sgtz a2, s4
+; RV64I-NEXT:    neg a2, a2
+; RV64I-NEXT:    slti a3, s0, 0
+; RV64I-NEXT:    addi a3, a3, -1
+; RV64I-NEXT:    and a3, a3, s2
+; RV64I-NEXT:    or a2, a2, a3
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s3, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s4, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s5, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
+;
+; RV64IZFH-LABEL: fptosi_sat_f16_to_i128:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    addi sp, sp, -32
+; RV64IZFH-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64IZFH-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64IZFH-NEXT:    fsw fs0, 12(sp) # 4-byte Folded Spill
+; RV64IZFH-NEXT:    fcvt.s.h fs0, fa0
+; RV64IZFH-NEXT:    lui a0, 1044480
+; RV64IZFH-NEXT:    fmv.w.x fa5, a0
+; RV64IZFH-NEXT:    fle.s s0, fa5, fs0
+; RV64IZFH-NEXT:    fmv.s fa0, fs0
+; RV64IZFH-NEXT:    call __fixsfti
+; RV64IZFH-NEXT:    li a2, -1
+; RV64IZFH-NEXT:    bnez s0, .LBB4_2
+; RV64IZFH-NEXT:  # %bb.1:
+; RV64IZFH-NEXT:    slli a1, a2, 63
+; RV64IZFH-NEXT:  .LBB4_2:
+; RV64IZFH-NEXT:    lui a3, %hi(.LCPI4_0)
+; RV64IZFH-NEXT:    flw fa5, %lo(.LCPI4_0)(a3)
+; RV64IZFH-NEXT:    flt.s a3, fa5, fs0
+; RV64IZFH-NEXT:    beqz a3, .LBB4_4
+; RV64IZFH-NEXT:  # %bb.3:
+; RV64IZFH-NEXT:    srli a1, a2, 1
+; RV64IZFH-NEXT:  .LBB4_4:
+; RV64IZFH-NEXT:    feq.s a2, fs0, fs0
+; RV64IZFH-NEXT:    neg a2, a2
+; RV64IZFH-NEXT:    and a1, a2, a1
+; RV64IZFH-NEXT:    neg a3, a3
+; RV64IZFH-NEXT:    neg a4, s0
+; RV64IZFH-NEXT:    and a0, a4, a0
+; RV64IZFH-NEXT:    or a0, a3, a0
+; RV64IZFH-NEXT:    and a0, a2, a0
+; RV64IZFH-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT:    flw fs0, 12(sp) # 4-byte Folded Reload
+; RV64IZFH-NEXT:    addi sp, sp, 32
+; RV64IZFH-NEXT:    ret
+;
+; RV64IZHINX-LABEL: fptosi_sat_f16_to_i128:
+; RV64IZHINX:       # %bb.0:
+; RV64IZHINX-NEXT:    addi sp, sp, -32
+; RV64IZHINX-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64IZHINX-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64IZHINX-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64IZHINX-NEXT:    fcvt.s.h s0, a0
+; RV64IZHINX-NEXT:    lui a0, 1044480
+; RV64IZHINX-NEXT:    fle.s s1, a0, s0
+; RV64IZHINX-NEXT:    mv a0, s0
+; RV64IZHINX-NEXT:    call __fixsfti
+; RV64IZHINX-NEXT:    li a2, -1
+; RV64IZHINX-NEXT:    bnez s1, .LBB4_2
+; RV64IZHINX-NEXT:  # %bb.1:
+; RV64IZHINX-NEXT:    slli a1, a2, 63
+; RV64IZHINX-NEXT:  .LBB4_2:
+; RV64IZHINX-NEXT:    lui a3, 520192
+; RV64IZHINX-NEXT:    addiw a3, a3, -1
+; RV64IZHINX-NEXT:    flt.s a3, a3, s0
+; RV64IZHINX-NEXT:    beqz a3, .LBB4_4
+; RV64IZHINX-NEXT:  # %bb.3:
+; RV64IZHINX-NEXT:    srli a1, a2, 1
+; RV64IZHINX-NEXT:  .LBB4_4:
+; RV64IZHINX-NEXT:    feq.s a2, s0, s0
+; RV64IZHINX-NEXT:    neg a2, a2
+; RV64IZHINX-NEXT:    and a1, a2, a1
+; RV64IZHINX-NEXT:    neg a3, a3
+; RV64IZHINX-NEXT:    neg a4, s1
+; RV64IZHINX-NEXT:    and a0, a4, a0
+; RV64IZHINX-NEXT:    or a0, a3, a0
+; RV64IZHINX-NEXT:    and a0, a2, a0
+; RV64IZHINX-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64IZHINX-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64IZHINX-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64IZHINX-NEXT:    addi sp, sp, 32
+; RV64IZHINX-NEXT:    ret
+  %1 = tail call i128 @llvm.fptosi.sat.i128.f16(half %a)
+  ret i128 %1
+}
+declare i128 @llvm.fptosi.sat.i128.f16(half)
+
+define i128 @fptoui_sat_f16_to_i128(half %a) nounwind {
+; RV64I-LABEL: fptoui_sat_f16_to_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -32
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __extendhfsf2
+; RV64I-NEXT:    mv s0, a0
+; RV64I-NEXT:    lui a1, 522240
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    call __gtsf2
+; RV64I-NEXT:    sgtz a0, a0
+; RV64I-NEXT:    neg s1, a0
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    li a1, 0
+; RV64I-NEXT:    call __gesf2
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    addi s2, a0, -1
+; RV64I-NEXT:    sext.w a0, s0
+; RV64I-NEXT:    call __fixunssfti
+; RV64I-NEXT:    and a0, s2, a0
+; RV64I-NEXT:    or a0, s1, a0
+; RV64I-NEXT:    and a1, s2, a1
+; RV64I-NEXT:    or a1, s1, a1
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    ret
+;
+; RV64IZFH-LABEL: fptoui_sat_f16_to_i128:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    addi sp, sp, -32
+; RV64IZFH-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64IZFH-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64IZFH-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64IZFH-NEXT:    lui a0, %hi(.LCPI5_0)
+; RV64IZFH-NEXT:    flw fa5, %lo(.LCPI5_0)(a0)
+; RV64IZFH-NEXT:    fcvt.s.h fa0, fa0
+; RV64IZFH-NEXT:    flt.s a0, fa5, fa0
+; RV64IZFH-NEXT:    neg s0, a0
+; RV64IZFH-NEXT:    fmv.w.x fa5, zero
+; RV64IZFH-NEXT:    fle.s a0, fa5, fa0
+; RV64IZFH-NEXT:    neg s1, a0
+; RV64IZFH-NEXT:    call __fixunssfti
+; RV64IZFH-NEXT:    and a0, s1, a0
+; RV64IZFH-NEXT:    or a0, s0, a0
+; RV64IZFH-NEXT:    and a1, s1, a1
+; RV64IZFH-NEXT:    or a1, s0, a1
+; RV64IZFH-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT:    addi sp, sp, 32
+; RV64IZFH-NEXT:    ret
+;
+; RV64IZHINX-LABEL: fptoui_sat_f16_to_i128:
+; RV64IZHINX:       # %bb.0:
+; RV64IZHINX-NEXT:    addi sp, sp, -32
+; RV64IZHINX-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64IZHINX-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64IZHINX-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64IZHINX-NEXT:    fcvt.s.h a0, a0
+; RV64IZHINX-NEXT:    lui a1, 522240
+; RV64IZHINX-NEXT:    addiw a1, a1, -1
+; RV64IZHINX-NEXT:    flt.s a1, a1, a0
+; RV64IZHINX-NEXT:    neg s0, a1
+; RV64IZHINX-NEXT:    fle.s a1, zero, a0
+; RV64IZHINX-NEXT:    neg s1, a1
+; RV64IZHINX-NEXT:    call __fixunssfti
+; RV64IZHINX-NEXT:    and a0, s1, a0
+; RV64IZHINX-NEXT:    or a0, s0, a0
+; RV64IZHINX-NEXT:    and a1, s1, a1
+; RV64IZHINX-NEXT:    or a1, s0, a1
+; RV64IZHINX-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64IZHINX-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64IZHINX-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64IZHINX-NEXT:    addi sp, sp, 32
+; RV64IZHINX-NEXT:    ret
+  %1 = tail call i128 @llvm.fptoui.sat.i128.f16(half %a)
+  ret i128 %1
+}
+declare i128 @llvm.fptoui.sat.i128.f16(half)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
