[llvm] be1cc64 - [RISCV] Add DAG combine to fold (fp_to_int (ffloor X)) -> (fcvt X, rdn)

Craig Topper via llvm-commits llvm-commits@lists.llvm.org
Tue Jan 11 09:06:49 PST 2022


Author: Craig Topper
Date: 2022-01-11T09:05:57-08:00
New Revision: be1cc64cc13cb24bcd9ed097925a140d74beaa45

URL: https://github.com/llvm/llvm-project/commit/be1cc64cc13cb24bcd9ed097925a140d74beaa45
DIFF: https://github.com/llvm/llvm-project/commit/be1cc64cc13cb24bcd9ed097925a140d74beaa45.diff

LOG: [RISCV] Add DAG combine to fold (fp_to_int (ffloor X)) -> (fcvt X, rdn)

Similar for ceil, trunc, round, and roundeven. This allows us to use
static rounding modes to avoid a libcall.

AArch64 does this optimization with isel patterns. RISCV doesn't have
instructions for ceil/floor/trunc/round/roundeven, so those operations
don't stick around until isel where a pattern could match. Thus I've
implemented it as a DAG combine.

We only handle XLen types and i32 on RV64. On RV64, i32 would be type
legalized to a RISCVISD node, so it has to be matched before that
happens. All other types will be type legalized to XLen and keep the
FP_TO_SINT/UINT ISD opcode.
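
As a minimal illustration (mirroring test_floor_si32 from the new
float-round-conv.ll below; the function name is invented for the
example), IR like

  ; illustrative only, not part of the patch
  declare float @llvm.floor.f32(float)

  define signext i32 @floor_to_i32(float %x) {
    %a = call float @llvm.floor.f32(float %x)
    %b = fptosi float %a to i32
    ret i32 %b
  }

now compiles on riscv32 with +f to a single

  fcvt.w.s a0, fa0, rdn
  ret

instead of a call to floorf followed by a separate fcvt.w.s.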

Reviewed By: asb

Differential Revision: https://reviews.llvm.org/D116771

Added: 
    llvm/test/CodeGen/RISCV/double-round-conv.ll
    llvm/test/CodeGen/RISCV/float-round-conv.ll
    llvm/test/CodeGen/RISCV/half-round-conv.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index fd295fedbb9ec..f237cd93329a7 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1041,7 +1041,11 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
   setTargetDAGCombine(ISD::OR);
   setTargetDAGCombine(ISD::XOR);
   setTargetDAGCombine(ISD::ANY_EXTEND);
-  setTargetDAGCombine(ISD::ZERO_EXTEND);
+  if (Subtarget.hasStdExtF()) {
+    setTargetDAGCombine(ISD::ZERO_EXTEND);
+    setTargetDAGCombine(ISD::FP_TO_SINT);
+    setTargetDAGCombine(ISD::FP_TO_UINT);
+  }
   if (Subtarget.hasVInstructions()) {
     setTargetDAGCombine(ISD::FCOPYSIGN);
     setTargetDAGCombine(ISD::MGATHER);
@@ -7052,6 +7056,61 @@ static SDValue combineMUL_VLToVWMUL(SDNode *N, SDValue Op0, SDValue Op1,
   return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
 }
 
+// Fold
+//   (fp_to_int (froundeven X)) -> fcvt X, rne
+//   (fp_to_int (ftrunc X))     -> fcvt X, rtz
+//   (fp_to_int (ffloor X))     -> fcvt X, rdn
+//   (fp_to_int (fceil X))      -> fcvt X, rup
+//   (fp_to_int (fround X))     -> fcvt X, rmm
+// FIXME: We should also do this for fp_to_int_sat.
+static SDValue performFP_TO_INTCombine(SDNode *N,
+                                       TargetLowering::DAGCombinerInfo &DCI,
+                                       const RISCVSubtarget &Subtarget) {
+  SelectionDAG &DAG = DCI.DAG;
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  MVT XLenVT = Subtarget.getXLenVT();
+
+  // Only handle XLen or i32 types. Other types narrower than XLen will
+  // eventually be legalized to XLenVT.
+  EVT VT = N->getValueType(0);
+  if (VT != MVT::i32 && VT != XLenVT)
+    return SDValue();
+
+  SDValue Src = N->getOperand(0);
+
+  // Ensure the FP type is also legal.
+  if (!TLI.isTypeLegal(Src.getValueType()))
+    return SDValue();
+
+  // Don't do this for f16 with Zfhmin and not Zfh.
+  if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
+    return SDValue();
+
+  RISCVFPRndMode::RoundingMode FRM;
+  switch (Src->getOpcode()) {
+  default:
+    return SDValue();
+  case ISD::FROUNDEVEN: FRM = RISCVFPRndMode::RNE; break;
+  case ISD::FTRUNC:     FRM = RISCVFPRndMode::RTZ; break;
+  case ISD::FFLOOR:     FRM = RISCVFPRndMode::RDN; break;
+  case ISD::FCEIL:      FRM = RISCVFPRndMode::RUP; break;
+  case ISD::FROUND:     FRM = RISCVFPRndMode::RMM; break;
+  }
+
+  bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
+
+  unsigned Opc;
+  if (VT == XLenVT)
+    Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
+  else
+    Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
+
+  SDLoc DL(N);
+  SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src.getOperand(0),
+                                DAG.getTargetConstant(FRM, DL, XLenVT));
+  return DAG.getNode(ISD::TRUNCATE, DL, VT, FpToInt);
+}
+
 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
@@ -7381,6 +7440,9 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
     }
     break;
   }
+  case ISD::FP_TO_SINT:
+  case ISD::FP_TO_UINT:
+    return performFP_TO_INTCombine(N, DCI, Subtarget);
   case ISD::FCOPYSIGN: {
     EVT VT = N->getValueType(0);
     if (!VT.isVector())

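As a sketch of what is intentionally not covered (restating
test_floor_si64 from the new double-round-conv.ll below), an i64
result on RV32 is still lowered through libcalls because only XLenVT
and i32 results are handled:

  ; illustrative only, mirrors test_floor_si64 below
  declare double @llvm.floor.f64(double)

  define i64 @floor_to_i64(double %x) {
    %a = call double @llvm.floor.f64(double %x)
    %b = fptosi double %a to i64
    ret i64 %b
  }

On riscv32 with +d this still calls floor and __fixdfdi, while riscv64
folds the whole sequence to fcvt.l.d a0, fa0, rdn. The combine also
bails out for f16 sources when only Zfhmin (not Zfh) is available.
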
diff --git a/llvm/test/CodeGen/RISCV/double-round-conv.ll b/llvm/test/CodeGen/RISCV/double-round-conv.ll
new file mode 100644
index 0000000000000..f83a0fcb86a6f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/double-round-conv.ll
@@ -0,0 +1,681 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   -target-abi=ilp32d | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   -target-abi=lp64d | FileCheck -check-prefix=RV64IFD %s
+
+define signext i8 @test_floor_si8(double %x) {
+; RV32IFD-LABEL: test_floor_si8:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rdn
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_floor_si8:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rdn
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.floor.f64(double %x)
+  %b = fptosi double %a to i8
+  ret i8 %b
+}
+
+define signext i16 @test_floor_si16(double %x) {
+; RV32IFD-LABEL: test_floor_si16:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rdn
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_floor_si16:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rdn
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.floor.f64(double %x)
+  %b = fptosi double %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_floor_si32(double %x) {
+; RV32IFD-LABEL: test_floor_si32:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rdn
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_floor_si32:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.w.d a0, fa0, rdn
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.floor.f64(double %x)
+  %b = fptosi double %a to i32
+  ret i32 %b
+}
+
+define i64 @test_floor_si64(double %x) {
+; RV32IFD-LABEL: test_floor_si64:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    .cfi_offset ra, -4
+; RV32IFD-NEXT:    call floor@plt
+; RV32IFD-NEXT:    call __fixdfdi@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_floor_si64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rdn
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.floor.f64(double %x)
+  %b = fptosi double %a to i64
+  ret i64 %b
+}
+
+define zeroext i8 @test_floor_ui8(double %x) {
+; RV32IFD-LABEL: test_floor_ui8:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rdn
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_floor_ui8:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rdn
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.floor.f64(double %x)
+  %b = fptoui double %a to i8
+  ret i8 %b
+}
+
+define zeroext i16 @test_floor_ui16(double %x) {
+; RV32IFD-LABEL: test_floor_ui16:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rdn
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_floor_ui16:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rdn
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.floor.f64(double %x)
+  %b = fptoui double %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_floor_ui32(double %x) {
+; RV32IFD-LABEL: test_floor_ui32:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rdn
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_floor_ui32:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.wu.d a0, fa0, rdn
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.floor.f64(double %x)
+  %b = fptoui double %a to i32
+  ret i32 %b
+}
+
+define i64 @test_floor_ui64(double %x) {
+; RV32IFD-LABEL: test_floor_ui64:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    .cfi_offset ra, -4
+; RV32IFD-NEXT:    call floor@plt
+; RV32IFD-NEXT:    call __fixunsdfdi@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_floor_ui64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rdn
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.floor.f64(double %x)
+  %b = fptoui double %a to i64
+  ret i64 %b
+}
+
+define signext i8 @test_ceil_si8(double %x) {
+; RV32IFD-LABEL: test_ceil_si8:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rup
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_ceil_si8:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rup
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.ceil.f64(double %x)
+  %b = fptosi double %a to i8
+  ret i8 %b
+}
+
+define signext i16 @test_ceil_si16(double %x) {
+; RV32IFD-LABEL: test_ceil_si16:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rup
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_ceil_si16:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rup
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.ceil.f64(double %x)
+  %b = fptosi double %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_ceil_si32(double %x) {
+; RV32IFD-LABEL: test_ceil_si32:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rup
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_ceil_si32:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.w.d a0, fa0, rup
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.ceil.f64(double %x)
+  %b = fptosi double %a to i32
+  ret i32 %b
+}
+
+define i64 @test_ceil_si64(double %x) {
+; RV32IFD-LABEL: test_ceil_si64:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    .cfi_offset ra, -4
+; RV32IFD-NEXT:    call ceil@plt
+; RV32IFD-NEXT:    call __fixdfdi@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_ceil_si64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rup
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.ceil.f64(double %x)
+  %b = fptosi double %a to i64
+  ret i64 %b
+}
+
+define zeroext i8 @test_ceil_ui8(double %x) {
+; RV32IFD-LABEL: test_ceil_ui8:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rup
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_ceil_ui8:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rup
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.ceil.f64(double %x)
+  %b = fptoui double %a to i8
+  ret i8 %b
+}
+
+define zeroext i16 @test_ceil_ui16(double %x) {
+; RV32IFD-LABEL: test_ceil_ui16:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rup
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_ceil_ui16:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rup
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.ceil.f64(double %x)
+  %b = fptoui double %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_ceil_ui32(double %x) {
+; RV32IFD-LABEL: test_ceil_ui32:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rup
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_ceil_ui32:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.wu.d a0, fa0, rup
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.ceil.f64(double %x)
+  %b = fptoui double %a to i32
+  ret i32 %b
+}
+
+define i64 @test_ceil_ui64(double %x) {
+; RV32IFD-LABEL: test_ceil_ui64:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    .cfi_offset ra, -4
+; RV32IFD-NEXT:    call ceil@plt
+; RV32IFD-NEXT:    call __fixunsdfdi@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_ceil_ui64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rup
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.ceil.f64(double %x)
+  %b = fptoui double %a to i64
+  ret i64 %b
+}
+
+define signext i8 @test_trunc_si8(double %x) {
+; RV32IFD-LABEL: test_trunc_si8:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rtz
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_trunc_si8:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.trunc.f64(double %x)
+  %b = fptosi double %a to i8
+  ret i8 %b
+}
+
+define signext i16 @test_trunc_si16(double %x) {
+; RV32IFD-LABEL: test_trunc_si16:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rtz
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_trunc_si16:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.trunc.f64(double %x)
+  %b = fptosi double %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_trunc_si32(double %x) {
+; RV32IFD-LABEL: test_trunc_si32:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rtz
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_trunc_si32:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.w.d a0, fa0, rtz
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.trunc.f64(double %x)
+  %b = fptosi double %a to i32
+  ret i32 %b
+}
+
+define i64 @test_trunc_si64(double %x) {
+; RV32IFD-LABEL: test_trunc_si64:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    .cfi_offset ra, -4
+; RV32IFD-NEXT:    call trunc@plt
+; RV32IFD-NEXT:    call __fixdfdi@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_trunc_si64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.trunc.f64(double %x)
+  %b = fptosi double %a to i64
+  ret i64 %b
+}
+
+define zeroext i8 @test_trunc_ui8(double %x) {
+; RV32IFD-LABEL: test_trunc_ui8:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_trunc_ui8:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.trunc.f64(double %x)
+  %b = fptoui double %a to i8
+  ret i8 %b
+}
+
+define zeroext i16 @test_trunc_ui16(double %x) {
+; RV32IFD-LABEL: test_trunc_ui16:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_trunc_ui16:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.trunc.f64(double %x)
+  %b = fptoui double %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_trunc_ui32(double %x) {
+; RV32IFD-LABEL: test_trunc_ui32:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_trunc_ui32:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.trunc.f64(double %x)
+  %b = fptoui double %a to i32
+  ret i32 %b
+}
+
+define i64 @test_trunc_ui64(double %x) {
+; RV32IFD-LABEL: test_trunc_ui64:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    .cfi_offset ra, -4
+; RV32IFD-NEXT:    call trunc@plt
+; RV32IFD-NEXT:    call __fixunsdfdi@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_trunc_ui64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.trunc.f64(double %x)
+  %b = fptoui double %a to i64
+  ret i64 %b
+}
+
+define signext i8 @test_round_si8(double %x) {
+; RV32IFD-LABEL: test_round_si8:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rmm
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_round_si8:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.round.f64(double %x)
+  %b = fptosi double %a to i8
+  ret i8 %b
+}
+
+define signext i16 @test_round_si16(double %x) {
+; RV32IFD-LABEL: test_round_si16:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rmm
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_round_si16:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.round.f64(double %x)
+  %b = fptosi double %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_round_si32(double %x) {
+; RV32IFD-LABEL: test_round_si32:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rmm
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_round_si32:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.w.d a0, fa0, rmm
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.round.f64(double %x)
+  %b = fptosi double %a to i32
+  ret i32 %b
+}
+
+define i64 @test_round_si64(double %x) {
+; RV32IFD-LABEL: test_round_si64:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    .cfi_offset ra, -4
+; RV32IFD-NEXT:    call round@plt
+; RV32IFD-NEXT:    call __fixdfdi@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_round_si64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.round.f64(double %x)
+  %b = fptosi double %a to i64
+  ret i64 %b
+}
+
+define zeroext i8 @test_round_ui8(double %x) {
+; RV32IFD-LABEL: test_round_ui8:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rmm
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_round_ui8:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rmm
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.round.f64(double %x)
+  %b = fptoui double %a to i8
+  ret i8 %b
+}
+
+define zeroext i16 @test_round_ui16(double %x) {
+; RV32IFD-LABEL: test_round_ui16:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rmm
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_round_ui16:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rmm
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.round.f64(double %x)
+  %b = fptoui double %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_round_ui32(double %x) {
+; RV32IFD-LABEL: test_round_ui32:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rmm
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_round_ui32:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.wu.d a0, fa0, rmm
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.round.f64(double %x)
+  %b = fptoui double %a to i32
+  ret i32 %b
+}
+
+define i64 @test_round_ui64(double %x) {
+; RV32IFD-LABEL: test_round_ui64:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    .cfi_offset ra, -4
+; RV32IFD-NEXT:    call round@plt
+; RV32IFD-NEXT:    call __fixunsdfdi@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_round_ui64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rmm
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.round.f64(double %x)
+  %b = fptoui double %a to i64
+  ret i64 %b
+}
+
+define signext i8 @test_roundeven_si8(double %x) {
+; RV32IFD-LABEL: test_roundeven_si8:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rne
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_roundeven_si8:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rne
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.roundeven.f64(double %x)
+  %b = fptosi double %a to i8
+  ret i8 %b
+}
+
+define signext i16 @test_roundeven_si16(double %x) {
+; RV32IFD-LABEL: test_roundeven_si16:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rne
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_roundeven_si16:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rne
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.roundeven.f64(double %x)
+  %b = fptosi double %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_roundeven_si32(double %x) {
+; RV32IFD-LABEL: test_roundeven_si32:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rne
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_roundeven_si32:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.w.d a0, fa0, rne
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.roundeven.f64(double %x)
+  %b = fptosi double %a to i32
+  ret i32 %b
+}
+
+define i64 @test_roundeven_si64(double %x) {
+; RV32IFD-LABEL: test_roundeven_si64:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    .cfi_offset ra, -4
+; RV32IFD-NEXT:    call roundeven@plt
+; RV32IFD-NEXT:    call __fixdfdi@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_roundeven_si64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rne
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.roundeven.f64(double %x)
+  %b = fptosi double %a to i64
+  ret i64 %b
+}
+
+define zeroext i8 @test_roundeven_ui8(double %x) {
+; RV32IFD-LABEL: test_roundeven_ui8:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rne
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_roundeven_ui8:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rne
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.roundeven.f64(double %x)
+  %b = fptoui double %a to i8
+  ret i8 %b
+}
+
+define zeroext i16 @test_roundeven_ui16(double %x) {
+; RV32IFD-LABEL: test_roundeven_ui16:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rne
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_roundeven_ui16:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rne
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.roundeven.f64(double %x)
+  %b = fptoui double %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_roundeven_ui32(double %x) {
+; RV32IFD-LABEL: test_roundeven_ui32:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rne
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_roundeven_ui32:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.wu.d a0, fa0, rne
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.roundeven.f64(double %x)
+  %b = fptoui double %a to i32
+  ret i32 %b
+}
+
+define i64 @test_roundeven_ui64(double %x) {
+; RV32IFD-LABEL: test_roundeven_ui64:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    .cfi_offset ra, -4
+; RV32IFD-NEXT:    call roundeven@plt
+; RV32IFD-NEXT:    call __fixunsdfdi@plt
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: test_roundeven_ui64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rne
+; RV64IFD-NEXT:    ret
+  %a = call double @llvm.roundeven.f64(double %x)
+  %b = fptoui double %a to i64
+  ret i64 %b
+}
+
+declare double @llvm.floor.f64(double)
+declare double @llvm.ceil.f64(double)
+declare double @llvm.trunc.f64(double)
+declare double @llvm.round.f64(double)
+declare double @llvm.roundeven.f64(double)

diff --git a/llvm/test/CodeGen/RISCV/float-round-conv.ll b/llvm/test/CodeGen/RISCV/float-round-conv.ll
new file mode 100644
index 0000000000000..5f96c71c4e8fd
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/float-round-conv.ll
@@ -0,0 +1,681 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
+; RUN:   -target-abi=ilp32f | FileCheck -check-prefix=RV32IF %s
+; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
+; RUN:   -target-abi=lp64f | FileCheck -check-prefix=RV64IF %s
+
+define signext i8 @test_floor_si8(float %x) {
+; RV32IF-LABEL: test_floor_si8:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.w.s a0, fa0, rdn
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_floor_si8:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.l.s a0, fa0, rdn
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.floor.f32(float %x)
+  %b = fptosi float %a to i8
+  ret i8 %b
+}
+
+define signext i16 @test_floor_si16(float %x) {
+; RV32IF-LABEL: test_floor_si16:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.w.s a0, fa0, rdn
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_floor_si16:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.l.s a0, fa0, rdn
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.floor.f32(float %x)
+  %b = fptosi float %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_floor_si32(float %x) {
+; RV32IF-LABEL: test_floor_si32:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.w.s a0, fa0, rdn
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_floor_si32:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.w.s a0, fa0, rdn
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.floor.f32(float %x)
+  %b = fptosi float %a to i32
+  ret i32 %b
+}
+
+define i64 @test_floor_si64(float %x) {
+; RV32IF-LABEL: test_floor_si64:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    addi sp, sp, -16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 16
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    .cfi_offset ra, -4
+; RV32IF-NEXT:    call floorf@plt
+; RV32IF-NEXT:    call __fixsfdi@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_floor_si64:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.l.s a0, fa0, rdn
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.floor.f32(float %x)
+  %b = fptosi float %a to i64
+  ret i64 %b
+}
+
+define zeroext i8 @test_floor_ui8(float %x) {
+; RV32IF-LABEL: test_floor_ui8:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rdn
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_floor_ui8:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rdn
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.floor.f32(float %x)
+  %b = fptoui float %a to i8
+  ret i8 %b
+}
+
+define zeroext i16 @test_floor_ui16(float %x) {
+; RV32IF-LABEL: test_floor_ui16:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rdn
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_floor_ui16:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rdn
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.floor.f32(float %x)
+  %b = fptoui float %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_floor_ui32(float %x) {
+; RV32IF-LABEL: test_floor_ui32:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rdn
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_floor_ui32:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rdn
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.floor.f32(float %x)
+  %b = fptoui float %a to i32
+  ret i32 %b
+}
+
+define i64 @test_floor_ui64(float %x) {
+; RV32IF-LABEL: test_floor_ui64:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    addi sp, sp, -16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 16
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    .cfi_offset ra, -4
+; RV32IF-NEXT:    call floorf@plt
+; RV32IF-NEXT:    call __fixunssfdi@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_floor_ui64:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rdn
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.floor.f32(float %x)
+  %b = fptoui float %a to i64
+  ret i64 %b
+}
+
+define signext i8 @test_ceil_si8(float %x) {
+; RV32IF-LABEL: test_ceil_si8:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.w.s a0, fa0, rup
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_ceil_si8:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.l.s a0, fa0, rup
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.ceil.f32(float %x)
+  %b = fptosi float %a to i8
+  ret i8 %b
+}
+
+define signext i16 @test_ceil_si16(float %x) {
+; RV32IF-LABEL: test_ceil_si16:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.w.s a0, fa0, rup
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_ceil_si16:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.l.s a0, fa0, rup
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.ceil.f32(float %x)
+  %b = fptosi float %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_ceil_si32(float %x) {
+; RV32IF-LABEL: test_ceil_si32:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.w.s a0, fa0, rup
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_ceil_si32:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.w.s a0, fa0, rup
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.ceil.f32(float %x)
+  %b = fptosi float %a to i32
+  ret i32 %b
+}
+
+define i64 @test_ceil_si64(float %x) {
+; RV32IF-LABEL: test_ceil_si64:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    addi sp, sp, -16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 16
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    .cfi_offset ra, -4
+; RV32IF-NEXT:    call ceilf@plt
+; RV32IF-NEXT:    call __fixsfdi@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_ceil_si64:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.l.s a0, fa0, rup
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.ceil.f32(float %x)
+  %b = fptosi float %a to i64
+  ret i64 %b
+}
+
+define zeroext i8 @test_ceil_ui8(float %x) {
+; RV32IF-LABEL: test_ceil_ui8:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rup
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_ceil_ui8:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rup
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.ceil.f32(float %x)
+  %b = fptoui float %a to i8
+  ret i8 %b
+}
+
+define zeroext i16 @test_ceil_ui16(float %x) {
+; RV32IF-LABEL: test_ceil_ui16:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rup
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_ceil_ui16:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rup
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.ceil.f32(float %x)
+  %b = fptoui float %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_ceil_ui32(float %x) {
+; RV32IF-LABEL: test_ceil_ui32:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rup
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_ceil_ui32:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rup
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.ceil.f32(float %x)
+  %b = fptoui float %a to i32
+  ret i32 %b
+}
+
+define i64 @test_ceil_ui64(float %x) {
+; RV32IF-LABEL: test_ceil_ui64:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    addi sp, sp, -16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 16
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    .cfi_offset ra, -4
+; RV32IF-NEXT:    call ceilf@plt
+; RV32IF-NEXT:    call __fixunssfdi@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_ceil_ui64:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rup
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.ceil.f32(float %x)
+  %b = fptoui float %a to i64
+  ret i64 %b
+}
+
+define signext i8 @test_trunc_si8(float %x) {
+; RV32IF-LABEL: test_trunc_si8:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_trunc_si8:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.trunc.f32(float %x)
+  %b = fptosi float %a to i8
+  ret i8 %b
+}
+
+define signext i16 @test_trunc_si16(float %x) {
+; RV32IF-LABEL: test_trunc_si16:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_trunc_si16:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.trunc.f32(float %x)
+  %b = fptosi float %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_trunc_si32(float %x) {
+; RV32IF-LABEL: test_trunc_si32:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_trunc_si32:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.w.s a0, fa0, rtz
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.trunc.f32(float %x)
+  %b = fptosi float %a to i32
+  ret i32 %b
+}
+
+define i64 @test_trunc_si64(float %x) {
+; RV32IF-LABEL: test_trunc_si64:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    addi sp, sp, -16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 16
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    .cfi_offset ra, -4
+; RV32IF-NEXT:    call truncf@plt
+; RV32IF-NEXT:    call __fixsfdi@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_trunc_si64:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.trunc.f32(float %x)
+  %b = fptosi float %a to i64
+  ret i64 %b
+}
+
+define zeroext i8 @test_trunc_ui8(float %x) {
+; RV32IF-LABEL: test_trunc_ui8:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_trunc_ui8:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.trunc.f32(float %x)
+  %b = fptoui float %a to i8
+  ret i8 %b
+}
+
+define zeroext i16 @test_trunc_ui16(float %x) {
+; RV32IF-LABEL: test_trunc_ui16:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_trunc_ui16:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.trunc.f32(float %x)
+  %b = fptoui float %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_trunc_ui32(float %x) {
+; RV32IF-LABEL: test_trunc_ui32:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_trunc_ui32:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rtz
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.trunc.f32(float %x)
+  %b = fptoui float %a to i32
+  ret i32 %b
+}
+
+define i64 @test_trunc_ui64(float %x) {
+; RV32IF-LABEL: test_trunc_ui64:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    addi sp, sp, -16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 16
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    .cfi_offset ra, -4
+; RV32IF-NEXT:    call truncf@plt
+; RV32IF-NEXT:    call __fixunssfdi@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_trunc_ui64:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.trunc.f32(float %x)
+  %b = fptoui float %a to i64
+  ret i64 %b
+}
+
+define signext i8 @test_round_si8(float %x) {
+; RV32IF-LABEL: test_round_si8:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.w.s a0, fa0, rmm
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_round_si8:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.l.s a0, fa0, rmm
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.round.f32(float %x)
+  %b = fptosi float %a to i8
+  ret i8 %b
+}
+
+define signext i16 @test_round_si16(float %x) {
+; RV32IF-LABEL: test_round_si16:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.w.s a0, fa0, rmm
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_round_si16:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.l.s a0, fa0, rmm
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.round.f32(float %x)
+  %b = fptosi float %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_round_si32(float %x) {
+; RV32IF-LABEL: test_round_si32:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.w.s a0, fa0, rmm
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_round_si32:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.w.s a0, fa0, rmm
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.round.f32(float %x)
+  %b = fptosi float %a to i32
+  ret i32 %b
+}
+
+define i64 @test_round_si64(float %x) {
+; RV32IF-LABEL: test_round_si64:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    addi sp, sp, -16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 16
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    .cfi_offset ra, -4
+; RV32IF-NEXT:    call roundf@plt
+; RV32IF-NEXT:    call __fixsfdi@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_round_si64:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.l.s a0, fa0, rmm
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.round.f32(float %x)
+  %b = fptosi float %a to i64
+  ret i64 %b
+}
+
+define zeroext i8 @test_round_ui8(float %x) {
+; RV32IF-LABEL: test_round_ui8:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rmm
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_round_ui8:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rmm
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.round.f32(float %x)
+  %b = fptoui float %a to i8
+  ret i8 %b
+}
+
+define zeroext i16 @test_round_ui16(float %x) {
+; RV32IF-LABEL: test_round_ui16:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rmm
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_round_ui16:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rmm
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.round.f32(float %x)
+  %b = fptoui float %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_round_ui32(float %x) {
+; RV32IF-LABEL: test_round_ui32:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rmm
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_round_ui32:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rmm
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.round.f32(float %x)
+  %b = fptoui float %a to i32
+  ret i32 %b
+}
+
+define i64 @test_round_ui64(float %x) {
+; RV32IF-LABEL: test_round_ui64:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    addi sp, sp, -16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 16
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    .cfi_offset ra, -4
+; RV32IF-NEXT:    call roundf@plt
+; RV32IF-NEXT:    call __fixunssfdi@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_round_ui64:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rmm
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.round.f32(float %x)
+  %b = fptoui float %a to i64
+  ret i64 %b
+}
+
+define signext i8 @test_roundeven_si8(float %x) {
+; RV32IF-LABEL: test_roundeven_si8:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.w.s a0, fa0, rne
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_roundeven_si8:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.l.s a0, fa0, rne
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.roundeven.f32(float %x)
+  %b = fptosi float %a to i8
+  ret i8 %b
+}
+
+define signext i16 @test_roundeven_si16(float %x) {
+; RV32IF-LABEL: test_roundeven_si16:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.w.s a0, fa0, rne
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_roundeven_si16:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.l.s a0, fa0, rne
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.roundeven.f32(float %x)
+  %b = fptosi float %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_roundeven_si32(float %x) {
+; RV32IF-LABEL: test_roundeven_si32:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.w.s a0, fa0, rne
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_roundeven_si32:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.w.s a0, fa0, rne
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.roundeven.f32(float %x)
+  %b = fptosi float %a to i32
+  ret i32 %b
+}
+
+define i64 @test_roundeven_si64(float %x) {
+; RV32IF-LABEL: test_roundeven_si64:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    addi sp, sp, -16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 16
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    .cfi_offset ra, -4
+; RV32IF-NEXT:    call roundevenf@plt
+; RV32IF-NEXT:    call __fixsfdi@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_roundeven_si64:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.l.s a0, fa0, rne
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.roundeven.f32(float %x)
+  %b = fptosi float %a to i64
+  ret i64 %b
+}
+
+define zeroext i8 @test_roundeven_ui8(float %x) {
+; RV32IF-LABEL: test_roundeven_ui8:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rne
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_roundeven_ui8:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rne
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.roundeven.f32(float %x)
+  %b = fptoui float %a to i8
+  ret i8 %b
+}
+
+define zeroext i16 @test_roundeven_ui16(float %x) {
+; RV32IF-LABEL: test_roundeven_ui16:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rne
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_roundeven_ui16:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rne
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.roundeven.f32(float %x)
+  %b = fptoui float %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_roundeven_ui32(float %x) {
+; RV32IF-LABEL: test_roundeven_ui32:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rne
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_roundeven_ui32:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rne
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.roundeven.f32(float %x)
+  %b = fptoui float %a to i32
+  ret i32 %b
+}
+
+define i64 @test_roundeven_ui64(float %x) {
+; RV32IF-LABEL: test_roundeven_ui64:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    addi sp, sp, -16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 16
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    .cfi_offset ra, -4
+; RV32IF-NEXT:    call roundevenf@plt
+; RV32IF-NEXT:    call __fixunssfdi@plt
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_roundeven_ui64:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rne
+; RV64IF-NEXT:    ret
+  %a = call float @llvm.roundeven.f32(float %x)
+  %b = fptoui float %a to i64
+  ret i64 %b
+}
+
+declare float @llvm.floor.f32(float)
+declare float @llvm.ceil.f32(float)
+declare float @llvm.trunc.f32(float)
+declare float @llvm.round.f32(float)
+declare float @llvm.roundeven.f32(float)

diff --git a/llvm/test/CodeGen/RISCV/half-round-conv.ll b/llvm/test/CodeGen/RISCV/half-round-conv.ll
new file mode 100644
index 0000000000000..4d2dfe66e3c74
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/half-round-conv.ll
@@ -0,0 +1,701 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zfh -verify-machineinstrs < %s \
+; RUN:   -target-abi=ilp32f | FileCheck -check-prefix=RV32IZFH %s
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zfh -verify-machineinstrs < %s \
+; RUN:   -target-abi=lp64f | FileCheck -check-prefix=RV64IZFH %s
+
+define signext i8 @test_floor_si8(half %x) {
+; RV32IZFH-LABEL: test_floor_si8:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rdn
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_floor_si8:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.l.h a0, fa0, rdn
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.floor.f16(half %x)
+  %b = fptosi half %a to i8
+  ret i8 %b
+}
+
+define signext i16 @test_floor_si16(half %x) {
+; RV32IZFH-LABEL: test_floor_si16:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rdn
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_floor_si16:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.l.h a0, fa0, rdn
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.floor.f16(half %x)
+  %b = fptosi half %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_floor_si32(half %x) {
+; RV32IZFH-LABEL: test_floor_si32:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rdn
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_floor_si32:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.w.h a0, fa0, rdn
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.floor.f16(half %x)
+  %b = fptosi half %a to i32
+  ret i32 %b
+}
+
+define i64 @test_floor_si64(half %x) {
+; RV32IZFH-LABEL: test_floor_si64:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    addi sp, sp, -16
+; RV32IZFH-NEXT:    .cfi_def_cfa_offset 16
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFH-NEXT:    .cfi_offset ra, -4
+; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
+; RV32IZFH-NEXT:    call floorf@plt
+; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
+; RV32IZFH-NEXT:    call __fixhfdi@plt
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT:    addi sp, sp, 16
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_floor_si64:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.l.h a0, fa0, rdn
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.floor.f16(half %x)
+  %b = fptosi half %a to i64
+  ret i64 %b
+}
+
+define zeroext i8 @test_floor_ui8(half %x) {
+; RV32IZFH-LABEL: test_floor_ui8:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.wu.h a0, fa0, rdn
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_floor_ui8:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.lu.h a0, fa0, rdn
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.floor.f16(half %x)
+  %b = fptoui half %a to i8
+  ret i8 %b
+}
+
+define zeroext i16 @test_floor_ui16(half %x) {
+; RV32IZFH-LABEL: test_floor_ui16:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.wu.h a0, fa0, rdn
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_floor_ui16:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.lu.h a0, fa0, rdn
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.floor.f16(half %x)
+  %b = fptoui half %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_floor_ui32(half %x) {
+; RV32IZFH-LABEL: test_floor_ui32:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.wu.h a0, fa0, rdn
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_floor_ui32:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.wu.h a0, fa0, rdn
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.floor.f16(half %x)
+  %b = fptoui half %a to i32
+  ret i32 %b
+}
+
+define i64 @test_floor_ui64(half %x) {
+; RV32IZFH-LABEL: test_floor_ui64:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    addi sp, sp, -16
+; RV32IZFH-NEXT:    .cfi_def_cfa_offset 16
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFH-NEXT:    .cfi_offset ra, -4
+; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
+; RV32IZFH-NEXT:    call floorf@plt
+; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
+; RV32IZFH-NEXT:    call __fixunshfdi@plt
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT:    addi sp, sp, 16
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_floor_ui64:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.lu.h a0, fa0, rdn
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.floor.f16(half %x)
+  %b = fptoui half %a to i64
+  ret i64 %b
+}
+
+define signext i8 @test_ceil_si8(half %x) {
+; RV32IZFH-LABEL: test_ceil_si8:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rup
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_ceil_si8:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.l.h a0, fa0, rup
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.ceil.f16(half %x)
+  %b = fptosi half %a to i8
+  ret i8 %b
+}
+
+define signext i16 @test_ceil_si16(half %x) {
+; RV32IZFH-LABEL: test_ceil_si16:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rup
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_ceil_si16:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.l.h a0, fa0, rup
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.ceil.f16(half %x)
+  %b = fptosi half %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_ceil_si32(half %x) {
+; RV32IZFH-LABEL: test_ceil_si32:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rup
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_ceil_si32:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.w.h a0, fa0, rup
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.ceil.f16(half %x)
+  %b = fptosi half %a to i32
+  ret i32 %b
+}
+
+define i64 @test_ceil_si64(half %x) {
+; RV32IZFH-LABEL: test_ceil_si64:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    addi sp, sp, -16
+; RV32IZFH-NEXT:    .cfi_def_cfa_offset 16
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFH-NEXT:    .cfi_offset ra, -4
+; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
+; RV32IZFH-NEXT:    call ceilf@plt
+; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
+; RV32IZFH-NEXT:    call __fixhfdi@plt
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT:    addi sp, sp, 16
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_ceil_si64:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.l.h a0, fa0, rup
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.ceil.f16(half %x)
+  %b = fptosi half %a to i64
+  ret i64 %b
+}
+
+define zeroext i8 @test_ceil_ui8(half %x) {
+; RV32IZFH-LABEL: test_ceil_ui8:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.wu.h a0, fa0, rup
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_ceil_ui8:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.lu.h a0, fa0, rup
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.ceil.f16(half %x)
+  %b = fptoui half %a to i8
+  ret i8 %b
+}
+
+define zeroext i16 @test_ceil_ui16(half %x) {
+; RV32IZFH-LABEL: test_ceil_ui16:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.wu.h a0, fa0, rup
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_ceil_ui16:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.lu.h a0, fa0, rup
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.ceil.f16(half %x)
+  %b = fptoui half %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_ceil_ui32(half %x) {
+; RV32IZFH-LABEL: test_ceil_ui32:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.wu.h a0, fa0, rup
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_ceil_ui32:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.wu.h a0, fa0, rup
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.ceil.f16(half %x)
+  %b = fptoui half %a to i32
+  ret i32 %b
+}
+
+define i64 @test_ceil_ui64(half %x) {
+; RV32IZFH-LABEL: test_ceil_ui64:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    addi sp, sp, -16
+; RV32IZFH-NEXT:    .cfi_def_cfa_offset 16
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFH-NEXT:    .cfi_offset ra, -4
+; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
+; RV32IZFH-NEXT:    call ceilf@plt
+; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
+; RV32IZFH-NEXT:    call __fixunshfdi@plt
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT:    addi sp, sp, 16
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_ceil_ui64:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.lu.h a0, fa0, rup
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.ceil.f16(half %x)
+  %b = fptoui half %a to i64
+  ret i64 %b
+}
+
+define signext i8 @test_trunc_si8(half %x) {
+; RV32IZFH-LABEL: test_trunc_si8:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rtz
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_trunc_si8:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.l.h a0, fa0, rtz
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.trunc.f16(half %x)
+  %b = fptosi half %a to i8
+  ret i8 %b
+}
+
+define signext i16 @test_trunc_si16(half %x) {
+; RV32IZFH-LABEL: test_trunc_si16:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rtz
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_trunc_si16:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.l.h a0, fa0, rtz
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.trunc.f16(half %x)
+  %b = fptosi half %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_trunc_si32(half %x) {
+; RV32IZFH-LABEL: test_trunc_si32:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rtz
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_trunc_si32:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.w.h a0, fa0, rtz
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.trunc.f16(half %x)
+  %b = fptosi half %a to i32
+  ret i32 %b
+}
+
+define i64 @test_trunc_si64(half %x) {
+; RV32IZFH-LABEL: test_trunc_si64:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    addi sp, sp, -16
+; RV32IZFH-NEXT:    .cfi_def_cfa_offset 16
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFH-NEXT:    .cfi_offset ra, -4
+; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
+; RV32IZFH-NEXT:    call truncf@plt
+; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
+; RV32IZFH-NEXT:    call __fixhfdi@plt
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT:    addi sp, sp, 16
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_trunc_si64:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.l.h a0, fa0, rtz
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.trunc.f16(half %x)
+  %b = fptosi half %a to i64
+  ret i64 %b
+}
+
+define zeroext i8 @test_trunc_ui8(half %x) {
+; RV32IZFH-LABEL: test_trunc_ui8:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.wu.h a0, fa0, rtz
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_trunc_ui8:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.lu.h a0, fa0, rtz
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.trunc.f16(half %x)
+  %b = fptoui half %a to i8
+  ret i8 %b
+}
+
+define zeroext i16 @test_trunc_ui16(half %x) {
+; RV32IZFH-LABEL: test_trunc_ui16:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.wu.h a0, fa0, rtz
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_trunc_ui16:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.lu.h a0, fa0, rtz
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.trunc.f16(half %x)
+  %b = fptoui half %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_trunc_ui32(half %x) {
+; RV32IZFH-LABEL: test_trunc_ui32:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.wu.h a0, fa0, rtz
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_trunc_ui32:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.wu.h a0, fa0, rtz
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.trunc.f16(half %x)
+  %b = fptoui half %a to i32
+  ret i32 %b
+}
+
+define i64 @test_trunc_ui64(half %x) {
+; RV32IZFH-LABEL: test_trunc_ui64:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    addi sp, sp, -16
+; RV32IZFH-NEXT:    .cfi_def_cfa_offset 16
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFH-NEXT:    .cfi_offset ra, -4
+; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
+; RV32IZFH-NEXT:    call truncf@plt
+; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
+; RV32IZFH-NEXT:    call __fixunshfdi@plt
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT:    addi sp, sp, 16
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_trunc_ui64:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.lu.h a0, fa0, rtz
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.trunc.f16(half %x)
+  %b = fptoui half %a to i64
+  ret i64 %b
+}
+
+define signext i8 @test_round_si8(half %x) {
+; RV32IZFH-LABEL: test_round_si8:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rmm
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_round_si8:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.l.h a0, fa0, rmm
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.round.f16(half %x)
+  %b = fptosi half %a to i8
+  ret i8 %b
+}
+
+define signext i16 @test_round_si16(half %x) {
+; RV32IZFH-LABEL: test_round_si16:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rmm
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_round_si16:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.l.h a0, fa0, rmm
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.round.f16(half %x)
+  %b = fptosi half %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_round_si32(half %x) {
+; RV32IZFH-LABEL: test_round_si32:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rmm
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_round_si32:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.w.h a0, fa0, rmm
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.round.f16(half %x)
+  %b = fptosi half %a to i32
+  ret i32 %b
+}
+
+define i64 @test_round_si64(half %x) {
+; RV32IZFH-LABEL: test_round_si64:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    addi sp, sp, -16
+; RV32IZFH-NEXT:    .cfi_def_cfa_offset 16
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFH-NEXT:    .cfi_offset ra, -4
+; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
+; RV32IZFH-NEXT:    call roundf@plt
+; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
+; RV32IZFH-NEXT:    call __fixhfdi@plt
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT:    addi sp, sp, 16
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_round_si64:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.l.h a0, fa0, rmm
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.round.f16(half %x)
+  %b = fptosi half %a to i64
+  ret i64 %b
+}
+
+define zeroext i8 @test_round_ui8(half %x) {
+; RV32IZFH-LABEL: test_round_ui8:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.wu.h a0, fa0, rmm
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_round_ui8:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.lu.h a0, fa0, rmm
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.round.f16(half %x)
+  %b = fptoui half %a to i8
+  ret i8 %b
+}
+
+define zeroext i16 @test_round_ui16(half %x) {
+; RV32IZFH-LABEL: test_round_ui16:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.wu.h a0, fa0, rmm
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_round_ui16:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.lu.h a0, fa0, rmm
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.round.f16(half %x)
+  %b = fptoui half %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_round_ui32(half %x) {
+; RV32IZFH-LABEL: test_round_ui32:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.wu.h a0, fa0, rmm
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_round_ui32:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.wu.h a0, fa0, rmm
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.round.f16(half %x)
+  %b = fptoui half %a to i32
+  ret i32 %b
+}
+
+define i64 @test_round_ui64(half %x) {
+; RV32IZFH-LABEL: test_round_ui64:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    addi sp, sp, -16
+; RV32IZFH-NEXT:    .cfi_def_cfa_offset 16
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFH-NEXT:    .cfi_offset ra, -4
+; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
+; RV32IZFH-NEXT:    call roundf@plt
+; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
+; RV32IZFH-NEXT:    call __fixunshfdi@plt
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT:    addi sp, sp, 16
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_round_ui64:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.lu.h a0, fa0, rmm
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.round.f16(half %x)
+  %b = fptoui half %a to i64
+  ret i64 %b
+}
+
+define signext i8 @test_roundeven_si8(half %x) {
+; RV32IZFH-LABEL: test_roundeven_si8:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rne
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_roundeven_si8:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.l.h a0, fa0, rne
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.roundeven.f16(half %x)
+  %b = fptosi half %a to i8
+  ret i8 %b
+}
+
+define signext i16 @test_roundeven_si16(half %x) {
+; RV32IZFH-LABEL: test_roundeven_si16:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rne
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_roundeven_si16:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.l.h a0, fa0, rne
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.roundeven.f16(half %x)
+  %b = fptosi half %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_roundeven_si32(half %x) {
+; RV32IZFH-LABEL: test_roundeven_si32:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rne
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_roundeven_si32:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.w.h a0, fa0, rne
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.roundeven.f16(half %x)
+  %b = fptosi half %a to i32
+  ret i32 %b
+}
+
+define i64 @test_roundeven_si64(half %x) {
+; RV32IZFH-LABEL: test_roundeven_si64:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    addi sp, sp, -16
+; RV32IZFH-NEXT:    .cfi_def_cfa_offset 16
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFH-NEXT:    .cfi_offset ra, -4
+; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
+; RV32IZFH-NEXT:    call roundevenf@plt
+; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
+; RV32IZFH-NEXT:    call __fixhfdi@plt
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT:    addi sp, sp, 16
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_roundeven_si64:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.l.h a0, fa0, rne
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.roundeven.f16(half %x)
+  %b = fptosi half %a to i64
+  ret i64 %b
+}
+
+define zeroext i8 @test_roundeven_ui8(half %x) {
+; RV32IZFH-LABEL: test_roundeven_ui8:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.wu.h a0, fa0, rne
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_roundeven_ui8:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.lu.h a0, fa0, rne
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.roundeven.f16(half %x)
+  %b = fptoui half %a to i8
+  ret i8 %b
+}
+
+define zeroext i16 @test_roundeven_ui16(half %x) {
+; RV32IZFH-LABEL: test_roundeven_ui16:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.wu.h a0, fa0, rne
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_roundeven_ui16:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.lu.h a0, fa0, rne
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.roundeven.f16(half %x)
+  %b = fptoui half %a to i16
+  ret i16 %b
+}
+
+define signext i32 @test_roundeven_ui32(half %x) {
+; RV32IZFH-LABEL: test_roundeven_ui32:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.wu.h a0, fa0, rne
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_roundeven_ui32:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.wu.h a0, fa0, rne
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.roundeven.f16(half %x)
+  %b = fptoui half %a to i32
+  ret i32 %b
+}
+
+define i64 @test_roundeven_ui64(half %x) {
+; RV32IZFH-LABEL: test_roundeven_ui64:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    addi sp, sp, -16
+; RV32IZFH-NEXT:    .cfi_def_cfa_offset 16
+; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFH-NEXT:    .cfi_offset ra, -4
+; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
+; RV32IZFH-NEXT:    call roundevenf@plt
+; RV32IZFH-NEXT:    fcvt.h.s fa0, fa0
+; RV32IZFH-NEXT:    call __fixunshfdi@plt
+; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT:    addi sp, sp, 16
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: test_roundeven_ui64:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.lu.h a0, fa0, rne
+; RV64IZFH-NEXT:    ret
+  %a = call half @llvm.roundeven.f16(half %x)
+  %b = fptoui half %a to i64
+  ret i64 %b
+}
+
+declare half @llvm.floor.f16(half)
+declare half @llvm.ceil.f16(half)
+declare half @llvm.trunc.f16(half)
+declare half @llvm.round.f16(half)
+declare half @llvm.roundeven.f16(half)