[llvm] 5d30565 - [RISCV] Improve vector fround lowering by changing FRM.

Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Tue Sep 6 09:33:38 PDT 2022


Author: Craig Topper
Date: 2022-09-06T09:33:13-07:00
New Revision: 5d30565d807f66774d2c057c5a3e37e9e9fc2197

URL: https://github.com/llvm/llvm-project/commit/5d30565d807f66774d2c057c5a3e37e9e9fc2197
DIFF: https://github.com/llvm/llvm-project/commit/5d30565d807f66774d2c057c5a3e37e9e9fc2197.diff

LOG: [RISCV] Improve vector fround lowering by changing FRM.

This is a follow-up to D133238, which did the same for ceil/floor.
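
At a high level, the old lowering emulated llvm.round's round-to-nearest-ties-away
behavior with floor(X + copysign(nextafter(0.5, 0.0), X)); the new lowering instead
reuses the masked vfcvt.x.f.v conversion and temporarily switches the dynamic
rounding mode (FRM) to RMM. A sketch of the resulting sequence, as the updated
tests below show:

    vfabs.v     v9, v8            # |x|
    vmflt.vf    v0, v9, ft0       # mask: |x| < 2^(precision-1); excludes nan and
                                  # magnitudes that are already integral
    fsrmi       a0, 4             # save FRM, set RMM (nearest, ties away from zero)
    vfcvt.x.f.v v9, v8, v0.t      # FP -> integer using the dynamic rounding mode
    fsrm        a0                # restore the original FRM
    vfcvt.f.x.v v9, v9, v0.t      # integer -> FP
    vfsgnj.vv   v8, v9, v8, v0.t  # reapply the original sign; masked-off lanes
                                  # keep the original value

Compared to the old sequence this drops the vfadd.vf and the second constant-pool
load (a lui/flh pair) while adding fsrmi/fsrm, a net of one instruction fewer,
which is why the cost-model entries below change from 10 to 9.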

Reviewed By: arcbbb, frasercrmck

Differential Revision: https://reviews.llvm.org/D133335
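
For reference: llvm.round / ISD::FROUND rounds to the nearest integer with ties
away from zero, which is exactly the RISC-V RMM rounding mode (FRM encoding 4,
the immediate in the new fsrmi). A minimal standalone C++ sketch, written just
to pin down those semantics (not part of the patch):

    #include <cmath>
    #include <cstdio>

    int main() {
      // std::round matches llvm.round / ISD::FROUND semantics:
      // nearest integer, ties away from zero (not ties-to-even).
      std::printf("%.1f %.1f\n", std::round(0.5), std::round(-0.5)); // 1.0 -1.0
      std::printf("%.1f %.1f\n", std::round(1.5), std::round(2.5));  // 2.0 3.0
      return 0;
    }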

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
    llvm/test/Analysis/CostModel/RISCV/fround.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
    llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 21d3fc730b526..2551860f97bcf 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1836,11 +1836,24 @@ static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
 }
 
-// Expand vector FTRUNC, FCEIL, and FFLOOR by converting to the integer domain
-// and back. Taking care to avoid converting values that are nan or already
-// correct.
-static SDValue lowerFTRUNC_FCEIL_FFLOOR(SDValue Op, SelectionDAG &DAG,
-                                        const RISCVSubtarget &Subtarget) {
+static RISCVFPRndMode::RoundingMode matchRoundingOp(unsigned Opc) {
+  switch (Opc) {
+  case ISD::FROUNDEVEN: return RISCVFPRndMode::RNE;
+  case ISD::FTRUNC:     return RISCVFPRndMode::RTZ;
+  case ISD::FFLOOR:     return RISCVFPRndMode::RDN;
+  case ISD::FCEIL:      return RISCVFPRndMode::RUP;
+  case ISD::FROUND:     return RISCVFPRndMode::RMM;
+  }
+
+  return RISCVFPRndMode::Invalid;
+}
+
+// Expand vector FTRUNC, FCEIL, FFLOOR, and FROUND by converting to the integer
+// domain and back. Taking care to avoid converting values that are nan or
+// already correct.
+static SDValue
+lowerFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
+                                const RISCVSubtarget &Subtarget) {
   MVT VT = Op.getSimpleValueType();
   assert(VT.isVector() && "Unexpected type");
 
@@ -1892,15 +1905,14 @@ static SDValue lowerFTRUNC_FCEIL_FFLOOR(SDValue Op, SelectionDAG &DAG,
   default:
     llvm_unreachable("Unexpected opcode");
   case ISD::FCEIL:
-    Truncated =
-        DAG.getNode(RISCVISD::VFCVT_X_F_VL, DL, IntVT, Src, Mask,
-                    DAG.getTargetConstant(RISCVFPRndMode::RUP, DL, XLenVT), VL);
-    break;
   case ISD::FFLOOR:
-    Truncated =
-        DAG.getNode(RISCVISD::VFCVT_X_F_VL, DL, IntVT, Src, Mask,
-                    DAG.getTargetConstant(RISCVFPRndMode::RDN, DL, XLenVT), VL);
+  case ISD::FROUND: {
+    RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Op.getOpcode());
+    assert(FRM != RISCVFPRndMode::Invalid);
+    Truncated = DAG.getNode(RISCVISD::VFCVT_X_F_VL, DL, IntVT, Src, Mask,
+                            DAG.getTargetConstant(FRM, DL, XLenVT), VL);
     break;
+  }
   case ISD::FTRUNC:
     Truncated = DAG.getNode(RISCVISD::FP_TO_SINT_VL, DL, IntVT, Src, Mask, VL);
     break;
@@ -1919,88 +1931,6 @@ static SDValue lowerFTRUNC_FCEIL_FFLOOR(SDValue Op, SelectionDAG &DAG,
   return convertFromScalableVector(VT, Truncated, DAG, Subtarget);
 }
 
-// ISD::FROUND is defined to round to nearest with ties rounding away from 0.
-// This mode isn't supported in vector hardware on RISCV. But as long as we
-// aren't compiling with trapping math, we can emulate this with
-// floor(X + copysign(nextafter(0.5, 0.0), X)).
-// FIXME: Could be shorter by changing rounding mode, but we don't have FRM
-// dependencies modeled yet.
-static SDValue lowerFROUND(SDValue Op, SelectionDAG &DAG,
-                           const RISCVSubtarget &Subtarget) {
-  MVT VT = Op.getSimpleValueType();
-  assert(VT.isVector() && "Unexpected type");
-
-  SDLoc DL(Op);
-
-  SDValue Src = Op.getOperand(0);
-
-  MVT ContainerVT = VT;
-  if (VT.isFixedLengthVector()) {
-    ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
-    Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
-  }
-
-  SDValue TrueMask, VL;
-  std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
-
-  // Freeze the source since we are increasing the number of uses.
-  Src = DAG.getFreeze(Src);
-
-  // We do the conversion on the absolute value and fix the sign at the end.
-  SDValue Abs =
-      DAG.getNode(RISCVISD::FABS_VL, DL, ContainerVT, Src, TrueMask, VL);
-
-  // Determine the largest integer that can be represented exactly. This and
-  // values larger than it don't have any fractional bits so don't need to
-  // be converted.
-  const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(ContainerVT);
-  unsigned Precision = APFloat::semanticsPrecision(FltSem);
-  APFloat MaxVal = APFloat(FltSem);
-  MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
-                          /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
-  SDValue MaxValNode =
-      DAG.getConstantFP(MaxVal, DL, ContainerVT.getVectorElementType());
-  SDValue MaxValSplat = DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, ContainerVT,
-                                    DAG.getUNDEF(ContainerVT), MaxValNode, VL);
-
-  // If abs(Src) was larger than MaxVal or nan, keep it.
-  MVT SetccVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
-  SDValue Mask = DAG.getNode(RISCVISD::SETCC_VL, DL, SetccVT,
-                             {Abs, MaxValSplat, DAG.getCondCode(ISD::SETOLT),
-                              DAG.getUNDEF(SetccVT), TrueMask, VL});
-
-  bool Ignored;
-  APFloat Point5Pred = APFloat(0.5f);
-  Point5Pred.convert(FltSem, APFloat::rmNearestTiesToEven, &Ignored);
-  Point5Pred.next(/*nextDown*/ true);
-  SDValue SplatVal =
-      DAG.getConstantFP(Point5Pred, DL, ContainerVT.getVectorElementType());
-  SDValue Splat = DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, ContainerVT,
-                              DAG.getUNDEF(ContainerVT), SplatVal, VL);
-
-  // Add the adjustment.
-  SDValue Adjust = DAG.getNode(RISCVISD::FADD_VL, DL, ContainerVT, Abs, Splat,
-                               DAG.getUNDEF(ContainerVT), Mask, VL);
-
-  // Truncate to integer and convert back to fp.
-  MVT IntVT = ContainerVT.changeVectorElementTypeToInteger();
-  SDValue Truncated =
-      DAG.getNode(RISCVISD::FP_TO_SINT_VL, DL, IntVT, Adjust, Mask, VL);
-
-  Truncated = DAG.getNode(RISCVISD::SINT_TO_FP_VL, DL, ContainerVT, Truncated,
-                          Mask, VL);
-
-  // Restore the original sign and merge the original source to masked off
-  // lanes.
-  Truncated = DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Truncated,
-                          Src, Src, Mask, VL);
-
-  if (!VT.isFixedLengthVector())
-    return Truncated;
-
-  return convertFromScalableVector(VT, Truncated, DAG, Subtarget);
-}
-
 struct VIDSequence {
   int64_t StepNumerator;
   unsigned StepDenominator;
@@ -3493,9 +3423,8 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
   case ISD::FTRUNC:
   case ISD::FCEIL:
   case ISD::FFLOOR:
-    return lowerFTRUNC_FCEIL_FFLOOR(Op, DAG, Subtarget);
   case ISD::FROUND:
-    return lowerFROUND(Op, DAG, Subtarget);
+    return lowerFTRUNC_FCEIL_FFLOOR_FROUND(Op, DAG, Subtarget);
   case ISD::VECREDUCE_ADD:
   case ISD::VECREDUCE_UMAX:
   case ISD::VECREDUCE_SMAX:
@@ -8844,18 +8773,6 @@ static SDValue combineMUL_VLToVWMUL_VL(SDNode *N, SelectionDAG &DAG,
   return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Merge, Mask, VL);
 }
 
-static RISCVFPRndMode::RoundingMode matchRoundingOp(SDValue Op) {
-  switch (Op.getOpcode()) {
-  case ISD::FROUNDEVEN: return RISCVFPRndMode::RNE;
-  case ISD::FTRUNC:     return RISCVFPRndMode::RTZ;
-  case ISD::FFLOOR:     return RISCVFPRndMode::RDN;
-  case ISD::FCEIL:      return RISCVFPRndMode::RUP;
-  case ISD::FROUND:     return RISCVFPRndMode::RMM;
-  }
-
-  return RISCVFPRndMode::Invalid;
-}
-
 // Fold
 //   (fp_to_int (froundeven X)) -> fcvt X, rne
 //   (fp_to_int (ftrunc X))     -> fcvt X, rtz
@@ -8885,7 +8802,7 @@ static SDValue performFP_TO_INTCombine(SDNode *N,
   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
     return SDValue();
 
-  RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
+  RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src.getOpcode());
   if (FRM == RISCVFPRndMode::Invalid)
     return SDValue();
 
@@ -8934,7 +8851,7 @@ static SDValue performFP_TO_INT_SATCombine(SDNode *N,
 
   EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
 
-  RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
+  RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src.getOpcode());
   if (FRM == RISCVFPRndMode::Invalid)
     return SDValue();
 

diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index 6edabdfff4b3d..519bafa5853bb 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -312,22 +312,22 @@ static const CostTblEntry VectorIntrinsicCostTable[]{
     {Intrinsic::trunc, MVT::nxv2f64, 7},
     {Intrinsic::trunc, MVT::nxv4f64, 7},
     {Intrinsic::trunc, MVT::nxv8f64, 7},
-    {Intrinsic::round, MVT::v2f32, 10},
-    {Intrinsic::round, MVT::v4f32, 10},
-    {Intrinsic::round, MVT::v8f32, 10},
-    {Intrinsic::round, MVT::v16f32, 10},
-    {Intrinsic::round, MVT::nxv2f32, 10},
-    {Intrinsic::round, MVT::nxv4f32, 10},
-    {Intrinsic::round, MVT::nxv8f32, 10},
-    {Intrinsic::round, MVT::nxv16f32, 10},
-    {Intrinsic::round, MVT::v2f64, 10},
-    {Intrinsic::round, MVT::v4f64, 10},
-    {Intrinsic::round, MVT::v8f64, 10},
-    {Intrinsic::round, MVT::v16f64, 10},
-    {Intrinsic::round, MVT::nxv1f64, 10},
-    {Intrinsic::round, MVT::nxv2f64, 10},
-    {Intrinsic::round, MVT::nxv4f64, 10},
-    {Intrinsic::round, MVT::nxv8f64, 10},
+    {Intrinsic::round, MVT::v2f32, 9},
+    {Intrinsic::round, MVT::v4f32, 9},
+    {Intrinsic::round, MVT::v8f32, 9},
+    {Intrinsic::round, MVT::v16f32, 9},
+    {Intrinsic::round, MVT::nxv2f32, 9},
+    {Intrinsic::round, MVT::nxv4f32, 9},
+    {Intrinsic::round, MVT::nxv8f32, 9},
+    {Intrinsic::round, MVT::nxv16f32, 9},
+    {Intrinsic::round, MVT::v2f64, 9},
+    {Intrinsic::round, MVT::v4f64, 9},
+    {Intrinsic::round, MVT::v8f64, 9},
+    {Intrinsic::round, MVT::v16f64, 9},
+    {Intrinsic::round, MVT::nxv1f64, 9},
+    {Intrinsic::round, MVT::nxv2f64, 9},
+    {Intrinsic::round, MVT::nxv4f64, 9},
+    {Intrinsic::round, MVT::nxv8f64, 9},
     {Intrinsic::fabs, MVT::v2f32, 1},
     {Intrinsic::fabs, MVT::v4f32, 1},
     {Intrinsic::fabs, MVT::v8f32, 1},

diff --git a/llvm/test/Analysis/CostModel/RISCV/fround.ll b/llvm/test/Analysis/CostModel/RISCV/fround.ll
index d4bd192ade7c3..af5173006d2df 100644
--- a/llvm/test/Analysis/CostModel/RISCV/fround.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/fround.ll
@@ -219,23 +219,23 @@ define void @nearbyint() {
 define void @round() {
 ; CHECK-LABEL: 'round'
 ; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %1 = call float @llvm.round.f32(float undef)
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %2 = call <2 x float> @llvm.round.v2f32(<2 x float> undef)
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %3 = call <4 x float> @llvm.round.v4f32(<4 x float> undef)
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %4 = call <8 x float> @llvm.round.v8f32(<8 x float> undef)
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %5 = call <16 x float> @llvm.round.v16f32(<16 x float> undef)
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %6 = call <vscale x 2 x float> @llvm.round.nxv2f32(<vscale x 2 x float> undef)
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %7 = call <vscale x 4 x float> @llvm.round.nxv4f32(<vscale x 4 x float> undef)
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %8 = call <vscale x 8 x float> @llvm.round.nxv8f32(<vscale x 8 x float> undef)
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %9 = call <vscale x 16 x float> @llvm.round.nxv16f32(<vscale x 16 x float> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 9 for instruction: %2 = call <2 x float> @llvm.round.v2f32(<2 x float> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 9 for instruction: %3 = call <4 x float> @llvm.round.v4f32(<4 x float> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 9 for instruction: %4 = call <8 x float> @llvm.round.v8f32(<8 x float> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 9 for instruction: %5 = call <16 x float> @llvm.round.v16f32(<16 x float> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 9 for instruction: %6 = call <vscale x 2 x float> @llvm.round.nxv2f32(<vscale x 2 x float> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 9 for instruction: %7 = call <vscale x 4 x float> @llvm.round.nxv4f32(<vscale x 4 x float> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 9 for instruction: %8 = call <vscale x 8 x float> @llvm.round.nxv8f32(<vscale x 8 x float> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 9 for instruction: %9 = call <vscale x 16 x float> @llvm.round.nxv16f32(<vscale x 16 x float> undef)
 ; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %10 = call double @llvm.round.f64(double undef)
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %11 = call <2 x double> @llvm.round.v2f64(<2 x double> undef)
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %12 = call <4 x double> @llvm.round.v4f64(<4 x double> undef)
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %13 = call <8 x double> @llvm.round.v8f64(<8 x double> undef)
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %14 = call <16 x double> @llvm.round.v16f64(<16 x double> undef)
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %15 = call <vscale x 1 x double> @llvm.round.nxv1f64(<vscale x 1 x double> undef)
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %16 = call <vscale x 2 x double> @llvm.round.nxv2f64(<vscale x 2 x double> undef)
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %17 = call <vscale x 4 x double> @llvm.round.nxv4f64(<vscale x 4 x double> undef)
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %18 = call <vscale x 8 x double> @llvm.round.nxv8f64(<vscale x 8 x double> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 9 for instruction: %11 = call <2 x double> @llvm.round.v2f64(<2 x double> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 9 for instruction: %12 = call <4 x double> @llvm.round.v4f64(<4 x double> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 9 for instruction: %13 = call <8 x double> @llvm.round.v8f64(<8 x double> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 9 for instruction: %14 = call <16 x double> @llvm.round.v16f64(<16 x double> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 9 for instruction: %15 = call <vscale x 1 x double> @llvm.round.nxv1f64(<vscale x 1 x double> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 9 for instruction: %16 = call <vscale x 2 x double> @llvm.round.nxv2f64(<vscale x 2 x double> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 9 for instruction: %17 = call <vscale x 4 x double> @llvm.round.nxv4f64(<vscale x 4 x double> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 9 for instruction: %18 = call <vscale x 8 x double> @llvm.round.nxv8f64(<vscale x 8 x double> undef)
 ; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: ret void
 ;
   call float @llvm.round.f32(float undef)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
index 3986bc2d071db..a12ac26aea3fc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
@@ -2171,12 +2171,11 @@ define void @round_v8f16(<8 x half>* %x) {
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    lui a1, %hi(.LCPI100_0)
 ; CHECK-NEXT:    flh ft0, %lo(.LCPI100_0)(a1)
-; CHECK-NEXT:    lui a1, %hi(.LCPI100_1)
-; CHECK-NEXT:    flh ft1, %lo(.LCPI100_1)(a1)
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
-; CHECK-NEXT:    vfadd.vf v9, v9, ft1, v0.t
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9, v0.t
+; CHECK-NEXT:    fsrmi a1, 4
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a1
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    vse16.v v8, (a0)
@@ -2195,12 +2194,11 @@ define void @round_v4f32(<4 x float>* %x) {
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    lui a1, %hi(.LCPI101_0)
 ; CHECK-NEXT:    flw ft0, %lo(.LCPI101_0)(a1)
-; CHECK-NEXT:    lui a1, %hi(.LCPI101_1)
-; CHECK-NEXT:    flw ft1, %lo(.LCPI101_1)(a1)
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
-; CHECK-NEXT:    vfadd.vf v9, v9, ft1, v0.t
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9, v0.t
+; CHECK-NEXT:    fsrmi a1, 4
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a1
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    vse32.v v8, (a0)
@@ -2219,12 +2217,11 @@ define void @round_v2f64(<2 x double>* %x) {
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    lui a1, %hi(.LCPI102_0)
 ; CHECK-NEXT:    fld ft0, %lo(.LCPI102_0)(a1)
-; CHECK-NEXT:    lui a1, %hi(.LCPI102_1)
-; CHECK-NEXT:    fld ft1, %lo(.LCPI102_1)(a1)
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
-; CHECK-NEXT:    vfadd.vf v9, v9, ft1, v0.t
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9, v0.t
+; CHECK-NEXT:    fsrmi a1, 4
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a1
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    vse64.v v8, (a0)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
index 103158e0ca228..6354bcb5de20a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
@@ -7,15 +7,14 @@
 define <vscale x 1 x half> @round_nxv1f16(<vscale x 1 x half> %x) {
 ; CHECK-LABEL: round_nxv1f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    lui a0, %hi(.LCPI0_0)
 ; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI0_1)
-; CHECK-NEXT:    flh ft1, %lo(.LCPI0_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
-; CHECK-NEXT:    vfadd.vf v9, v9, ft1, v0.t
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9, v0.t
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
@@ -27,15 +26,14 @@ declare <vscale x 1 x half> @llvm.round.nxv1f16(<vscale x 1 x half>)
 define <vscale x 2 x half> @round_nxv2f16(<vscale x 2 x half> %x) {
 ; CHECK-LABEL: round_nxv2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    lui a0, %hi(.LCPI1_0)
 ; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI1_1)
-; CHECK-NEXT:    flh ft1, %lo(.LCPI1_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
-; CHECK-NEXT:    vfadd.vf v9, v9, ft1, v0.t
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9, v0.t
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
@@ -47,15 +45,14 @@ declare <vscale x 2 x half> @llvm.round.nxv2f16(<vscale x 2 x half>)
 define <vscale x 4 x half> @round_nxv4f16(<vscale x 4 x half> %x) {
 ; CHECK-LABEL: round_nxv4f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    lui a0, %hi(.LCPI2_0)
 ; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI2_1)
-; CHECK-NEXT:    flh ft1, %lo(.LCPI2_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
-; CHECK-NEXT:    vfadd.vf v9, v9, ft1, v0.t
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9, v0.t
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
@@ -67,15 +64,14 @@ declare <vscale x 4 x half> @llvm.round.nxv4f16(<vscale x 4 x half>)
 define <vscale x 8 x half> @round_nxv8f16(<vscale x 8 x half> %x) {
 ; CHECK-LABEL: round_nxv8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    lui a0, %hi(.LCPI3_0)
 ; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI3_1)
-; CHECK-NEXT:    flh ft1, %lo(.LCPI3_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft0
-; CHECK-NEXT:    vfadd.vf v10, v10, ft1, v0.t
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v10, v10, v0.t
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
@@ -87,15 +83,14 @@ declare <vscale x 8 x half> @llvm.round.nxv8f16(<vscale x 8 x half>)
 define <vscale x 16 x half> @round_nxv16f16(<vscale x 16 x half> %x) {
 ; CHECK-LABEL: round_nxv16f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    lui a0, %hi(.LCPI4_0)
 ; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI4_1)
-; CHECK-NEXT:    flh ft1, %lo(.LCPI4_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    vmflt.vf v0, v12, ft0
-; CHECK-NEXT:    vfadd.vf v12, v12, ft1, v0.t
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v12, v12, v0.t
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
@@ -107,15 +102,14 @@ declare <vscale x 16 x half> @llvm.round.nxv16f16(<vscale x 16 x half>)
 define <vscale x 32 x half> @round_nxv32f16(<vscale x 32 x half> %x) {
 ; CHECK-LABEL: round_nxv32f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    lui a0, %hi(.LCPI5_0)
 ; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI5_1)
-; CHECK-NEXT:    flh ft1, %lo(.LCPI5_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    vmflt.vf v0, v16, ft0
-; CHECK-NEXT:    vfadd.vf v16, v16, ft1, v0.t
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v16, v0.t
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
@@ -127,15 +121,14 @@ declare <vscale x 32 x half> @llvm.round.nxv32f16(<vscale x 32 x half>)
 define <vscale x 1 x float> @round_nxv1f32(<vscale x 1 x float> %x) {
 ; CHECK-LABEL: round_nxv1f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    lui a0, %hi(.LCPI6_0)
 ; CHECK-NEXT:    flw ft0, %lo(.LCPI6_0)(a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI6_1)
-; CHECK-NEXT:    flw ft1, %lo(.LCPI6_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
-; CHECK-NEXT:    vfadd.vf v9, v9, ft1, v0.t
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9, v0.t
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
@@ -147,15 +140,14 @@ declare <vscale x 1 x float> @llvm.round.nxv1f32(<vscale x 1 x float>)
 define <vscale x 2 x float> @round_nxv2f32(<vscale x 2 x float> %x) {
 ; CHECK-LABEL: round_nxv2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    lui a0, %hi(.LCPI7_0)
 ; CHECK-NEXT:    flw ft0, %lo(.LCPI7_0)(a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI7_1)
-; CHECK-NEXT:    flw ft1, %lo(.LCPI7_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
-; CHECK-NEXT:    vfadd.vf v9, v9, ft1, v0.t
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9, v0.t
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
@@ -167,15 +159,14 @@ declare <vscale x 2 x float> @llvm.round.nxv2f32(<vscale x 2 x float>)
 define <vscale x 4 x float> @round_nxv4f32(<vscale x 4 x float> %x) {
 ; CHECK-LABEL: round_nxv4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    lui a0, %hi(.LCPI8_0)
 ; CHECK-NEXT:    flw ft0, %lo(.LCPI8_0)(a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI8_1)
-; CHECK-NEXT:    flw ft1, %lo(.LCPI8_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft0
-; CHECK-NEXT:    vfadd.vf v10, v10, ft1, v0.t
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v10, v10, v0.t
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
@@ -187,15 +178,14 @@ declare <vscale x 4 x float> @llvm.round.nxv4f32(<vscale x 4 x float>)
 define <vscale x 8 x float> @round_nxv8f32(<vscale x 8 x float> %x) {
 ; CHECK-LABEL: round_nxv8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    lui a0, %hi(.LCPI9_0)
 ; CHECK-NEXT:    flw ft0, %lo(.LCPI9_0)(a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI9_1)
-; CHECK-NEXT:    flw ft1, %lo(.LCPI9_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    vmflt.vf v0, v12, ft0
-; CHECK-NEXT:    vfadd.vf v12, v12, ft1, v0.t
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v12, v12, v0.t
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
@@ -207,15 +197,14 @@ declare <vscale x 8 x float> @llvm.round.nxv8f32(<vscale x 8 x float>)
 define <vscale x 16 x float> @round_nxv16f32(<vscale x 16 x float> %x) {
 ; CHECK-LABEL: round_nxv16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    lui a0, %hi(.LCPI10_0)
 ; CHECK-NEXT:    flw ft0, %lo(.LCPI10_0)(a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI10_1)
-; CHECK-NEXT:    flw ft1, %lo(.LCPI10_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    vmflt.vf v0, v16, ft0
-; CHECK-NEXT:    vfadd.vf v16, v16, ft1, v0.t
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v16, v0.t
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
@@ -227,15 +216,14 @@ declare <vscale x 16 x float> @llvm.round.nxv16f32(<vscale x 16 x float>)
 define <vscale x 1 x double> @round_nxv1f64(<vscale x 1 x double> %x) {
 ; CHECK-LABEL: round_nxv1f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    lui a0, %hi(.LCPI11_0)
 ; CHECK-NEXT:    fld ft0, %lo(.LCPI11_0)(a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI11_1)
-; CHECK-NEXT:    fld ft1, %lo(.LCPI11_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
-; CHECK-NEXT:    vfadd.vf v9, v9, ft1, v0.t
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9, v0.t
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
@@ -247,15 +235,14 @@ declare <vscale x 1 x double> @llvm.round.nxv1f64(<vscale x 1 x double>)
 define <vscale x 2 x double> @round_nxv2f64(<vscale x 2 x double> %x) {
 ; CHECK-LABEL: round_nxv2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    lui a0, %hi(.LCPI12_0)
 ; CHECK-NEXT:    fld ft0, %lo(.LCPI12_0)(a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI12_1)
-; CHECK-NEXT:    fld ft1, %lo(.LCPI12_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft0
-; CHECK-NEXT:    vfadd.vf v10, v10, ft1, v0.t
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v10, v10, v0.t
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
@@ -267,15 +254,14 @@ declare <vscale x 2 x double> @llvm.round.nxv2f64(<vscale x 2 x double>)
 define <vscale x 4 x double> @round_nxv4f64(<vscale x 4 x double> %x) {
 ; CHECK-LABEL: round_nxv4f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    lui a0, %hi(.LCPI13_0)
 ; CHECK-NEXT:    fld ft0, %lo(.LCPI13_0)(a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI13_1)
-; CHECK-NEXT:    fld ft1, %lo(.LCPI13_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    vmflt.vf v0, v12, ft0
-; CHECK-NEXT:    vfadd.vf v12, v12, ft1, v0.t
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v12, v12, v0.t
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
@@ -287,15 +273,14 @@ declare <vscale x 4 x double> @llvm.round.nxv4f64(<vscale x 4 x double>)
 define <vscale x 8 x double> @round_nxv8f64(<vscale x 8 x double> %x) {
 ; CHECK-LABEL: round_nxv8f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    lui a0, %hi(.LCPI14_0)
 ; CHECK-NEXT:    fld ft0, %lo(.LCPI14_0)(a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI14_1)
-; CHECK-NEXT:    fld ft1, %lo(.LCPI14_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    vmflt.vf v0, v16, ft0
-; CHECK-NEXT:    vfadd.vf v16, v16, ft1, v0.t
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v16, v0.t
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret

