[llvm] c83905a - [RISCV] Add inline expansion for vector fround.

Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Fri Feb 4 09:12:26 PST 2022


Author: Craig Topper
Date: 2022-02-04T09:12:09-08:00
New Revision: c83905a30855c6c08e275310c7c3a3192c3e29b8

URL: https://github.com/llvm/llvm-project/commit/c83905a30855c6c08e275310c7c3a3192c3e29b8
DIFF: https://github.com/llvm/llvm-project/commit/c83905a30855c6c08e275310c7c3a3192c3e29b8.diff

LOG: [RISCV] Add inline expansion for vector fround.

This avoids a crash for scalable vectors and scalarization for
fixed vectors.

The algorithm is different enough that I don't think it makes sense
to merge it with ceil/floor/trunc. The algorithm is adapted from gcc's
X86 SSE2 output.

Reviewed By: frasercrmck

Differential Revision: https://reviews.llvm.org/D117247

Added: 
    llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index eadddea4f551f..f43db6c5f78fa 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -760,6 +760,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::FTRUNC, VT, Custom);
       setOperationAction(ISD::FCEIL, VT, Custom);
       setOperationAction(ISD::FFLOOR, VT, Custom);
+      setOperationAction(ISD::FROUND, VT, Custom);
 
       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
@@ -1018,6 +1019,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
         setOperationAction(ISD::FTRUNC, VT, Custom);
         setOperationAction(ISD::FCEIL, VT, Custom);
         setOperationAction(ISD::FFLOOR, VT, Custom);
+        setOperationAction(ISD::FROUND, VT, Custom);
 
         for (auto CC : VFPCCToExpand)
           setCondCodeAction(CC, VT, Expand);
@@ -1844,6 +1846,58 @@ static SDValue lowerFTRUNC_FCEIL_FFLOOR(SDValue Op, SelectionDAG &DAG) {
   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
 }
 
+// ISD::FROUND is defined to round to nearest with ties rounding away from 0.
+// This mode isn't supported in vector hardware on RISCV. But as long as we
+// aren't compiling with trapping math, we can emulate this with
+// copysign(floor(fabs(X) + nextafter(0.5, 0.0)), X).
+// FIXME: Could be shorter by changing rounding mode, but we don't have FRM
+// dependencies modeled yet.
+// FIXME: Use masked operations to avoid final merge.
+static SDValue lowerFROUND(SDValue Op, SelectionDAG &DAG) {
+  MVT VT = Op.getSimpleValueType();
+  assert(VT.isVector() && "Unexpected type");
+
+  SDLoc DL(Op);
+
+  // Freeze the source since we are increasing the number of uses.
+  SDValue Src = DAG.getNode(ISD::FREEZE, DL, VT, Op.getOperand(0));
+
+  // We do the conversion on the absolute value and fix the sign at the end.
+  SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
+
+  const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
+  bool Ignored;
+  APFloat Point5Pred = APFloat(0.5f);
+  Point5Pred.convert(FltSem, APFloat::rmNearestTiesToEven, &Ignored);
+  Point5Pred.next(/*nextDown*/ true);
+
+  // Add the adjustment.
+  SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Abs,
+                               DAG.getConstantFP(Point5Pred, DL, VT));
+
+  // Truncate to integer and convert back to fp.
+  MVT IntVT = VT.changeVectorElementTypeToInteger();
+  SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Adjust);
+  Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
+
+  // Restore the original sign.
+  Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
+
+  // Determine the largest integer that can be represented exactly. This and
+  // values larger than it don't have any fractional bits so don't need to
+  // be converted.
+  unsigned Precision = APFloat::semanticsPrecision(FltSem);
+  APFloat MaxVal = APFloat(FltSem);
+  MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
+                          /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
+  SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
+
+  // If abs(Src) was larger than MaxVal or nan, keep it.
+  MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
+  SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
+  return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
+}
+
 static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG,
                                  const RISCVSubtarget &Subtarget) {
   MVT VT = Op.getSimpleValueType();
@@ -3312,6 +3366,8 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
   case ISD::FCEIL:
   case ISD::FFLOOR:
     return lowerFTRUNC_FCEIL_FFLOOR(Op, DAG);
+  case ISD::FROUND:
+    return lowerFROUND(Op, DAG);
   case ISD::VECREDUCE_ADD:
   case ISD::VECREDUCE_UMAX:
   case ISD::VECREDUCE_SMAX:

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
index ba63fdc61da94..9f384cfcb9a36 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
@@ -2191,3 +2191,78 @@ define void @floor_v2f64(<2 x double>* %x) {
   ret void
 }
 declare <2 x double> @llvm.floor.v2f64(<2 x double>)
+
+define void @round_v8f16(<8 x half>* %x) {
+; CHECK-LABEL: round_v8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    lui a1, %hi(.LCPI100_0)
+; CHECK-NEXT:    flh ft0, %lo(.LCPI100_0)(a1)
+; CHECK-NEXT:    lui a1, %hi(.LCPI100_1)
+; CHECK-NEXT:    flh ft1, %lo(.LCPI100_1)(a1)
+; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vfadd.vf v9, v9, ft1
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9
+; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
+; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    vse16.v v8, (a0)
+; CHECK-NEXT:    ret
+  %a = load <8 x half>, <8 x half>* %x
+  %b = call <8 x half> @llvm.round.v8f16(<8 x half> %a)
+  store <8 x half> %b, <8 x half>* %x
+  ret void
+}
+declare <8 x half> @llvm.round.v8f16(<8 x half>)
+
+define void @round_v4f32(<4 x float>* %x) {
+; CHECK-LABEL: round_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    lui a1, %hi(.LCPI101_0)
+; CHECK-NEXT:    flw ft0, %lo(.LCPI101_0)(a1)
+; CHECK-NEXT:    lui a1, %hi(.LCPI101_1)
+; CHECK-NEXT:    flw ft1, %lo(.LCPI101_1)(a1)
+; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vfadd.vf v9, v9, ft1
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9
+; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
+; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
+  %a = load <4 x float>, <4 x float>* %x
+  %b = call <4 x float> @llvm.round.v4f32(<4 x float> %a)
+  store <4 x float> %b, <4 x float>* %x
+  ret void
+}
+declare <4 x float> @llvm.round.v4f32(<4 x float>)
+
+define void @round_v2f64(<2 x double>* %x) {
+; CHECK-LABEL: round_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    lui a1, %hi(.LCPI102_0)
+; CHECK-NEXT:    fld ft0, %lo(.LCPI102_0)(a1)
+; CHECK-NEXT:    lui a1, %hi(.LCPI102_1)
+; CHECK-NEXT:    fld ft1, %lo(.LCPI102_1)(a1)
+; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vfadd.vf v9, v9, ft1
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9
+; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
+; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    ret
+  %a = load <2 x double>, <2 x double>* %x
+  %b = call <2 x double> @llvm.round.v2f64(<2 x double> %a)
+  store <2 x double> %b, <2 x double>* %x
+  ret void
+}
+declare <2 x double> @llvm.round.v2f64(<2 x double>)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
new file mode 100644
index 0000000000000..0778ce69d725d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
@@ -0,0 +1,320 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x half> @round_nxv1f16(<vscale x 1 x half> %x) {
+; CHECK-LABEL: round_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI0_0)
+; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI0_1)
+; CHECK-NEXT:    flh ft1, %lo(.LCPI0_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
+; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vfadd.vf v9, v9, ft1
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9
+; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
+; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x half> @llvm.round.nxv1f16(<vscale x 1 x half> %x)
+  ret <vscale x 1 x half> %a
+}
+declare <vscale x 1 x half> @llvm.round.nxv1f16(<vscale x 1 x half>)
+
+define <vscale x 2 x half> @round_nxv2f16(<vscale x 2 x half> %x) {
+; CHECK-LABEL: round_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI1_0)
+; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI1_1)
+; CHECK-NEXT:    flh ft1, %lo(.LCPI1_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vfadd.vf v9, v9, ft1
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9
+; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
+; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x half> @llvm.round.nxv2f16(<vscale x 2 x half> %x)
+  ret <vscale x 2 x half> %a
+}
+declare <vscale x 2 x half> @llvm.round.nxv2f16(<vscale x 2 x half>)
+
+define <vscale x 4 x half> @round_nxv4f16(<vscale x 4 x half> %x) {
+; CHECK-LABEL: round_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI2_0)
+; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI2_1)
+; CHECK-NEXT:    flh ft1, %lo(.LCPI2_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vfadd.vf v9, v9, ft1
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9
+; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
+; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x half> @llvm.round.nxv4f16(<vscale x 4 x half> %x)
+  ret <vscale x 4 x half> %a
+}
+declare <vscale x 4 x half> @llvm.round.nxv4f16(<vscale x 4 x half>)
+
+define <vscale x 8 x half> @round_nxv8f16(<vscale x 8 x half> %x) {
+; CHECK-LABEL: round_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI3_0)
+; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI3_1)
+; CHECK-NEXT:    flh ft1, %lo(.LCPI3_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
+; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vfadd.vf v10, v10, ft1
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v10, v10
+; CHECK-NEXT:    vfcvt.f.x.v v10, v10
+; CHECK-NEXT:    vfsgnj.vv v10, v10, v8
+; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x half> @llvm.round.nxv8f16(<vscale x 8 x half> %x)
+  ret <vscale x 8 x half> %a
+}
+declare <vscale x 8 x half> @llvm.round.nxv8f16(<vscale x 8 x half>)
+
+define <vscale x 16 x half> @round_nxv16f16(<vscale x 16 x half> %x) {
+; CHECK-LABEL: round_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI4_0)
+; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI4_1)
+; CHECK-NEXT:    flh ft1, %lo(.LCPI4_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
+; CHECK-NEXT:    vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vfadd.vf v12, v12, ft1
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v12, v12
+; CHECK-NEXT:    vfcvt.f.x.v v12, v12
+; CHECK-NEXT:    vfsgnj.vv v12, v12, v8
+; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x half> @llvm.round.nxv16f16(<vscale x 16 x half> %x)
+  ret <vscale x 16 x half> %a
+}
+declare <vscale x 16 x half> @llvm.round.nxv16f16(<vscale x 16 x half>)
+
+define <vscale x 32 x half> @round_nxv32f16(<vscale x 32 x half> %x) {
+; CHECK-LABEL: round_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI5_0)
+; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI5_1)
+; CHECK-NEXT:    flh ft1, %lo(.LCPI5_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vfadd.vf v16, v16, ft1
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v16
+; CHECK-NEXT:    vfcvt.f.x.v v16, v16
+; CHECK-NEXT:    vfsgnj.vv v16, v16, v8
+; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x half> @llvm.round.nxv32f16(<vscale x 32 x half> %x)
+  ret <vscale x 32 x half> %a
+}
+declare <vscale x 32 x half> @llvm.round.nxv32f16(<vscale x 32 x half>)
+
+define <vscale x 1 x float> @round_nxv1f32(<vscale x 1 x float> %x) {
+; CHECK-LABEL: round_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI6_0)
+; CHECK-NEXT:    flw ft0, %lo(.LCPI6_0)(a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI6_1)
+; CHECK-NEXT:    flw ft1, %lo(.LCPI6_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vfadd.vf v9, v9, ft1
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9
+; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
+; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x float> @llvm.round.nxv1f32(<vscale x 1 x float> %x)
+  ret <vscale x 1 x float> %a
+}
+declare <vscale x 1 x float> @llvm.round.nxv1f32(<vscale x 1 x float>)
+
+define <vscale x 2 x float> @round_nxv2f32(<vscale x 2 x float> %x) {
+; CHECK-LABEL: round_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI7_0)
+; CHECK-NEXT:    flw ft0, %lo(.LCPI7_0)(a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI7_1)
+; CHECK-NEXT:    flw ft1, %lo(.LCPI7_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vfadd.vf v9, v9, ft1
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9
+; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
+; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x float> @llvm.round.nxv2f32(<vscale x 2 x float> %x)
+  ret <vscale x 2 x float> %a
+}
+declare <vscale x 2 x float> @llvm.round.nxv2f32(<vscale x 2 x float>)
+
+define <vscale x 4 x float> @round_nxv4f32(<vscale x 4 x float> %x) {
+; CHECK-LABEL: round_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI8_0)
+; CHECK-NEXT:    flw ft0, %lo(.LCPI8_0)(a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI8_1)
+; CHECK-NEXT:    flw ft1, %lo(.LCPI8_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vfadd.vf v10, v10, ft1
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v10, v10
+; CHECK-NEXT:    vfcvt.f.x.v v10, v10
+; CHECK-NEXT:    vfsgnj.vv v10, v10, v8
+; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x float> @llvm.round.nxv4f32(<vscale x 4 x float> %x)
+  ret <vscale x 4 x float> %a
+}
+declare <vscale x 4 x float> @llvm.round.nxv4f32(<vscale x 4 x float>)
+
+define <vscale x 8 x float> @round_nxv8f32(<vscale x 8 x float> %x) {
+; CHECK-LABEL: round_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI9_0)
+; CHECK-NEXT:    flw ft0, %lo(.LCPI9_0)(a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI9_1)
+; CHECK-NEXT:    flw ft1, %lo(.LCPI9_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vfadd.vf v12, v12, ft1
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v12, v12
+; CHECK-NEXT:    vfcvt.f.x.v v12, v12
+; CHECK-NEXT:    vfsgnj.vv v12, v12, v8
+; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x float> @llvm.round.nxv8f32(<vscale x 8 x float> %x)
+  ret <vscale x 8 x float> %a
+}
+declare <vscale x 8 x float> @llvm.round.nxv8f32(<vscale x 8 x float>)
+
+define <vscale x 16 x float> @round_nxv16f32(<vscale x 16 x float> %x) {
+; CHECK-LABEL: round_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI10_0)
+; CHECK-NEXT:    flw ft0, %lo(.LCPI10_0)(a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI10_1)
+; CHECK-NEXT:    flw ft1, %lo(.LCPI10_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vfadd.vf v16, v16, ft1
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v16
+; CHECK-NEXT:    vfcvt.f.x.v v16, v16
+; CHECK-NEXT:    vfsgnj.vv v16, v16, v8
+; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x float> @llvm.round.nxv16f32(<vscale x 16 x float> %x)
+  ret <vscale x 16 x float> %a
+}
+declare <vscale x 16 x float> @llvm.round.nxv16f32(<vscale x 16 x float>)
+
+define <vscale x 1 x double> @round_nxv1f64(<vscale x 1 x double> %x) {
+; CHECK-LABEL: round_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI11_0)
+; CHECK-NEXT:    fld ft0, %lo(.LCPI11_0)(a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI11_1)
+; CHECK-NEXT:    fld ft1, %lo(.LCPI11_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vfadd.vf v9, v9, ft1
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9
+; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
+; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x double> @llvm.round.nxv1f64(<vscale x 1 x double> %x)
+  ret <vscale x 1 x double> %a
+}
+declare <vscale x 1 x double> @llvm.round.nxv1f64(<vscale x 1 x double>)
+
+define <vscale x 2 x double> @round_nxv2f64(<vscale x 2 x double> %x) {
+; CHECK-LABEL: round_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI12_0)
+; CHECK-NEXT:    fld ft0, %lo(.LCPI12_0)(a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI12_1)
+; CHECK-NEXT:    fld ft1, %lo(.LCPI12_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vfadd.vf v10, v10, ft1
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v10, v10
+; CHECK-NEXT:    vfcvt.f.x.v v10, v10
+; CHECK-NEXT:    vfsgnj.vv v10, v10, v8
+; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x double> @llvm.round.nxv2f64(<vscale x 2 x double> %x)
+  ret <vscale x 2 x double> %a
+}
+declare <vscale x 2 x double> @llvm.round.nxv2f64(<vscale x 2 x double>)
+
+define <vscale x 4 x double> @round_nxv4f64(<vscale x 4 x double> %x) {
+; CHECK-LABEL: round_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI13_0)
+; CHECK-NEXT:    fld ft0, %lo(.LCPI13_0)(a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI13_1)
+; CHECK-NEXT:    fld ft1, %lo(.LCPI13_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; CHECK-NEXT:    vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vfadd.vf v12, v12, ft1
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v12, v12
+; CHECK-NEXT:    vfcvt.f.x.v v12, v12
+; CHECK-NEXT:    vfsgnj.vv v12, v12, v8
+; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x double> @llvm.round.nxv4f64(<vscale x 4 x double> %x)
+  ret <vscale x 4 x double> %a
+}
+declare <vscale x 4 x double> @llvm.round.nxv4f64(<vscale x 4 x double>)
+
+define <vscale x 8 x double> @round_nxv8f64(<vscale x 8 x double> %x) {
+; CHECK-LABEL: round_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI14_0)
+; CHECK-NEXT:    fld ft0, %lo(.LCPI14_0)(a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI14_1)
+; CHECK-NEXT:    fld ft1, %lo(.LCPI14_1)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vfadd.vf v16, v16, ft1
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v16
+; CHECK-NEXT:    vfcvt.f.x.v v16, v16
+; CHECK-NEXT:    vfsgnj.vv v16, v16, v8
+; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x double> @llvm.round.nxv8f64(<vscale x 8 x double> %x)
+  ret <vscale x 8 x double> %a
+}
+declare <vscale x 8 x double> @llvm.round.nxv8f64(<vscale x 8 x double>)
