[llvm] 02a9828 - [RISCV] Add lowering for llvm.roundeven

via llvm-commits <llvm-commits at lists.llvm.org>
Thu Sep 29 06:08:24 PDT 2022


Author: eopXD
Date: 2022-09-29T06:08:14-07:00
New Revision: 02a982829ce44bc023c7c7e62d12cd455e90de23

URL: https://github.com/llvm/llvm-project/commit/02a982829ce44bc023c7c7e62d12cd455e90de23
DIFF: https://github.com/llvm/llvm-project/commit/02a982829ce44bc023c7c7e62d12cd455e90de23.diff

LOG: [RISCV] Add lowering for llvm.roundeven

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D134785
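
For context: llvm.roundeven is LLVM's round-to-nearest, ties-to-even rounding
intrinsic. A minimal IR sample of the kind of call this patch now lowers (the
function name @f is illustrative, not taken from the patch):

    define <vscale x 2 x float> @f(<vscale x 2 x float> %x) {
      %r = call <vscale x 2 x float> @llvm.roundeven.nxv2f32(<vscale x 2 x float> %x)
      ret <vscale x 2 x float> %r
    }
    declare <vscale x 2 x float> @llvm.roundeven.nxv2f32(<vscale x 2 x float>)

Previously the scalable-vector forms had no lowering at all on RISC-V (the cost
model reported them as Invalid; see the fround.ll diff below) and the
fixed-vector forms were expensive. The patch routes ISD::FROUNDEVEN through the
existing lowerFTRUNC_FCEIL_FFLOOR_FROUND path, which performs the conversion
under the static round-to-nearest-even (RNE) rounding mode.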

Added: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll
    llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/Analysis/CostModel/RISCV/fround.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 5cec2581ab8c3..03b3b7d23ec26 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -660,8 +660,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
 
       setOperationAction({ISD::FMINNUM, ISD::FMAXNUM}, VT, Legal);
 
-      setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND},
-                         VT, Custom);
+      setOperationAction(
+          {ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND, ISD::FROUNDEVEN},
+          VT, Custom);
 
       setOperationAction(FloatingPointVecReduceOps, VT, Custom);
 
@@ -904,7 +905,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
 
         setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
 
-        setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND},
+        setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND,
+                            ISD::FROUNDEVEN},
                            VT, Custom);
 
         setCondCodeAction(VFPCCToExpand, VT, Expand);
@@ -2036,6 +2038,7 @@ lowerFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
   case ISD::FFLOOR:
   case ISD::VP_FFLOOR:
   case ISD::FROUND:
+  case ISD::FROUNDEVEN:
   case ISD::VP_FROUND:
   case ISD::VP_FROUNDEVEN: {
     RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Op.getOpcode());
@@ -3639,6 +3642,7 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
   case ISD::FCEIL:
   case ISD::FFLOOR:
   case ISD::FROUND:
+  case ISD::FROUNDEVEN:
     return lowerFTRUNC_FCEIL_FFLOOR_FROUND(Op, DAG, Subtarget);
   case ISD::VECREDUCE_ADD:
   case ISD::VECREDUCE_UMAX:

diff --git a/llvm/test/Analysis/CostModel/RISCV/fround.ll b/llvm/test/Analysis/CostModel/RISCV/fround.ll
index df351bf3a45d3..08202909400fa 100644
--- a/llvm/test/Analysis/CostModel/RISCV/fround.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/fround.ll
@@ -262,23 +262,23 @@ define void @round() {
 define void @roundeven() {
 ; CHECK-LABEL: 'roundeven'
 ; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %1 = call float @llvm.roundeven.f32(float undef)
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 23 for instruction: %2 = call <2 x float> @llvm.roundeven.v2f32(<2 x float> undef)
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 47 for instruction: %3 = call <4 x float> @llvm.roundeven.v4f32(<4 x float> undef)
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 95 for instruction: %4 = call <8 x float> @llvm.roundeven.v8f32(<8 x float> undef)
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 191 for instruction: %5 = call <16 x float> @llvm.roundeven.v16f32(<16 x float> undef)
-; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %6 = call <vscale x 2 x float> @llvm.roundeven.nxv2f32(<vscale x 2 x float> undef)
-; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %7 = call <vscale x 4 x float> @llvm.roundeven.nxv4f32(<vscale x 4 x float> undef)
-; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %8 = call <vscale x 8 x float> @llvm.roundeven.nxv8f32(<vscale x 8 x float> undef)
-; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %9 = call <vscale x 16 x float> @llvm.roundeven.nxv16f32(<vscale x 16 x float> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %2 = call <2 x float> @llvm.roundeven.v2f32(<2 x float> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %3 = call <4 x float> @llvm.roundeven.v4f32(<4 x float> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %4 = call <8 x float> @llvm.roundeven.v8f32(<8 x float> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %5 = call <16 x float> @llvm.roundeven.v16f32(<16 x float> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %6 = call <vscale x 2 x float> @llvm.roundeven.nxv2f32(<vscale x 2 x float> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %7 = call <vscale x 4 x float> @llvm.roundeven.nxv4f32(<vscale x 4 x float> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %8 = call <vscale x 8 x float> @llvm.roundeven.nxv8f32(<vscale x 8 x float> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %9 = call <vscale x 16 x float> @llvm.roundeven.nxv16f32(<vscale x 16 x float> undef)
 ; CHECK-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %10 = call double @llvm.roundeven.f64(double undef)
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 23 for instruction: %11 = call <2 x double> @llvm.roundeven.v2f64(<2 x double> undef)
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 47 for instruction: %12 = call <4 x double> @llvm.roundeven.v4f64(<4 x double> undef)
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 95 for instruction: %13 = call <8 x double> @llvm.roundeven.v8f64(<8 x double> undef)
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 191 for instruction: %14 = call <16 x double> @llvm.roundeven.v16f64(<16 x double> undef)
-; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %15 = call <vscale x 1 x double> @llvm.roundeven.nxv1f64(<vscale x 1 x double> undef)
-; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %16 = call <vscale x 2 x double> @llvm.roundeven.nxv2f64(<vscale x 2 x double> undef)
-; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %17 = call <vscale x 4 x double> @llvm.roundeven.nxv4f64(<vscale x 4 x double> undef)
-; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %18 = call <vscale x 8 x double> @llvm.roundeven.nxv8f64(<vscale x 8 x double> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %11 = call <2 x double> @llvm.roundeven.v2f64(<2 x double> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %12 = call <4 x double> @llvm.roundeven.v4f64(<4 x double> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %13 = call <8 x double> @llvm.roundeven.v8f64(<8 x double> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %14 = call <16 x double> @llvm.roundeven.v16f64(<16 x double> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %15 = call <vscale x 1 x double> @llvm.roundeven.nxv1f64(<vscale x 1 x double> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %16 = call <vscale x 2 x double> @llvm.roundeven.nxv2f64(<vscale x 2 x double> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %17 = call <vscale x 4 x double> @llvm.roundeven.nxv4f64(<vscale x 4 x double> undef)
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %18 = call <vscale x 8 x double> @llvm.roundeven.nxv8f64(<vscale x 8 x double> undef)
 ; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: ret void
 ;
   call float @llvm.roundeven.f32(float undef)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll
new file mode 100644
index 0000000000000..fe7974624b91b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll
@@ -0,0 +1,293 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+; This file tests code generation for `llvm.roundeven.*` on fixed vector types.
+
+define <1 x half> @roundeven_v1f16(<1 x half> %x) {
+; CHECK-LABEL: roundeven_v1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI0_0)
+; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
+; CHECK-NEXT:    vfabs.v v9, v8
+; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <1 x half> @llvm.roundeven.v1f16(<1 x half> %x)
+  ret <1 x half> %a
+}
+declare <1 x half> @llvm.roundeven.v1f16(<1 x half>)
+
+define <2 x half> @roundeven_v2f16(<2 x half> %x) {
+; CHECK-LABEL: roundeven_v2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI1_0)
+; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a0)
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT:    vfabs.v v9, v8
+; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <2 x half> @llvm.roundeven.v2f16(<2 x half> %x)
+  ret <2 x half> %a
+}
+declare <2 x half> @llvm.roundeven.v2f16(<2 x half>)
+
+define <4 x half> @roundeven_v4f16(<4 x half> %x) {
+; CHECK-LABEL: roundeven_v4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI2_0)
+; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a0)
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT:    vfabs.v v9, v8
+; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <4 x half> @llvm.roundeven.v4f16(<4 x half> %x)
+  ret <4 x half> %a
+}
+declare <4 x half> @llvm.roundeven.v4f16(<4 x half>)
+
+define <8 x half> @roundeven_v8f16(<8 x half> %x) {
+; CHECK-LABEL: roundeven_v8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI3_0)
+; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a0)
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT:    vfabs.v v9, v8
+; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <8 x half> @llvm.roundeven.v8f16(<8 x half> %x)
+  ret <8 x half> %a
+}
+declare <8 x half> @llvm.roundeven.v8f16(<8 x half>)
+
+define <16 x half> @roundeven_v16f16(<16 x half> %x) {
+; CHECK-LABEL: roundeven_v16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI4_0)
+; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a0)
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
+; CHECK-NEXT:    vfabs.v v10, v8
+; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <16 x half> @llvm.roundeven.v16f16(<16 x half> %x)
+  ret <16 x half> %a
+}
+declare <16 x half> @llvm.roundeven.v16f16(<16 x half>)
+
+define <32 x half> @roundeven_v32f16(<32 x half> %x) {
+; CHECK-LABEL: roundeven_v32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI5_0)
+; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a0)
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vfabs.v v12, v8
+; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <32 x half> @llvm.roundeven.v32f16(<32 x half> %x)
+  ret <32 x half> %a
+}
+declare <32 x half> @llvm.roundeven.v32f16(<32 x half>)
+
+define <1 x float> @roundeven_v1f32(<1 x float> %x) {
+; CHECK-LABEL: roundeven_v1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI6_0)
+; CHECK-NEXT:    flw ft0, %lo(.LCPI6_0)(a0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
+; CHECK-NEXT:    vfabs.v v9, v8
+; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <1 x float> @llvm.roundeven.v1f32(<1 x float> %x)
+  ret <1 x float> %a
+}
+declare <1 x float> @llvm.roundeven.v1f32(<1 x float>)
+
+define <2 x float> @roundeven_v2f32(<2 x float> %x) {
+; CHECK-LABEL: roundeven_v2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI7_0)
+; CHECK-NEXT:    flw ft0, %lo(.LCPI7_0)(a0)
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT:    vfabs.v v9, v8
+; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <2 x float> @llvm.roundeven.v2f32(<2 x float> %x)
+  ret <2 x float> %a
+}
+declare <2 x float> @llvm.roundeven.v2f32(<2 x float>)
+
+define <4 x float> @roundeven_v4f32(<4 x float> %x) {
+; CHECK-LABEL: roundeven_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI8_0)
+; CHECK-NEXT:    flw ft0, %lo(.LCPI8_0)(a0)
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vfabs.v v9, v8
+; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %x)
+  ret <4 x float> %a
+}
+declare <4 x float> @llvm.roundeven.v4f32(<4 x float>)
+
+define <8 x float> @roundeven_v8f32(<8 x float> %x) {
+; CHECK-LABEL: roundeven_v8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI9_0)
+; CHECK-NEXT:    flw ft0, %lo(.LCPI9_0)(a0)
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT:    vfabs.v v10, v8
+; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <8 x float> @llvm.roundeven.v8f32(<8 x float> %x)
+  ret <8 x float> %a
+}
+declare <8 x float> @llvm.roundeven.v8f32(<8 x float>)
+
+define <16 x float> @roundeven_v16f32(<16 x float> %x) {
+; CHECK-LABEL: roundeven_v16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI10_0)
+; CHECK-NEXT:    flw ft0, %lo(.LCPI10_0)(a0)
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT:    vfabs.v v12, v8
+; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <16 x float> @llvm.roundeven.v16f32(<16 x float> %x)
+  ret <16 x float> %a
+}
+declare <16 x float> @llvm.roundeven.v16f32(<16 x float>)
+
+define <1 x double> @roundeven_v1f64(<1 x double> %x) {
+; CHECK-LABEL: roundeven_v1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI11_0)
+; CHECK-NEXT:    fld ft0, %lo(.LCPI11_0)(a0)
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT:    vfabs.v v9, v8
+; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <1 x double> @llvm.roundeven.v1f64(<1 x double> %x)
+  ret <1 x double> %a
+}
+declare <1 x double> @llvm.roundeven.v1f64(<1 x double>)
+
+define <2 x double> @roundeven_v2f64(<2 x double> %x) {
+; CHECK-LABEL: roundeven_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI12_0)
+; CHECK-NEXT:    fld ft0, %lo(.LCPI12_0)(a0)
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT:    vfabs.v v9, v8
+; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %x)
+  ret <2 x double> %a
+}
+declare <2 x double> @llvm.roundeven.v2f64(<2 x double>)
+
+define <4 x double> @roundeven_v4f64(<4 x double> %x) {
+; CHECK-LABEL: roundeven_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI13_0)
+; CHECK-NEXT:    fld ft0, %lo(.LCPI13_0)(a0)
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT:    vfabs.v v10, v8
+; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <4 x double> @llvm.roundeven.v4f64(<4 x double> %x)
+  ret <4 x double> %a
+}
+declare <4 x double> @llvm.roundeven.v4f64(<4 x double>)
+
+define <8 x double> @roundeven_v8f64(<8 x double> %x) {
+; CHECK-LABEL: roundeven_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI14_0)
+; CHECK-NEXT:    fld ft0, %lo(.LCPI14_0)(a0)
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vfabs.v v12, v8
+; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <8 x double> @llvm.roundeven.v8f64(<8 x double> %x)
+  ret <8 x double> %a
+}
+declare <8 x double> @llvm.roundeven.v8f64(<8 x double>)

diff --git a/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll
new file mode 100644
index 0000000000000..79701111ff0df
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll
@@ -0,0 +1,292 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+; This file tests code generation for `llvm.roundeven.*` on scalable vector types.
+
+define <vscale x 1 x half> @roundeven_nxv1f16(<vscale x 1 x half> %x) {
+; CHECK-LABEL: roundeven_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI0_0)
+; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
+; CHECK-NEXT:    vfabs.v v9, v8
+; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x half> @llvm.roundeven.nxv1f16(<vscale x 1 x half> %x)
+  ret <vscale x 1 x half> %a
+}
+declare <vscale x 1 x half> @llvm.roundeven.nxv1f16(<vscale x 1 x half>)
+
+define <vscale x 2 x half> @roundeven_nxv2f16(<vscale x 2 x half> %x) {
+; CHECK-LABEL: roundeven_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI1_0)
+; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vfabs.v v9, v8
+; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x half> @llvm.roundeven.nxv2f16(<vscale x 2 x half> %x)
+  ret <vscale x 2 x half> %a
+}
+declare <vscale x 2 x half> @llvm.roundeven.nxv2f16(<vscale x 2 x half>)
+
+define <vscale x 4 x half> @roundeven_nxv4f16(<vscale x 4 x half> %x) {
+; CHECK-LABEL: roundeven_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI2_0)
+; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vfabs.v v9, v8
+; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x half> @llvm.roundeven.nxv4f16(<vscale x 4 x half> %x)
+  ret <vscale x 4 x half> %a
+}
+declare <vscale x 4 x half> @llvm.roundeven.nxv4f16(<vscale x 4 x half>)
+
+define <vscale x 8 x half> @roundeven_nxv8f16(<vscale x 8 x half> %x) {
+; CHECK-LABEL: roundeven_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI3_0)
+; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
+; CHECK-NEXT:    vfabs.v v10, v8
+; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x half> @llvm.roundeven.nxv8f16(<vscale x 8 x half> %x)
+  ret <vscale x 8 x half> %a
+}
+declare <vscale x 8 x half> @llvm.roundeven.nxv8f16(<vscale x 8 x half>)
+
+define <vscale x 16 x half> @roundeven_nxv16f16(<vscale x 16 x half> %x) {
+; CHECK-LABEL: roundeven_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI4_0)
+; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
+; CHECK-NEXT:    vfabs.v v12, v8
+; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x half> @llvm.roundeven.nxv16f16(<vscale x 16 x half> %x)
+  ret <vscale x 16 x half> %a
+}
+declare <vscale x 16 x half> @llvm.roundeven.nxv16f16(<vscale x 16 x half>)
+
+define <vscale x 32 x half> @roundeven_nxv32f16(<vscale x 32 x half> %x) {
+; CHECK-LABEL: roundeven_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI5_0)
+; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
+; CHECK-NEXT:    vfabs.v v16, v8
+; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x half> @llvm.roundeven.nxv32f16(<vscale x 32 x half> %x)
+  ret <vscale x 32 x half> %a
+}
+declare <vscale x 32 x half> @llvm.roundeven.nxv32f16(<vscale x 32 x half>)
+
+define <vscale x 1 x float> @roundeven_nxv1f32(<vscale x 1 x float> %x) {
+; CHECK-LABEL: roundeven_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI6_0)
+; CHECK-NEXT:    flw ft0, %lo(.LCPI6_0)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vfabs.v v9, v8
+; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x float> @llvm.roundeven.nxv1f32(<vscale x 1 x float> %x)
+  ret <vscale x 1 x float> %a
+}
+declare <vscale x 1 x float> @llvm.roundeven.nxv1f32(<vscale x 1 x float>)
+
+define <vscale x 2 x float> @roundeven_nxv2f32(<vscale x 2 x float> %x) {
+; CHECK-LABEL: roundeven_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI7_0)
+; CHECK-NEXT:    flw ft0, %lo(.LCPI7_0)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vfabs.v v9, v8
+; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x float> @llvm.roundeven.nxv2f32(<vscale x 2 x float> %x)
+  ret <vscale x 2 x float> %a
+}
+declare <vscale x 2 x float> @llvm.roundeven.nxv2f32(<vscale x 2 x float>)
+
+define <vscale x 4 x float> @roundeven_nxv4f32(<vscale x 4 x float> %x) {
+; CHECK-LABEL: roundeven_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI8_0)
+; CHECK-NEXT:    flw ft0, %lo(.LCPI8_0)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vfabs.v v10, v8
+; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x float> @llvm.roundeven.nxv4f32(<vscale x 4 x float> %x)
+  ret <vscale x 4 x float> %a
+}
+declare <vscale x 4 x float> @llvm.roundeven.nxv4f32(<vscale x 4 x float>)
+
+define <vscale x 8 x float> @roundeven_nxv8f32(<vscale x 8 x float> %x) {
+; CHECK-LABEL: roundeven_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI9_0)
+; CHECK-NEXT:    flw ft0, %lo(.LCPI9_0)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vfabs.v v12, v8
+; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x float> @llvm.roundeven.nxv8f32(<vscale x 8 x float> %x)
+  ret <vscale x 8 x float> %a
+}
+declare <vscale x 8 x float> @llvm.roundeven.nxv8f32(<vscale x 8 x float>)
+
+define <vscale x 16 x float> @roundeven_nxv16f32(<vscale x 16 x float> %x) {
+; CHECK-LABEL: roundeven_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI10_0)
+; CHECK-NEXT:    flw ft0, %lo(.LCPI10_0)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
+; CHECK-NEXT:    vfabs.v v16, v8
+; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x float> @llvm.roundeven.nxv16f32(<vscale x 16 x float> %x)
+  ret <vscale x 16 x float> %a
+}
+declare <vscale x 16 x float> @llvm.roundeven.nxv16f32(<vscale x 16 x float>)
+
+define <vscale x 1 x double> @roundeven_nxv1f64(<vscale x 1 x double> %x) {
+; CHECK-LABEL: roundeven_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI11_0)
+; CHECK-NEXT:    fld ft0, %lo(.LCPI11_0)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vfabs.v v9, v8
+; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x double> @llvm.roundeven.nxv1f64(<vscale x 1 x double> %x)
+  ret <vscale x 1 x double> %a
+}
+declare <vscale x 1 x double> @llvm.roundeven.nxv1f64(<vscale x 1 x double>)
+
+define <vscale x 2 x double> @roundeven_nxv2f64(<vscale x 2 x double> %x) {
+; CHECK-LABEL: roundeven_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI12_0)
+; CHECK-NEXT:    fld ft0, %lo(.LCPI12_0)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; CHECK-NEXT:    vfabs.v v10, v8
+; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x double> @llvm.roundeven.nxv2f64(<vscale x 2 x double> %x)
+  ret <vscale x 2 x double> %a
+}
+declare <vscale x 2 x double> @llvm.roundeven.nxv2f64(<vscale x 2 x double>)
+
+define <vscale x 4 x double> @roundeven_nxv4f64(<vscale x 4 x double> %x) {
+; CHECK-LABEL: roundeven_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI13_0)
+; CHECK-NEXT:    fld ft0, %lo(.LCPI13_0)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; CHECK-NEXT:    vfabs.v v12, v8
+; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x double> @llvm.roundeven.nxv4f64(<vscale x 4 x double> %x)
+  ret <vscale x 4 x double> %a
+}
+declare <vscale x 4 x double> @llvm.roundeven.nxv4f64(<vscale x 4 x double>)
+
+define <vscale x 8 x double> @roundeven_nxv8f64(<vscale x 8 x double> %x) {
+; CHECK-LABEL: roundeven_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI14_0)
+; CHECK-NEXT:    fld ft0, %lo(.LCPI14_0)(a0)
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vfabs.v v16, v8
+; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x double> @llvm.roundeven.nxv8f64(<vscale x 8 x double> %x)
+  ret <vscale x 8 x double> %a
+}
+declare <vscale x 8 x double> @llvm.roundeven.nxv8f64(<vscale x 8 x double>)
