[llvm] [RISCV] Split fp rounding ops with zvfhmin nxv32f16 (PR #108765)
Luke Lau via llvm-commits
llvm-commits at lists.llvm.org
Sun Sep 15 22:43:20 PDT 2024
https://github.com/lukel97 updated https://github.com/llvm/llvm-project/pull/108765
From 2f4d238eb3a673025a49af05e7a38bd618f8fa86 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Sun, 15 Sep 2024 00:55:43 +0800
Subject: [PATCH 1/2] [RISCV] Split fp rounding ops with zvfhmin nxv32f16
This adds zvfhmin test coverage for fceil, ffloor, fnearbyint, frint, fround, and froundeven, and splits these ops at nxv32f16 to avoid crashing, similarly to what we do for other nodes that we promote.
This also sets ftrunc to promote, which was previously missing. We already promote its VP counterpart, vp_froundtozero.
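For example (taken from the updated fceil test below), lowering this IR with -mattr=+zvfhmin but without +zvfh previously crashed:

    define <vscale x 32 x half> @ceil_nxv32f16(<vscale x 32 x half> %x) {
      %a = call <vscale x 32 x half> @llvm.ceil.nxv32f16(<vscale x 32 x half> %x)
      ret <vscale x 32 x half> %a
    }

With this patch the operation is split into two nxv16f16 halves, each of which is widened to nxv16f32, rounded there, and narrowed back, as the ZVFHMIN check lines below show.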
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 17 +-
llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll | 304 ++++++++++++-----
llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll | 304 ++++++++++++-----
.../CodeGen/RISCV/rvv/fnearbyint-sdnode.ll | 319 +++++++++++++-----
llvm/test/CodeGen/RISCV/rvv/frint-sdnode.ll | 268 +++++++++++----
llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll | 304 ++++++++++++-----
.../CodeGen/RISCV/rvv/froundeven-sdnode.ll | 304 ++++++++++++-----
llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll | 268 +++++++++++----
8 files changed, 1504 insertions(+), 584 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 6f2dc710cb3d4d..7b1b35160cf460 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -938,12 +938,13 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
// TODO: support more ops.
static const unsigned ZvfhminPromoteOps[] = {
- ISD::FMINNUM, ISD::FMAXNUM, ISD::FADD, ISD::FSUB,
- ISD::FMUL, ISD::FMA, ISD::FDIV, ISD::FSQRT,
- ISD::FCEIL, ISD::FFLOOR, ISD::FROUND, ISD::FROUNDEVEN,
- ISD::FRINT, ISD::FNEARBYINT, ISD::IS_FPCLASS, ISD::SETCC,
- ISD::FMAXIMUM, ISD::FMINIMUM, ISD::STRICT_FADD, ISD::STRICT_FSUB,
- ISD::STRICT_FMUL, ISD::STRICT_FDIV, ISD::STRICT_FSQRT, ISD::STRICT_FMA};
+ ISD::FMINNUM, ISD::FMAXNUM, ISD::FADD, ISD::FSUB,
+ ISD::FMUL, ISD::FMA, ISD::FDIV, ISD::FSQRT,
+ ISD::FCEIL, ISD::FTRUNC, ISD::FFLOOR, ISD::FROUND,
+ ISD::FROUNDEVEN, ISD::FRINT, ISD::FNEARBYINT, ISD::IS_FPCLASS,
+ ISD::SETCC, ISD::FMAXIMUM, ISD::FMINIMUM, ISD::STRICT_FADD,
+ ISD::STRICT_FSUB, ISD::STRICT_FMUL, ISD::STRICT_FDIV, ISD::STRICT_FSQRT,
+ ISD::STRICT_FMA};
// TODO: support more vp ops.
static const unsigned ZvfhminPromoteVPOps[] = {ISD::VP_FADD,
@@ -6926,6 +6927,10 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
case ISD::FRINT:
case ISD::FROUND:
case ISD::FROUNDEVEN:
+ if (Op.getValueType() == MVT::nxv32f16 &&
+ (Subtarget.hasVInstructionsF16Minimal() &&
+ !Subtarget.hasVInstructionsF16()))
+ return SplitVectorOp(Op, DAG);
return lowerFTRUNC_FCEIL_FFLOOR_FROUND(Op, DAG, Subtarget);
case ISD::LRINT:
case ISD::LLRINT:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll
index 9efc3183f15a52..111d1d8e07d3bf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll
@@ -1,124 +1,256 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=ilp32d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s \
+; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s \
+; RUN: --check-prefixes=CHECK,ZVFHMIN
define <vscale x 1 x half> @ceil_nxv1f16(<vscale x 1 x half> %x) {
-; CHECK-LABEL: ceil_nxv1f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: ceil_nxv1f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI0_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; ZVFH-NEXT: fsrmi a0, 3
+; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT: fsrm a0
+; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: ceil_nxv1f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v9
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 3
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v9, v8, v9, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 1 x half> @llvm.ceil.nxv1f16(<vscale x 1 x half> %x)
ret <vscale x 1 x half> %a
}
declare <vscale x 1 x half> @llvm.ceil.nxv1f16(<vscale x 1 x half>)
define <vscale x 2 x half> @ceil_nxv2f16(<vscale x 2 x half> %x) {
-; CHECK-LABEL: ceil_nxv2f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: ceil_nxv2f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI1_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; ZVFH-NEXT: fsrmi a0, 3
+; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT: fsrm a0
+; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: ceil_nxv2f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v9
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 3
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v9, v8, v9, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 2 x half> @llvm.ceil.nxv2f16(<vscale x 2 x half> %x)
ret <vscale x 2 x half> %a
}
declare <vscale x 2 x half> @llvm.ceil.nxv2f16(<vscale x 2 x half>)
define <vscale x 4 x half> @ceil_nxv4f16(<vscale x 4 x half> %x) {
-; CHECK-LABEL: ceil_nxv4f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: ceil_nxv4f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI2_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; ZVFH-NEXT: fsrmi a0, 3
+; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT: fsrm a0
+; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: ceil_nxv4f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v10
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 3
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v10, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 4 x half> @llvm.ceil.nxv4f16(<vscale x 4 x half> %x)
ret <vscale x 4 x half> %a
}
declare <vscale x 4 x half> @llvm.ceil.nxv4f16(<vscale x 4 x half>)
define <vscale x 8 x half> @ceil_nxv8f16(<vscale x 8 x half> %x) {
-; CHECK-LABEL: ceil_nxv8f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: ceil_nxv8f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI3_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; ZVFH-NEXT: fsrmi a0, 3
+; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; ZVFH-NEXT: fsrm a0
+; ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: ceil_nxv8f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v12
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 3
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v12, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v12, v8, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 8 x half> @llvm.ceil.nxv8f16(<vscale x 8 x half> %x)
ret <vscale x 8 x half> %a
}
declare <vscale x 8 x half> @llvm.ceil.nxv8f16(<vscale x 8 x half>)
define <vscale x 16 x half> @ceil_nxv16f16(<vscale x 16 x half> %x) {
-; CHECK-LABEL: ceil_nxv16f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: ceil_nxv16f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI4_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfabs.v v12, v8
+; ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; ZVFH-NEXT: fsrmi a0, 3
+; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; ZVFH-NEXT: fsrm a0
+; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: ceil_nxv16f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v16
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 3
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v16, v8, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 16 x half> @llvm.ceil.nxv16f16(<vscale x 16 x half> %x)
ret <vscale x 16 x half> %a
}
declare <vscale x 16 x half> @llvm.ceil.nxv16f16(<vscale x 16 x half>)
define <vscale x 32 x half> @ceil_nxv32f16(<vscale x 32 x half> %x) {
-; CHECK-LABEL: ceil_nxv32f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI5_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: ceil_nxv32f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI5_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI5_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; ZVFH-NEXT: vfabs.v v16, v8
+; ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; ZVFH-NEXT: fsrmi a0, 3
+; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; ZVFH-NEXT: fsrm a0
+; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: ceil_nxv32f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v24, v16
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 3
+; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v24, v16
+; ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 3
+; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 32 x half> @llvm.ceil.nxv32f16(<vscale x 32 x half> %x)
ret <vscale x 32 x half> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll
index ec60b3ed3e0c88..97d84e91744038 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll
@@ -1,124 +1,256 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=ilp32d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s \
+; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s \
+; RUN: --check-prefixes=CHECK,ZVFHMIN
define <vscale x 1 x half> @floor_nxv1f16(<vscale x 1 x half> %x) {
-; CHECK-LABEL: floor_nxv1f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: floor_nxv1f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI0_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; ZVFH-NEXT: fsrmi a0, 2
+; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT: fsrm a0
+; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: floor_nxv1f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v9
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 2
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v9, v8, v9, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 1 x half> @llvm.floor.nxv1f16(<vscale x 1 x half> %x)
ret <vscale x 1 x half> %a
}
declare <vscale x 1 x half> @llvm.floor.nxv1f16(<vscale x 1 x half>)
define <vscale x 2 x half> @floor_nxv2f16(<vscale x 2 x half> %x) {
-; CHECK-LABEL: floor_nxv2f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: floor_nxv2f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI1_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; ZVFH-NEXT: fsrmi a0, 2
+; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT: fsrm a0
+; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: floor_nxv2f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v9
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 2
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v9, v8, v9, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 2 x half> @llvm.floor.nxv2f16(<vscale x 2 x half> %x)
ret <vscale x 2 x half> %a
}
declare <vscale x 2 x half> @llvm.floor.nxv2f16(<vscale x 2 x half>)
define <vscale x 4 x half> @floor_nxv4f16(<vscale x 4 x half> %x) {
-; CHECK-LABEL: floor_nxv4f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: floor_nxv4f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI2_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; ZVFH-NEXT: fsrmi a0, 2
+; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT: fsrm a0
+; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: floor_nxv4f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v10
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 2
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v10, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 4 x half> @llvm.floor.nxv4f16(<vscale x 4 x half> %x)
ret <vscale x 4 x half> %a
}
declare <vscale x 4 x half> @llvm.floor.nxv4f16(<vscale x 4 x half>)
define <vscale x 8 x half> @floor_nxv8f16(<vscale x 8 x half> %x) {
-; CHECK-LABEL: floor_nxv8f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: floor_nxv8f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI3_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; ZVFH-NEXT: fsrmi a0, 2
+; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; ZVFH-NEXT: fsrm a0
+; ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: floor_nxv8f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v12
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 2
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v12, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v12, v8, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 8 x half> @llvm.floor.nxv8f16(<vscale x 8 x half> %x)
ret <vscale x 8 x half> %a
}
declare <vscale x 8 x half> @llvm.floor.nxv8f16(<vscale x 8 x half>)
define <vscale x 16 x half> @floor_nxv16f16(<vscale x 16 x half> %x) {
-; CHECK-LABEL: floor_nxv16f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: floor_nxv16f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI4_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfabs.v v12, v8
+; ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; ZVFH-NEXT: fsrmi a0, 2
+; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; ZVFH-NEXT: fsrm a0
+; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: floor_nxv16f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v16
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 2
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v16, v8, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 16 x half> @llvm.floor.nxv16f16(<vscale x 16 x half> %x)
ret <vscale x 16 x half> %a
}
declare <vscale x 16 x half> @llvm.floor.nxv16f16(<vscale x 16 x half>)
define <vscale x 32 x half> @floor_nxv32f16(<vscale x 32 x half> %x) {
-; CHECK-LABEL: floor_nxv32f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI5_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: floor_nxv32f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI5_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI5_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; ZVFH-NEXT: vfabs.v v16, v8
+; ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; ZVFH-NEXT: fsrmi a0, 2
+; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; ZVFH-NEXT: fsrm a0
+; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: floor_nxv32f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v24, v16
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 2
+; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v24, v16
+; ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 2
+; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 32 x half> @llvm.floor.nxv32f16(<vscale x 32 x half> %x)
ret <vscale x 32 x half> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll
index 9e14852305caa1..0655b9d099cbb7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll
@@ -1,124 +1,271 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=ilp32d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s \
+; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s \
+; RUN: --check-prefixes=CHECK,ZVFHMIN
define <vscale x 1 x half> @nearbyint_nxv1f16(<vscale x 1 x half> %x) {
-; CHECK-LABEL: nearbyint_nxv1f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; ZVFH-LABEL: nearbyint_nxv1f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI0_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; ZVFH-NEXT: frflags a0
+; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT: fsflags a0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: nearbyint_nxv1f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v9
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: frflags a0
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v9, v8, v9, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: fsflags a0
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 1 x half> @llvm.nearbyint.nxv1f16(<vscale x 1 x half> %x)
ret <vscale x 1 x half> %a
}
declare <vscale x 1 x half> @llvm.nearbyint.nxv1f16(<vscale x 1 x half>)
define <vscale x 2 x half> @nearbyint_nxv2f16(<vscale x 2 x half> %x) {
-; CHECK-LABEL: nearbyint_nxv2f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; ZVFH-LABEL: nearbyint_nxv2f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI1_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; ZVFH-NEXT: frflags a0
+; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT: fsflags a0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: nearbyint_nxv2f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v9
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: frflags a0
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v9, v8, v9, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: fsflags a0
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 2 x half> @llvm.nearbyint.nxv2f16(<vscale x 2 x half> %x)
ret <vscale x 2 x half> %a
}
declare <vscale x 2 x half> @llvm.nearbyint.nxv2f16(<vscale x 2 x half>)
define <vscale x 4 x half> @nearbyint_nxv4f16(<vscale x 4 x half> %x) {
-; CHECK-LABEL: nearbyint_nxv4f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; ZVFH-LABEL: nearbyint_nxv4f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI2_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; ZVFH-NEXT: frflags a0
+; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT: fsflags a0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: nearbyint_nxv4f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v10
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: frflags a0
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v10, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-NEXT: fsflags a0
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 4 x half> @llvm.nearbyint.nxv4f16(<vscale x 4 x half> %x)
ret <vscale x 4 x half> %a
}
declare <vscale x 4 x half> @llvm.nearbyint.nxv4f16(<vscale x 4 x half>)
define <vscale x 8 x half> @nearbyint_nxv8f16(<vscale x 8 x half> %x) {
-; CHECK-LABEL: nearbyint_nxv8f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; ZVFH-LABEL: nearbyint_nxv8f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI3_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; ZVFH-NEXT: frflags a0
+; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; ZVFH-NEXT: fsflags a0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: nearbyint_nxv8f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v12
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: frflags a0
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v12, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v12, v8, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: fsflags a0
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 8 x half> @llvm.nearbyint.nxv8f16(<vscale x 8 x half> %x)
ret <vscale x 8 x half> %a
}
declare <vscale x 8 x half> @llvm.nearbyint.nxv8f16(<vscale x 8 x half>)
define <vscale x 16 x half> @nearbyint_nxv16f16(<vscale x 16 x half> %x) {
-; CHECK-LABEL: nearbyint_nxv16f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; ZVFH-LABEL: nearbyint_nxv16f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI4_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfabs.v v12, v8
+; ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; ZVFH-NEXT: frflags a0
+; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; ZVFH-NEXT: fsflags a0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: nearbyint_nxv16f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v16
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: frflags a0
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v16, v8, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: fsflags a0
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 16 x half> @llvm.nearbyint.nxv16f16(<vscale x 16 x half> %x)
ret <vscale x 16 x half> %a
}
declare <vscale x 16 x half> @llvm.nearbyint.nxv16f16(<vscale x 16 x half>)
define <vscale x 32 x half> @nearbyint_nxv32f16(<vscale x 32 x half> %x) {
-; CHECK-LABEL: nearbyint_nxv32f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI5_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; ZVFH-LABEL: nearbyint_nxv32f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI5_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI5_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; ZVFH-NEXT: vfabs.v v16, v8
+; ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; ZVFH-NEXT: frflags a0
+; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; ZVFH-NEXT: fsflags a0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: nearbyint_nxv32f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: sub sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v24, v16
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; ZVFHMIN-NEXT: frflags a0
+; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; ZVFHMIN-NEXT: fsflags a0
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v24
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: frflags a0
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
+; ZVFHMIN-NEXT: addi a1, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v24, v16, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
+; ZVFHMIN-NEXT: fsflags a0
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: addi sp, sp, 16
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 32 x half> @llvm.nearbyint.nxv32f16(<vscale x 32 x half> %x)
ret <vscale x 32 x half> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/frint-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/frint-sdnode.ll
index fb77b746549400..ca1f72ee4d524b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/frint-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/frint-sdnode.ll
@@ -1,112 +1,232 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=ilp32d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s \
+; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s \
+; RUN: --check-prefixes=CHECK,ZVFHMIN
define <vscale x 1 x half> @rint_nxv1f16(<vscale x 1 x half> %x) {
-; CHECK-LABEL: rint_nxv1f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: rint_nxv1f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI0_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: rint_nxv1f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v9
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v9, v8, v9, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 1 x half> @llvm.rint.nxv1f16(<vscale x 1 x half> %x)
ret <vscale x 1 x half> %a
}
declare <vscale x 1 x half> @llvm.rint.nxv1f16(<vscale x 1 x half>)
define <vscale x 2 x half> @rint_nxv2f16(<vscale x 2 x half> %x) {
-; CHECK-LABEL: rint_nxv2f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: rint_nxv2f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI1_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: rint_nxv2f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v9
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v9, v8, v9, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 2 x half> @llvm.rint.nxv2f16(<vscale x 2 x half> %x)
ret <vscale x 2 x half> %a
}
declare <vscale x 2 x half> @llvm.rint.nxv2f16(<vscale x 2 x half>)
define <vscale x 4 x half> @rint_nxv4f16(<vscale x 4 x half> %x) {
-; CHECK-LABEL: rint_nxv4f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: rint_nxv4f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI2_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: rint_nxv4f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v10
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v10, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 4 x half> @llvm.rint.nxv4f16(<vscale x 4 x half> %x)
ret <vscale x 4 x half> %a
}
declare <vscale x 4 x half> @llvm.rint.nxv4f16(<vscale x 4 x half>)
define <vscale x 8 x half> @rint_nxv8f16(<vscale x 8 x half> %x) {
-; CHECK-LABEL: rint_nxv8f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: rint_nxv8f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI3_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: rint_nxv8f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v12
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v12, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v12, v8, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 8 x half> @llvm.rint.nxv8f16(<vscale x 8 x half> %x)
ret <vscale x 8 x half> %a
}
declare <vscale x 8 x half> @llvm.rint.nxv8f16(<vscale x 8 x half>)
define <vscale x 16 x half> @rint_nxv16f16(<vscale x 16 x half> %x) {
-; CHECK-LABEL: rint_nxv16f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: rint_nxv16f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI4_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfabs.v v12, v8
+; ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: rint_nxv16f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v16
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v16, v8, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 16 x half> @llvm.rint.nxv16f16(<vscale x 16 x half> %x)
ret <vscale x 16 x half> %a
}
declare <vscale x 16 x half> @llvm.rint.nxv16f16(<vscale x 16 x half>)
define <vscale x 32 x half> @rint_nxv32f16(<vscale x 32 x half> %x) {
-; CHECK-LABEL: rint_nxv32f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI5_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: rint_nxv32f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI5_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI5_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; ZVFH-NEXT: vfabs.v v16, v8
+; ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: rint_nxv32f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v24, v16
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v24
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v24, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v24, v16, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 32 x half> @llvm.rint.nxv32f16(<vscale x 32 x half> %x)
ret <vscale x 32 x half> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
index bb6724eeb32006..a39abcc6ed0e27 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
@@ -1,126 +1,258 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=ilp32d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s \
+; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s \
+; RUN: --check-prefixes=CHECK,ZVFHMIN
; This file tests the code generation for `llvm.round.*` on scalable vector type.
define <vscale x 1 x half> @round_nxv1f16(<vscale x 1 x half> %x) {
-; CHECK-LABEL: round_nxv1f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: round_nxv1f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI0_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; ZVFH-NEXT: fsrmi a0, 4
+; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT: fsrm a0
+; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: round_nxv1f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v9
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 4
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v9, v8, v9, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 1 x half> @llvm.round.nxv1f16(<vscale x 1 x half> %x)
ret <vscale x 1 x half> %a
}
declare <vscale x 1 x half> @llvm.round.nxv1f16(<vscale x 1 x half>)
define <vscale x 2 x half> @round_nxv2f16(<vscale x 2 x half> %x) {
-; CHECK-LABEL: round_nxv2f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: round_nxv2f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI1_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; ZVFH-NEXT: fsrmi a0, 4
+; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT: fsrm a0
+; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: round_nxv2f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v9
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 4
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v9, v8, v9, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 2 x half> @llvm.round.nxv2f16(<vscale x 2 x half> %x)
ret <vscale x 2 x half> %a
}
declare <vscale x 2 x half> @llvm.round.nxv2f16(<vscale x 2 x half>)
define <vscale x 4 x half> @round_nxv4f16(<vscale x 4 x half> %x) {
-; CHECK-LABEL: round_nxv4f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: round_nxv4f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI2_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; ZVFH-NEXT: fsrmi a0, 4
+; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT: fsrm a0
+; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: round_nxv4f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v10
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 4
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v10, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 4 x half> @llvm.round.nxv4f16(<vscale x 4 x half> %x)
ret <vscale x 4 x half> %a
}
declare <vscale x 4 x half> @llvm.round.nxv4f16(<vscale x 4 x half>)
define <vscale x 8 x half> @round_nxv8f16(<vscale x 8 x half> %x) {
-; CHECK-LABEL: round_nxv8f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: round_nxv8f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI3_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; ZVFH-NEXT: fsrmi a0, 4
+; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; ZVFH-NEXT: fsrm a0
+; ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: round_nxv8f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v12
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 4
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v12, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v12, v8, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 8 x half> @llvm.round.nxv8f16(<vscale x 8 x half> %x)
ret <vscale x 8 x half> %a
}
declare <vscale x 8 x half> @llvm.round.nxv8f16(<vscale x 8 x half>)
define <vscale x 16 x half> @round_nxv16f16(<vscale x 16 x half> %x) {
-; CHECK-LABEL: round_nxv16f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: round_nxv16f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI4_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfabs.v v12, v8
+; ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; ZVFH-NEXT: fsrmi a0, 4
+; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; ZVFH-NEXT: fsrm a0
+; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: round_nxv16f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v16
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 4
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v16, v8, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 16 x half> @llvm.round.nxv16f16(<vscale x 16 x half> %x)
ret <vscale x 16 x half> %a
}
declare <vscale x 16 x half> @llvm.round.nxv16f16(<vscale x 16 x half>)
define <vscale x 32 x half> @round_nxv32f16(<vscale x 32 x half> %x) {
-; CHECK-LABEL: round_nxv32f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI5_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: round_nxv32f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI5_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI5_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; ZVFH-NEXT: vfabs.v v16, v8
+; ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; ZVFH-NEXT: fsrmi a0, 4
+; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; ZVFH-NEXT: fsrm a0
+; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: round_nxv32f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v24, v16
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 4
+; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v24, v16
+; ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 4
+; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 32 x half> @llvm.round.nxv32f16(<vscale x 32 x half> %x)
ret <vscale x 32 x half> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll
index 6f5207a25518f5..52ad443bfdebda 100644
--- a/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll
@@ -1,126 +1,258 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=ilp32d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s \
+; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s \
+; RUN: --check-prefixes=CHECK,ZVFHMIN
; This file tests the code generation for `llvm.roundeven.*` on scalable vector types.
define <vscale x 1 x half> @roundeven_nxv1f16(<vscale x 1 x half> %x) {
-; CHECK-LABEL: roundeven_nxv1f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: roundeven_nxv1f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI0_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; ZVFH-NEXT: fsrmi a0, 0
+; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT: fsrm a0
+; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: roundeven_nxv1f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v9
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 0
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v9, v8, v9, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 1 x half> @llvm.roundeven.nxv1f16(<vscale x 1 x half> %x)
ret <vscale x 1 x half> %a
}
declare <vscale x 1 x half> @llvm.roundeven.nxv1f16(<vscale x 1 x half>)
define <vscale x 2 x half> @roundeven_nxv2f16(<vscale x 2 x half> %x) {
-; CHECK-LABEL: roundeven_nxv2f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: roundeven_nxv2f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI1_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; ZVFH-NEXT: fsrmi a0, 0
+; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT: fsrm a0
+; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: roundeven_nxv2f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v9
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 0
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v9, v8, v9, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 2 x half> @llvm.roundeven.nxv2f16(<vscale x 2 x half> %x)
ret <vscale x 2 x half> %a
}
declare <vscale x 2 x half> @llvm.roundeven.nxv2f16(<vscale x 2 x half>)
define <vscale x 4 x half> @roundeven_nxv4f16(<vscale x 4 x half> %x) {
-; CHECK-LABEL: roundeven_nxv4f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: roundeven_nxv4f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI2_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; ZVFH-NEXT: fsrmi a0, 0
+; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT: fsrm a0
+; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: roundeven_nxv4f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v10
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 0
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v10, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 4 x half> @llvm.roundeven.nxv4f16(<vscale x 4 x half> %x)
ret <vscale x 4 x half> %a
}
declare <vscale x 4 x half> @llvm.roundeven.nxv4f16(<vscale x 4 x half>)
define <vscale x 8 x half> @roundeven_nxv8f16(<vscale x 8 x half> %x) {
-; CHECK-LABEL: roundeven_nxv8f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: roundeven_nxv8f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI3_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; ZVFH-NEXT: fsrmi a0, 0
+; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; ZVFH-NEXT: fsrm a0
+; ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: roundeven_nxv8f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v12
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 0
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v12, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v12, v8, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 8 x half> @llvm.roundeven.nxv8f16(<vscale x 8 x half> %x)
ret <vscale x 8 x half> %a
}
declare <vscale x 8 x half> @llvm.roundeven.nxv8f16(<vscale x 8 x half>)
define <vscale x 16 x half> @roundeven_nxv16f16(<vscale x 16 x half> %x) {
-; CHECK-LABEL: roundeven_nxv16f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: roundeven_nxv16f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI4_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfabs.v v12, v8
+; ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; ZVFH-NEXT: fsrmi a0, 0
+; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; ZVFH-NEXT: fsrm a0
+; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: roundeven_nxv16f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v16
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 0
+; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v16, v8, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 16 x half> @llvm.roundeven.nxv16f16(<vscale x 16 x half> %x)
ret <vscale x 16 x half> %a
}
declare <vscale x 16 x half> @llvm.roundeven.nxv16f16(<vscale x 16 x half>)
define <vscale x 32 x half> @roundeven_nxv32f16(<vscale x 32 x half> %x) {
-; CHECK-LABEL: roundeven_nxv32f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI5_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: roundeven_nxv32f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI5_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI5_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; ZVFH-NEXT: vfabs.v v16, v8
+; ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; ZVFH-NEXT: fsrmi a0, 0
+; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; ZVFH-NEXT: fsrm a0
+; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: roundeven_nxv32f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v24, v16
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 0
+; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v24, v16
+; ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; ZVFHMIN-NEXT: fsrmi a0, 0
+; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; ZVFHMIN-NEXT: fsrm a0
+; ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 32 x half> @llvm.roundeven.nxv32f16(<vscale x 32 x half> %x)
ret <vscale x 32 x half> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll
index 8841232e7f76df..971424e8cea09e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll
@@ -1,112 +1,232 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=ilp32d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s \
+; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s \
+; RUN: --check-prefixes=CHECK,ZVFHMIN
define <vscale x 1 x half> @trunc_nxv1f16(<vscale x 1 x half> %x) {
-; CHECK-LABEL: trunc_nxv1f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: trunc_nxv1f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI0_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; ZVFH-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: trunc_nxv1f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v9
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v9, v8, v9, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 1 x half> @llvm.trunc.nxv1f16(<vscale x 1 x half> %x)
ret <vscale x 1 x half> %a
}
declare <vscale x 1 x half> @llvm.trunc.nxv1f16(<vscale x 1 x half>)
define <vscale x 2 x half> @trunc_nxv2f16(<vscale x 2 x half> %x) {
-; CHECK-LABEL: trunc_nxv2f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: trunc_nxv2f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI1_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; ZVFH-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: trunc_nxv2f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v9
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v9, v8, v9, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 2 x half> @llvm.trunc.nxv2f16(<vscale x 2 x half> %x)
ret <vscale x 2 x half> %a
}
declare <vscale x 2 x half> @llvm.trunc.nxv2f16(<vscale x 2 x half>)
define <vscale x 4 x half> @trunc_nxv4f16(<vscale x 4 x half> %x) {
-; CHECK-LABEL: trunc_nxv4f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: trunc_nxv4f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI2_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; ZVFH-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: trunc_nxv4f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v10
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v8, v10, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 4 x half> @llvm.trunc.nxv4f16(<vscale x 4 x half> %x)
ret <vscale x 4 x half> %a
}
declare <vscale x 4 x half> @llvm.trunc.nxv4f16(<vscale x 4 x half>)
define <vscale x 8 x half> @trunc_nxv8f16(<vscale x 8 x half> %x) {
-; CHECK-LABEL: trunc_nxv8f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: trunc_nxv8f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI3_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; ZVFH-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
+; ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: trunc_nxv8f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v12
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v8, v12, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v12, v8, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 8 x half> @llvm.trunc.nxv8f16(<vscale x 8 x half> %x)
ret <vscale x 8 x half> %a
}
declare <vscale x 8 x half> @llvm.trunc.nxv8f16(<vscale x 8 x half>)
define <vscale x 16 x half> @trunc_nxv16f16(<vscale x 16 x half> %x) {
-; CHECK-LABEL: trunc_nxv16f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: trunc_nxv16f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI4_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfabs.v v12, v8
+; ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; ZVFH-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
+; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: trunc_nxv16f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v16
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v8, v16, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v16, v8, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 16 x half> @llvm.trunc.nxv16f16(<vscale x 16 x half> %x)
ret <vscale x 16 x half> %a
}
declare <vscale x 16 x half> @llvm.trunc.nxv16f16(<vscale x 16 x half>)
define <vscale x 32 x half> @trunc_nxv32f16(<vscale x 32 x half> %x) {
-; CHECK-LABEL: trunc_nxv32f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI5_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: trunc_nxv32f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, %hi(.LCPI5_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI5_0)(a0)
+; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; ZVFH-NEXT: vfabs.v v16, v8
+; ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; ZVFH-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t
+; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
+; ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: trunc_nxv32f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v24, v16
+; ZVFHMIN-NEXT: lui a0, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a0
+; ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v24, v16, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v24
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v16, v24, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v24, v16, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
+; ZVFHMIN-NEXT: ret
%a = call <vscale x 32 x half> @llvm.trunc.nxv32f16(<vscale x 32 x half> %x)
ret <vscale x 32 x half> %a
}
>From 0350ec6b7d17b8442565b8f1b44cf5d6202e0fe2 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Mon, 16 Sep 2024 13:42:37 +0800
Subject: [PATCH 2/2] Update fixed-length vector tests
Marking ftrunc as promoted means these fixed-length ops are no longer expanded (scalarized element by element through a stack round-trip).
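For illustration, a reduced sketch of the kind of IR affected (the value-based signature and function name here are made up; the actual test trunc_v8f16 in fixed-vectors-fp.ll loads and stores through a pointer). With -mattr=+zvfhmin and no +zvfh, this should now promote to f32 and use the masked vfcvt.rtz.x.f.v sequence rather than being expanded:

; Hypothetical reduced example mirroring the existing trunc_v8f16 test.
; With +zvfhmin (no +zvfh), FTRUNC on v8f16 is promoted to v8f32 and
; lowered with the masked convert/copysign idiom, instead of being
; expanded into per-element compare-and-convert branches via the stack.
define <8 x half> @trunc_v8f16_sketch(<8 x half> %x) {
  %a = call <8 x half> @llvm.trunc.v8f16(<8 x half> %x)
  ret <8 x half> %a
}
declare <8 x half> @llvm.trunc.v8f16(<8 x half>)

The cost-model changes below reflect the same shift: the fixed-length trunc and vp.roundtozero costs drop from the expansion estimates to 1.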
---
llvm/test/Analysis/CostModel/RISCV/fround.ll | 16 +-
.../CodeGen/RISCV/rvv/fixed-vectors-fp.ll | 943 +-----------------
2 files changed, 45 insertions(+), 914 deletions(-)
diff --git a/llvm/test/Analysis/CostModel/RISCV/fround.ll b/llvm/test/Analysis/CostModel/RISCV/fround.ll
index dc501b82417d3d..b4740f223eca3a 100644
--- a/llvm/test/Analysis/CostModel/RISCV/fround.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/fround.ll
@@ -233,10 +233,10 @@ define void @trunc_fp16() {
;
; ZVFHMIN-LABEL: 'trunc_fp16'
; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %1 = call half @llvm.trunc.f16(half undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %2 = call <2 x half> @llvm.trunc.v2f16(<2 x half> undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 39 for instruction: %3 = call <4 x half> @llvm.trunc.v4f16(<4 x half> undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 79 for instruction: %4 = call <8 x half> @llvm.trunc.v8f16(<8 x half> undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 159 for instruction: %5 = call <16 x half> @llvm.trunc.v16f16(<16 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x half> @llvm.trunc.v2f16(<2 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x half> @llvm.trunc.v4f16(<4 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x half> @llvm.trunc.v8f16(<8 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x half> @llvm.trunc.v16f16(<16 x half> undef)
; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x half> @llvm.trunc.nxv1f16(<vscale x 1 x half> undef)
; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x half> @llvm.trunc.nxv2f16(<vscale x 2 x half> undef)
; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x half> @llvm.trunc.nxv4f16(<vscale x 4 x half> undef)
@@ -1108,10 +1108,10 @@ define void @vp_roundtozero_f16() {
; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
; ZVFHMIN-LABEL: 'vp_roundtozero_f16'
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %1 = call <2 x half> @llvm.vp.roundtozero.v2f16(<2 x half> undef, <2 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 46 for instruction: %2 = call <4 x half> @llvm.vp.roundtozero.v4f16(<4 x half> undef, <4 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 94 for instruction: %3 = call <8 x half> @llvm.vp.roundtozero.v8f16(<8 x half> undef, <8 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 190 for instruction: %4 = call <16 x half> @llvm.vp.roundtozero.v16f16(<16 x half> undef, <16 x i1> undef, i32 undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call <2 x half> @llvm.vp.roundtozero.v2f16(<2 x half> undef, <2 x i1> undef, i32 undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <4 x half> @llvm.vp.roundtozero.v4f16(<4 x half> undef, <4 x i1> undef, i32 undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <8 x half> @llvm.vp.roundtozero.v8f16(<8 x half> undef, <8 x i1> undef, i32 undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <16 x half> @llvm.vp.roundtozero.v16f16(<16 x half> undef, <16 x i1> undef, i32 undef)
; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <vscale x 1 x half> @llvm.vp.roundtozero.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef)
; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 2 x half> @llvm.vp.roundtozero.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef)
; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 4 x half> @llvm.vp.roundtozero.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
index d996a9c05aca4d..b5c40fbfaac6c9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
@@ -5545,457 +5545,24 @@ define void @trunc_v8f16(ptr %x) {
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMIN-ZFH-RV32-LABEL: trunc_v8f16:
-; ZVFHMIN-ZFH-RV32: # %bb.0:
-; ZVFHMIN-ZFH-RV32-NEXT: addi sp, sp, -16
-; ZVFHMIN-ZFH-RV32-NEXT: .cfi_def_cfa_offset 16
-; ZVFHMIN-ZFH-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-ZFH-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMIN-ZFH-RV32-NEXT: mv a1, sp
-; ZVFHMIN-ZFH-RV32-NEXT: vse16.v v8, (a1)
-; ZVFHMIN-ZFH-RV32-NEXT: flh fa4, 2(sp)
-; ZVFHMIN-ZFH-RV32-NEXT: lui a1, %hi(.LCPI115_0)
-; ZVFHMIN-ZFH-RV32-NEXT: flh fa5, %lo(.LCPI115_0)(a1)
-; ZVFHMIN-ZFH-RV32-NEXT: fabs.h fa3, fa4
-; ZVFHMIN-ZFH-RV32-NEXT: flt.h a1, fa3, fa5
-; ZVFHMIN-ZFH-RV32-NEXT: beqz a1, .LBB115_2
-; ZVFHMIN-ZFH-RV32-NEXT: # %bb.1:
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.w.h a1, fa4, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.h.w fa3, a1, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fsgnj.h fa4, fa3, fa4
-; ZVFHMIN-ZFH-RV32-NEXT: .LBB115_2:
-; ZVFHMIN-ZFH-RV32-NEXT: flh fa1, 0(sp)
-; ZVFHMIN-ZFH-RV32-NEXT: fabs.h fa3, fa1
-; ZVFHMIN-ZFH-RV32-NEXT: flt.h a1, fa3, fa5
-; ZVFHMIN-ZFH-RV32-NEXT: beqz a1, .LBB115_4
-; ZVFHMIN-ZFH-RV32-NEXT: # %bb.3:
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.w.h a1, fa1, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.h.w fa3, a1, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fsgnj.h fa1, fa3, fa1
-; ZVFHMIN-ZFH-RV32-NEXT: .LBB115_4:
-; ZVFHMIN-ZFH-RV32-NEXT: flh fa2, 4(sp)
-; ZVFHMIN-ZFH-RV32-NEXT: fabs.h fa3, fa2
-; ZVFHMIN-ZFH-RV32-NEXT: flt.h a1, fa3, fa5
-; ZVFHMIN-ZFH-RV32-NEXT: beqz a1, .LBB115_6
-; ZVFHMIN-ZFH-RV32-NEXT: # %bb.5:
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.w.h a1, fa2, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.h.w fa3, a1, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fsgnj.h fa2, fa3, fa2
-; ZVFHMIN-ZFH-RV32-NEXT: .LBB115_6:
-; ZVFHMIN-ZFH-RV32-NEXT: flh fa3, 6(sp)
-; ZVFHMIN-ZFH-RV32-NEXT: fabs.h fa0, fa3
-; ZVFHMIN-ZFH-RV32-NEXT: flt.h a1, fa0, fa5
-; ZVFHMIN-ZFH-RV32-NEXT: fmv.x.h a2, fa1
-; ZVFHMIN-ZFH-RV32-NEXT: beqz a1, .LBB115_8
-; ZVFHMIN-ZFH-RV32-NEXT: # %bb.7:
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.w.h a1, fa3, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.h.w fa1, a1, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fsgnj.h fa3, fa1, fa3
-; ZVFHMIN-ZFH-RV32-NEXT: .LBB115_8:
-; ZVFHMIN-ZFH-RV32-NEXT: flh fa1, 10(sp)
-; ZVFHMIN-ZFH-RV32-NEXT: fmv.x.h a1, fa4
-; ZVFHMIN-ZFH-RV32-NEXT: fabs.h fa4, fa1
-; ZVFHMIN-ZFH-RV32-NEXT: flt.h a3, fa4, fa5
-; ZVFHMIN-ZFH-RV32-NEXT: vmv.v.x v8, a2
-; ZVFHMIN-ZFH-RV32-NEXT: beqz a3, .LBB115_10
-; ZVFHMIN-ZFH-RV32-NEXT: # %bb.9:
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.w.h a2, fa1, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.h.w fa4, a2, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fsgnj.h fa1, fa4, fa1
-; ZVFHMIN-ZFH-RV32-NEXT: .LBB115_10:
-; ZVFHMIN-ZFH-RV32-NEXT: flh fa4, 8(sp)
-; ZVFHMIN-ZFH-RV32-NEXT: vslide1down.vx v8, v8, a1
-; ZVFHMIN-ZFH-RV32-NEXT: fmv.x.h a2, fa2
-; ZVFHMIN-ZFH-RV32-NEXT: fabs.h fa2, fa4
-; ZVFHMIN-ZFH-RV32-NEXT: flt.h a3, fa2, fa5
-; ZVFHMIN-ZFH-RV32-NEXT: fmv.x.h a1, fa1
-; ZVFHMIN-ZFH-RV32-NEXT: beqz a3, .LBB115_12
-; ZVFHMIN-ZFH-RV32-NEXT: # %bb.11:
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.w.h a3, fa4, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.h.w fa2, a3, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fsgnj.h fa4, fa2, fa4
-; ZVFHMIN-ZFH-RV32-NEXT: .LBB115_12:
-; ZVFHMIN-ZFH-RV32-NEXT: vslide1down.vx v8, v8, a2
-; ZVFHMIN-ZFH-RV32-NEXT: flh fa2, 12(sp)
-; ZVFHMIN-ZFH-RV32-NEXT: fmv.x.h a2, fa3
-; ZVFHMIN-ZFH-RV32-NEXT: fmv.x.h a3, fa4
-; ZVFHMIN-ZFH-RV32-NEXT: vmv.v.x v9, a3
-; ZVFHMIN-ZFH-RV32-NEXT: fabs.h fa4, fa2
-; ZVFHMIN-ZFH-RV32-NEXT: flt.h a3, fa4, fa5
-; ZVFHMIN-ZFH-RV32-NEXT: vslide1down.vx v9, v9, a1
-; ZVFHMIN-ZFH-RV32-NEXT: beqz a3, .LBB115_14
-; ZVFHMIN-ZFH-RV32-NEXT: # %bb.13:
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.w.h a1, fa2, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.h.w fa4, a1, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fsgnj.h fa2, fa4, fa2
-; ZVFHMIN-ZFH-RV32-NEXT: .LBB115_14:
-; ZVFHMIN-ZFH-RV32-NEXT: flh fa4, 14(sp)
-; ZVFHMIN-ZFH-RV32-NEXT: vslide1down.vx v8, v8, a2
-; ZVFHMIN-ZFH-RV32-NEXT: fmv.x.h a1, fa2
-; ZVFHMIN-ZFH-RV32-NEXT: fabs.h fa3, fa4
-; ZVFHMIN-ZFH-RV32-NEXT: flt.h a2, fa3, fa5
-; ZVFHMIN-ZFH-RV32-NEXT: vslide1down.vx v9, v9, a1
-; ZVFHMIN-ZFH-RV32-NEXT: beqz a2, .LBB115_16
-; ZVFHMIN-ZFH-RV32-NEXT: # %bb.15:
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.w.h a1, fa4, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.h.w fa5, a1, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fsgnj.h fa4, fa5, fa4
-; ZVFHMIN-ZFH-RV32-NEXT: .LBB115_16:
-; ZVFHMIN-ZFH-RV32-NEXT: fmv.x.h a1, fa4
-; ZVFHMIN-ZFH-RV32-NEXT: vmv.v.i v0, 15
-; ZVFHMIN-ZFH-RV32-NEXT: vslide1down.vx v9, v9, a1
-; ZVFHMIN-ZFH-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; ZVFHMIN-ZFH-RV32-NEXT: vslidedown.vi v9, v8, 4, v0.t
-; ZVFHMIN-ZFH-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMIN-ZFH-RV32-NEXT: addi sp, sp, 16
-; ZVFHMIN-ZFH-RV32-NEXT: ret
-;
-; ZVFHMIN-ZFH-RV64-LABEL: trunc_v8f16:
-; ZVFHMIN-ZFH-RV64: # %bb.0:
-; ZVFHMIN-ZFH-RV64-NEXT: addi sp, sp, -16
-; ZVFHMIN-ZFH-RV64-NEXT: .cfi_def_cfa_offset 16
-; ZVFHMIN-ZFH-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-ZFH-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMIN-ZFH-RV64-NEXT: mv a1, sp
-; ZVFHMIN-ZFH-RV64-NEXT: vse16.v v8, (a1)
-; ZVFHMIN-ZFH-RV64-NEXT: flh fa4, 2(sp)
-; ZVFHMIN-ZFH-RV64-NEXT: lui a1, %hi(.LCPI115_0)
-; ZVFHMIN-ZFH-RV64-NEXT: flh fa5, %lo(.LCPI115_0)(a1)
-; ZVFHMIN-ZFH-RV64-NEXT: fabs.h fa3, fa4
-; ZVFHMIN-ZFH-RV64-NEXT: flt.h a1, fa3, fa5
-; ZVFHMIN-ZFH-RV64-NEXT: beqz a1, .LBB115_2
-; ZVFHMIN-ZFH-RV64-NEXT: # %bb.1:
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.w.h a1, fa4, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.h.w fa3, a1, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fsgnj.h fa4, fa3, fa4
-; ZVFHMIN-ZFH-RV64-NEXT: .LBB115_2:
-; ZVFHMIN-ZFH-RV64-NEXT: flh fa1, 0(sp)
-; ZVFHMIN-ZFH-RV64-NEXT: fabs.h fa3, fa1
-; ZVFHMIN-ZFH-RV64-NEXT: flt.h a1, fa3, fa5
-; ZVFHMIN-ZFH-RV64-NEXT: beqz a1, .LBB115_4
-; ZVFHMIN-ZFH-RV64-NEXT: # %bb.3:
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.w.h a1, fa1, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.h.w fa3, a1, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fsgnj.h fa1, fa3, fa1
-; ZVFHMIN-ZFH-RV64-NEXT: .LBB115_4:
-; ZVFHMIN-ZFH-RV64-NEXT: flh fa2, 4(sp)
-; ZVFHMIN-ZFH-RV64-NEXT: fabs.h fa3, fa2
-; ZVFHMIN-ZFH-RV64-NEXT: flt.h a1, fa3, fa5
-; ZVFHMIN-ZFH-RV64-NEXT: beqz a1, .LBB115_6
-; ZVFHMIN-ZFH-RV64-NEXT: # %bb.5:
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.w.h a1, fa2, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.h.w fa3, a1, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fsgnj.h fa2, fa3, fa2
-; ZVFHMIN-ZFH-RV64-NEXT: .LBB115_6:
-; ZVFHMIN-ZFH-RV64-NEXT: flh fa3, 6(sp)
-; ZVFHMIN-ZFH-RV64-NEXT: fabs.h fa0, fa3
-; ZVFHMIN-ZFH-RV64-NEXT: flt.h a1, fa0, fa5
-; ZVFHMIN-ZFH-RV64-NEXT: fmv.x.h a2, fa1
-; ZVFHMIN-ZFH-RV64-NEXT: beqz a1, .LBB115_8
-; ZVFHMIN-ZFH-RV64-NEXT: # %bb.7:
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.w.h a1, fa3, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.h.w fa1, a1, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fsgnj.h fa3, fa1, fa3
-; ZVFHMIN-ZFH-RV64-NEXT: .LBB115_8:
-; ZVFHMIN-ZFH-RV64-NEXT: flh fa1, 10(sp)
-; ZVFHMIN-ZFH-RV64-NEXT: fmv.x.h a1, fa4
-; ZVFHMIN-ZFH-RV64-NEXT: fabs.h fa4, fa1
-; ZVFHMIN-ZFH-RV64-NEXT: flt.h a3, fa4, fa5
-; ZVFHMIN-ZFH-RV64-NEXT: vmv.v.x v8, a2
-; ZVFHMIN-ZFH-RV64-NEXT: beqz a3, .LBB115_10
-; ZVFHMIN-ZFH-RV64-NEXT: # %bb.9:
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.w.h a2, fa1, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.h.w fa4, a2, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fsgnj.h fa1, fa4, fa1
-; ZVFHMIN-ZFH-RV64-NEXT: .LBB115_10:
-; ZVFHMIN-ZFH-RV64-NEXT: flh fa4, 8(sp)
-; ZVFHMIN-ZFH-RV64-NEXT: vslide1down.vx v8, v8, a1
-; ZVFHMIN-ZFH-RV64-NEXT: fmv.x.h a2, fa2
-; ZVFHMIN-ZFH-RV64-NEXT: fabs.h fa2, fa4
-; ZVFHMIN-ZFH-RV64-NEXT: flt.h a3, fa2, fa5
-; ZVFHMIN-ZFH-RV64-NEXT: fmv.x.h a1, fa1
-; ZVFHMIN-ZFH-RV64-NEXT: beqz a3, .LBB115_12
-; ZVFHMIN-ZFH-RV64-NEXT: # %bb.11:
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.w.h a3, fa4, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.h.w fa2, a3, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fsgnj.h fa4, fa2, fa4
-; ZVFHMIN-ZFH-RV64-NEXT: .LBB115_12:
-; ZVFHMIN-ZFH-RV64-NEXT: vslide1down.vx v8, v8, a2
-; ZVFHMIN-ZFH-RV64-NEXT: flh fa2, 12(sp)
-; ZVFHMIN-ZFH-RV64-NEXT: fmv.x.h a2, fa3
-; ZVFHMIN-ZFH-RV64-NEXT: fmv.x.h a3, fa4
-; ZVFHMIN-ZFH-RV64-NEXT: vmv.v.x v9, a3
-; ZVFHMIN-ZFH-RV64-NEXT: fabs.h fa4, fa2
-; ZVFHMIN-ZFH-RV64-NEXT: flt.h a3, fa4, fa5
-; ZVFHMIN-ZFH-RV64-NEXT: vslide1down.vx v9, v9, a1
-; ZVFHMIN-ZFH-RV64-NEXT: beqz a3, .LBB115_14
-; ZVFHMIN-ZFH-RV64-NEXT: # %bb.13:
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.w.h a1, fa2, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.h.w fa4, a1, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fsgnj.h fa2, fa4, fa2
-; ZVFHMIN-ZFH-RV64-NEXT: .LBB115_14:
-; ZVFHMIN-ZFH-RV64-NEXT: flh fa4, 14(sp)
-; ZVFHMIN-ZFH-RV64-NEXT: vslide1down.vx v8, v8, a2
-; ZVFHMIN-ZFH-RV64-NEXT: fmv.x.h a1, fa2
-; ZVFHMIN-ZFH-RV64-NEXT: fabs.h fa3, fa4
-; ZVFHMIN-ZFH-RV64-NEXT: flt.h a2, fa3, fa5
-; ZVFHMIN-ZFH-RV64-NEXT: vslide1down.vx v9, v9, a1
-; ZVFHMIN-ZFH-RV64-NEXT: beqz a2, .LBB115_16
-; ZVFHMIN-ZFH-RV64-NEXT: # %bb.15:
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.w.h a1, fa4, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.h.w fa5, a1, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fsgnj.h fa4, fa5, fa4
-; ZVFHMIN-ZFH-RV64-NEXT: .LBB115_16:
-; ZVFHMIN-ZFH-RV64-NEXT: fmv.x.h a1, fa4
-; ZVFHMIN-ZFH-RV64-NEXT: vmv.v.i v0, 15
-; ZVFHMIN-ZFH-RV64-NEXT: vslide1down.vx v9, v9, a1
-; ZVFHMIN-ZFH-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; ZVFHMIN-ZFH-RV64-NEXT: vslidedown.vi v9, v8, 4, v0.t
-; ZVFHMIN-ZFH-RV64-NEXT: vse16.v v9, (a0)
-; ZVFHMIN-ZFH-RV64-NEXT: addi sp, sp, 16
-; ZVFHMIN-ZFH-RV64-NEXT: ret
-;
-; ZVFHMIN-ZFHIN-RV32-LABEL: trunc_v8f16:
-; ZVFHMIN-ZFHIN-RV32: # %bb.0:
-; ZVFHMIN-ZFHIN-RV32-NEXT: addi sp, sp, -16
-; ZVFHMIN-ZFHIN-RV32-NEXT: .cfi_def_cfa_offset 16
-; ZVFHMIN-ZFHIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-ZFHIN-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMIN-ZFHIN-RV32-NEXT: mv a1, sp
-; ZVFHMIN-ZFHIN-RV32-NEXT: vse16.v v8, (a1)
-; ZVFHMIN-ZFHIN-RV32-NEXT: flh fa5, 2(sp)
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.h fa4, fa5
-; ZVFHMIN-ZFHIN-RV32-NEXT: lui a1, 307200
-; ZVFHMIN-ZFHIN-RV32-NEXT: fmv.w.x fa5, a1
-; ZVFHMIN-ZFHIN-RV32-NEXT: fabs.s fa3, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: flt.s a1, fa3, fa5
-; ZVFHMIN-ZFHIN-RV32-NEXT: beqz a1, .LBB115_2
-; ZVFHMIN-ZFHIN-RV32-NEXT: # %bb.1:
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.w.s a1, fa4, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.w fa3, a1, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fsgnj.s fa4, fa3, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: .LBB115_2:
-; ZVFHMIN-ZFHIN-RV32-NEXT: flh fa3, 0(sp)
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.h fa2, fa3
-; ZVFHMIN-ZFHIN-RV32-NEXT: fabs.s fa3, fa2
-; ZVFHMIN-ZFHIN-RV32-NEXT: flt.s a1, fa3, fa5
-; ZVFHMIN-ZFHIN-RV32-NEXT: beqz a1, .LBB115_4
-; ZVFHMIN-ZFHIN-RV32-NEXT: # %bb.3:
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.w.s a1, fa2, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.w fa3, a1, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fsgnj.s fa2, fa3, fa2
-; ZVFHMIN-ZFHIN-RV32-NEXT: .LBB115_4:
-; ZVFHMIN-ZFHIN-RV32-NEXT: flh fa3, 4(sp)
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.h fa3, fa3
-; ZVFHMIN-ZFHIN-RV32-NEXT: fabs.s fa1, fa3
-; ZVFHMIN-ZFHIN-RV32-NEXT: flt.s a1, fa1, fa5
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.h.s fa1, fa2
-; ZVFHMIN-ZFHIN-RV32-NEXT: beqz a1, .LBB115_6
-; ZVFHMIN-ZFHIN-RV32-NEXT: # %bb.5:
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.w.s a1, fa3, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.w fa2, a1, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fsgnj.s fa3, fa2, fa3
-; ZVFHMIN-ZFHIN-RV32-NEXT: .LBB115_6:
-; ZVFHMIN-ZFHIN-RV32-NEXT: flh fa0, 6(sp)
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.h.s fa2, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.h fa4, fa0
-; ZVFHMIN-ZFHIN-RV32-NEXT: fabs.s fa0, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: flt.s a1, fa0, fa5
-; ZVFHMIN-ZFHIN-RV32-NEXT: fmv.x.h a2, fa1
-; ZVFHMIN-ZFHIN-RV32-NEXT: beqz a1, .LBB115_8
-; ZVFHMIN-ZFHIN-RV32-NEXT: # %bb.7:
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.w.s a1, fa4, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.w fa1, a1, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fsgnj.s fa4, fa1, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: .LBB115_8:
-; ZVFHMIN-ZFHIN-RV32-NEXT: flh fa1, 10(sp)
-; ZVFHMIN-ZFHIN-RV32-NEXT: fmv.x.h a1, fa2
-; ZVFHMIN-ZFHIN-RV32-NEXT: vmv.v.x v8, a2
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.h fa2, fa1
-; ZVFHMIN-ZFHIN-RV32-NEXT: fabs.s fa1, fa2
-; ZVFHMIN-ZFHIN-RV32-NEXT: flt.s a2, fa1, fa5
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.h.s fa3, fa3
-; ZVFHMIN-ZFHIN-RV32-NEXT: beqz a2, .LBB115_10
-; ZVFHMIN-ZFHIN-RV32-NEXT: # %bb.9:
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.w.s a2, fa2, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.w fa1, a2, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fsgnj.s fa2, fa1, fa2
-; ZVFHMIN-ZFHIN-RV32-NEXT: .LBB115_10:
-; ZVFHMIN-ZFHIN-RV32-NEXT: vslide1down.vx v8, v8, a1
-; ZVFHMIN-ZFHIN-RV32-NEXT: flh fa1, 8(sp)
-; ZVFHMIN-ZFHIN-RV32-NEXT: fmv.x.h a1, fa3
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.h.s fa3, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.h.s fa2, fa2
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.h fa4, fa1
-; ZVFHMIN-ZFHIN-RV32-NEXT: fabs.s fa1, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: flt.s a3, fa1, fa5
-; ZVFHMIN-ZFHIN-RV32-NEXT: fmv.x.h a2, fa2
-; ZVFHMIN-ZFHIN-RV32-NEXT: beqz a3, .LBB115_12
-; ZVFHMIN-ZFHIN-RV32-NEXT: # %bb.11:
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.w.s a3, fa4, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.w fa2, a3, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fsgnj.s fa4, fa2, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: .LBB115_12:
-; ZVFHMIN-ZFHIN-RV32-NEXT: vslide1down.vx v8, v8, a1
-; ZVFHMIN-ZFHIN-RV32-NEXT: fmv.x.h a1, fa3
-; ZVFHMIN-ZFHIN-RV32-NEXT: flh fa3, 12(sp)
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.h.s fa4, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: fmv.x.h a3, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: vmv.v.x v9, a3
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.h fa4, fa3
-; ZVFHMIN-ZFHIN-RV32-NEXT: fabs.s fa3, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: flt.s a3, fa3, fa5
-; ZVFHMIN-ZFHIN-RV32-NEXT: vslide1down.vx v9, v9, a2
-; ZVFHMIN-ZFHIN-RV32-NEXT: beqz a3, .LBB115_14
-; ZVFHMIN-ZFHIN-RV32-NEXT: # %bb.13:
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.w.s a2, fa4, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.w fa3, a2, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fsgnj.s fa4, fa3, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: .LBB115_14:
-; ZVFHMIN-ZFHIN-RV32-NEXT: flh fa3, 14(sp)
-; ZVFHMIN-ZFHIN-RV32-NEXT: vslide1down.vx v8, v8, a1
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.h.s fa4, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: fmv.x.h a1, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.h fa4, fa3
-; ZVFHMIN-ZFHIN-RV32-NEXT: fabs.s fa3, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: flt.s a2, fa3, fa5
-; ZVFHMIN-ZFHIN-RV32-NEXT: vslide1down.vx v9, v9, a1
-; ZVFHMIN-ZFHIN-RV32-NEXT: beqz a2, .LBB115_16
-; ZVFHMIN-ZFHIN-RV32-NEXT: # %bb.15:
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.w.s a1, fa4, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.w fa5, a1, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fsgnj.s fa4, fa5, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: .LBB115_16:
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.h.s fa5, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: fmv.x.h a1, fa5
-; ZVFHMIN-ZFHIN-RV32-NEXT: vmv.v.i v0, 15
-; ZVFHMIN-ZFHIN-RV32-NEXT: vslide1down.vx v9, v9, a1
-; ZVFHMIN-ZFHIN-RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; ZVFHMIN-ZFHIN-RV32-NEXT: vslidedown.vi v9, v8, 4, v0.t
-; ZVFHMIN-ZFHIN-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMIN-ZFHIN-RV32-NEXT: addi sp, sp, 16
-; ZVFHMIN-ZFHIN-RV32-NEXT: ret
-;
-; ZVFHMIN-ZFHIN-RV64-LABEL: trunc_v8f16:
-; ZVFHMIN-ZFHIN-RV64: # %bb.0:
-; ZVFHMIN-ZFHIN-RV64-NEXT: addi sp, sp, -16
-; ZVFHMIN-ZFHIN-RV64-NEXT: .cfi_def_cfa_offset 16
-; ZVFHMIN-ZFHIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-ZFHIN-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMIN-ZFHIN-RV64-NEXT: mv a1, sp
-; ZVFHMIN-ZFHIN-RV64-NEXT: vse16.v v8, (a1)
-; ZVFHMIN-ZFHIN-RV64-NEXT: flh fa5, 2(sp)
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.h fa4, fa5
-; ZVFHMIN-ZFHIN-RV64-NEXT: lui a1, 307200
-; ZVFHMIN-ZFHIN-RV64-NEXT: fmv.w.x fa5, a1
-; ZVFHMIN-ZFHIN-RV64-NEXT: fabs.s fa3, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: flt.s a1, fa3, fa5
-; ZVFHMIN-ZFHIN-RV64-NEXT: beqz a1, .LBB115_2
-; ZVFHMIN-ZFHIN-RV64-NEXT: # %bb.1:
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.w.s a1, fa4, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.w fa3, a1, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fsgnj.s fa4, fa3, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: .LBB115_2:
-; ZVFHMIN-ZFHIN-RV64-NEXT: flh fa3, 0(sp)
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.h fa2, fa3
-; ZVFHMIN-ZFHIN-RV64-NEXT: fabs.s fa3, fa2
-; ZVFHMIN-ZFHIN-RV64-NEXT: flt.s a1, fa3, fa5
-; ZVFHMIN-ZFHIN-RV64-NEXT: beqz a1, .LBB115_4
-; ZVFHMIN-ZFHIN-RV64-NEXT: # %bb.3:
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.w.s a1, fa2, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.w fa3, a1, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fsgnj.s fa2, fa3, fa2
-; ZVFHMIN-ZFHIN-RV64-NEXT: .LBB115_4:
-; ZVFHMIN-ZFHIN-RV64-NEXT: flh fa3, 4(sp)
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.h fa3, fa3
-; ZVFHMIN-ZFHIN-RV64-NEXT: fabs.s fa1, fa3
-; ZVFHMIN-ZFHIN-RV64-NEXT: flt.s a1, fa1, fa5
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.h.s fa1, fa2
-; ZVFHMIN-ZFHIN-RV64-NEXT: beqz a1, .LBB115_6
-; ZVFHMIN-ZFHIN-RV64-NEXT: # %bb.5:
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.w.s a1, fa3, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.w fa2, a1, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fsgnj.s fa3, fa2, fa3
-; ZVFHMIN-ZFHIN-RV64-NEXT: .LBB115_6:
-; ZVFHMIN-ZFHIN-RV64-NEXT: flh fa0, 6(sp)
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.h.s fa2, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.h fa4, fa0
-; ZVFHMIN-ZFHIN-RV64-NEXT: fabs.s fa0, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: flt.s a1, fa0, fa5
-; ZVFHMIN-ZFHIN-RV64-NEXT: fmv.x.h a2, fa1
-; ZVFHMIN-ZFHIN-RV64-NEXT: beqz a1, .LBB115_8
-; ZVFHMIN-ZFHIN-RV64-NEXT: # %bb.7:
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.w.s a1, fa4, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.w fa1, a1, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fsgnj.s fa4, fa1, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: .LBB115_8:
-; ZVFHMIN-ZFHIN-RV64-NEXT: flh fa1, 10(sp)
-; ZVFHMIN-ZFHIN-RV64-NEXT: fmv.x.h a1, fa2
-; ZVFHMIN-ZFHIN-RV64-NEXT: vmv.v.x v8, a2
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.h fa2, fa1
-; ZVFHMIN-ZFHIN-RV64-NEXT: fabs.s fa1, fa2
-; ZVFHMIN-ZFHIN-RV64-NEXT: flt.s a2, fa1, fa5
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.h.s fa3, fa3
-; ZVFHMIN-ZFHIN-RV64-NEXT: beqz a2, .LBB115_10
-; ZVFHMIN-ZFHIN-RV64-NEXT: # %bb.9:
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.w.s a2, fa2, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.w fa1, a2, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fsgnj.s fa2, fa1, fa2
-; ZVFHMIN-ZFHIN-RV64-NEXT: .LBB115_10:
-; ZVFHMIN-ZFHIN-RV64-NEXT: vslide1down.vx v8, v8, a1
-; ZVFHMIN-ZFHIN-RV64-NEXT: flh fa1, 8(sp)
-; ZVFHMIN-ZFHIN-RV64-NEXT: fmv.x.h a1, fa3
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.h.s fa3, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.h.s fa2, fa2
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.h fa4, fa1
-; ZVFHMIN-ZFHIN-RV64-NEXT: fabs.s fa1, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: flt.s a3, fa1, fa5
-; ZVFHMIN-ZFHIN-RV64-NEXT: fmv.x.h a2, fa2
-; ZVFHMIN-ZFHIN-RV64-NEXT: beqz a3, .LBB115_12
-; ZVFHMIN-ZFHIN-RV64-NEXT: # %bb.11:
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.w.s a3, fa4, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.w fa2, a3, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fsgnj.s fa4, fa2, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: .LBB115_12:
-; ZVFHMIN-ZFHIN-RV64-NEXT: vslide1down.vx v8, v8, a1
-; ZVFHMIN-ZFHIN-RV64-NEXT: fmv.x.h a1, fa3
-; ZVFHMIN-ZFHIN-RV64-NEXT: flh fa3, 12(sp)
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.h.s fa4, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: fmv.x.h a3, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: vmv.v.x v9, a3
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.h fa4, fa3
-; ZVFHMIN-ZFHIN-RV64-NEXT: fabs.s fa3, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: flt.s a3, fa3, fa5
-; ZVFHMIN-ZFHIN-RV64-NEXT: vslide1down.vx v9, v9, a2
-; ZVFHMIN-ZFHIN-RV64-NEXT: beqz a3, .LBB115_14
-; ZVFHMIN-ZFHIN-RV64-NEXT: # %bb.13:
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.w.s a2, fa4, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.w fa3, a2, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fsgnj.s fa4, fa3, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: .LBB115_14:
-; ZVFHMIN-ZFHIN-RV64-NEXT: flh fa3, 14(sp)
-; ZVFHMIN-ZFHIN-RV64-NEXT: vslide1down.vx v8, v8, a1
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.h.s fa4, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: fmv.x.h a1, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.h fa4, fa3
-; ZVFHMIN-ZFHIN-RV64-NEXT: fabs.s fa3, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: flt.s a2, fa3, fa5
-; ZVFHMIN-ZFHIN-RV64-NEXT: vslide1down.vx v9, v9, a1
-; ZVFHMIN-ZFHIN-RV64-NEXT: beqz a2, .LBB115_16
-; ZVFHMIN-ZFHIN-RV64-NEXT: # %bb.15:
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.w.s a1, fa4, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.w fa5, a1, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fsgnj.s fa4, fa5, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: .LBB115_16:
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.h.s fa5, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: fmv.x.h a1, fa5
-; ZVFHMIN-ZFHIN-RV64-NEXT: vmv.v.i v0, 15
-; ZVFHMIN-ZFHIN-RV64-NEXT: vslide1down.vx v9, v9, a1
-; ZVFHMIN-ZFHIN-RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; ZVFHMIN-ZFHIN-RV64-NEXT: vslidedown.vi v9, v8, 4, v0.t
-; ZVFHMIN-ZFHIN-RV64-NEXT: vse16.v v9, (a0)
-; ZVFHMIN-ZFHIN-RV64-NEXT: addi sp, sp, 16
-; ZVFHMIN-ZFHIN-RV64-NEXT: ret
+; ZVFHMIN-LABEL: trunc_v8f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v9
+; ZVFHMIN-NEXT: lui a1, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a1
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v9, v8, v9, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: vse16.v v8, (a0)
+; ZVFHMIN-NEXT: ret
%a = load <8 x half>, ptr %x
%b = call <8 x half> @llvm.trunc.v8f16(<8 x half> %a)
store <8 x half> %b, ptr %x
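
The new ZVFHMIN checks above replace the old per-element, stack-spilling
expansion: the <8 x half> vector is widened to f32 with vfwcvt, vmflt.vf
builds a mask of the lanes with |x| < 2^23 (lui a1, 307200 materializes
0x4B000000, the bit pattern of 2^23 as an f32), the masked
vfcvt.rtz.x.f.v / vfcvt.f.x.v pair rounds those lanes toward zero
through the integer domain, vfsgnj.vv restores the sign (so -0.5
truncates to -0.0), and vfncvt narrows back to f16. A minimal C++
sketch of the per-lane semantics, with trunc_lane as a hypothetical
name (this is not the backend's code):

#include <cmath>
#include <cstdint>

static float trunc_lane(float x) {
  // vmflt.vf: lanes with |x| < 2^23 are active in v0; for values
  // widened from f16 only +-inf and NaN fail this test, and they
  // pass through unchanged, as trunc requires.
  if (std::fabs(x) < 0x1p23f) {
    float r = (float)(int32_t)x;   // vfcvt.rtz.x.f.v + vfcvt.f.x.v
    return std::copysign(r, x);    // vfsgnj.vv keeps -0.0 negative
  }
  return x;
}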
@@ -6020,461 +5587,25 @@ define void @trunc_v6f16(ptr %x) {
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
;
-; ZVFHMIN-ZFH-RV32-LABEL: trunc_v6f16:
-; ZVFHMIN-ZFH-RV32: # %bb.0:
-; ZVFHMIN-ZFH-RV32-NEXT: addi sp, sp, -16
-; ZVFHMIN-ZFH-RV32-NEXT: .cfi_def_cfa_offset 16
-; ZVFHMIN-ZFH-RV32-NEXT: vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-ZFH-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMIN-ZFH-RV32-NEXT: mv a1, sp
-; ZVFHMIN-ZFH-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-ZFH-RV32-NEXT: vse16.v v8, (a1)
-; ZVFHMIN-ZFH-RV32-NEXT: flh fa4, 2(sp)
-; ZVFHMIN-ZFH-RV32-NEXT: lui a1, %hi(.LCPI116_0)
-; ZVFHMIN-ZFH-RV32-NEXT: flh fa5, %lo(.LCPI116_0)(a1)
-; ZVFHMIN-ZFH-RV32-NEXT: fabs.h fa3, fa4
-; ZVFHMIN-ZFH-RV32-NEXT: flt.h a1, fa3, fa5
-; ZVFHMIN-ZFH-RV32-NEXT: beqz a1, .LBB116_2
-; ZVFHMIN-ZFH-RV32-NEXT: # %bb.1:
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.w.h a1, fa4, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.h.w fa3, a1, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fsgnj.h fa4, fa3, fa4
-; ZVFHMIN-ZFH-RV32-NEXT: .LBB116_2:
-; ZVFHMIN-ZFH-RV32-NEXT: flh fa1, 0(sp)
-; ZVFHMIN-ZFH-RV32-NEXT: fabs.h fa3, fa1
-; ZVFHMIN-ZFH-RV32-NEXT: flt.h a1, fa3, fa5
-; ZVFHMIN-ZFH-RV32-NEXT: beqz a1, .LBB116_4
-; ZVFHMIN-ZFH-RV32-NEXT: # %bb.3:
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.w.h a1, fa1, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.h.w fa3, a1, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fsgnj.h fa1, fa3, fa1
-; ZVFHMIN-ZFH-RV32-NEXT: .LBB116_4:
-; ZVFHMIN-ZFH-RV32-NEXT: flh fa2, 4(sp)
-; ZVFHMIN-ZFH-RV32-NEXT: fabs.h fa3, fa2
-; ZVFHMIN-ZFH-RV32-NEXT: flt.h a1, fa3, fa5
-; ZVFHMIN-ZFH-RV32-NEXT: beqz a1, .LBB116_6
-; ZVFHMIN-ZFH-RV32-NEXT: # %bb.5:
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.w.h a1, fa2, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.h.w fa3, a1, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fsgnj.h fa2, fa3, fa2
-; ZVFHMIN-ZFH-RV32-NEXT: .LBB116_6:
-; ZVFHMIN-ZFH-RV32-NEXT: flh fa3, 6(sp)
-; ZVFHMIN-ZFH-RV32-NEXT: fabs.h fa0, fa3
-; ZVFHMIN-ZFH-RV32-NEXT: flt.h a1, fa0, fa5
-; ZVFHMIN-ZFH-RV32-NEXT: fmv.x.h a2, fa1
-; ZVFHMIN-ZFH-RV32-NEXT: beqz a1, .LBB116_8
-; ZVFHMIN-ZFH-RV32-NEXT: # %bb.7:
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.w.h a1, fa3, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.h.w fa1, a1, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fsgnj.h fa3, fa1, fa3
-; ZVFHMIN-ZFH-RV32-NEXT: .LBB116_8:
-; ZVFHMIN-ZFH-RV32-NEXT: flh fa1, 10(sp)
-; ZVFHMIN-ZFH-RV32-NEXT: fmv.x.h a1, fa4
-; ZVFHMIN-ZFH-RV32-NEXT: fabs.h fa4, fa1
-; ZVFHMIN-ZFH-RV32-NEXT: flt.h a3, fa4, fa5
-; ZVFHMIN-ZFH-RV32-NEXT: vmv.v.x v8, a2
-; ZVFHMIN-ZFH-RV32-NEXT: beqz a3, .LBB116_10
-; ZVFHMIN-ZFH-RV32-NEXT: # %bb.9:
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.w.h a2, fa1, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.h.w fa4, a2, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fsgnj.h fa1, fa4, fa1
-; ZVFHMIN-ZFH-RV32-NEXT: .LBB116_10:
-; ZVFHMIN-ZFH-RV32-NEXT: flh fa4, 8(sp)
-; ZVFHMIN-ZFH-RV32-NEXT: vslide1down.vx v8, v8, a1
-; ZVFHMIN-ZFH-RV32-NEXT: fmv.x.h a2, fa2
-; ZVFHMIN-ZFH-RV32-NEXT: fabs.h fa2, fa4
-; ZVFHMIN-ZFH-RV32-NEXT: flt.h a3, fa2, fa5
-; ZVFHMIN-ZFH-RV32-NEXT: fmv.x.h a1, fa1
-; ZVFHMIN-ZFH-RV32-NEXT: beqz a3, .LBB116_12
-; ZVFHMIN-ZFH-RV32-NEXT: # %bb.11:
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.w.h a3, fa4, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.h.w fa2, a3, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fsgnj.h fa4, fa2, fa4
-; ZVFHMIN-ZFH-RV32-NEXT: .LBB116_12:
-; ZVFHMIN-ZFH-RV32-NEXT: vslide1down.vx v8, v8, a2
-; ZVFHMIN-ZFH-RV32-NEXT: flh fa2, 12(sp)
-; ZVFHMIN-ZFH-RV32-NEXT: fmv.x.h a2, fa3
-; ZVFHMIN-ZFH-RV32-NEXT: fmv.x.h a3, fa4
-; ZVFHMIN-ZFH-RV32-NEXT: vmv.v.x v9, a3
-; ZVFHMIN-ZFH-RV32-NEXT: fabs.h fa4, fa2
-; ZVFHMIN-ZFH-RV32-NEXT: flt.h a3, fa4, fa5
-; ZVFHMIN-ZFH-RV32-NEXT: vslide1down.vx v9, v9, a1
-; ZVFHMIN-ZFH-RV32-NEXT: beqz a3, .LBB116_14
-; ZVFHMIN-ZFH-RV32-NEXT: # %bb.13:
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.w.h a1, fa2, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.h.w fa4, a1, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fsgnj.h fa2, fa4, fa2
-; ZVFHMIN-ZFH-RV32-NEXT: .LBB116_14:
-; ZVFHMIN-ZFH-RV32-NEXT: flh fa4, 14(sp)
-; ZVFHMIN-ZFH-RV32-NEXT: vslide1down.vx v8, v8, a2
-; ZVFHMIN-ZFH-RV32-NEXT: fmv.x.h a1, fa2
-; ZVFHMIN-ZFH-RV32-NEXT: fabs.h fa3, fa4
-; ZVFHMIN-ZFH-RV32-NEXT: flt.h a2, fa3, fa5
-; ZVFHMIN-ZFH-RV32-NEXT: vslide1down.vx v9, v9, a1
-; ZVFHMIN-ZFH-RV32-NEXT: beqz a2, .LBB116_16
-; ZVFHMIN-ZFH-RV32-NEXT: # %bb.15:
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.w.h a1, fa4, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fcvt.h.w fa5, a1, rtz
-; ZVFHMIN-ZFH-RV32-NEXT: fsgnj.h fa4, fa5, fa4
-; ZVFHMIN-ZFH-RV32-NEXT: .LBB116_16:
-; ZVFHMIN-ZFH-RV32-NEXT: fmv.x.h a1, fa4
-; ZVFHMIN-ZFH-RV32-NEXT: vmv.v.i v0, 15
-; ZVFHMIN-ZFH-RV32-NEXT: vslide1down.vx v9, v9, a1
-; ZVFHMIN-ZFH-RV32-NEXT: vsetivli zero, 6, e16, mf2, ta, mu
-; ZVFHMIN-ZFH-RV32-NEXT: vslidedown.vi v9, v8, 4, v0.t
-; ZVFHMIN-ZFH-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMIN-ZFH-RV32-NEXT: addi sp, sp, 16
-; ZVFHMIN-ZFH-RV32-NEXT: ret
-;
-; ZVFHMIN-ZFH-RV64-LABEL: trunc_v6f16:
-; ZVFHMIN-ZFH-RV64: # %bb.0:
-; ZVFHMIN-ZFH-RV64-NEXT: addi sp, sp, -16
-; ZVFHMIN-ZFH-RV64-NEXT: .cfi_def_cfa_offset 16
-; ZVFHMIN-ZFH-RV64-NEXT: vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-ZFH-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMIN-ZFH-RV64-NEXT: mv a1, sp
-; ZVFHMIN-ZFH-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-ZFH-RV64-NEXT: vse16.v v8, (a1)
-; ZVFHMIN-ZFH-RV64-NEXT: flh fa4, 2(sp)
-; ZVFHMIN-ZFH-RV64-NEXT: lui a1, %hi(.LCPI116_0)
-; ZVFHMIN-ZFH-RV64-NEXT: flh fa5, %lo(.LCPI116_0)(a1)
-; ZVFHMIN-ZFH-RV64-NEXT: fabs.h fa3, fa4
-; ZVFHMIN-ZFH-RV64-NEXT: flt.h a1, fa3, fa5
-; ZVFHMIN-ZFH-RV64-NEXT: beqz a1, .LBB116_2
-; ZVFHMIN-ZFH-RV64-NEXT: # %bb.1:
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.w.h a1, fa4, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.h.w fa3, a1, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fsgnj.h fa4, fa3, fa4
-; ZVFHMIN-ZFH-RV64-NEXT: .LBB116_2:
-; ZVFHMIN-ZFH-RV64-NEXT: flh fa1, 0(sp)
-; ZVFHMIN-ZFH-RV64-NEXT: fabs.h fa3, fa1
-; ZVFHMIN-ZFH-RV64-NEXT: flt.h a1, fa3, fa5
-; ZVFHMIN-ZFH-RV64-NEXT: beqz a1, .LBB116_4
-; ZVFHMIN-ZFH-RV64-NEXT: # %bb.3:
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.w.h a1, fa1, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.h.w fa3, a1, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fsgnj.h fa1, fa3, fa1
-; ZVFHMIN-ZFH-RV64-NEXT: .LBB116_4:
-; ZVFHMIN-ZFH-RV64-NEXT: flh fa2, 4(sp)
-; ZVFHMIN-ZFH-RV64-NEXT: fabs.h fa3, fa2
-; ZVFHMIN-ZFH-RV64-NEXT: flt.h a1, fa3, fa5
-; ZVFHMIN-ZFH-RV64-NEXT: beqz a1, .LBB116_6
-; ZVFHMIN-ZFH-RV64-NEXT: # %bb.5:
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.w.h a1, fa2, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.h.w fa3, a1, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fsgnj.h fa2, fa3, fa2
-; ZVFHMIN-ZFH-RV64-NEXT: .LBB116_6:
-; ZVFHMIN-ZFH-RV64-NEXT: flh fa3, 6(sp)
-; ZVFHMIN-ZFH-RV64-NEXT: fabs.h fa0, fa3
-; ZVFHMIN-ZFH-RV64-NEXT: flt.h a1, fa0, fa5
-; ZVFHMIN-ZFH-RV64-NEXT: fmv.x.h a2, fa1
-; ZVFHMIN-ZFH-RV64-NEXT: beqz a1, .LBB116_8
-; ZVFHMIN-ZFH-RV64-NEXT: # %bb.7:
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.w.h a1, fa3, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.h.w fa1, a1, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fsgnj.h fa3, fa1, fa3
-; ZVFHMIN-ZFH-RV64-NEXT: .LBB116_8:
-; ZVFHMIN-ZFH-RV64-NEXT: flh fa1, 10(sp)
-; ZVFHMIN-ZFH-RV64-NEXT: fmv.x.h a1, fa4
-; ZVFHMIN-ZFH-RV64-NEXT: fabs.h fa4, fa1
-; ZVFHMIN-ZFH-RV64-NEXT: flt.h a3, fa4, fa5
-; ZVFHMIN-ZFH-RV64-NEXT: vmv.v.x v8, a2
-; ZVFHMIN-ZFH-RV64-NEXT: beqz a3, .LBB116_10
-; ZVFHMIN-ZFH-RV64-NEXT: # %bb.9:
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.w.h a2, fa1, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.h.w fa4, a2, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fsgnj.h fa1, fa4, fa1
-; ZVFHMIN-ZFH-RV64-NEXT: .LBB116_10:
-; ZVFHMIN-ZFH-RV64-NEXT: flh fa4, 8(sp)
-; ZVFHMIN-ZFH-RV64-NEXT: vslide1down.vx v8, v8, a1
-; ZVFHMIN-ZFH-RV64-NEXT: fmv.x.h a2, fa2
-; ZVFHMIN-ZFH-RV64-NEXT: fabs.h fa2, fa4
-; ZVFHMIN-ZFH-RV64-NEXT: flt.h a3, fa2, fa5
-; ZVFHMIN-ZFH-RV64-NEXT: fmv.x.h a1, fa1
-; ZVFHMIN-ZFH-RV64-NEXT: beqz a3, .LBB116_12
-; ZVFHMIN-ZFH-RV64-NEXT: # %bb.11:
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.w.h a3, fa4, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.h.w fa2, a3, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fsgnj.h fa4, fa2, fa4
-; ZVFHMIN-ZFH-RV64-NEXT: .LBB116_12:
-; ZVFHMIN-ZFH-RV64-NEXT: vslide1down.vx v8, v8, a2
-; ZVFHMIN-ZFH-RV64-NEXT: flh fa2, 12(sp)
-; ZVFHMIN-ZFH-RV64-NEXT: fmv.x.h a2, fa3
-; ZVFHMIN-ZFH-RV64-NEXT: fmv.x.h a3, fa4
-; ZVFHMIN-ZFH-RV64-NEXT: vmv.v.x v9, a3
-; ZVFHMIN-ZFH-RV64-NEXT: fabs.h fa4, fa2
-; ZVFHMIN-ZFH-RV64-NEXT: flt.h a3, fa4, fa5
-; ZVFHMIN-ZFH-RV64-NEXT: vslide1down.vx v9, v9, a1
-; ZVFHMIN-ZFH-RV64-NEXT: beqz a3, .LBB116_14
-; ZVFHMIN-ZFH-RV64-NEXT: # %bb.13:
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.w.h a1, fa2, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.h.w fa4, a1, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fsgnj.h fa2, fa4, fa2
-; ZVFHMIN-ZFH-RV64-NEXT: .LBB116_14:
-; ZVFHMIN-ZFH-RV64-NEXT: flh fa4, 14(sp)
-; ZVFHMIN-ZFH-RV64-NEXT: vslide1down.vx v8, v8, a2
-; ZVFHMIN-ZFH-RV64-NEXT: fmv.x.h a1, fa2
-; ZVFHMIN-ZFH-RV64-NEXT: fabs.h fa3, fa4
-; ZVFHMIN-ZFH-RV64-NEXT: flt.h a2, fa3, fa5
-; ZVFHMIN-ZFH-RV64-NEXT: vslide1down.vx v9, v9, a1
-; ZVFHMIN-ZFH-RV64-NEXT: beqz a2, .LBB116_16
-; ZVFHMIN-ZFH-RV64-NEXT: # %bb.15:
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.w.h a1, fa4, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fcvt.h.w fa5, a1, rtz
-; ZVFHMIN-ZFH-RV64-NEXT: fsgnj.h fa4, fa5, fa4
-; ZVFHMIN-ZFH-RV64-NEXT: .LBB116_16:
-; ZVFHMIN-ZFH-RV64-NEXT: fmv.x.h a1, fa4
-; ZVFHMIN-ZFH-RV64-NEXT: vmv.v.i v0, 15
-; ZVFHMIN-ZFH-RV64-NEXT: vslide1down.vx v9, v9, a1
-; ZVFHMIN-ZFH-RV64-NEXT: vsetivli zero, 6, e16, mf2, ta, mu
-; ZVFHMIN-ZFH-RV64-NEXT: vslidedown.vi v9, v8, 4, v0.t
-; ZVFHMIN-ZFH-RV64-NEXT: vse16.v v9, (a0)
-; ZVFHMIN-ZFH-RV64-NEXT: addi sp, sp, 16
-; ZVFHMIN-ZFH-RV64-NEXT: ret
-;
-; ZVFHMIN-ZFHIN-RV32-LABEL: trunc_v6f16:
-; ZVFHMIN-ZFHIN-RV32: # %bb.0:
-; ZVFHMIN-ZFHIN-RV32-NEXT: addi sp, sp, -16
-; ZVFHMIN-ZFHIN-RV32-NEXT: .cfi_def_cfa_offset 16
-; ZVFHMIN-ZFHIN-RV32-NEXT: vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-ZFHIN-RV32-NEXT: vle16.v v8, (a0)
-; ZVFHMIN-ZFHIN-RV32-NEXT: mv a1, sp
-; ZVFHMIN-ZFHIN-RV32-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-ZFHIN-RV32-NEXT: vse16.v v8, (a1)
-; ZVFHMIN-ZFHIN-RV32-NEXT: flh fa5, 2(sp)
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.h fa4, fa5
-; ZVFHMIN-ZFHIN-RV32-NEXT: lui a1, 307200
-; ZVFHMIN-ZFHIN-RV32-NEXT: fmv.w.x fa5, a1
-; ZVFHMIN-ZFHIN-RV32-NEXT: fabs.s fa3, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: flt.s a1, fa3, fa5
-; ZVFHMIN-ZFHIN-RV32-NEXT: beqz a1, .LBB116_2
-; ZVFHMIN-ZFHIN-RV32-NEXT: # %bb.1:
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.w.s a1, fa4, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.w fa3, a1, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fsgnj.s fa4, fa3, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: .LBB116_2:
-; ZVFHMIN-ZFHIN-RV32-NEXT: flh fa3, 0(sp)
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.h fa2, fa3
-; ZVFHMIN-ZFHIN-RV32-NEXT: fabs.s fa3, fa2
-; ZVFHMIN-ZFHIN-RV32-NEXT: flt.s a1, fa3, fa5
-; ZVFHMIN-ZFHIN-RV32-NEXT: beqz a1, .LBB116_4
-; ZVFHMIN-ZFHIN-RV32-NEXT: # %bb.3:
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.w.s a1, fa2, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.w fa3, a1, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fsgnj.s fa2, fa3, fa2
-; ZVFHMIN-ZFHIN-RV32-NEXT: .LBB116_4:
-; ZVFHMIN-ZFHIN-RV32-NEXT: flh fa3, 4(sp)
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.h fa3, fa3
-; ZVFHMIN-ZFHIN-RV32-NEXT: fabs.s fa1, fa3
-; ZVFHMIN-ZFHIN-RV32-NEXT: flt.s a1, fa1, fa5
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.h.s fa1, fa2
-; ZVFHMIN-ZFHIN-RV32-NEXT: beqz a1, .LBB116_6
-; ZVFHMIN-ZFHIN-RV32-NEXT: # %bb.5:
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.w.s a1, fa3, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.w fa2, a1, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fsgnj.s fa3, fa2, fa3
-; ZVFHMIN-ZFHIN-RV32-NEXT: .LBB116_6:
-; ZVFHMIN-ZFHIN-RV32-NEXT: flh fa0, 6(sp)
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.h.s fa2, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.h fa4, fa0
-; ZVFHMIN-ZFHIN-RV32-NEXT: fabs.s fa0, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: flt.s a1, fa0, fa5
-; ZVFHMIN-ZFHIN-RV32-NEXT: fmv.x.h a2, fa1
-; ZVFHMIN-ZFHIN-RV32-NEXT: beqz a1, .LBB116_8
-; ZVFHMIN-ZFHIN-RV32-NEXT: # %bb.7:
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.w.s a1, fa4, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.w fa1, a1, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fsgnj.s fa4, fa1, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: .LBB116_8:
-; ZVFHMIN-ZFHIN-RV32-NEXT: flh fa1, 10(sp)
-; ZVFHMIN-ZFHIN-RV32-NEXT: fmv.x.h a1, fa2
-; ZVFHMIN-ZFHIN-RV32-NEXT: vmv.v.x v8, a2
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.h fa2, fa1
-; ZVFHMIN-ZFHIN-RV32-NEXT: fabs.s fa1, fa2
-; ZVFHMIN-ZFHIN-RV32-NEXT: flt.s a2, fa1, fa5
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.h.s fa3, fa3
-; ZVFHMIN-ZFHIN-RV32-NEXT: beqz a2, .LBB116_10
-; ZVFHMIN-ZFHIN-RV32-NEXT: # %bb.9:
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.w.s a2, fa2, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.w fa1, a2, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fsgnj.s fa2, fa1, fa2
-; ZVFHMIN-ZFHIN-RV32-NEXT: .LBB116_10:
-; ZVFHMIN-ZFHIN-RV32-NEXT: vslide1down.vx v8, v8, a1
-; ZVFHMIN-ZFHIN-RV32-NEXT: flh fa1, 8(sp)
-; ZVFHMIN-ZFHIN-RV32-NEXT: fmv.x.h a1, fa3
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.h.s fa3, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.h.s fa2, fa2
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.h fa4, fa1
-; ZVFHMIN-ZFHIN-RV32-NEXT: fabs.s fa1, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: flt.s a3, fa1, fa5
-; ZVFHMIN-ZFHIN-RV32-NEXT: fmv.x.h a2, fa2
-; ZVFHMIN-ZFHIN-RV32-NEXT: beqz a3, .LBB116_12
-; ZVFHMIN-ZFHIN-RV32-NEXT: # %bb.11:
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.w.s a3, fa4, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.w fa2, a3, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fsgnj.s fa4, fa2, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: .LBB116_12:
-; ZVFHMIN-ZFHIN-RV32-NEXT: vslide1down.vx v8, v8, a1
-; ZVFHMIN-ZFHIN-RV32-NEXT: fmv.x.h a1, fa3
-; ZVFHMIN-ZFHIN-RV32-NEXT: flh fa3, 12(sp)
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.h.s fa4, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: fmv.x.h a3, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: vmv.v.x v9, a3
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.h fa4, fa3
-; ZVFHMIN-ZFHIN-RV32-NEXT: fabs.s fa3, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: flt.s a3, fa3, fa5
-; ZVFHMIN-ZFHIN-RV32-NEXT: vslide1down.vx v9, v9, a2
-; ZVFHMIN-ZFHIN-RV32-NEXT: beqz a3, .LBB116_14
-; ZVFHMIN-ZFHIN-RV32-NEXT: # %bb.13:
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.w.s a2, fa4, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.w fa3, a2, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fsgnj.s fa4, fa3, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: .LBB116_14:
-; ZVFHMIN-ZFHIN-RV32-NEXT: flh fa3, 14(sp)
-; ZVFHMIN-ZFHIN-RV32-NEXT: vslide1down.vx v8, v8, a1
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.h.s fa4, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: fmv.x.h a1, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.h fa4, fa3
-; ZVFHMIN-ZFHIN-RV32-NEXT: fabs.s fa3, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: flt.s a2, fa3, fa5
-; ZVFHMIN-ZFHIN-RV32-NEXT: vslide1down.vx v9, v9, a1
-; ZVFHMIN-ZFHIN-RV32-NEXT: beqz a2, .LBB116_16
-; ZVFHMIN-ZFHIN-RV32-NEXT: # %bb.15:
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.w.s a1, fa4, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.s.w fa5, a1, rtz
-; ZVFHMIN-ZFHIN-RV32-NEXT: fsgnj.s fa4, fa5, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: .LBB116_16:
-; ZVFHMIN-ZFHIN-RV32-NEXT: fcvt.h.s fa5, fa4
-; ZVFHMIN-ZFHIN-RV32-NEXT: fmv.x.h a1, fa5
-; ZVFHMIN-ZFHIN-RV32-NEXT: vmv.v.i v0, 15
-; ZVFHMIN-ZFHIN-RV32-NEXT: vslide1down.vx v9, v9, a1
-; ZVFHMIN-ZFHIN-RV32-NEXT: vsetivli zero, 6, e16, mf2, ta, mu
-; ZVFHMIN-ZFHIN-RV32-NEXT: vslidedown.vi v9, v8, 4, v0.t
-; ZVFHMIN-ZFHIN-RV32-NEXT: vse16.v v9, (a0)
-; ZVFHMIN-ZFHIN-RV32-NEXT: addi sp, sp, 16
-; ZVFHMIN-ZFHIN-RV32-NEXT: ret
-;
-; ZVFHMIN-ZFHIN-RV64-LABEL: trunc_v6f16:
-; ZVFHMIN-ZFHIN-RV64: # %bb.0:
-; ZVFHMIN-ZFHIN-RV64-NEXT: addi sp, sp, -16
-; ZVFHMIN-ZFHIN-RV64-NEXT: .cfi_def_cfa_offset 16
-; ZVFHMIN-ZFHIN-RV64-NEXT: vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-ZFHIN-RV64-NEXT: vle16.v v8, (a0)
-; ZVFHMIN-ZFHIN-RV64-NEXT: mv a1, sp
-; ZVFHMIN-ZFHIN-RV64-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-ZFHIN-RV64-NEXT: vse16.v v8, (a1)
-; ZVFHMIN-ZFHIN-RV64-NEXT: flh fa5, 2(sp)
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.h fa4, fa5
-; ZVFHMIN-ZFHIN-RV64-NEXT: lui a1, 307200
-; ZVFHMIN-ZFHIN-RV64-NEXT: fmv.w.x fa5, a1
-; ZVFHMIN-ZFHIN-RV64-NEXT: fabs.s fa3, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: flt.s a1, fa3, fa5
-; ZVFHMIN-ZFHIN-RV64-NEXT: beqz a1, .LBB116_2
-; ZVFHMIN-ZFHIN-RV64-NEXT: # %bb.1:
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.w.s a1, fa4, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.w fa3, a1, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fsgnj.s fa4, fa3, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: .LBB116_2:
-; ZVFHMIN-ZFHIN-RV64-NEXT: flh fa3, 0(sp)
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.h fa2, fa3
-; ZVFHMIN-ZFHIN-RV64-NEXT: fabs.s fa3, fa2
-; ZVFHMIN-ZFHIN-RV64-NEXT: flt.s a1, fa3, fa5
-; ZVFHMIN-ZFHIN-RV64-NEXT: beqz a1, .LBB116_4
-; ZVFHMIN-ZFHIN-RV64-NEXT: # %bb.3:
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.w.s a1, fa2, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.w fa3, a1, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fsgnj.s fa2, fa3, fa2
-; ZVFHMIN-ZFHIN-RV64-NEXT: .LBB116_4:
-; ZVFHMIN-ZFHIN-RV64-NEXT: flh fa3, 4(sp)
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.h fa3, fa3
-; ZVFHMIN-ZFHIN-RV64-NEXT: fabs.s fa1, fa3
-; ZVFHMIN-ZFHIN-RV64-NEXT: flt.s a1, fa1, fa5
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.h.s fa1, fa2
-; ZVFHMIN-ZFHIN-RV64-NEXT: beqz a1, .LBB116_6
-; ZVFHMIN-ZFHIN-RV64-NEXT: # %bb.5:
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.w.s a1, fa3, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.w fa2, a1, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fsgnj.s fa3, fa2, fa3
-; ZVFHMIN-ZFHIN-RV64-NEXT: .LBB116_6:
-; ZVFHMIN-ZFHIN-RV64-NEXT: flh fa0, 6(sp)
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.h.s fa2, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.h fa4, fa0
-; ZVFHMIN-ZFHIN-RV64-NEXT: fabs.s fa0, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: flt.s a1, fa0, fa5
-; ZVFHMIN-ZFHIN-RV64-NEXT: fmv.x.h a2, fa1
-; ZVFHMIN-ZFHIN-RV64-NEXT: beqz a1, .LBB116_8
-; ZVFHMIN-ZFHIN-RV64-NEXT: # %bb.7:
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.w.s a1, fa4, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.w fa1, a1, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fsgnj.s fa4, fa1, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: .LBB116_8:
-; ZVFHMIN-ZFHIN-RV64-NEXT: flh fa1, 10(sp)
-; ZVFHMIN-ZFHIN-RV64-NEXT: fmv.x.h a1, fa2
-; ZVFHMIN-ZFHIN-RV64-NEXT: vmv.v.x v8, a2
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.h fa2, fa1
-; ZVFHMIN-ZFHIN-RV64-NEXT: fabs.s fa1, fa2
-; ZVFHMIN-ZFHIN-RV64-NEXT: flt.s a2, fa1, fa5
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.h.s fa3, fa3
-; ZVFHMIN-ZFHIN-RV64-NEXT: beqz a2, .LBB116_10
-; ZVFHMIN-ZFHIN-RV64-NEXT: # %bb.9:
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.w.s a2, fa2, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.w fa1, a2, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fsgnj.s fa2, fa1, fa2
-; ZVFHMIN-ZFHIN-RV64-NEXT: .LBB116_10:
-; ZVFHMIN-ZFHIN-RV64-NEXT: vslide1down.vx v8, v8, a1
-; ZVFHMIN-ZFHIN-RV64-NEXT: flh fa1, 8(sp)
-; ZVFHMIN-ZFHIN-RV64-NEXT: fmv.x.h a1, fa3
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.h.s fa3, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.h.s fa2, fa2
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.h fa4, fa1
-; ZVFHMIN-ZFHIN-RV64-NEXT: fabs.s fa1, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: flt.s a3, fa1, fa5
-; ZVFHMIN-ZFHIN-RV64-NEXT: fmv.x.h a2, fa2
-; ZVFHMIN-ZFHIN-RV64-NEXT: beqz a3, .LBB116_12
-; ZVFHMIN-ZFHIN-RV64-NEXT: # %bb.11:
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.w.s a3, fa4, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.w fa2, a3, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fsgnj.s fa4, fa2, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: .LBB116_12:
-; ZVFHMIN-ZFHIN-RV64-NEXT: vslide1down.vx v8, v8, a1
-; ZVFHMIN-ZFHIN-RV64-NEXT: fmv.x.h a1, fa3
-; ZVFHMIN-ZFHIN-RV64-NEXT: flh fa3, 12(sp)
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.h.s fa4, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: fmv.x.h a3, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: vmv.v.x v9, a3
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.h fa4, fa3
-; ZVFHMIN-ZFHIN-RV64-NEXT: fabs.s fa3, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: flt.s a3, fa3, fa5
-; ZVFHMIN-ZFHIN-RV64-NEXT: vslide1down.vx v9, v9, a2
-; ZVFHMIN-ZFHIN-RV64-NEXT: beqz a3, .LBB116_14
-; ZVFHMIN-ZFHIN-RV64-NEXT: # %bb.13:
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.w.s a2, fa4, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.w fa3, a2, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fsgnj.s fa4, fa3, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: .LBB116_14:
-; ZVFHMIN-ZFHIN-RV64-NEXT: flh fa3, 14(sp)
-; ZVFHMIN-ZFHIN-RV64-NEXT: vslide1down.vx v8, v8, a1
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.h.s fa4, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: fmv.x.h a1, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.h fa4, fa3
-; ZVFHMIN-ZFHIN-RV64-NEXT: fabs.s fa3, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: flt.s a2, fa3, fa5
-; ZVFHMIN-ZFHIN-RV64-NEXT: vslide1down.vx v9, v9, a1
-; ZVFHMIN-ZFHIN-RV64-NEXT: beqz a2, .LBB116_16
-; ZVFHMIN-ZFHIN-RV64-NEXT: # %bb.15:
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.w.s a1, fa4, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.s.w fa5, a1, rtz
-; ZVFHMIN-ZFHIN-RV64-NEXT: fsgnj.s fa4, fa5, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: .LBB116_16:
-; ZVFHMIN-ZFHIN-RV64-NEXT: fcvt.h.s fa5, fa4
-; ZVFHMIN-ZFHIN-RV64-NEXT: fmv.x.h a1, fa5
-; ZVFHMIN-ZFHIN-RV64-NEXT: vmv.v.i v0, 15
-; ZVFHMIN-ZFHIN-RV64-NEXT: vslide1down.vx v9, v9, a1
-; ZVFHMIN-ZFHIN-RV64-NEXT: vsetivli zero, 6, e16, mf2, ta, mu
-; ZVFHMIN-ZFHIN-RV64-NEXT: vslidedown.vi v9, v8, 4, v0.t
-; ZVFHMIN-ZFHIN-RV64-NEXT: vse16.v v9, (a0)
-; ZVFHMIN-ZFHIN-RV64-NEXT: addi sp, sp, 16
-; ZVFHMIN-ZFHIN-RV64-NEXT: ret
+; ZVFHMIN-LABEL: trunc_v6f16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetivli zero, 6, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-NEXT: vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v8, v9
+; ZVFHMIN-NEXT: lui a1, 307200
+; ZVFHMIN-NEXT: fmv.w.x fa5, a1
+; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, mu
+; ZVFHMIN-NEXT: vfsgnj.vv v9, v8, v9, v0.t
+; ZVFHMIN-NEXT: vsetivli zero, 6, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT: vse16.v v8, (a0)
+; ZVFHMIN-NEXT: ret
%a = load <6 x half>, ptr %x
%b = call <6 x half> @llvm.trunc.v6f16(<6 x half> %a)
store <6 x half> %b, ptr %x
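
trunc_v6f16 lowers to the same widen/mask/convert/narrow recipe; the
only differences from trunc_v8f16 are the VL=6 bounds on the load and
on the final vfncvt + vse16, while the masked arithmetic still runs
over all 8 mf2 lanes. A minimal standalone sketch of that shape
(hypothetical name, assuming the widened f32 lanes are already in
memory):

#include <cmath>
#include <cstdint>

void trunc_v6_widened(float lanes[8]) {
  for (int i = 0; i < 8; ++i)               // VL=8 masked arithmetic
    if (std::fabs(lanes[i]) < 0x1p23f)      // vmflt.vf threshold, 2^23
      lanes[i] = std::copysign((float)(int32_t)lanes[i], lanes[i]);
  // the VL=6 vfncvt.f.f.w and vse16.v consume only lanes[0..5]
}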