[llvm] 9c97b38 - [ISel/RISCV] Custom-promote [b]f16 in [l]lrint (#146507)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Jul 9 02:24:43 PDT 2025
Author: Ramkumar Ramachandra
Date: 2025-07-09T10:24:38+01:00
New Revision: 9c97b38d44c5dfcab6e97b5177ebcc7b978fb476
URL: https://github.com/llvm/llvm-project/commit/9c97b38d44c5dfcab6e97b5177ebcc7b978fb476
DIFF: https://github.com/llvm/llvm-project/commit/9c97b38d44c5dfcab6e97b5177ebcc7b978fb476.diff
LOG: [ISel/RISCV] Custom-promote [b]f16 in [l]lrint (#146507)
Extend lowerVectorXRINT to also emit an FP_EXTEND_VL when the source
element type is [b]f16, and wire up this custom promotion. Updating the
cost model so that these operations no longer get an invalid cost is
left to a companion patch.
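
For illustration, a minimal sketch of the kind of input this now handles,
copied from the new tests further down (the full FileCheck lines are in the
diff): an [l]lrint of a half/bfloat vector, which is lowered by first
widening the source to f32 (vfwcvt.f.f.v, or vfwcvtbf16.f.f.v for bf16) and
then converting the widened value to the integer result:

  define <2 x i64> @llrint_v2i64_v2f16(<2 x half> %x) {
    %a = call <2 x i64> @llvm.llrint.v2i64.v2f16(<2 x half> %x)
    ret <2 x i64> %a
  }
  declare <2 x i64> @llvm.llrint.v2i64.v2f16(<2 x half>)

With +zvfhmin this selects to roughly the following (per the test below):

  vsetivli zero, 2, e16, mf4, ta, ma
  vfwcvt.f.f.v v9, v8
  vsetvli zero, zero, e32, mf2, ta, ma
  vfwcvt.x.f.v v8, v9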
Added:
Modified:
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll
llvm/test/CodeGen/RISCV/rvv/llrint-sdnode.ll
llvm/test/CodeGen/RISCV/rvv/lrint-sdnode.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 98b613d9cc856..dcb4f690ba35c 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1150,6 +1150,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction({ISD::STRICT_FP_ROUND, ISD::STRICT_FP_EXTEND}, VT,
Custom);
setOperationAction({ISD::VP_FP_ROUND, ISD::VP_FP_EXTEND}, VT, Custom);
+ setOperationAction({ISD::LRINT, ISD::LLRINT}, VT, Custom);
setOperationAction({ISD::VP_MERGE, ISD::VP_SELECT, ISD::SELECT}, VT,
Custom);
setOperationAction(ISD::SELECT_CC, VT, Expand);
@@ -1451,6 +1452,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
Custom);
setOperationAction({ISD::VP_SINT_TO_FP, ISD::VP_UINT_TO_FP}, VT,
Custom);
+ setOperationAction({ISD::LRINT, ISD::LLRINT}, VT, Custom);
if (Subtarget.hasStdExtZfhmin()) {
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
} else {
@@ -1475,6 +1477,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
if (VT.getVectorElementType() == MVT::bf16) {
setOperationAction(ISD::BITCAST, VT, Custom);
setOperationAction({ISD::VP_FP_ROUND, ISD::VP_FP_EXTEND}, VT, Custom);
+ setOperationAction({ISD::LRINT, ISD::LLRINT}, VT, Custom);
if (Subtarget.hasStdExtZfbfmin()) {
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
} else {
@@ -3487,6 +3490,14 @@ static SDValue lowerVectorXRINT(SDValue Op, SelectionDAG &DAG,
}
auto [Mask, VL] = getDefaultVLOps(SrcVT, SrcContainerVT, DL, DAG, Subtarget);
+
+ // [b]f16 -> f32
+ MVT SrcElemType = SrcVT.getVectorElementType();
+ if (SrcElemType == MVT::f16 || SrcElemType == MVT::bf16) {
+ MVT F32VT = SrcContainerVT.changeVectorElementType(MVT::f32);
+ Src = DAG.getNode(RISCVISD::FP_EXTEND_VL, DL, F32VT, Src, Mask, VL);
+ }
+
SDValue Res =
DAG.getNode(RISCVISD::VFCVT_RM_X_F_VL, DL, DstContainerVT, Src, Mask,
DAG.getTargetConstant(matchRoundingOp(Op.getOpcode()), DL,
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll
index 0a6f9f5ba0928..b9a84ff9b07b9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll
@@ -1,187 +1,277 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f,+d -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV32
-; RUN: llc -mtriple=riscv64 -mattr=+v,+f,+d -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+f,+d,+zvfhmin,+zvfbfmin -target-abi=ilp32d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+v,+f,+d,+zvfhmin,+zvfbfmin -target-abi=lp64d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
define <1 x i64> @llrint_v1i64_v1f32(<1 x float> %x) {
-; RV32-LABEL: llrint_v1i64_v1f32:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; RV32-NEXT: vfwcvt.x.f.v v9, v8
-; RV32-NEXT: vmv1r.v v8, v9
-; RV32-NEXT: ret
-;
-; RV64-LABEL: llrint_v1i64_v1f32:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; RV64-NEXT: vfwcvt.x.f.v v9, v8
-; RV64-NEXT: vmv1r.v v8, v9
-; RV64-NEXT: ret
+; CHECK-LABEL: llrint_v1i64_v1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.x.f.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
%a = call <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float> %x)
ret <1 x i64> %a
}
declare <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float>)
define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) {
-; RV32-LABEL: llrint_v2i64_v2f32:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; RV32-NEXT: vfwcvt.x.f.v v9, v8
-; RV32-NEXT: vmv1r.v v8, v9
-; RV32-NEXT: ret
-;
-; RV64-LABEL: llrint_v2i64_v2f32:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; RV64-NEXT: vfwcvt.x.f.v v9, v8
-; RV64-NEXT: vmv1r.v v8, v9
-; RV64-NEXT: ret
+; CHECK-LABEL: llrint_v2i64_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.x.f.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
%a = call <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float> %x)
ret <2 x i64> %a
}
declare <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float>)
define <3 x i64> @llrint_v3i64_v3f32(<3 x float> %x) {
-; RV32-LABEL: llrint_v3i64_v3f32:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT: vmv1r.v v10, v8
-; RV32-NEXT: vfwcvt.x.f.v v8, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: llrint_v3i64_v3f32:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT: vmv1r.v v10, v8
-; RV64-NEXT: vfwcvt.x.f.v v8, v10
-; RV64-NEXT: ret
+; CHECK-LABEL: llrint_v3i64_v3f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vfwcvt.x.f.v v8, v10
+; CHECK-NEXT: ret
%a = call <3 x i64> @llvm.llrint.v3i64.v3f32(<3 x float> %x)
ret <3 x i64> %a
}
declare <3 x i64> @llvm.llrint.v3i64.v3f32(<3 x float>)
define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
-; RV32-LABEL: llrint_v4i64_v4f32:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT: vmv1r.v v10, v8
-; RV32-NEXT: vfwcvt.x.f.v v8, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: llrint_v4i64_v4f32:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT: vmv1r.v v10, v8
-; RV64-NEXT: vfwcvt.x.f.v v8, v10
-; RV64-NEXT: ret
+; CHECK-LABEL: llrint_v4i64_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vfwcvt.x.f.v v8, v10
+; CHECK-NEXT: ret
%a = call <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float> %x)
ret <4 x i64> %a
}
declare <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float>)
define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
-; RV32-LABEL: llrint_v8i64_v8f32:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT: vmv2r.v v12, v8
-; RV32-NEXT: vfwcvt.x.f.v v8, v12
-; RV32-NEXT: ret
-;
-; RV64-LABEL: llrint_v8i64_v8f32:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV64-NEXT: vmv2r.v v12, v8
-; RV64-NEXT: vfwcvt.x.f.v v8, v12
-; RV64-NEXT: ret
+; CHECK-LABEL: llrint_v8i64_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vfwcvt.x.f.v v8, v12
+; CHECK-NEXT: ret
%a = call <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float> %x)
ret <8 x i64> %a
}
declare <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float>)
define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
-; RV32-LABEL: llrint_v16i64_v16f32:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; RV32-NEXT: vmv4r.v v16, v8
-; RV32-NEXT: vfwcvt.x.f.v v8, v16
-; RV32-NEXT: ret
-;
-; RV64-LABEL: llrint_v16i64_v16f32:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; RV64-NEXT: vmv4r.v v16, v8
-; RV64-NEXT: vfwcvt.x.f.v v8, v16
-; RV64-NEXT: ret
+; CHECK-LABEL: llrint_v16i64_v16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v8
+; CHECK-NEXT: vfwcvt.x.f.v v8, v16
+; CHECK-NEXT: ret
%a = call <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float> %x)
ret <16 x i64> %a
}
declare <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float>)
define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) {
-; RV32-LABEL: llrint_v1i64_v1f64:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vfcvt.x.f.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: llrint_v1i64_v1f64:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vfcvt.x.f.v v8, v8
-; RV64-NEXT: ret
+; CHECK-LABEL: llrint_v1i64_v1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT: vfcvt.x.f.v v8, v8
+; CHECK-NEXT: ret
%a = call <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double> %x)
ret <1 x i64> %a
}
declare <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double>)
define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) {
-; RV32-LABEL: llrint_v2i64_v2f64:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT: vfcvt.x.f.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: llrint_v2i64_v2f64:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV64-NEXT: vfcvt.x.f.v v8, v8
-; RV64-NEXT: ret
+; CHECK-LABEL: llrint_v2i64_v2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vfcvt.x.f.v v8, v8
+; CHECK-NEXT: ret
%a = call <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double> %x)
ret <2 x i64> %a
}
declare <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double>)
define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
-; RV32-LABEL: llrint_v4i64_v4f64:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT: vfcvt.x.f.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: llrint_v4i64_v4f64:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT: vfcvt.x.f.v v8, v8
-; RV64-NEXT: ret
+; CHECK-LABEL: llrint_v4i64_v4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vfcvt.x.f.v v8, v8
+; CHECK-NEXT: ret
%a = call <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double> %x)
ret <4 x i64> %a
}
declare <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double>)
define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
-; RV32-LABEL: llrint_v8i64_v8f64:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT: vfcvt.x.f.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: llrint_v8i64_v8f64:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT: vfcvt.x.f.v v8, v8
-; RV64-NEXT: ret
+; CHECK-LABEL: llrint_v8i64_v8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT: vfcvt.x.f.v v8, v8
+; CHECK-NEXT: ret
%a = call <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double> %x)
ret <8 x i64> %a
}
declare <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double>)
+
+define <1 x i64> @llrint_v1i64_v1f16(<1 x half> %x) {
+; CHECK-LABEL: llrint_v1i64_v1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.x.f.v v8, v9
+; CHECK-NEXT: ret
+ %a = call <1 x i64> @llvm.llrint.v1i64.v1f16(<1 x half> %x)
+ ret <1 x i64> %a
+}
+declare <1 x i64> @llvm.llrint.v1i64.v1f16(<1 x half>)
+
+define <2 x i64> @llrint_v2i64_v2f16(<2 x half> %x) {
+; CHECK-LABEL: llrint_v2i64_v2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.x.f.v v8, v9
+; CHECK-NEXT: ret
+ %a = call <2 x i64> @llvm.llrint.v2i64.v2f16(<2 x half> %x)
+ ret <2 x i64> %a
+}
+declare <2 x i64> @llvm.llrint.v2i64.v2f16(<2 x half>)
+
+define <3 x i64> @llrint_v3i64_v3f16(<3 x half> %x) {
+; CHECK-LABEL: llrint_v3i64_v3f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfwcvt.x.f.v v8, v10
+; CHECK-NEXT: ret
+ %a = call <3 x i64> @llvm.llrint.v3i64.v3f16(<3 x half> %x)
+ ret <3 x i64> %a
+}
+declare <3 x i64> @llvm.llrint.v3i64.v3f16(<3 x half>)
+
+define <4 x i64> @llrint_v4i64_v4f16(<4 x half> %x) {
+; CHECK-LABEL: llrint_v4i64_v4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfwcvt.x.f.v v8, v10
+; CHECK-NEXT: ret
+ %a = call <4 x i64> @llvm.llrint.v4i64.v4f16(<4 x half> %x)
+ ret <4 x i64> %a
+}
+declare <4 x i64> @llvm.llrint.v4i64.v4f16(<4 x half>)
+
+define <8 x i64> @llrint_v8i64_v8f16(<8 x half> %x) {
+; CHECK-LABEL: llrint_v8i64_v8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfwcvt.x.f.v v8, v12
+; CHECK-NEXT: ret
+ %a = call <8 x i64> @llvm.llrint.v8i64.v8f16(<8 x half> %x)
+ ret <8 x i64> %a
+}
+declare <8 x i64> @llvm.llrint.v8i64.v8f16(<8 x half>)
+
+define <16 x i64> @llrint_v16i64_v16f16(<16 x half> %x) {
+; CHECK-LABEL: llrint_v16i64_v16f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v16, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfwcvt.x.f.v v8, v16
+; CHECK-NEXT: ret
+ %a = call <16 x i64> @llvm.llrint.v16i64.v16f16(<16 x half> %x)
+ ret <16 x i64> %a
+}
+declare <16 x i64> @llvm.llrint.v16i64.v16f16(<16 x half>)
+
+define <1 x i64> @llrint_v1i64_v1bf16(<1 x bfloat> %x) {
+; CHECK-LABEL: llrint_v1i64_v1bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.x.f.v v8, v9
+; CHECK-NEXT: ret
+ %a = call <1 x i64> @llvm.llrint.v1i64.v1bf16(<1 x bfloat> %x)
+ ret <1 x i64> %a
+}
+declare <1 x i64> @llvm.llrint.v1i64.v1bf16(<1 x bfloat>)
+
+define <2 x i64> @llrint_v2i64_v2bf16(<2 x bfloat> %x) {
+; CHECK-LABEL: llrint_v2i64_v2bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.x.f.v v8, v9
+; CHECK-NEXT: ret
+ %a = call <2 x i64> @llvm.llrint.v2i64.v2bf16(<2 x bfloat> %x)
+ ret <2 x i64> %a
+}
+declare <2 x i64> @llvm.llrint.v2i64.v2bf16(<2 x bfloat>)
+
+define <3 x i64> @llrint_v3i64_v3bf16(<3 x bfloat> %x) {
+; CHECK-LABEL: llrint_v3i64_v3bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfwcvt.x.f.v v8, v10
+; CHECK-NEXT: ret
+ %a = call <3 x i64> @llvm.llrint.v3i64.v3bf16(<3 x bfloat> %x)
+ ret <3 x i64> %a
+}
+declare <3 x i64> @llvm.llrint.v3i64.v3bf16(<3 x bfloat>)
+
+define <4 x i64> @llrint_v4i64_v4bf16(<4 x bfloat> %x) {
+; CHECK-LABEL: llrint_v4i64_v4bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfwcvt.x.f.v v8, v10
+; CHECK-NEXT: ret
+ %a = call <4 x i64> @llvm.llrint.v4i64.v4bf16(<4 x bfloat> %x)
+ ret <4 x i64> %a
+}
+declare <4 x i64> @llvm.llrint.v4i64.v4bf16(<4 x bfloat>)
+
+define <8 x i64> @llrint_v8i64_v8bf16(<8 x bfloat> %x) {
+; CHECK-LABEL: llrint_v8i64_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfwcvt.x.f.v v8, v12
+; CHECK-NEXT: ret
+ %a = call <8 x i64> @llvm.llrint.v8i64.v8bf16(<8 x bfloat> %x)
+ ret <8 x i64> %a
+}
+declare <8 x i64> @llvm.llrint.v8i64.v8bf16(<8 x bfloat>)
+
+define <16 x i64> @llrint_v16i64_v16bf16(<16 x bfloat> %x) {
+; CHECK-LABEL: llrint_v16i64_v16bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfwcvt.x.f.v v8, v16
+; CHECK-NEXT: ret
+ %a = call <16 x i64> @llvm.llrint.v16i64.v16bf16(<16 x bfloat> %x)
+ ret <16 x i64> %a
+}
+declare <16 x i64> @llvm.llrint.v16i64.v16bf16(<16 x bfloat>)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll
index ef2208666e0b4..a52290072c540 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll
@@ -1,9 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+f,+d \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+f,+d,+zvfhmin,+zvfbfmin \
; RUN: -target-abi=ilp32d -verify-machineinstrs | FileCheck %s --check-prefix=RV32
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d,+zvfhmin,+zvfbfmin \
; RUN: -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64-i32
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d,+zvfhmin,+zvfbfmin \
; RUN: -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64-i64
define <1 x iXLen> @lrint_v1f32(<1 x float> %x) {
@@ -249,3 +249,388 @@ define <8 x iXLen> @lrint_v8f64(<8 x double> %x) {
ret <8 x iXLen> %a
}
declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f64(<8 x double>)
+
+define <1 x iXLen> @lrint_v1f16(<1 x half> %x) {
+; RV32-LABEL: lrint_v1f16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT: vfwcvt.f.f.v v9, v8
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_v1f16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-i32-NEXT: vfwcvt.f.f.v v9, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v9
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_v1f16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-i64-NEXT: vfwcvt.f.f.v v9, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v9
+; RV64-i64-NEXT: ret
+ %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f16(<1 x half> %x)
+ ret <1 x iXLen> %a
+}
+declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f16(<1 x half>)
+
+define <2 x iXLen> @lrint_v2f16(<2 x half> %x) {
+; RV32-LABEL: lrint_v2f16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; RV32-NEXT: vfwcvt.f.f.v v9, v8
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_v2f16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; RV64-i32-NEXT: vfwcvt.f.f.v v9, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v9
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_v2f16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; RV64-i64-NEXT: vfwcvt.f.f.v v9, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v9
+; RV64-i64-NEXT: ret
+ %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2f16(<2 x half> %x)
+ ret <2 x iXLen> %a
+}
+declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f16(<2 x half>)
+
+define <3 x iXLen> @lrint_v3f16(<3 x half> %x) {
+; RV32-LABEL: lrint_v3f16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT: vfwcvt.f.f.v v9, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_v3f16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-i32-NEXT: vfwcvt.f.f.v v9, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v9
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_v3f16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-i64-NEXT: vfwcvt.f.f.v v10, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v10
+; RV64-i64-NEXT: ret
+ %a = call <3 x iXLen> @llvm.lrint.v3iXLen.v3f16(<3 x half> %x)
+ ret <3 x iXLen> %a
+}
+declare <3 x iXLen> @llvm.lrint.v3iXLen.v3f16(<3 x half>)
+
+define <4 x iXLen> @lrint_v4f16(<4 x half> %x) {
+; RV32-LABEL: lrint_v4f16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT: vfwcvt.f.f.v v9, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_v4f16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-i32-NEXT: vfwcvt.f.f.v v9, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v9
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_v4f16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-i64-NEXT: vfwcvt.f.f.v v10, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v10
+; RV64-i64-NEXT: ret
+ %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f16(<4 x half> %x)
+ ret <4 x iXLen> %a
+}
+declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f16(<4 x half>)
+
+define <8 x iXLen> @lrint_v8f16(<8 x half> %x) {
+; RV32-LABEL: lrint_v8f16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vfwcvt.f.f.v v10, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v10
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_v8f16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-i32-NEXT: vfwcvt.f.f.v v10, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v10
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_v8f16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-i64-NEXT: vfwcvt.f.f.v v12, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v12
+; RV64-i64-NEXT: ret
+ %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f16(<8 x half> %x)
+ ret <8 x iXLen> %a
+}
+declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f16(<8 x half>)
+
+define <16 x iXLen> @lrint_v16f16(<16 x half> %x) {
+; RV32-LABEL: lrint_v16f16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vfwcvt.f.f.v v12, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v12
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_v16f16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-i32-NEXT: vfwcvt.f.f.v v12, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v12
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_v16f16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-i64-NEXT: vfwcvt.f.f.v v16, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v16
+; RV64-i64-NEXT: ret
+ %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f16(<16 x half> %x)
+ ret <16 x iXLen> %a
+}
+declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f16(<16 x half>)
+
+define <1 x iXLen> @lrint_v1bf16(<1 x bfloat> %x) {
+; RV32-LABEL: lrint_v1bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT: vfwcvtbf16.f.f.v v9, v8
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_v1bf16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-i32-NEXT: vfwcvtbf16.f.f.v v9, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v9
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_v1bf16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-i64-NEXT: vfwcvtbf16.f.f.v v9, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v9
+; RV64-i64-NEXT: ret
+ %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1bf16(<1 x bfloat> %x)
+ ret <1 x iXLen> %a
+}
+declare <1 x iXLen> @llvm.lrint.v1iXLen.v1bf16(<1 x bfloat>)
+
+define <2 x iXLen> @lrint_v2bf16(<2 x bfloat> %x) {
+; RV32-LABEL: lrint_v2bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; RV32-NEXT: vfwcvtbf16.f.f.v v9, v8
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_v2bf16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; RV64-i32-NEXT: vfwcvtbf16.f.f.v v9, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v9
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_v2bf16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; RV64-i64-NEXT: vfwcvtbf16.f.f.v v9, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v9
+; RV64-i64-NEXT: ret
+ %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2bf16(<2 x bfloat> %x)
+ ret <2 x iXLen> %a
+}
+declare <2 x iXLen> @llvm.lrint.v2iXLen.v2bf16(<2 x bfloat>)
+
+define <3 x iXLen> @lrint_v3bf16(<3 x bfloat> %x) {
+; RV32-LABEL: lrint_v3bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT: vfwcvtbf16.f.f.v v9, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_v3bf16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-i32-NEXT: vfwcvtbf16.f.f.v v9, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v9
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_v3bf16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-i64-NEXT: vfwcvtbf16.f.f.v v10, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v10
+; RV64-i64-NEXT: ret
+ %a = call <3 x iXLen> @llvm.lrint.v3iXLen.v3bf16(<3 x bfloat> %x)
+ ret <3 x iXLen> %a
+}
+declare <3 x iXLen> @llvm.lrint.v3iXLen.v3bf16(<3 x bfloat>)
+
+define <4 x iXLen> @lrint_v4bf16(<4 x bfloat> %x) {
+; RV32-LABEL: lrint_v4bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT: vfwcvtbf16.f.f.v v9, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_v4bf16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-i32-NEXT: vfwcvtbf16.f.f.v v9, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v9
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_v4bf16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-i64-NEXT: vfwcvtbf16.f.f.v v10, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v10
+; RV64-i64-NEXT: ret
+ %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4bf16(<4 x bfloat> %x)
+ ret <4 x iXLen> %a
+}
+declare <4 x iXLen> @llvm.lrint.v4iXLen.v4bf16(<4 x bfloat>)
+
+define <8 x iXLen> @lrint_v8bf16(<8 x bfloat> %x) {
+; RV32-LABEL: lrint_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vfwcvtbf16.f.f.v v10, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v10
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_v8bf16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-i32-NEXT: vfwcvtbf16.f.f.v v10, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v10
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_v8bf16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-i64-NEXT: vfwcvtbf16.f.f.v v12, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v12
+; RV64-i64-NEXT: ret
+ %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8bf16(<8 x bfloat> %x)
+ ret <8 x iXLen> %a
+}
+declare <8 x iXLen> @llvm.lrint.v8iXLen.v8bf16(<8 x bfloat>)
+
+define <16 x iXLen> @lrint_v16bf16(<16 x bfloat> %x) {
+; RV32-LABEL: lrint_v16bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vfwcvtbf16.f.f.v v12, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v12
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_v16bf16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-i32-NEXT: vfwcvtbf16.f.f.v v12, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v12
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_v16bf16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-i64-NEXT: vfwcvtbf16.f.f.v v16, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v16
+; RV64-i64-NEXT: ret
+ %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16bf16(<16 x bfloat> %x)
+ ret <16 x iXLen> %a
+}
+declare <16 x iXLen> @llvm.lrint.v16iXLen.v16bf16(<16 x bfloat>)
+
+define <32 x iXLen> @lrint_v32bf16(<32 x bfloat> %x) {
+; RV32-LABEL: lrint_v32bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: li a0, 32
+; RV32-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; RV32-NEXT: vfwcvtbf16.f.f.v v16, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v16
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_v32bf16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: li a0, 32
+; RV64-i32-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; RV64-i32-NEXT: vfwcvtbf16.f.f.v v16, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v16
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_v32bf16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-i64-NEXT: vfwcvtbf16.f.f.v v16, v8
+; RV64-i64-NEXT: vsetivli zero, 16, e16, m4, ta, ma
+; RV64-i64-NEXT: vslidedown.vi v20, v8, 16
+; RV64-i64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v16
+; RV64-i64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; RV64-i64-NEXT: vfwcvtbf16.f.f.v v24, v20
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v16, v24
+; RV64-i64-NEXT: ret
+ %a = call <32 x iXLen> @llvm.lrint.v32iXLen.v32bf16(<32 x bfloat> %x)
+ ret <32 x iXLen> %a
+}
+declare <32 x iXLen> @llvm.lrint.v32iXLen.v32bf16(<32 x bfloat>)
diff --git a/llvm/test/CodeGen/RISCV/rvv/llrint-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/llrint-sdnode.ll
index 66c64c6b275f4..6bc934cbdf0d8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/llrint-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/llrint-sdnode.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f,+d -target-abi=ilp32d \
+; RUN: llc -mtriple=riscv32 -mattr=+v,+f,+d,+zvfhmin,+zvfbfmin -target-abi=ilp32d \
; RUN: -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+v,+f,+d -target-abi=lp64d \
+; RUN: llc -mtriple=riscv64 -mattr=+v,+f,+d,+zvfhmin,+zvfbfmin -target-abi=lp64d \
; RUN: -verify-machineinstrs < %s | FileCheck %s
define <vscale x 1 x i64> @llrint_nxv1i64_nxv1f32(<vscale x 1 x float> %x) {
@@ -108,3 +108,137 @@ define <vscale x 8 x i64> @llrint_nxv8i64_nxv8f64(<vscale x 8 x double> %x) {
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f64(<vscale x 8 x double>)
+
+define <vscale x 1 x i64> @llrint_nxv1f16(<vscale x 1 x half> %x) {
+; CHECK-LABEL: llrint_nxv1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.x.f.v v8, v9
+; CHECK-NEXT: ret
+ %a = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f16(<vscale x 1 x half> %x)
+ ret <vscale x 1 x i64> %a
+}
+declare <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f16(<vscale x 1 x half>)
+
+define <vscale x 2 x i64> @llrint_nxv2f16(<vscale x 2 x half> %x) {
+; CHECK-LABEL: llrint_nxv2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfwcvt.x.f.v v8, v10
+; CHECK-NEXT: ret
+ %a = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f16(<vscale x 2 x half> %x)
+ ret <vscale x 2 x i64> %a
+}
+declare <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f16(<vscale x 2 x half>)
+
+define <vscale x 4 x i64> @llrint_nxv4f16(<vscale x 4 x half> %x) {
+; CHECK-LABEL: llrint_nxv4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfwcvt.x.f.v v8, v12
+; CHECK-NEXT: ret
+ %a = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f16(<vscale x 4 x half> %x)
+ ret <vscale x 4 x i64> %a
+}
+declare <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f16(<vscale x 4 x half>)
+
+define <vscale x 8 x i64> @llrint_nxv8f16(<vscale x 8 x half> %x) {
+; CHECK-LABEL: llrint_nxv8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v16, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfwcvt.x.f.v v8, v16
+; CHECK-NEXT: ret
+ %a = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f16(<vscale x 8 x half> %x)
+ ret <vscale x 8 x i64> %a
+}
+declare <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f16(<vscale x 8 x half>)
+
+define <vscale x 16 x i64> @llrint_nxv16f16(<vscale x 16 x half> %x) {
+; CHECK-LABEL: llrint_nxv16f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v16, v8
+; CHECK-NEXT: vfwcvt.f.f.v v24, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfwcvt.x.f.v v8, v16
+; CHECK-NEXT: vfwcvt.x.f.v v16, v24
+; CHECK-NEXT: ret
+ %a = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f16(<vscale x 16 x half> %x)
+ ret <vscale x 16 x i64> %a
+}
+declare <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f16(<vscale x 16 x half>)
+
+define <vscale x 1 x i64> @llrint_nxv1bf16(<vscale x 1 x bfloat> %x) {
+; CHECK-LABEL: llrint_nxv1bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.x.f.v v8, v9
+; CHECK-NEXT: ret
+ %a = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1bf16(<vscale x 1 x bfloat> %x)
+ ret <vscale x 1 x i64> %a
+}
+declare <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1bf16(<vscale x 1 x bfloat>)
+
+define <vscale x 2 x i64> @llrint_nxv2bf16(<vscale x 2 x bfloat> %x) {
+; CHECK-LABEL: llrint_nxv2bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfwcvt.x.f.v v8, v10
+; CHECK-NEXT: ret
+ %a = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2bf16(<vscale x 2 x bfloat> %x)
+ ret <vscale x 2 x i64> %a
+}
+declare <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2bf16(<vscale x 2 x bfloat>)
+
+define <vscale x 4 x i64> @llrint_nxv4bf16(<vscale x 4 x bfloat> %x) {
+; CHECK-LABEL: llrint_nxv4bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfwcvt.x.f.v v8, v12
+; CHECK-NEXT: ret
+ %a = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4bf16(<vscale x 4 x bfloat> %x)
+ ret <vscale x 4 x i64> %a
+}
+declare <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4bf16(<vscale x 4 x bfloat>)
+
+define <vscale x 8 x i64> @llrint_nxv8bf16(<vscale x 8 x bfloat> %x) {
+; CHECK-LABEL: llrint_nxv8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfwcvt.x.f.v v8, v16
+; CHECK-NEXT: ret
+ %a = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8bf16(<vscale x 8 x bfloat> %x)
+ ret <vscale x 8 x i64> %a
+}
+declare <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8bf16(<vscale x 8 x bfloat>)
+
+define <vscale x 16 x i64> @llrint_nxv16bf16(<vscale x 16 x bfloat> %x) {
+; CHECK-LABEL: llrint_nxv16bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfwcvt.x.f.v v8, v16
+; CHECK-NEXT: vfwcvt.x.f.v v16, v24
+; CHECK-NEXT: ret
+ %a = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16bf16(<vscale x 16 x bfloat> %x)
+ ret <vscale x 16 x i64> %a
+}
+declare <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16bf16(<vscale x 16 x bfloat>)
diff --git a/llvm/test/CodeGen/RISCV/rvv/lrint-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/lrint-sdnode.ll
index 534fdf21675ce..6df738fd72854 100644
--- a/llvm/test/CodeGen/RISCV/rvv/lrint-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/lrint-sdnode.ll
@@ -1,9 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+f,+d \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+f,+d,+zvfhmin,+zvfbfmin \
; RUN: -target-abi=ilp32d -verify-machineinstrs | FileCheck %s --check-prefix=RV32
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d,+zvfhmin,+zvfbfmin \
; RUN: -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64-i32
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d,+zvfhmin,+zvfbfmin \
; RUN: -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64-i64
define <vscale x 1 x iXLen> @lrint_nxv1f32(<vscale x 1 x float> %x) {
@@ -226,3 +226,414 @@ define <vscale x 8 x iXLen> @lrint_nxv8f64(<vscale x 8 x double> %x) {
ret <vscale x 8 x iXLen> %a
}
declare <vscale x 8 x iXLen> @llvm.lrint.nxv8iXLen.nxv8f64(<vscale x 8 x double>)
+
+define <vscale x 1 x iXLen> @lrint_nxv1f16(<vscale x 1 x half> %x) {
+; RV32-LABEL: lrint_nxv1f16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfwcvt.f.f.v v9, v8
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_nxv1f16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-i32-NEXT: vfwcvt.f.f.v v9, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v9
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv1f16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-i64-NEXT: vfwcvt.f.f.v v9, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v9
+; RV64-i64-NEXT: ret
+ %a = call <vscale x 1 x iXLen> @llvm.lrint.nxv1iXLen.nxv1f16(<vscale x 1 x half> %x)
+ ret <vscale x 1 x iXLen> %a
+}
+declare <vscale x 1 x iXLen> @llvm.lrint.nxv1iXLen.nxv1f16(<vscale x 1 x half>)
+
+define <vscale x 2 x iXLen> @lrint_nxv2f16(<vscale x 2 x half> %x) {
+; RV32-LABEL: lrint_nxv2f16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; RV32-NEXT: vfwcvt.f.f.v v9, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_nxv2f16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; RV64-i32-NEXT: vfwcvt.f.f.v v9, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v9
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv2f16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; RV64-i64-NEXT: vfwcvt.f.f.v v10, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v10
+; RV64-i64-NEXT: ret
+ %a = call <vscale x 2 x iXLen> @llvm.lrint.nxv2iXLen.nxv2f16(<vscale x 2 x half> %x)
+ ret <vscale x 2 x iXLen> %a
+}
+declare <vscale x 2 x iXLen> @llvm.lrint.nxv2iXLen.nxv2f16(<vscale x 2 x half>)
+
+define <vscale x 4 x iXLen> @lrint_nxv4f16(<vscale x 4 x half> %x) {
+; RV32-LABEL: lrint_nxv4f16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vfwcvt.f.f.v v10, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v10
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_nxv4f16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-i32-NEXT: vfwcvt.f.f.v v10, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v10
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv4f16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-i64-NEXT: vfwcvt.f.f.v v12, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v12
+; RV64-i64-NEXT: ret
+ %a = call <vscale x 4 x iXLen> @llvm.lrint.nxv4iXLen.nxv4f16(<vscale x 4 x half> %x)
+ ret <vscale x 4 x iXLen> %a
+}
+declare <vscale x 4 x iXLen> @llvm.lrint.nxv4iXLen.nxv4f16(<vscale x 4 x half>)
+
+define <vscale x 8 x iXLen> @lrint_nxv8f16(<vscale x 8 x half> %x) {
+; RV32-LABEL: lrint_nxv8f16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV32-NEXT: vfwcvt.f.f.v v12, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v12
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_nxv8f16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV64-i32-NEXT: vfwcvt.f.f.v v12, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v12
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv8f16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV64-i64-NEXT: vfwcvt.f.f.v v16, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v16
+; RV64-i64-NEXT: ret
+ %a = call <vscale x 8 x iXLen> @llvm.lrint.nxv8iXLen.nxv8f16(<vscale x 8 x half> %x)
+ ret <vscale x 8 x iXLen> %a
+}
+declare <vscale x 8 x iXLen> @llvm.lrint.nxv8iXLen.nxv8f16(<vscale x 8 x half>)
+
+define <vscale x 16 x iXLen> @lrint_nxv16f16(<vscale x 16 x half> %x) {
+; RV32-LABEL: lrint_nxv16f16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; RV32-NEXT: vfwcvt.f.f.v v16, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v16
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_nxv16f16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; RV64-i32-NEXT: vfwcvt.f.f.v v16, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v16
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv16f16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV64-i64-NEXT: vfwcvt.f.f.v v16, v8
+; RV64-i64-NEXT: vfwcvt.f.f.v v24, v10
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v16
+; RV64-i64-NEXT: vfwcvt.x.f.v v16, v24
+; RV64-i64-NEXT: ret
+ %a = call <vscale x 16 x iXLen> @llvm.lrint.nxv16iXLen.nxv16f16(<vscale x 16 x half> %x)
+ ret <vscale x 16 x iXLen> %a
+}
+declare <vscale x 16 x iXLen> @llvm.lrint.nxv16iXLen.nxv16f16(<vscale x 16 x half>)
+
+define <vscale x 1 x iXLen> @lrint_nxv1bf16(<vscale x 1 x bfloat> %x) {
+; RV32-LABEL: lrint_nxv1bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfwcvtbf16.f.f.v v9, v8
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_nxv1bf16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-i32-NEXT: vfwcvtbf16.f.f.v v9, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v9
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv1bf16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-i64-NEXT: vfwcvtbf16.f.f.v v9, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v9
+; RV64-i64-NEXT: ret
+ %a = call <vscale x 1 x iXLen> @llvm.lrint.nxv1iXLen.nxv1bf16(<vscale x 1 x bfloat> %x)
+ ret <vscale x 1 x iXLen> %a
+}
+declare <vscale x 1 x iXLen> @llvm.lrint.nxv1iXLen.nxv1bf16(<vscale x 1 x bfloat>)
+
+define <vscale x 2 x iXLen> @lrint_nxv2bf16(<vscale x 2 x bfloat> %x) {
+; RV32-LABEL: lrint_nxv2bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; RV32-NEXT: vfwcvtbf16.f.f.v v9, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_nxv2bf16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; RV64-i32-NEXT: vfwcvtbf16.f.f.v v9, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v9
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv2bf16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; RV64-i64-NEXT: vfwcvtbf16.f.f.v v10, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v10
+; RV64-i64-NEXT: ret
+ %a = call <vscale x 2 x iXLen> @llvm.lrint.nxv2iXLen.nxv2bf16(<vscale x 2 x bfloat> %x)
+ ret <vscale x 2 x iXLen> %a
+}
+declare <vscale x 2 x iXLen> @llvm.lrint.nxv2iXLen.nxv2bf16(<vscale x 2 x bfloat>)
+
+define <vscale x 4 x iXLen> @lrint_nxv4bf16(<vscale x 4 x bfloat> %x) {
+; RV32-LABEL: lrint_nxv4bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vfwcvtbf16.f.f.v v10, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v10
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_nxv4bf16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-i32-NEXT: vfwcvtbf16.f.f.v v10, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v10
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv4bf16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-i64-NEXT: vfwcvtbf16.f.f.v v12, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v12
+; RV64-i64-NEXT: ret
+ %a = call <vscale x 4 x iXLen> @llvm.lrint.nxv4iXLen.nxv4bf16(<vscale x 4 x bfloat> %x)
+ ret <vscale x 4 x iXLen> %a
+}
+declare <vscale x 4 x iXLen> @llvm.lrint.nxv4iXLen.nxv4bf16(<vscale x 4 x bfloat>)
+
+define <vscale x 8 x iXLen> @lrint_nxv8bf16(<vscale x 8 x bfloat> %x) {
+; RV32-LABEL: lrint_nxv8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV32-NEXT: vfwcvtbf16.f.f.v v12, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v12
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_nxv8bf16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV64-i32-NEXT: vfwcvtbf16.f.f.v v12, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v12
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv8bf16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV64-i64-NEXT: vfwcvtbf16.f.f.v v16, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v16
+; RV64-i64-NEXT: ret
+ %a = call <vscale x 8 x iXLen> @llvm.lrint.nxv8iXLen.nxv8bf16(<vscale x 8 x bfloat> %x)
+ ret <vscale x 8 x iXLen> %a
+}
+declare <vscale x 8 x iXLen> @llvm.lrint.nxv8iXLen.nxv8bf16(<vscale x 8 x bfloat>)
+
+define <vscale x 16 x iXLen> @lrint_nxv16bf16(<vscale x 16 x bfloat> %x) {
+; RV32-LABEL: lrint_nxv16bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; RV32-NEXT: vfwcvtbf16.f.f.v v16, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v16
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_nxv16bf16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; RV64-i32-NEXT: vfwcvtbf16.f.f.v v16, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v16
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv16bf16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV64-i64-NEXT: vfwcvtbf16.f.f.v v16, v8
+; RV64-i64-NEXT: vfwcvtbf16.f.f.v v24, v10
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v16
+; RV64-i64-NEXT: vfwcvt.x.f.v v16, v24
+; RV64-i64-NEXT: ret
+ %a = call <vscale x 16 x iXLen> @llvm.lrint.nxv16iXLen.nxv16bf16(<vscale x 16 x bfloat> %x)
+ ret <vscale x 16 x iXLen> %a
+}
+declare <vscale x 16 x iXLen> @llvm.lrint.nxv16iXLen.nxv16bf16(<vscale x 16 x bfloat>)
+
+define <vscale x 32 x iXLen> @lrint_nxv32bf16(<vscale x 32 x bfloat> %x) {
+; RV32-LABEL: lrint_nxv32bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; RV32-NEXT: vfwcvtbf16.f.f.v v16, v8
+; RV32-NEXT: vfwcvtbf16.f.f.v v24, v12
+; RV32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v16
+; RV32-NEXT: vfcvt.x.f.v v16, v24
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_nxv32bf16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; RV64-i32-NEXT: vfwcvtbf16.f.f.v v16, v8
+; RV64-i32-NEXT: vfwcvtbf16.f.f.v v24, v12
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v16
+; RV64-i32-NEXT: vfcvt.x.f.v v16, v24
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv32bf16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: addi sp, sp, -64
+; RV64-i64-NEXT: .cfi_def_cfa_offset 64
+; RV64-i64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT: .cfi_offset ra, -8
+; RV64-i64-NEXT: .cfi_offset s0, -16
+; RV64-i64-NEXT: .cfi_offset s1, -24
+; RV64-i64-NEXT: csrr a1, vlenb
+; RV64-i64-NEXT: slli a1, a1, 5
+; RV64-i64-NEXT: sub sp, sp, a1
+; RV64-i64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 32 * vlenb
+; RV64-i64-NEXT: mv s0, a0
+; RV64-i64-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV64-i64-NEXT: vfwcvtbf16.f.f.v v16, v8
+; RV64-i64-NEXT: vfwcvtbf16.f.f.v v20, v10
+; RV64-i64-NEXT: vfwcvtbf16.f.f.v v8, v12
+; RV64-i64-NEXT: vfwcvtbf16.f.f.v v24, v14
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v0, v16
+; RV64-i64-NEXT: csrr a0, vlenb
+; RV64-i64-NEXT: slli a0, a0, 3
+; RV64-i64-NEXT: mv a1, a0
+; RV64-i64-NEXT: slli a0, a0, 1
+; RV64-i64-NEXT: add a0, a0, a1
+; RV64-i64-NEXT: add a0, sp, a0
+; RV64-i64-NEXT: addi a0, a0, 32
+; RV64-i64-NEXT: vs8r.v v0, (a0) # vscale x 64-byte Folded Spill
+; RV64-i64-NEXT: vfwcvt.x.f.v v0, v20
+; RV64-i64-NEXT: csrr a0, vlenb
+; RV64-i64-NEXT: slli a0, a0, 4
+; RV64-i64-NEXT: add a0, sp, a0
+; RV64-i64-NEXT: addi a0, a0, 32
+; RV64-i64-NEXT: vs8r.v v0, (a0) # vscale x 64-byte Folded Spill
+; RV64-i64-NEXT: vfwcvt.x.f.v v16, v8
+; RV64-i64-NEXT: csrr a0, vlenb
+; RV64-i64-NEXT: slli a0, a0, 3
+; RV64-i64-NEXT: add a0, sp, a0
+; RV64-i64-NEXT: addi a0, a0, 32
+; RV64-i64-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v24
+; RV64-i64-NEXT: addi a0, sp, 32
+; RV64-i64-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; RV64-i64-NEXT: csrr s1, vlenb
+; RV64-i64-NEXT: li a1, 24
+; RV64-i64-NEXT: mv a0, s1
+; RV64-i64-NEXT: call __muldi3
+; RV64-i64-NEXT: add a0, s0, a0
+; RV64-i64-NEXT: addi a1, sp, 32
+; RV64-i64-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
+; RV64-i64-NEXT: vs8r.v v8, (a0)
+; RV64-i64-NEXT: slli a0, s1, 4
+; RV64-i64-NEXT: slli s1, s1, 3
+; RV64-i64-NEXT: add a0, s0, a0
+; RV64-i64-NEXT: add s1, s0, s1
+; RV64-i64-NEXT: csrr a1, vlenb
+; RV64-i64-NEXT: slli a1, a1, 3
+; RV64-i64-NEXT: add a1, sp, a1
+; RV64-i64-NEXT: addi a1, a1, 32
+; RV64-i64-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
+; RV64-i64-NEXT: vs8r.v v8, (a0)
+; RV64-i64-NEXT: csrr a0, vlenb
+; RV64-i64-NEXT: slli a0, a0, 4
+; RV64-i64-NEXT: add a0, sp, a0
+; RV64-i64-NEXT: addi a0, a0, 32
+; RV64-i64-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; RV64-i64-NEXT: vs8r.v v8, (s1)
+; RV64-i64-NEXT: csrr a0, vlenb
+; RV64-i64-NEXT: slli a0, a0, 3
+; RV64-i64-NEXT: mv a1, a0
+; RV64-i64-NEXT: slli a0, a0, 1
+; RV64-i64-NEXT: add a0, a0, a1
+; RV64-i64-NEXT: add a0, sp, a0
+; RV64-i64-NEXT: addi a0, a0, 32
+; RV64-i64-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; RV64-i64-NEXT: vs8r.v v8, (s0)
+; RV64-i64-NEXT: csrr a0, vlenb
+; RV64-i64-NEXT: slli a0, a0, 5
+; RV64-i64-NEXT: add sp, sp, a0
+; RV64-i64-NEXT: .cfi_def_cfa sp, 64
+; RV64-i64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT: .cfi_restore ra
+; RV64-i64-NEXT: .cfi_restore s0
+; RV64-i64-NEXT: .cfi_restore s1
+; RV64-i64-NEXT: addi sp, sp, 64
+; RV64-i64-NEXT: .cfi_def_cfa_offset 0
+; RV64-i64-NEXT: ret
+ %a = call <vscale x 32 x iXLen> @llvm.lrint.nxv32iXLen.nxv32bf16(<vscale x 32 x bfloat> %x)
+ ret <vscale x 32 x iXLen> %a
+}
+declare <vscale x 32 x iXLen> @llvm.lrint.nxv32iXLen.nxv32bf16(<vscale x 32 x bfloat>)