[llvm] 947e072 - [CodeGen/RISCV] Add fixed-vector [l]lround tests (#145926)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Jun 26 11:43:25 PDT 2025
Author: Ramkumar Ramachandra
Date: 2025-06-26T19:43:22+01:00
New Revision: 947e072744ae8c01c1b33ee2891838c771571a8d
URL: https://github.com/llvm/llvm-project/commit/947e072744ae8c01c1b33ee2891838c771571a8d
DIFF: https://github.com/llvm/llvm-project/commit/947e072744ae8c01c1b33ee2891838c771571a8d.diff
LOG: [CodeGen/RISCV] Add fixed-vector [l]lround tests (#145926)
In preparation for unifying the codegen of [l]lrint and [l]lround, making
the latter go through custom lowering for vector codegen, add tests
showing the current unrolled fixed-vector codegen of [l]lround.
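For context, the new tests cover calls of the shape sketched below (the
<4 x i64>/<4 x float> combination is just one of the element counts and
types exercised, and the function name is a placeholder). With no custom
lowering in place yet, the vector call is unrolled lane by lane, as the
checks below show: RV64 converts each lane with fcvt.l.s using the rmm
rounding mode, while RV32 falls back to libcalls to llroundf/llround to
produce the i64 results.

; Minimal sketch of one tested combination; @example is a placeholder name.
define <4 x i64> @example(<4 x float> %x) {
  ; Rounds each lane to the nearest i64, ties away from zero.
  %a = call <4 x i64> @llvm.llround.v4i64.v4f32(<4 x float> %x)
  ret <4 x i64> %a
}
declare <4 x i64> @llvm.llround.v4i64.v4f32(<4 x float>)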
Added:
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll
new file mode 100644
index 0000000000000..b8ca7fd71cb93
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll
@@ -0,0 +1,1674 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v,+f,+d,+zvfh -target-abi=ilp32d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+f,+d,+zvfh -target-abi=lp64d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV64
+
+define <1 x i64> @llround_v1f16(<1 x half> %x) nounwind {
+; RV32-LABEL: llround_v1f16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 0(sp)
+; RV32-NEXT: sw a1, 4(sp)
+; RV32-NEXT: mv a0, sp
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v8, (a0), zero
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: llround_v1f16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vfmv.f.s fa5, v8
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64-NEXT: vmv.s.x v8, a0
+; RV64-NEXT: ret
+ %a = call <1 x i64> @llvm.llround.v1i64.v1f16(<1 x half> %x)
+ ret <1 x i64> %a
+}
+declare <1 x i64> @llvm.llround.v1i64.v1f16(<1 x half>)
+
+define <2 x i64> @llround_v2f16(<2 x half> %x) nounwind {
+; RV32-LABEL: llround_v2f16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -32
+; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: sub sp, sp, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vl1r.v v8, (a2) # vscale x 8-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: ret
+;
+; RV64-LABEL: llround_v2f16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-NEXT: vslidedown.vi v9, v8, 1
+; RV64-NEXT: vfmv.f.s fa5, v8
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v9
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, a1
+; RV64-NEXT: ret
+ %a = call <2 x i64> @llvm.llround.v2i64.v2f16(<2 x half> %x)
+ ret <2 x i64> %a
+}
+declare <2 x i64> @llvm.llround.v2i64.v2f16(<2 x half>)
+
+define <3 x i64> @llround_v3f16(<3 x half> %x) nounwind {
+; RV32-LABEL: llround_v3f16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -32
+; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: sub sp, sp, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: ret
+;
+; RV64-LABEL: llround_v3f16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; RV64-NEXT: vslidedown.vi v9, v8, 1
+; RV64-NEXT: vfmv.f.s fa5, v8
+; RV64-NEXT: vslidedown.vi v10, v8, 2
+; RV64-NEXT: vslidedown.vi v11, v8, 3
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v9
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v10
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64-NEXT: vfmv.f.s fa5, v11
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a1
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: ret
+ %a = call <3 x i64> @llvm.llround.v3i64.v3f16(<3 x half> %x)
+ ret <3 x i64> %a
+}
+declare <3 x i64> @llvm.llround.v3i64.v3f16(<3 x half>)
+
+define <4 x i64> @llround_v4f16(<4 x half> %x) nounwind {
+; RV32-LABEL: llround_v4f16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -32
+; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: sub sp, sp, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: ret
+;
+; RV64-LABEL: llround_v4f16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; RV64-NEXT: vslidedown.vi v9, v8, 1
+; RV64-NEXT: vfmv.f.s fa5, v8
+; RV64-NEXT: vslidedown.vi v10, v8, 2
+; RV64-NEXT: vslidedown.vi v11, v8, 3
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v9
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v10
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64-NEXT: vfmv.f.s fa5, v11
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a1
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: ret
+ %a = call <4 x i64> @llvm.llround.v4i64.v4f16(<4 x half> %x)
+ ret <4 x i64> %a
+}
+declare <4 x i64> @llvm.llround.v4i64.v4f16(<4 x half>)
+
+define <8 x i64> @llround_v8f16(<8 x half> %x) nounwind {
+; RV32-LABEL: llround_v8f16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -208
+; RV32-NEXT: sw ra, 204(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 200(sp) # 4-byte Folded Spill
+; RV32-NEXT: addi s0, sp, 208
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: sub sp, sp, a0
+; RV32-NEXT: andi sp, sp, -64
+; RV32-NEXT: addi a0, sp, 192
+; RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 64(sp)
+; RV32-NEXT: sw a1, 68(sp)
+; RV32-NEXT: addi a0, sp, 192
+; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 120(sp)
+; RV32-NEXT: sw a1, 124(sp)
+; RV32-NEXT: addi a0, sp, 192
+; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 112(sp)
+; RV32-NEXT: sw a1, 116(sp)
+; RV32-NEXT: addi a0, sp, 192
+; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 104(sp)
+; RV32-NEXT: sw a1, 108(sp)
+; RV32-NEXT: addi a0, sp, 192
+; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 96(sp)
+; RV32-NEXT: sw a1, 100(sp)
+; RV32-NEXT: addi a0, sp, 192
+; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 88(sp)
+; RV32-NEXT: sw a1, 92(sp)
+; RV32-NEXT: addi a0, sp, 192
+; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 80(sp)
+; RV32-NEXT: sw a1, 84(sp)
+; RV32-NEXT: addi a0, sp, 192
+; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 72(sp)
+; RV32-NEXT: sw a1, 76(sp)
+; RV32-NEXT: addi a0, sp, 64
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vle32.v v8, (a0)
+; RV32-NEXT: addi sp, s0, -208
+; RV32-NEXT: lw ra, 204(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 200(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 208
+; RV32-NEXT: ret
+;
+; RV64-LABEL: llround_v8f16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -128
+; RV64-NEXT: sd ra, 120(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 112(sp) # 8-byte Folded Spill
+; RV64-NEXT: addi s0, sp, 128
+; RV64-NEXT: andi sp, sp, -64
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vfmv.f.s fa5, v8
+; RV64-NEXT: vslidedown.vi v9, v8, 7
+; RV64-NEXT: vslidedown.vi v10, v8, 6
+; RV64-NEXT: vslidedown.vi v11, v8, 5
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v9
+; RV64-NEXT: vslidedown.vi v9, v8, 4
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v10
+; RV64-NEXT: vslidedown.vi v10, v8, 3
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s a2, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v11
+; RV64-NEXT: vslidedown.vi v11, v8, 2
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s a3, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v9
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s a4, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v10
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s a5, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v11
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s a6, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v8
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: sd a4, 32(sp)
+; RV64-NEXT: sd a3, 40(sp)
+; RV64-NEXT: sd a2, 48(sp)
+; RV64-NEXT: sd a1, 56(sp)
+; RV64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-NEXT: sd a0, 0(sp)
+; RV64-NEXT: sd a1, 8(sp)
+; RV64-NEXT: sd a6, 16(sp)
+; RV64-NEXT: sd a5, 24(sp)
+; RV64-NEXT: mv a0, sp
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: addi sp, s0, -128
+; RV64-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 112(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 128
+; RV64-NEXT: ret
+ %a = call <8 x i64> @llvm.llround.v8i64.v8f16(<8 x half> %x)
+ ret <8 x i64> %a
+}
+declare <8 x i64> @llvm.llround.v8i64.v8f16(<8 x half>)
+
+define <16 x i64> @llround_v16f16(<16 x half> %x) nounwind {
+; RV32-LABEL: llround_v16f16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -400
+; RV32-NEXT: sw ra, 396(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 392(sp) # 4-byte Folded Spill
+; RV32-NEXT: addi s0, sp, 400
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: sub sp, sp, a0
+; RV32-NEXT: andi sp, sp, -128
+; RV32-NEXT: addi a0, sp, 384
+; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 128(sp)
+; RV32-NEXT: sw a1, 132(sp)
+; RV32-NEXT: addi a0, sp, 384
+; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 15
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 248(sp)
+; RV32-NEXT: sw a1, 252(sp)
+; RV32-NEXT: addi a0, sp, 384
+; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 14
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 240(sp)
+; RV32-NEXT: sw a1, 244(sp)
+; RV32-NEXT: addi a0, sp, 384
+; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 13
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 232(sp)
+; RV32-NEXT: sw a1, 236(sp)
+; RV32-NEXT: addi a0, sp, 384
+; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 12
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 224(sp)
+; RV32-NEXT: sw a1, 228(sp)
+; RV32-NEXT: addi a0, sp, 384
+; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 11
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 216(sp)
+; RV32-NEXT: sw a1, 220(sp)
+; RV32-NEXT: addi a0, sp, 384
+; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 10
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 208(sp)
+; RV32-NEXT: sw a1, 212(sp)
+; RV32-NEXT: addi a0, sp, 384
+; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 9
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 200(sp)
+; RV32-NEXT: sw a1, 204(sp)
+; RV32-NEXT: addi a0, sp, 384
+; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 8
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 192(sp)
+; RV32-NEXT: sw a1, 196(sp)
+; RV32-NEXT: addi a0, sp, 384
+; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 184(sp)
+; RV32-NEXT: sw a1, 188(sp)
+; RV32-NEXT: addi a0, sp, 384
+; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 176(sp)
+; RV32-NEXT: sw a1, 180(sp)
+; RV32-NEXT: addi a0, sp, 384
+; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 168(sp)
+; RV32-NEXT: sw a1, 172(sp)
+; RV32-NEXT: addi a0, sp, 384
+; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 160(sp)
+; RV32-NEXT: sw a1, 164(sp)
+; RV32-NEXT: addi a0, sp, 384
+; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 152(sp)
+; RV32-NEXT: sw a1, 156(sp)
+; RV32-NEXT: addi a0, sp, 384
+; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 144(sp)
+; RV32-NEXT: sw a1, 148(sp)
+; RV32-NEXT: addi a0, sp, 384
+; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa0, fa5
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 136(sp)
+; RV32-NEXT: sw a1, 140(sp)
+; RV32-NEXT: li a0, 32
+; RV32-NEXT: addi a1, sp, 128
+; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV32-NEXT: vle32.v v8, (a1)
+; RV32-NEXT: addi sp, s0, -400
+; RV32-NEXT: lw ra, 396(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 392(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 400
+; RV32-NEXT: ret
+;
+; RV64-LABEL: llround_v16f16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -256
+; RV64-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
+; RV64-NEXT: addi s0, sp, 256
+; RV64-NEXT: andi sp, sp, -128
+; RV64-NEXT: vsetivli zero, 1, e16, m2, ta, ma
+; RV64-NEXT: vfmv.f.s fa5, v8
+; RV64-NEXT: vslidedown.vi v10, v8, 15
+; RV64-NEXT: vslidedown.vi v12, v8, 14
+; RV64-NEXT: vslidedown.vi v14, v8, 13
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v10
+; RV64-NEXT: vslidedown.vi v10, v8, 12
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v12
+; RV64-NEXT: vslidedown.vi v12, v8, 11
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s a2, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v14
+; RV64-NEXT: vslidedown.vi v14, v8, 10
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s a3, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v10
+; RV64-NEXT: vslidedown.vi v10, v8, 9
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s a5, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v12
+; RV64-NEXT: vslidedown.vi v12, v8, 8
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s a4, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v14
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v9, v8, 7
+; RV64-NEXT: vslidedown.vi v11, v8, 6
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s a6, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v10
+; RV64-NEXT: vslidedown.vi v10, v8, 5
+; RV64-NEXT: vslidedown.vi v13, v8, 4
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s a7, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v12
+; RV64-NEXT: vslidedown.vi v12, v8, 3
+; RV64-NEXT: vslidedown.vi v14, v8, 2
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s t0, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v9
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s t1, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v11
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s t2, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v10
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s t3, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v13
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: fcvt.l.s t4, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v12
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: sd a5, 96(sp)
+; RV64-NEXT: sd a3, 104(sp)
+; RV64-NEXT: sd a2, 112(sp)
+; RV64-NEXT: sd a1, 120(sp)
+; RV64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v14
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: sd t0, 64(sp)
+; RV64-NEXT: sd a7, 72(sp)
+; RV64-NEXT: sd a6, 80(sp)
+; RV64-NEXT: sd a4, 88(sp)
+; RV64-NEXT: fcvt.l.s a2, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v8
+; RV64-NEXT: fcvt.s.h fa5, fa5
+; RV64-NEXT: sd t4, 32(sp)
+; RV64-NEXT: sd t3, 40(sp)
+; RV64-NEXT: sd t2, 48(sp)
+; RV64-NEXT: sd t1, 56(sp)
+; RV64-NEXT: fcvt.l.s a3, fa5, rmm
+; RV64-NEXT: sd a0, 0(sp)
+; RV64-NEXT: sd a3, 8(sp)
+; RV64-NEXT: sd a2, 16(sp)
+; RV64-NEXT: sd a1, 24(sp)
+; RV64-NEXT: mv a0, sp
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: addi sp, s0, -256
+; RV64-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 256
+; RV64-NEXT: ret
+ %a = call <16 x i64> @llvm.llround.v16i64.v16f16(<16 x half> %x)
+ ret <16 x i64> %a
+}
+declare <16 x i64> @llvm.llround.v16i64.v16f16(<16 x half>)
+
+define <1 x i64> @llround_v1i64_v1f32(<1 x float> %x) nounwind {
+; RV32-LABEL: llround_v1i64_v1f32:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 0(sp)
+; RV32-NEXT: sw a1, 4(sp)
+; RV32-NEXT: mv a0, sp
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v8, (a0), zero
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: llround_v1i64_v1f32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT: vfmv.f.s fa5, v8
+; RV64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT: vmv.s.x v8, a0
+; RV64-NEXT: ret
+ %a = call <1 x i64> @llvm.llround.v1i64.v1f32(<1 x float> %x)
+ ret <1 x i64> %a
+}
+declare <1 x i64> @llvm.llround.v1i64.v1f32(<1 x float>)
+
+define <2 x i64> @llround_v2i64_v2f32(<2 x float> %x) nounwind {
+; RV32-LABEL: llround_v2i64_v2f32:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -32
+; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: sub sp, sp, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vl1r.v v8, (a2) # vscale x 8-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: ret
+;
+; RV64-LABEL: llround_v2i64_v2f32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-NEXT: vslidedown.vi v9, v8, 1
+; RV64-NEXT: vfmv.f.s fa5, v8
+; RV64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v9
+; RV64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, a1
+; RV64-NEXT: ret
+ %a = call <2 x i64> @llvm.llround.v2i64.v2f32(<2 x float> %x)
+ ret <2 x i64> %a
+}
+declare <2 x i64> @llvm.llround.v2i64.v2f32(<2 x float>)
+
+define <3 x i64> @llround_v3i64_v3f32(<3 x float> %x) nounwind {
+; RV32-LABEL: llround_v3i64_v3f32:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -32
+; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: sub sp, sp, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: ret
+;
+; RV64-LABEL: llround_v3i64_v3f32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v9, v8, 1
+; RV64-NEXT: vfmv.f.s fa5, v8
+; RV64-NEXT: vslidedown.vi v10, v8, 2
+; RV64-NEXT: vslidedown.vi v11, v8, 3
+; RV64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v9
+; RV64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v10
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64-NEXT: vfmv.f.s fa5, v11
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a1
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: ret
+ %a = call <3 x i64> @llvm.llround.v3i64.v3f32(<3 x float> %x)
+ ret <3 x i64> %a
+}
+declare <3 x i64> @llvm.llround.v3i64.v3f32(<3 x float>)
+
+define <4 x i64> @llround_v4i64_v4f32(<4 x float> %x) nounwind {
+; RV32-LABEL: llround_v4i64_v4f32:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -32
+; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: sub sp, sp, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: ret
+;
+; RV64-LABEL: llround_v4i64_v4f32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v9, v8, 1
+; RV64-NEXT: vfmv.f.s fa5, v8
+; RV64-NEXT: vslidedown.vi v10, v8, 2
+; RV64-NEXT: vslidedown.vi v11, v8, 3
+; RV64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v9
+; RV64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v10
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64-NEXT: vfmv.f.s fa5, v11
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a1
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: ret
+ %a = call <4 x i64> @llvm.llround.v4i64.v4f32(<4 x float> %x)
+ ret <4 x i64> %a
+}
+declare <4 x i64> @llvm.llround.v4i64.v4f32(<4 x float>)
+
+define <8 x i64> @llround_v8i64_v8f32(<8 x float> %x) nounwind {
+; RV32-LABEL: llround_v8i64_v8f32:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -208
+; RV32-NEXT: sw ra, 204(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 200(sp) # 4-byte Folded Spill
+; RV32-NEXT: addi s0, sp, 208
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: sub sp, sp, a0
+; RV32-NEXT: andi sp, sp, -64
+; RV32-NEXT: addi a0, sp, 192
+; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 64(sp)
+; RV32-NEXT: sw a1, 68(sp)
+; RV32-NEXT: addi a0, sp, 192
+; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 120(sp)
+; RV32-NEXT: sw a1, 124(sp)
+; RV32-NEXT: addi a0, sp, 192
+; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 112(sp)
+; RV32-NEXT: sw a1, 116(sp)
+; RV32-NEXT: addi a0, sp, 192
+; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 104(sp)
+; RV32-NEXT: sw a1, 108(sp)
+; RV32-NEXT: addi a0, sp, 192
+; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 96(sp)
+; RV32-NEXT: sw a1, 100(sp)
+; RV32-NEXT: addi a0, sp, 192
+; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 88(sp)
+; RV32-NEXT: sw a1, 92(sp)
+; RV32-NEXT: addi a0, sp, 192
+; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 80(sp)
+; RV32-NEXT: sw a1, 84(sp)
+; RV32-NEXT: addi a0, sp, 192
+; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 72(sp)
+; RV32-NEXT: sw a1, 76(sp)
+; RV32-NEXT: addi a0, sp, 64
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vle32.v v8, (a0)
+; RV32-NEXT: addi sp, s0, -208
+; RV32-NEXT: lw ra, 204(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 200(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 208
+; RV32-NEXT: ret
+;
+; RV64-LABEL: llround_v8i64_v8f32:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -128
+; RV64-NEXT: sd ra, 120(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 112(sp) # 8-byte Folded Spill
+; RV64-NEXT: addi s0, sp, 128
+; RV64-NEXT: andi sp, sp, -64
+; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64-NEXT: vfmv.f.s fa5, v8
+; RV64-NEXT: vslidedown.vi v10, v8, 7
+; RV64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v10
+; RV64-NEXT: vslidedown.vi v10, v8, 6
+; RV64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v10
+; RV64-NEXT: vslidedown.vi v10, v8, 5
+; RV64-NEXT: fcvt.l.s a2, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v10
+; RV64-NEXT: vslidedown.vi v10, v8, 4
+; RV64-NEXT: fcvt.l.s a3, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v10
+; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v9, v8, 3
+; RV64-NEXT: vslidedown.vi v10, v8, 2
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: fcvt.l.s a4, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v9
+; RV64-NEXT: fcvt.l.s a5, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v10
+; RV64-NEXT: fcvt.l.s a6, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v8
+; RV64-NEXT: sd a4, 32(sp)
+; RV64-NEXT: sd a3, 40(sp)
+; RV64-NEXT: sd a2, 48(sp)
+; RV64-NEXT: sd a1, 56(sp)
+; RV64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-NEXT: sd a0, 0(sp)
+; RV64-NEXT: sd a1, 8(sp)
+; RV64-NEXT: sd a6, 16(sp)
+; RV64-NEXT: sd a5, 24(sp)
+; RV64-NEXT: mv a0, sp
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: addi sp, s0, -128
+; RV64-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 112(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 128
+; RV64-NEXT: ret
+ %a = call <8 x i64> @llvm.llround.v8i64.v8f32(<8 x float> %x)
+ ret <8 x i64> %a
+}
+declare <8 x i64> @llvm.llround.v8i64.v8f32(<8 x float>)
+
+define <16 x i64> @llround_v16i64_v16f32(<16 x float> %x) nounwind {
+; RV32-LABEL: llround_v16i64_v16f32:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -400
+; RV32-NEXT: sw ra, 396(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 392(sp) # 4-byte Folded Spill
+; RV32-NEXT: addi s0, sp, 400
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: sub sp, sp, a0
+; RV32-NEXT: andi sp, sp, -128
+; RV32-NEXT: addi a0, sp, 384
+; RV32-NEXT: vs4r.v v8, (a0) # vscale x 32-byte Folded Spill
+; RV32-NEXT: addi a0, sp, 64
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vse32.v v8, (a0)
+; RV32-NEXT: flw fa0, 124(sp)
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 248(sp)
+; RV32-NEXT: sw a1, 252(sp)
+; RV32-NEXT: flw fa0, 120(sp)
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 240(sp)
+; RV32-NEXT: sw a1, 244(sp)
+; RV32-NEXT: flw fa0, 116(sp)
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 232(sp)
+; RV32-NEXT: sw a1, 236(sp)
+; RV32-NEXT: flw fa0, 112(sp)
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 224(sp)
+; RV32-NEXT: sw a1, 228(sp)
+; RV32-NEXT: flw fa0, 108(sp)
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 216(sp)
+; RV32-NEXT: sw a1, 220(sp)
+; RV32-NEXT: flw fa0, 104(sp)
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 208(sp)
+; RV32-NEXT: sw a1, 212(sp)
+; RV32-NEXT: flw fa0, 100(sp)
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 200(sp)
+; RV32-NEXT: sw a1, 204(sp)
+; RV32-NEXT: flw fa0, 96(sp)
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 192(sp)
+; RV32-NEXT: sw a1, 196(sp)
+; RV32-NEXT: addi a0, sp, 384
+; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 128(sp)
+; RV32-NEXT: sw a1, 132(sp)
+; RV32-NEXT: addi a0, sp, 384
+; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 152(sp)
+; RV32-NEXT: sw a1, 156(sp)
+; RV32-NEXT: addi a0, sp, 384
+; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 144(sp)
+; RV32-NEXT: sw a1, 148(sp)
+; RV32-NEXT: addi a0, sp, 384
+; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 136(sp)
+; RV32-NEXT: sw a1, 140(sp)
+; RV32-NEXT: addi a0, sp, 384
+; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 184(sp)
+; RV32-NEXT: sw a1, 188(sp)
+; RV32-NEXT: addi a0, sp, 384
+; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 176(sp)
+; RV32-NEXT: sw a1, 180(sp)
+; RV32-NEXT: addi a0, sp, 384
+; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 168(sp)
+; RV32-NEXT: sw a1, 172(sp)
+; RV32-NEXT: addi a0, sp, 384
+; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llroundf
+; RV32-NEXT: sw a0, 160(sp)
+; RV32-NEXT: sw a1, 164(sp)
+; RV32-NEXT: li a0, 32
+; RV32-NEXT: addi a1, sp, 128
+; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV32-NEXT: vle32.v v8, (a1)
+; RV32-NEXT: addi sp, s0, -400
+; RV32-NEXT: lw ra, 396(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 392(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 400
+; RV32-NEXT: ret
+;
+; RV64-LABEL: llround_v16i64_v16f32:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -384
+; RV64-NEXT: sd ra, 376(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 368(sp) # 8-byte Folded Spill
+; RV64-NEXT: addi s0, sp, 384
+; RV64-NEXT: andi sp, sp, -128
+; RV64-NEXT: addi a0, sp, 64
+; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64-NEXT: vse32.v v8, (a0)
+; RV64-NEXT: flw fa5, 124(sp)
+; RV64-NEXT: vfmv.f.s fa4, v8
+; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v10, v8, 3
+; RV64-NEXT: vslidedown.vi v12, v8, 2
+; RV64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-NEXT: sd a0, 248(sp)
+; RV64-NEXT: flw fa5, 120(sp)
+; RV64-NEXT: vslidedown.vi v13, v8, 1
+; RV64-NEXT: fcvt.l.s a0, fa4, rmm
+; RV64-NEXT: vfmv.f.s fa4, v10
+; RV64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-NEXT: sd a1, 240(sp)
+; RV64-NEXT: flw fa5, 116(sp)
+; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64-NEXT: vslidedown.vi v10, v8, 7
+; RV64-NEXT: fcvt.l.s a1, fa4, rmm
+; RV64-NEXT: vfmv.f.s fa4, v12
+; RV64-NEXT: fcvt.l.s a2, fa5, rmm
+; RV64-NEXT: sd a2, 232(sp)
+; RV64-NEXT: flw fa5, 112(sp)
+; RV64-NEXT: fcvt.l.s a2, fa4, rmm
+; RV64-NEXT: vfmv.f.s fa4, v13
+; RV64-NEXT: vslidedown.vi v12, v8, 6
+; RV64-NEXT: fcvt.l.s a3, fa5, rmm
+; RV64-NEXT: sd a3, 224(sp)
+; RV64-NEXT: flw fa5, 108(sp)
+; RV64-NEXT: fcvt.l.s a3, fa4, rmm
+; RV64-NEXT: vfmv.f.s fa4, v10
+; RV64-NEXT: vslidedown.vi v10, v8, 5
+; RV64-NEXT: fcvt.l.s a4, fa5, rmm
+; RV64-NEXT: sd a4, 216(sp)
+; RV64-NEXT: flw fa5, 104(sp)
+; RV64-NEXT: fcvt.l.s a4, fa4, rmm
+; RV64-NEXT: vfmv.f.s fa4, v12
+; RV64-NEXT: fcvt.l.s a5, fa4, rmm
+; RV64-NEXT: fcvt.l.s a6, fa5, rmm
+; RV64-NEXT: sd a6, 208(sp)
+; RV64-NEXT: flw fa5, 100(sp)
+; RV64-NEXT: vfmv.f.s fa4, v10
+; RV64-NEXT: fcvt.l.s a6, fa4, rmm
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: fcvt.l.s a7, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v8
+; RV64-NEXT: sd a7, 200(sp)
+; RV64-NEXT: fcvt.l.s a7, fa5, rmm
+; RV64-NEXT: flw fa5, 96(sp)
+; RV64-NEXT: sd a0, 128(sp)
+; RV64-NEXT: sd a3, 136(sp)
+; RV64-NEXT: sd a2, 144(sp)
+; RV64-NEXT: sd a1, 152(sp)
+; RV64-NEXT: sd a7, 160(sp)
+; RV64-NEXT: sd a6, 168(sp)
+; RV64-NEXT: sd a5, 176(sp)
+; RV64-NEXT: sd a4, 184(sp)
+; RV64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-NEXT: sd a0, 192(sp)
+; RV64-NEXT: addi a0, sp, 128
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: addi sp, s0, -384
+; RV64-NEXT: ld ra, 376(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 368(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 384
+; RV64-NEXT: ret
+ %a = call <16 x i64> @llvm.llround.v16i64.v16f32(<16 x float> %x)
+ ret <16 x i64> %a
+}
+declare <16 x i64> @llvm.llround.v16i64.v16f32(<16 x float>)
+
+define <1 x i64> @llround_v1i64_v1f64(<1 x double> %x) nounwind {
+; RV32-LABEL: llround_v1i64_v1f64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llround
+; RV32-NEXT: sw a0, 0(sp)
+; RV32-NEXT: sw a1, 4(sp)
+; RV32-NEXT: mv a0, sp
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v8, (a0), zero
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: llround_v1i64_v1f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT: vfmv.f.s fa5, v8
+; RV64-NEXT: fcvt.l.d a0, fa5, rmm
+; RV64-NEXT: vmv.s.x v8, a0
+; RV64-NEXT: ret
+ %a = call <1 x i64> @llvm.llround.v1i64.v1f64(<1 x double> %x)
+ ret <1 x i64> %a
+}
+declare <1 x i64> @llvm.llround.v1i64.v1f64(<1 x double>)
+
+define <2 x i64> @llround_v2i64_v2f64(<2 x double> %x) nounwind {
+; RV32-LABEL: llround_v2i64_v2f64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -32
+; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: sub sp, sp, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llround
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llround
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vl1r.v v8, (a2) # vscale x 8-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: ret
+;
+; RV64-LABEL: llround_v2i64_v2f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v9, v8, 1
+; RV64-NEXT: vfmv.f.s fa5, v8
+; RV64-NEXT: fcvt.l.d a0, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v9
+; RV64-NEXT: fcvt.l.d a1, fa5, rmm
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, a1
+; RV64-NEXT: ret
+ %a = call <2 x i64> @llvm.llround.v2i64.v2f64(<2 x double> %x)
+ ret <2 x i64> %a
+}
+declare <2 x i64> @llvm.llround.v2i64.v2f64(<2 x double>)
+
+define <4 x i64> @llround_v4i64_v4f64(<4 x double> %x) nounwind {
+; RV32-LABEL: llround_v4i64_v4f64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -32
+; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: sub sp, sp, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llround
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llround
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llround
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llround
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: ret
+;
+; RV64-LABEL: llround_v4i64_v4f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v12, v8, 1
+; RV64-NEXT: vfmv.f.s fa5, v8
+; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT: vslidedown.vi v10, v8, 2
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: fcvt.l.d a0, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v12
+; RV64-NEXT: fcvt.l.d a1, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v10
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vmv.v.x v10, a0
+; RV64-NEXT: fcvt.l.d a0, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v8
+; RV64-NEXT: vslide1down.vx v8, v10, a1
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: fcvt.l.d a0, fa5, rmm
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: ret
+ %a = call <4 x i64> @llvm.llround.v4i64.v4f64(<4 x double> %x)
+ ret <4 x i64> %a
+}
+declare <4 x i64> @llvm.llround.v4i64.v4f64(<4 x double>)
+
+define <8 x i64> @llround_v8i64_v8f64(<8 x double> %x) nounwind {
+; RV32-LABEL: llround_v8i64_v8f64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -272
+; RV32-NEXT: sw ra, 268(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 264(sp) # 4-byte Folded Spill
+; RV32-NEXT: addi s0, sp, 272
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: sub sp, sp, a0
+; RV32-NEXT: andi sp, sp, -64
+; RV32-NEXT: addi a0, sp, 256
+; RV32-NEXT: vs4r.v v8, (a0) # vscale x 32-byte Folded Spill
+; RV32-NEXT: addi a0, sp, 64
+; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT: vse64.v v8, (a0)
+; RV32-NEXT: fld fa0, 120(sp)
+; RV32-NEXT: call llround
+; RV32-NEXT: sw a0, 184(sp)
+; RV32-NEXT: sw a1, 188(sp)
+; RV32-NEXT: fld fa0, 112(sp)
+; RV32-NEXT: call llround
+; RV32-NEXT: sw a0, 176(sp)
+; RV32-NEXT: sw a1, 180(sp)
+; RV32-NEXT: fld fa0, 104(sp)
+; RV32-NEXT: call llround
+; RV32-NEXT: sw a0, 168(sp)
+; RV32-NEXT: sw a1, 172(sp)
+; RV32-NEXT: fld fa0, 96(sp)
+; RV32-NEXT: call llround
+; RV32-NEXT: sw a0, 160(sp)
+; RV32-NEXT: sw a1, 164(sp)
+; RV32-NEXT: addi a0, sp, 256
+; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llround
+; RV32-NEXT: sw a0, 128(sp)
+; RV32-NEXT: sw a1, 132(sp)
+; RV32-NEXT: addi a0, sp, 256
+; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llround
+; RV32-NEXT: sw a0, 136(sp)
+; RV32-NEXT: sw a1, 140(sp)
+; RV32-NEXT: addi a0, sp, 256
+; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llround
+; RV32-NEXT: sw a0, 152(sp)
+; RV32-NEXT: sw a1, 156(sp)
+; RV32-NEXT: addi a0, sp, 256
+; RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call llround
+; RV32-NEXT: sw a0, 144(sp)
+; RV32-NEXT: sw a1, 148(sp)
+; RV32-NEXT: addi a0, sp, 128
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vle32.v v8, (a0)
+; RV32-NEXT: addi sp, s0, -272
+; RV32-NEXT: lw ra, 268(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 264(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 272
+; RV32-NEXT: ret
+;
+; RV64-LABEL: llround_v8i64_v8f64:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -192
+; RV64-NEXT: sd ra, 184(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 176(sp) # 8-byte Folded Spill
+; RV64-NEXT: addi s0, sp, 192
+; RV64-NEXT: andi sp, sp, -64
+; RV64-NEXT: mv a0, sp
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT: vse64.v v8, (a0)
+; RV64-NEXT: fld fa5, 56(sp)
+; RV64-NEXT: vfmv.f.s fa4, v8
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v10, v8, 1
+; RV64-NEXT: fcvt.l.d a0, fa4, rmm
+; RV64-NEXT: fcvt.l.d a1, fa5, rmm
+; RV64-NEXT: sd a1, 120(sp)
+; RV64-NEXT: fld fa5, 48(sp)
+; RV64-NEXT: vfmv.f.s fa4, v10
+; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT: vslidedown.vi v10, v8, 3
+; RV64-NEXT: fcvt.l.d a1, fa4, rmm
+; RV64-NEXT: fcvt.l.d a2, fa5, rmm
+; RV64-NEXT: sd a2, 112(sp)
+; RV64-NEXT: fld fa5, 40(sp)
+; RV64-NEXT: vfmv.f.s fa4, v10
+; RV64-NEXT: fcvt.l.d a2, fa4, rmm
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: fcvt.l.d a3, fa5, rmm
+; RV64-NEXT: vfmv.f.s fa5, v8
+; RV64-NEXT: sd a3, 104(sp)
+; RV64-NEXT: fcvt.l.d a3, fa5, rmm
+; RV64-NEXT: fld fa5, 32(sp)
+; RV64-NEXT: sd a0, 64(sp)
+; RV64-NEXT: sd a1, 72(sp)
+; RV64-NEXT: sd a3, 80(sp)
+; RV64-NEXT: sd a2, 88(sp)
+; RV64-NEXT: fcvt.l.d a0, fa5, rmm
+; RV64-NEXT: sd a0, 96(sp)
+; RV64-NEXT: addi a0, sp, 64
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: addi sp, s0, -192
+; RV64-NEXT: ld ra, 184(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 176(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 192
+; RV64-NEXT: ret
+ %a = call <8 x i64> @llvm.llround.v8i64.v8f64(<8 x double> %x)
+ ret <8 x i64> %a
+}
+declare <8 x i64> @llvm.llround.v8i64.v8f64(<8 x double>)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll
new file mode 100644
index 0000000000000..8289a8b8f833a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll
@@ -0,0 +1,1594 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+f,+d,+zvfh \
+; RUN: -target-abi=ilp32d -verify-machineinstrs | FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d,+zvfh \
+; RUN: -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64-i32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d,+zvfh \
+; RUN: -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64-i64
+
+define <1 x iXLen> @lround_v1f16(<1 x half> %x) nounwind {
+; RV32-LABEL: lround_v1f16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vmv.s.x v8, a0
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lround_v1f16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-i32-NEXT: vfmv.f.s fa5, v8
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i32-NEXT: vmv.s.x v8, a0
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lround_v1f16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-i64-NEXT: vfmv.f.s fa5, v8
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64-i64-NEXT: vmv.s.x v8, a0
+; RV64-i64-NEXT: ret
+ %a = call <1 x iXLen> @llvm.lround.v1iXLen.v1f16(<1 x half> %x)
+ ret <1 x iXLen> %a
+}
+declare <1 x iXLen> @llvm.lround.v1iXLen.v1f16(<1 x half>)
+
+define <2 x iXLen> @lround_v2f16(<2 x half> %x) nounwind {
+; RV32-LABEL: lround_v2f16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT: vslidedown.vi v9, v8, 1
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v9
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lround_v2f16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-i32-NEXT: vslidedown.vi v9, v8, 1
+; RV64-i32-NEXT: vfmv.f.s fa5, v8
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v9
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV64-i32-NEXT: vmv.v.x v8, a0
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a1
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lround_v2f16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-i64-NEXT: vslidedown.vi v9, v8, 1
+; RV64-i64-NEXT: vfmv.f.s fa5, v8
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v9
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64-i64-NEXT: vmv.v.x v8, a0
+; RV64-i64-NEXT: vslide1down.vx v8, v8, a1
+; RV64-i64-NEXT: ret
+ %a = call <2 x iXLen> @llvm.lround.v2iXLen.v2f16(<2 x half> %x)
+ ret <2 x iXLen> %a
+}
+declare <2 x iXLen> @llvm.lround.v2iXLen.v2f16(<2 x half>)
+
+define <3 x iXLen> @lround_v3f16(<3 x half> %x) nounwind {
+; RV32-LABEL: lround_v3f16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; RV32-NEXT: vslidedown.vi v9, v8, 1
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: vslidedown.vi v10, v8, 2
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v9
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v10
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v9, a0
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v9, a1
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lround_v3f16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; RV64-i32-NEXT: vslidedown.vi v9, v8, 1
+; RV64-i32-NEXT: vfmv.f.s fa5, v8
+; RV64-i32-NEXT: vslidedown.vi v10, v8, 2
+; RV64-i32-NEXT: vslidedown.vi v8, v8, 3
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v9
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v10
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV64-i32-NEXT: vmv.v.x v9, a0
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64-i32-NEXT: vfmv.f.s fa5, v8
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i32-NEXT: vslide1down.vx v8, v9, a1
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lround_v3f16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; RV64-i64-NEXT: vslidedown.vi v9, v8, 1
+; RV64-i64-NEXT: vfmv.f.s fa5, v8
+; RV64-i64-NEXT: vslidedown.vi v10, v8, 2
+; RV64-i64-NEXT: vslidedown.vi v11, v8, 3
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v9
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v10
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-i64-NEXT: vmv.v.x v8, a0
+; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64-i64-NEXT: vfmv.f.s fa5, v11
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-i64-NEXT: vslide1down.vx v8, v8, a1
+; RV64-i64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i64-NEXT: ret
+ %a = call <3 x iXLen> @llvm.lround.v3iXLen.v3f16(<3 x half> %x)
+ ret <3 x iXLen> %a
+}
+declare <3 x iXLen> @llvm.lround.v3iXLen.v3f16(<3 x half>)
+
+define <4 x iXLen> @lround_v4f16(<4 x half> %x) nounwind {
+; RV32-LABEL: lround_v4f16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; RV32-NEXT: vslidedown.vi v9, v8, 1
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: vslidedown.vi v10, v8, 2
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v9
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v10
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v9, a0
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v9, a1
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lround_v4f16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; RV64-i32-NEXT: vslidedown.vi v9, v8, 1
+; RV64-i32-NEXT: vfmv.f.s fa5, v8
+; RV64-i32-NEXT: vslidedown.vi v10, v8, 2
+; RV64-i32-NEXT: vslidedown.vi v8, v8, 3
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v9
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v10
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV64-i32-NEXT: vmv.v.x v9, a0
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64-i32-NEXT: vfmv.f.s fa5, v8
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i32-NEXT: vslide1down.vx v8, v9, a1
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lround_v4f16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; RV64-i64-NEXT: vslidedown.vi v9, v8, 1
+; RV64-i64-NEXT: vfmv.f.s fa5, v8
+; RV64-i64-NEXT: vslidedown.vi v10, v8, 2
+; RV64-i64-NEXT: vslidedown.vi v11, v8, 3
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v9
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v10
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-i64-NEXT: vmv.v.x v8, a0
+; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64-i64-NEXT: vfmv.f.s fa5, v11
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-i64-NEXT: vslide1down.vx v8, v8, a1
+; RV64-i64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i64-NEXT: ret
+ %a = call <4 x iXLen> @llvm.lround.v4iXLen.v4f16(<4 x half> %x)
+ ret <4 x iXLen> %a
+}
+declare <4 x iXLen> @llvm.lround.v4iXLen.v4f16(<4 x half>)
+
+define <8 x iXLen> @lround_v8f16(<8 x half> %x) nounwind {
+; RV32-LABEL: lround_v8f16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v9, v8, 1
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: vslidedown.vi v10, v8, 2
+; RV32-NEXT: vslidedown.vi v12, v8, 3
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v9
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v10
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v10, a0
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT: vfmv.f.s fa5, v12
+; RV32-NEXT: vslidedown.vi v9, v8, 4
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v10, v10, a1
+; RV32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT: vfmv.f.s fa5, v9
+; RV32-NEXT: vslidedown.vi v9, v8, 5
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v10, v10, a0
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT: vfmv.f.s fa5, v9
+; RV32-NEXT: vslidedown.vi v12, v8, 6
+; RV32-NEXT: vslidedown.vi v13, v8, 7
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v10, a1
+; RV32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT: vfmv.f.s fa5, v12
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT: vfmv.f.s fa5, v13
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lround_v8f16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-i32-NEXT: vslidedown.vi v9, v8, 1
+; RV64-i32-NEXT: vfmv.f.s fa5, v8
+; RV64-i32-NEXT: vslidedown.vi v10, v8, 2
+; RV64-i32-NEXT: vslidedown.vi v12, v8, 3
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v9
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v10
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV64-i32-NEXT: vmv.v.x v10, a0
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV64-i32-NEXT: vfmv.f.s fa5, v12
+; RV64-i32-NEXT: vslidedown.vi v9, v8, 4
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i32-NEXT: vslide1down.vx v10, v10, a1
+; RV64-i32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV64-i32-NEXT: vfmv.f.s fa5, v9
+; RV64-i32-NEXT: vslidedown.vi v9, v8, 5
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i32-NEXT: vslide1down.vx v10, v10, a0
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV64-i32-NEXT: vfmv.f.s fa5, v9
+; RV64-i32-NEXT: vslidedown.vi v12, v8, 6
+; RV64-i32-NEXT: vslidedown.vi v13, v8, 7
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i32-NEXT: vslide1down.vx v8, v10, a1
+; RV64-i32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV64-i32-NEXT: vfmv.f.s fa5, v12
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV64-i32-NEXT: vfmv.f.s fa5, v13
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a1
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lround_v8f16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: addi sp, sp, -128
+; RV64-i64-NEXT: sd ra, 120(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT: sd s0, 112(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT: addi s0, sp, 128
+; RV64-i64-NEXT: andi sp, sp, -64
+; RV64-i64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-i64-NEXT: vfmv.f.s fa5, v8
+; RV64-i64-NEXT: vslidedown.vi v9, v8, 7
+; RV64-i64-NEXT: vslidedown.vi v10, v8, 6
+; RV64-i64-NEXT: vslidedown.vi v11, v8, 5
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v9
+; RV64-i64-NEXT: vslidedown.vi v9, v8, 4
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v10
+; RV64-i64-NEXT: vslidedown.vi v10, v8, 3
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s a2, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v11
+; RV64-i64-NEXT: vslidedown.vi v11, v8, 2
+; RV64-i64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s a3, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v9
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s a4, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v10
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s a5, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v11
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s a6, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v8
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: sd a4, 32(sp)
+; RV64-i64-NEXT: sd a3, 40(sp)
+; RV64-i64-NEXT: sd a2, 48(sp)
+; RV64-i64-NEXT: sd a1, 56(sp)
+; RV64-i64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT: sd a0, 0(sp)
+; RV64-i64-NEXT: sd a1, 8(sp)
+; RV64-i64-NEXT: sd a6, 16(sp)
+; RV64-i64-NEXT: sd a5, 24(sp)
+; RV64-i64-NEXT: mv a0, sp
+; RV64-i64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV64-i64-NEXT: vle64.v v8, (a0)
+; RV64-i64-NEXT: addi sp, s0, -128
+; RV64-i64-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT: ld s0, 112(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT: addi sp, sp, 128
+; RV64-i64-NEXT: ret
+ %a = call <8 x iXLen> @llvm.lround.v8iXLen.v8f16(<8 x half> %x)
+ ret <8 x iXLen> %a
+}
+declare <8 x iXLen> @llvm.lround.v8iXLen.v8f16(<8 x half>)
+
+define <16 x iXLen> @lround_v16f16(<16 x half> %x) nounwind {
+; RV32-LABEL: lround_v16f16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -128
+; RV32-NEXT: sw ra, 124(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 120(sp) # 4-byte Folded Spill
+; RV32-NEXT: addi s0, sp, 128
+; RV32-NEXT: andi sp, sp, -64
+; RV32-NEXT: vsetivli zero, 1, e16, m2, ta, ma
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: vslidedown.vi v10, v8, 15
+; RV32-NEXT: vslidedown.vi v12, v8, 14
+; RV32-NEXT: vslidedown.vi v14, v8, 13
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v10
+; RV32-NEXT: vslidedown.vi v10, v8, 12
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v12
+; RV32-NEXT: vslidedown.vi v12, v8, 11
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: fcvt.w.s a2, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v14
+; RV32-NEXT: vslidedown.vi v14, v8, 10
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: fcvt.w.s a3, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v10
+; RV32-NEXT: vslidedown.vi v10, v8, 9
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: fcvt.w.s a5, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v12
+; RV32-NEXT: vslidedown.vi v12, v8, 8
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: fcvt.w.s a4, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v14
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v9, v8, 7
+; RV32-NEXT: vslidedown.vi v11, v8, 6
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: fcvt.w.s a6, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v10
+; RV32-NEXT: vslidedown.vi v10, v8, 5
+; RV32-NEXT: vslidedown.vi v13, v8, 4
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: fcvt.w.s a7, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v12
+; RV32-NEXT: vslidedown.vi v12, v8, 3
+; RV32-NEXT: vslidedown.vi v14, v8, 2
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: fcvt.w.s t0, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v9
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: fcvt.w.s t1, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v11
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: fcvt.w.s t2, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v10
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: fcvt.w.s t3, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v13
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: fcvt.w.s t4, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v12
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: sw a5, 48(sp)
+; RV32-NEXT: sw a3, 52(sp)
+; RV32-NEXT: sw a2, 56(sp)
+; RV32-NEXT: sw a1, 60(sp)
+; RV32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v14
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: sw t0, 32(sp)
+; RV32-NEXT: sw a7, 36(sp)
+; RV32-NEXT: sw a6, 40(sp)
+; RV32-NEXT: sw a4, 44(sp)
+; RV32-NEXT: fcvt.w.s a2, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.s.h fa5, fa5
+; RV32-NEXT: sw t4, 16(sp)
+; RV32-NEXT: sw t3, 20(sp)
+; RV32-NEXT: sw t2, 24(sp)
+; RV32-NEXT: sw t1, 28(sp)
+; RV32-NEXT: fcvt.w.s a3, fa5, rmm
+; RV32-NEXT: sw a0, 0(sp)
+; RV32-NEXT: sw a3, 4(sp)
+; RV32-NEXT: sw a2, 8(sp)
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: mv a0, sp
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vle32.v v8, (a0)
+; RV32-NEXT: addi sp, s0, -128
+; RV32-NEXT: lw ra, 124(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 120(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 128
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lround_v16f16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: addi sp, sp, -128
+; RV64-i32-NEXT: sd ra, 120(sp) # 8-byte Folded Spill
+; RV64-i32-NEXT: sd s0, 112(sp) # 8-byte Folded Spill
+; RV64-i32-NEXT: addi s0, sp, 128
+; RV64-i32-NEXT: andi sp, sp, -64
+; RV64-i32-NEXT: vsetivli zero, 1, e16, m2, ta, ma
+; RV64-i32-NEXT: vfmv.f.s fa5, v8
+; RV64-i32-NEXT: vslidedown.vi v10, v8, 15
+; RV64-i32-NEXT: vslidedown.vi v12, v8, 14
+; RV64-i32-NEXT: vslidedown.vi v14, v8, 13
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v10
+; RV64-i32-NEXT: vslidedown.vi v10, v8, 12
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v12
+; RV64-i32-NEXT: vslidedown.vi v12, v8, 11
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: fcvt.w.s a2, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v14
+; RV64-i32-NEXT: vslidedown.vi v14, v8, 10
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: fcvt.w.s a3, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v10
+; RV64-i32-NEXT: vslidedown.vi v10, v8, 9
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: fcvt.w.s a5, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v12
+; RV64-i32-NEXT: vslidedown.vi v12, v8, 8
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: fcvt.w.s a4, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v14
+; RV64-i32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-i32-NEXT: vslidedown.vi v9, v8, 7
+; RV64-i32-NEXT: vslidedown.vi v11, v8, 6
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: fcvt.w.s a6, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v10
+; RV64-i32-NEXT: vslidedown.vi v10, v8, 5
+; RV64-i32-NEXT: vslidedown.vi v13, v8, 4
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: fcvt.w.s a7, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v12
+; RV64-i32-NEXT: vslidedown.vi v12, v8, 3
+; RV64-i32-NEXT: vslidedown.vi v14, v8, 2
+; RV64-i32-NEXT: vslidedown.vi v8, v8, 1
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: fcvt.w.s t0, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v9
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: fcvt.w.s t1, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v11
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: fcvt.w.s t2, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v10
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: fcvt.w.s t3, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v13
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: fcvt.w.s t4, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v12
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: sw a5, 48(sp)
+; RV64-i32-NEXT: sw a3, 52(sp)
+; RV64-i32-NEXT: sw a2, 56(sp)
+; RV64-i32-NEXT: sw a1, 60(sp)
+; RV64-i32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v14
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: sw t0, 32(sp)
+; RV64-i32-NEXT: sw a7, 36(sp)
+; RV64-i32-NEXT: sw a6, 40(sp)
+; RV64-i32-NEXT: sw a4, 44(sp)
+; RV64-i32-NEXT: fcvt.w.s a2, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v8
+; RV64-i32-NEXT: fcvt.s.h fa5, fa5
+; RV64-i32-NEXT: sw t4, 16(sp)
+; RV64-i32-NEXT: sw t3, 20(sp)
+; RV64-i32-NEXT: sw t2, 24(sp)
+; RV64-i32-NEXT: sw t1, 28(sp)
+; RV64-i32-NEXT: fcvt.w.s a3, fa5, rmm
+; RV64-i32-NEXT: sw a0, 0(sp)
+; RV64-i32-NEXT: sw a3, 4(sp)
+; RV64-i32-NEXT: sw a2, 8(sp)
+; RV64-i32-NEXT: sw a1, 12(sp)
+; RV64-i32-NEXT: mv a0, sp
+; RV64-i32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64-i32-NEXT: vle32.v v8, (a0)
+; RV64-i32-NEXT: addi sp, s0, -128
+; RV64-i32-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
+; RV64-i32-NEXT: ld s0, 112(sp) # 8-byte Folded Reload
+; RV64-i32-NEXT: addi sp, sp, 128
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lround_v16f16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: addi sp, sp, -256
+; RV64-i64-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT: addi s0, sp, 256
+; RV64-i64-NEXT: andi sp, sp, -128
+; RV64-i64-NEXT: vsetivli zero, 1, e16, m2, ta, ma
+; RV64-i64-NEXT: vfmv.f.s fa5, v8
+; RV64-i64-NEXT: vslidedown.vi v10, v8, 15
+; RV64-i64-NEXT: vslidedown.vi v12, v8, 14
+; RV64-i64-NEXT: vslidedown.vi v14, v8, 13
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v10
+; RV64-i64-NEXT: vslidedown.vi v10, v8, 12
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v12
+; RV64-i64-NEXT: vslidedown.vi v12, v8, 11
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s a2, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v14
+; RV64-i64-NEXT: vslidedown.vi v14, v8, 10
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s a3, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v10
+; RV64-i64-NEXT: vslidedown.vi v10, v8, 9
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s a5, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v12
+; RV64-i64-NEXT: vslidedown.vi v12, v8, 8
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s a4, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v14
+; RV64-i64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-i64-NEXT: vslidedown.vi v9, v8, 7
+; RV64-i64-NEXT: vslidedown.vi v11, v8, 6
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s a6, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v10
+; RV64-i64-NEXT: vslidedown.vi v10, v8, 5
+; RV64-i64-NEXT: vslidedown.vi v13, v8, 4
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s a7, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v12
+; RV64-i64-NEXT: vslidedown.vi v12, v8, 3
+; RV64-i64-NEXT: vslidedown.vi v14, v8, 2
+; RV64-i64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s t0, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v9
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s t1, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v11
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s t2, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v10
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s t3, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v13
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: fcvt.l.s t4, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v12
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: sd a5, 96(sp)
+; RV64-i64-NEXT: sd a3, 104(sp)
+; RV64-i64-NEXT: sd a2, 112(sp)
+; RV64-i64-NEXT: sd a1, 120(sp)
+; RV64-i64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v14
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: sd t0, 64(sp)
+; RV64-i64-NEXT: sd a7, 72(sp)
+; RV64-i64-NEXT: sd a6, 80(sp)
+; RV64-i64-NEXT: sd a4, 88(sp)
+; RV64-i64-NEXT: fcvt.l.s a2, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v8
+; RV64-i64-NEXT: fcvt.s.h fa5, fa5
+; RV64-i64-NEXT: sd t4, 32(sp)
+; RV64-i64-NEXT: sd t3, 40(sp)
+; RV64-i64-NEXT: sd t2, 48(sp)
+; RV64-i64-NEXT: sd t1, 56(sp)
+; RV64-i64-NEXT: fcvt.l.s a3, fa5, rmm
+; RV64-i64-NEXT: sd a0, 0(sp)
+; RV64-i64-NEXT: sd a3, 8(sp)
+; RV64-i64-NEXT: sd a2, 16(sp)
+; RV64-i64-NEXT: sd a1, 24(sp)
+; RV64-i64-NEXT: mv a0, sp
+; RV64-i64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-i64-NEXT: vle64.v v8, (a0)
+; RV64-i64-NEXT: addi sp, s0, -256
+; RV64-i64-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT: addi sp, sp, 256
+; RV64-i64-NEXT: ret
+ %a = call <16 x iXLen> @llvm.lround.v16iXLen.v16f16(<16 x half> %x)
+ ret <16 x iXLen> %a
+}
+declare <16 x iXLen> @llvm.lround.v16iXLen.v16f16(<16 x half>)
+
+define <1 x iXLen> @lround_v1f32(<1 x float> %x) nounwind {
+; RV32-LABEL: lround_v1f32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vmv.s.x v8, a0
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lround_v1f32:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i32-NEXT: vfmv.f.s fa5, v8
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vmv.s.x v8, a0
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lround_v1f32:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i64-NEXT: vfmv.f.s fa5, v8
+; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-i64-NEXT: vmv.s.x v8, a0
+; RV64-i64-NEXT: ret
+ %a = call <1 x iXLen> @llvm.lround.v1iXLen.v1f32(<1 x float> %x)
+ ret <1 x iXLen> %a
+}
+declare <1 x iXLen> @llvm.lround.v1iXLen.v1f32(<1 x float>)
+
+define <2 x iXLen> @lround_v2f32(<2 x float> %x) nounwind {
+; RV32-LABEL: lround_v2f32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT: vslidedown.vi v9, v8, 1
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v9
+; RV32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lround_v2f32:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV64-i32-NEXT: vslidedown.vi v9, v8, 1
+; RV64-i32-NEXT: vfmv.f.s fa5, v8
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v9
+; RV64-i32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT: vmv.v.x v8, a0
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a1
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lround_v2f32:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-i64-NEXT: vslidedown.vi v9, v8, 1
+; RV64-i64-NEXT: vfmv.f.s fa5, v8
+; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v9
+; RV64-i64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64-i64-NEXT: vmv.v.x v8, a0
+; RV64-i64-NEXT: vslide1down.vx v8, v8, a1
+; RV64-i64-NEXT: ret
+ %a = call <2 x iXLen> @llvm.lround.v2iXLen.v2f32(<2 x float> %x)
+ ret <2 x iXLen> %a
+}
+declare <2 x iXLen> @llvm.lround.v2iXLen.v2f32(<2 x float>)
+
+define <3 x iXLen> @lround_v3f32(<3 x float> %x) nounwind {
+; RV32-LABEL: lround_v3f32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v9, v8, 1
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: vslidedown.vi v10, v8, 2
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v9
+; RV32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v10
+; RV32-NEXT: vmv.v.x v9, a0
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: vslide1down.vx v8, v9, a1
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lround_v3f32:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV64-i32-NEXT: vslidedown.vi v9, v8, 1
+; RV64-i32-NEXT: vfmv.f.s fa5, v8
+; RV64-i32-NEXT: vslidedown.vi v10, v8, 2
+; RV64-i32-NEXT: vslidedown.vi v8, v8, 3
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v9
+; RV64-i32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v10
+; RV64-i32-NEXT: vmv.v.x v9, a0
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v8
+; RV64-i32-NEXT: vslide1down.vx v8, v9, a1
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lround_v3f32:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i64-NEXT: vslidedown.vi v9, v8, 1
+; RV64-i64-NEXT: vfmv.f.s fa5, v8
+; RV64-i64-NEXT: vslidedown.vi v10, v8, 2
+; RV64-i64-NEXT: vslidedown.vi v11, v8, 3
+; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v9
+; RV64-i64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v10
+; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-i64-NEXT: vmv.v.x v8, a0
+; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i64-NEXT: vfmv.f.s fa5, v11
+; RV64-i64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-i64-NEXT: vslide1down.vx v8, v8, a1
+; RV64-i64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i64-NEXT: ret
+ %a = call <3 x iXLen> @llvm.lround.v3iXLen.v3f32(<3 x float> %x)
+ ret <3 x iXLen> %a
+}
+declare <3 x iXLen> @llvm.lround.v3iXLen.v3f32(<3 x float>)
+
+define <4 x iXLen> @lround_v4f32(<4 x float> %x) nounwind {
+; RV32-LABEL: lround_v4f32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v9, v8, 1
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: vslidedown.vi v10, v8, 2
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v9
+; RV32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v10
+; RV32-NEXT: vmv.v.x v9, a0
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: vslide1down.vx v8, v9, a1
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lround_v4f32:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV64-i32-NEXT: vslidedown.vi v9, v8, 1
+; RV64-i32-NEXT: vfmv.f.s fa5, v8
+; RV64-i32-NEXT: vslidedown.vi v10, v8, 2
+; RV64-i32-NEXT: vslidedown.vi v8, v8, 3
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v9
+; RV64-i32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v10
+; RV64-i32-NEXT: vmv.v.x v9, a0
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v8
+; RV64-i32-NEXT: vslide1down.vx v8, v9, a1
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lround_v4f32:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i64-NEXT: vslidedown.vi v9, v8, 1
+; RV64-i64-NEXT: vfmv.f.s fa5, v8
+; RV64-i64-NEXT: vslidedown.vi v10, v8, 2
+; RV64-i64-NEXT: vslidedown.vi v11, v8, 3
+; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v9
+; RV64-i64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v10
+; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-i64-NEXT: vmv.v.x v8, a0
+; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i64-NEXT: vfmv.f.s fa5, v11
+; RV64-i64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-i64-NEXT: vslide1down.vx v8, v8, a1
+; RV64-i64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i64-NEXT: ret
+ %a = call <4 x iXLen> @llvm.lround.v4iXLen.v4f32(<4 x float> %x)
+ ret <4 x iXLen> %a
+}
+declare <4 x iXLen> @llvm.lround.v4iXLen.v4f32(<4 x float>)
+
+define <8 x iXLen> @lround_v8f32(<8 x float> %x) nounwind {
+; RV32-LABEL: lround_v8f32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v10, v8, 1
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: vslidedown.vi v11, v8, 2
+; RV32-NEXT: vslidedown.vi v12, v8, 3
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v10
+; RV32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v11
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v10, a0
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v12
+; RV32-NEXT: vslidedown.vi v12, v8, 4
+; RV32-NEXT: vslide1down.vx v10, v10, a1
+; RV32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v12
+; RV32-NEXT: vslidedown.vi v12, v8, 5
+; RV32-NEXT: vslide1down.vx v10, v10, a0
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v12
+; RV32-NEXT: vslidedown.vi v12, v8, 6
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vslide1down.vx v10, v10, a1
+; RV32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v12
+; RV32-NEXT: vslide1down.vx v10, v10, a0
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: vslide1down.vx v8, v10, a1
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lround_v8f32:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i32-NEXT: vslidedown.vi v10, v8, 1
+; RV64-i32-NEXT: vfmv.f.s fa5, v8
+; RV64-i32-NEXT: vslidedown.vi v11, v8, 2
+; RV64-i32-NEXT: vslidedown.vi v12, v8, 3
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v10
+; RV64-i32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v11
+; RV64-i32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV64-i32-NEXT: vmv.v.x v10, a0
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v12
+; RV64-i32-NEXT: vslidedown.vi v12, v8, 4
+; RV64-i32-NEXT: vslide1down.vx v10, v10, a1
+; RV64-i32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v12
+; RV64-i32-NEXT: vslidedown.vi v12, v8, 5
+; RV64-i32-NEXT: vslide1down.vx v10, v10, a0
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v12
+; RV64-i32-NEXT: vslidedown.vi v12, v8, 6
+; RV64-i32-NEXT: vslidedown.vi v8, v8, 7
+; RV64-i32-NEXT: vslide1down.vx v10, v10, a1
+; RV64-i32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v12
+; RV64-i32-NEXT: vslide1down.vx v10, v10, a0
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v8
+; RV64-i32-NEXT: vslide1down.vx v8, v10, a1
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lround_v8f32:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: addi sp, sp, -128
+; RV64-i64-NEXT: sd ra, 120(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT: sd s0, 112(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT: addi s0, sp, 128
+; RV64-i64-NEXT: andi sp, sp, -64
+; RV64-i64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64-i64-NEXT: vfmv.f.s fa5, v8
+; RV64-i64-NEXT: vslidedown.vi v10, v8, 7
+; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v10
+; RV64-i64-NEXT: vslidedown.vi v10, v8, 6
+; RV64-i64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v10
+; RV64-i64-NEXT: vslidedown.vi v10, v8, 5
+; RV64-i64-NEXT: fcvt.l.s a2, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v10
+; RV64-i64-NEXT: vslidedown.vi v10, v8, 4
+; RV64-i64-NEXT: fcvt.l.s a3, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v10
+; RV64-i64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i64-NEXT: vslidedown.vi v9, v8, 3
+; RV64-i64-NEXT: vslidedown.vi v10, v8, 2
+; RV64-i64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-i64-NEXT: fcvt.l.s a4, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v9
+; RV64-i64-NEXT: fcvt.l.s a5, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v10
+; RV64-i64-NEXT: fcvt.l.s a6, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v8
+; RV64-i64-NEXT: sd a4, 32(sp)
+; RV64-i64-NEXT: sd a3, 40(sp)
+; RV64-i64-NEXT: sd a2, 48(sp)
+; RV64-i64-NEXT: sd a1, 56(sp)
+; RV64-i64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT: sd a0, 0(sp)
+; RV64-i64-NEXT: sd a1, 8(sp)
+; RV64-i64-NEXT: sd a6, 16(sp)
+; RV64-i64-NEXT: sd a5, 24(sp)
+; RV64-i64-NEXT: mv a0, sp
+; RV64-i64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV64-i64-NEXT: vle64.v v8, (a0)
+; RV64-i64-NEXT: addi sp, s0, -128
+; RV64-i64-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT: ld s0, 112(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT: addi sp, sp, 128
+; RV64-i64-NEXT: ret
+ %a = call <8 x iXLen> @llvm.lround.v8iXLen.v8f32(<8 x float> %x)
+ ret <8 x iXLen> %a
+}
+declare <8 x iXLen> @llvm.lround.v8iXLen.v8f32(<8 x float>)
+
+define <16 x iXLen> @lround_v16f32(<16 x float> %x) nounwind {
+; RV32-LABEL: lround_v16f32:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -192
+; RV32-NEXT: sw ra, 188(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 184(sp) # 4-byte Folded Spill
+; RV32-NEXT: addi s0, sp, 192
+; RV32-NEXT: andi sp, sp, -64
+; RV32-NEXT: mv a0, sp
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vse32.v v8, (a0)
+; RV32-NEXT: flw fa5, 60(sp)
+; RV32-NEXT: vfmv.f.s fa4, v8
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v10, v8, 3
+; RV32-NEXT: vslidedown.vi v11, v8, 2
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: sw a0, 124(sp)
+; RV32-NEXT: flw fa5, 56(sp)
+; RV32-NEXT: fcvt.w.s a0, fa4, rmm
+; RV32-NEXT: vfmv.f.s fa4, v10
+; RV32-NEXT: vslidedown.vi v10, v8, 1
+; RV32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV32-NEXT: sw a1, 120(sp)
+; RV32-NEXT: flw fa5, 52(sp)
+; RV32-NEXT: fcvt.w.s a1, fa4, rmm
+; RV32-NEXT: vfmv.f.s fa4, v11
+; RV32-NEXT: fcvt.w.s a2, fa4, rmm
+; RV32-NEXT: fcvt.w.s a3, fa5, rmm
+; RV32-NEXT: sw a3, 116(sp)
+; RV32-NEXT: flw fa5, 48(sp)
+; RV32-NEXT: vfmv.f.s fa4, v10
+; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v10, v8, 7
+; RV32-NEXT: fcvt.w.s a3, fa4, rmm
+; RV32-NEXT: fcvt.w.s a4, fa5, rmm
+; RV32-NEXT: sw a4, 112(sp)
+; RV32-NEXT: flw fa5, 44(sp)
+; RV32-NEXT: vfmv.f.s fa4, v10
+; RV32-NEXT: vslidedown.vi v10, v8, 6
+; RV32-NEXT: fcvt.w.s a4, fa4, rmm
+; RV32-NEXT: fcvt.w.s a5, fa5, rmm
+; RV32-NEXT: sw a5, 108(sp)
+; RV32-NEXT: flw fa5, 40(sp)
+; RV32-NEXT: vfmv.f.s fa4, v10
+; RV32-NEXT: vslidedown.vi v10, v8, 5
+; RV32-NEXT: fcvt.w.s a5, fa4, rmm
+; RV32-NEXT: fcvt.w.s a6, fa5, rmm
+; RV32-NEXT: sw a6, 104(sp)
+; RV32-NEXT: flw fa5, 36(sp)
+; RV32-NEXT: vfmv.f.s fa4, v10
+; RV32-NEXT: fcvt.w.s a6, fa4, rmm
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: fcvt.w.s a7, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: sw a7, 100(sp)
+; RV32-NEXT: fcvt.w.s a7, fa5, rmm
+; RV32-NEXT: flw fa5, 32(sp)
+; RV32-NEXT: sw a0, 64(sp)
+; RV32-NEXT: sw a3, 68(sp)
+; RV32-NEXT: sw a2, 72(sp)
+; RV32-NEXT: sw a1, 76(sp)
+; RV32-NEXT: sw a7, 80(sp)
+; RV32-NEXT: sw a6, 84(sp)
+; RV32-NEXT: sw a5, 88(sp)
+; RV32-NEXT: sw a4, 92(sp)
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: sw a0, 96(sp)
+; RV32-NEXT: addi a0, sp, 64
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vle32.v v8, (a0)
+; RV32-NEXT: addi sp, s0, -192
+; RV32-NEXT: lw ra, 188(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 184(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 192
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lround_v16f32:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: addi sp, sp, -192
+; RV64-i32-NEXT: sd ra, 184(sp) # 8-byte Folded Spill
+; RV64-i32-NEXT: sd s0, 176(sp) # 8-byte Folded Spill
+; RV64-i32-NEXT: addi s0, sp, 192
+; RV64-i32-NEXT: andi sp, sp, -64
+; RV64-i32-NEXT: mv a0, sp
+; RV64-i32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64-i32-NEXT: vse32.v v8, (a0)
+; RV64-i32-NEXT: flw fa5, 60(sp)
+; RV64-i32-NEXT: vfmv.f.s fa4, v8
+; RV64-i32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i32-NEXT: vslidedown.vi v10, v8, 3
+; RV64-i32-NEXT: vslidedown.vi v11, v8, 2
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: sw a0, 124(sp)
+; RV64-i32-NEXT: flw fa5, 56(sp)
+; RV64-i32-NEXT: fcvt.w.s a0, fa4, rmm
+; RV64-i32-NEXT: vfmv.f.s fa4, v10
+; RV64-i32-NEXT: vslidedown.vi v10, v8, 1
+; RV64-i32-NEXT: fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT: sw a1, 120(sp)
+; RV64-i32-NEXT: flw fa5, 52(sp)
+; RV64-i32-NEXT: fcvt.w.s a1, fa4, rmm
+; RV64-i32-NEXT: vfmv.f.s fa4, v11
+; RV64-i32-NEXT: fcvt.w.s a2, fa4, rmm
+; RV64-i32-NEXT: fcvt.w.s a3, fa5, rmm
+; RV64-i32-NEXT: sw a3, 116(sp)
+; RV64-i32-NEXT: flw fa5, 48(sp)
+; RV64-i32-NEXT: vfmv.f.s fa4, v10
+; RV64-i32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64-i32-NEXT: vslidedown.vi v10, v8, 7
+; RV64-i32-NEXT: fcvt.w.s a3, fa4, rmm
+; RV64-i32-NEXT: fcvt.w.s a4, fa5, rmm
+; RV64-i32-NEXT: sw a4, 112(sp)
+; RV64-i32-NEXT: flw fa5, 44(sp)
+; RV64-i32-NEXT: vfmv.f.s fa4, v10
+; RV64-i32-NEXT: vslidedown.vi v10, v8, 6
+; RV64-i32-NEXT: fcvt.w.s a4, fa4, rmm
+; RV64-i32-NEXT: fcvt.w.s a5, fa5, rmm
+; RV64-i32-NEXT: sw a5, 108(sp)
+; RV64-i32-NEXT: flw fa5, 40(sp)
+; RV64-i32-NEXT: vfmv.f.s fa4, v10
+; RV64-i32-NEXT: vslidedown.vi v10, v8, 5
+; RV64-i32-NEXT: fcvt.w.s a5, fa4, rmm
+; RV64-i32-NEXT: fcvt.w.s a6, fa5, rmm
+; RV64-i32-NEXT: sw a6, 104(sp)
+; RV64-i32-NEXT: flw fa5, 36(sp)
+; RV64-i32-NEXT: vfmv.f.s fa4, v10
+; RV64-i32-NEXT: fcvt.w.s a6, fa4, rmm
+; RV64-i32-NEXT: vslidedown.vi v8, v8, 4
+; RV64-i32-NEXT: fcvt.w.s a7, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v8
+; RV64-i32-NEXT: sw a7, 100(sp)
+; RV64-i32-NEXT: fcvt.w.s a7, fa5, rmm
+; RV64-i32-NEXT: flw fa5, 32(sp)
+; RV64-i32-NEXT: sw a0, 64(sp)
+; RV64-i32-NEXT: sw a3, 68(sp)
+; RV64-i32-NEXT: sw a2, 72(sp)
+; RV64-i32-NEXT: sw a1, 76(sp)
+; RV64-i32-NEXT: sw a7, 80(sp)
+; RV64-i32-NEXT: sw a6, 84(sp)
+; RV64-i32-NEXT: sw a5, 88(sp)
+; RV64-i32-NEXT: sw a4, 92(sp)
+; RV64-i32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT: sw a0, 96(sp)
+; RV64-i32-NEXT: addi a0, sp, 64
+; RV64-i32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64-i32-NEXT: vle32.v v8, (a0)
+; RV64-i32-NEXT: addi sp, s0, -192
+; RV64-i32-NEXT: ld ra, 184(sp) # 8-byte Folded Reload
+; RV64-i32-NEXT: ld s0, 176(sp) # 8-byte Folded Reload
+; RV64-i32-NEXT: addi sp, sp, 192
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lround_v16f32:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: addi sp, sp, -384
+; RV64-i64-NEXT: sd ra, 376(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT: sd s0, 368(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT: addi s0, sp, 384
+; RV64-i64-NEXT: andi sp, sp, -128
+; RV64-i64-NEXT: addi a0, sp, 64
+; RV64-i64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64-i64-NEXT: vse32.v v8, (a0)
+; RV64-i64-NEXT: flw fa5, 124(sp)
+; RV64-i64-NEXT: vfmv.f.s fa4, v8
+; RV64-i64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i64-NEXT: vslidedown.vi v10, v8, 3
+; RV64-i64-NEXT: vslidedown.vi v12, v8, 2
+; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT: sd a0, 248(sp)
+; RV64-i64-NEXT: flw fa5, 120(sp)
+; RV64-i64-NEXT: vslidedown.vi v13, v8, 1
+; RV64-i64-NEXT: fcvt.l.s a0, fa4, rmm
+; RV64-i64-NEXT: vfmv.f.s fa4, v10
+; RV64-i64-NEXT: fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT: sd a1, 240(sp)
+; RV64-i64-NEXT: flw fa5, 116(sp)
+; RV64-i64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64-i64-NEXT: vslidedown.vi v10, v8, 7
+; RV64-i64-NEXT: fcvt.l.s a1, fa4, rmm
+; RV64-i64-NEXT: vfmv.f.s fa4, v12
+; RV64-i64-NEXT: fcvt.l.s a2, fa5, rmm
+; RV64-i64-NEXT: sd a2, 232(sp)
+; RV64-i64-NEXT: flw fa5, 112(sp)
+; RV64-i64-NEXT: fcvt.l.s a2, fa4, rmm
+; RV64-i64-NEXT: vfmv.f.s fa4, v13
+; RV64-i64-NEXT: vslidedown.vi v12, v8, 6
+; RV64-i64-NEXT: fcvt.l.s a3, fa5, rmm
+; RV64-i64-NEXT: sd a3, 224(sp)
+; RV64-i64-NEXT: flw fa5, 108(sp)
+; RV64-i64-NEXT: fcvt.l.s a3, fa4, rmm
+; RV64-i64-NEXT: vfmv.f.s fa4, v10
+; RV64-i64-NEXT: vslidedown.vi v10, v8, 5
+; RV64-i64-NEXT: fcvt.l.s a4, fa5, rmm
+; RV64-i64-NEXT: sd a4, 216(sp)
+; RV64-i64-NEXT: flw fa5, 104(sp)
+; RV64-i64-NEXT: fcvt.l.s a4, fa4, rmm
+; RV64-i64-NEXT: vfmv.f.s fa4, v12
+; RV64-i64-NEXT: fcvt.l.s a5, fa4, rmm
+; RV64-i64-NEXT: fcvt.l.s a6, fa5, rmm
+; RV64-i64-NEXT: sd a6, 208(sp)
+; RV64-i64-NEXT: flw fa5, 100(sp)
+; RV64-i64-NEXT: vfmv.f.s fa4, v10
+; RV64-i64-NEXT: fcvt.l.s a6, fa4, rmm
+; RV64-i64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-i64-NEXT: fcvt.l.s a7, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v8
+; RV64-i64-NEXT: sd a7, 200(sp)
+; RV64-i64-NEXT: fcvt.l.s a7, fa5, rmm
+; RV64-i64-NEXT: flw fa5, 96(sp)
+; RV64-i64-NEXT: sd a0, 128(sp)
+; RV64-i64-NEXT: sd a3, 136(sp)
+; RV64-i64-NEXT: sd a2, 144(sp)
+; RV64-i64-NEXT: sd a1, 152(sp)
+; RV64-i64-NEXT: sd a7, 160(sp)
+; RV64-i64-NEXT: sd a6, 168(sp)
+; RV64-i64-NEXT: sd a5, 176(sp)
+; RV64-i64-NEXT: sd a4, 184(sp)
+; RV64-i64-NEXT: fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT: sd a0, 192(sp)
+; RV64-i64-NEXT: addi a0, sp, 128
+; RV64-i64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-i64-NEXT: vle64.v v8, (a0)
+; RV64-i64-NEXT: addi sp, s0, -384
+; RV64-i64-NEXT: ld ra, 376(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT: ld s0, 368(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT: addi sp, sp, 384
+; RV64-i64-NEXT: ret
+ %a = call <16 x iXLen> @llvm.lround.v16iXLen.v16f32(<16 x float> %x)
+ ret <16 x iXLen> %a
+}
+declare <16 x iXLen> @llvm.lround.v16iXLen.v16f32(<16 x float>)
+
+define <1 x iXLen> @lround_v1f64(<1 x double> %x) nounwind {
+; RV32-LABEL: lround_v1f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.w.d a0, fa5, rmm
+; RV32-NEXT: vmv.s.x v8, a0
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lround_v1f64:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-i32-NEXT: vfmv.f.s fa5, v8
+; RV64-i32-NEXT: fcvt.w.d a0, fa5, rmm
+; RV64-i32-NEXT: vmv.s.x v8, a0
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lround_v1f64:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-i64-NEXT: vfmv.f.s fa5, v8
+; RV64-i64-NEXT: fcvt.l.d a0, fa5, rmm
+; RV64-i64-NEXT: vmv.s.x v8, a0
+; RV64-i64-NEXT: ret
+ %a = call <1 x iXLen> @llvm.lround.v1iXLen.v1f64(<1 x double> %x)
+ ret <1 x iXLen> %a
+}
+declare <1 x iXLen> @llvm.lround.v1iXLen.v1f64(<1 x double>)
+
+define <2 x iXLen> @lround_v2f64(<2 x double> %x) nounwind {
+; RV32-LABEL: lround_v2f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v9, v8, 1
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: fcvt.w.d a0, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v9
+; RV32-NEXT: fcvt.w.d a1, fa5, rmm
+; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lround_v2f64:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-i32-NEXT: vslidedown.vi v9, v8, 1
+; RV64-i32-NEXT: vfmv.f.s fa5, v8
+; RV64-i32-NEXT: fcvt.w.d a0, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v9
+; RV64-i32-NEXT: fcvt.w.d a1, fa5, rmm
+; RV64-i32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV64-i32-NEXT: vmv.v.x v8, a0
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a1
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lround_v2f64:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64-i64-NEXT: vslidedown.vi v9, v8, 1
+; RV64-i64-NEXT: vfmv.f.s fa5, v8
+; RV64-i64-NEXT: fcvt.l.d a0, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v9
+; RV64-i64-NEXT: fcvt.l.d a1, fa5, rmm
+; RV64-i64-NEXT: vmv.v.x v8, a0
+; RV64-i64-NEXT: vslide1down.vx v8, v8, a1
+; RV64-i64-NEXT: ret
+ %a = call <2 x iXLen> @llvm.lround.v2iXLen.v2f64(<2 x double> %x)
+ ret <2 x iXLen> %a
+}
+declare <2 x iXLen> @llvm.lround.v2iXLen.v2f64(<2 x double>)
+
+define <4 x iXLen> @lround_v4f64(<4 x double> %x) nounwind {
+; RV32-LABEL: lround_v4f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v12, v8, 1
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v10, v8, 2
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: fcvt.w.d a0, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v12
+; RV32-NEXT: fcvt.w.d a1, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v10
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v9, a0
+; RV32-NEXT: fcvt.w.d a0, fa5, rmm
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v9, a1
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: fcvt.w.d a0, fa5, rmm
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lround_v4f64:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-i32-NEXT: vslidedown.vi v12, v8, 1
+; RV64-i32-NEXT: vfmv.f.s fa5, v8
+; RV64-i32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV64-i32-NEXT: vslidedown.vi v10, v8, 2
+; RV64-i32-NEXT: vslidedown.vi v8, v8, 3
+; RV64-i32-NEXT: fcvt.w.d a0, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v12
+; RV64-i32-NEXT: fcvt.w.d a1, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v10
+; RV64-i32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV64-i32-NEXT: vmv.v.x v9, a0
+; RV64-i32-NEXT: fcvt.w.d a0, fa5, rmm
+; RV64-i32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-i32-NEXT: vfmv.f.s fa5, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i32-NEXT: vslide1down.vx v8, v9, a1
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT: fcvt.w.d a0, fa5, rmm
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lround_v4f64:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-i64-NEXT: vslidedown.vi v12, v8, 1
+; RV64-i64-NEXT: vfmv.f.s fa5, v8
+; RV64-i64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV64-i64-NEXT: vslidedown.vi v10, v8, 2
+; RV64-i64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-i64-NEXT: fcvt.l.d a0, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v12
+; RV64-i64-NEXT: fcvt.l.d a1, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v10
+; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-i64-NEXT: vmv.v.x v10, a0
+; RV64-i64-NEXT: fcvt.l.d a0, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v8
+; RV64-i64-NEXT: vslide1down.vx v8, v10, a1
+; RV64-i64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i64-NEXT: fcvt.l.d a0, fa5, rmm
+; RV64-i64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i64-NEXT: ret
+ %a = call <4 x iXLen> @llvm.lround.v4iXLen.v4f64(<4 x double> %x)
+ ret <4 x iXLen> %a
+}
+declare <4 x iXLen> @llvm.lround.v4iXLen.v4f64(<4 x double>)
+
+define <8 x iXLen> @lround_v8f64(<8 x double> %x) nounwind {
+; RV32-LABEL: lround_v8f64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -128
+; RV32-NEXT: sw ra, 124(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 120(sp) # 4-byte Folded Spill
+; RV32-NEXT: addi s0, sp, 128
+; RV32-NEXT: andi sp, sp, -64
+; RV32-NEXT: mv a0, sp
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v14, v8, 1
+; RV32-NEXT: vfmv.f.s fa5, v8
+; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v12, v8, 2
+; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT: vse64.v v8, (a0)
+; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vfmv.f.s fa4, v14
+; RV32-NEXT: fcvt.w.d a0, fa5, rmm
+; RV32-NEXT: vfmv.f.s fa5, v12
+; RV32-NEXT: vfmv.f.s fa3, v8
+; RV32-NEXT: fcvt.w.d a1, fa4, rmm
+; RV32-NEXT: fcvt.w.d a2, fa5, rmm
+; RV32-NEXT: fcvt.w.d a3, fa3, rmm
+; RV32-NEXT: fld fa5, 32(sp)
+; RV32-NEXT: fld fa4, 40(sp)
+; RV32-NEXT: fld fa3, 48(sp)
+; RV32-NEXT: fld fa2, 56(sp)
+; RV32-NEXT: fcvt.w.d a4, fa5, rmm
+; RV32-NEXT: fcvt.w.d a5, fa4, rmm
+; RV32-NEXT: fcvt.w.d a6, fa3, rmm
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: vslide1down.vx v8, v8, a2
+; RV32-NEXT: vslide1down.vx v8, v8, a3
+; RV32-NEXT: vslide1down.vx v8, v8, a4
+; RV32-NEXT: vslide1down.vx v8, v8, a5
+; RV32-NEXT: vslide1down.vx v8, v8, a6
+; RV32-NEXT: fcvt.w.d a0, fa2, rmm
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi sp, s0, -128
+; RV32-NEXT: lw ra, 124(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 120(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 128
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lround_v8f64:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: addi sp, sp, -128
+; RV64-i32-NEXT: sd ra, 120(sp) # 8-byte Folded Spill
+; RV64-i32-NEXT: sd s0, 112(sp) # 8-byte Folded Spill
+; RV64-i32-NEXT: addi s0, sp, 128
+; RV64-i32-NEXT: andi sp, sp, -64
+; RV64-i32-NEXT: mv a0, sp
+; RV64-i32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-i32-NEXT: vslidedown.vi v14, v8, 1
+; RV64-i32-NEXT: vfmv.f.s fa5, v8
+; RV64-i32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV64-i32-NEXT: vslidedown.vi v12, v8, 2
+; RV64-i32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV64-i32-NEXT: vse64.v v8, (a0)
+; RV64-i32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV64-i32-NEXT: vslidedown.vi v8, v8, 3
+; RV64-i32-NEXT: vfmv.f.s fa4, v14
+; RV64-i32-NEXT: fcvt.w.d a0, fa5, rmm
+; RV64-i32-NEXT: vfmv.f.s fa5, v12
+; RV64-i32-NEXT: vfmv.f.s fa3, v8
+; RV64-i32-NEXT: fcvt.w.d a1, fa4, rmm
+; RV64-i32-NEXT: fcvt.w.d a2, fa5, rmm
+; RV64-i32-NEXT: fcvt.w.d a3, fa3, rmm
+; RV64-i32-NEXT: fld fa5, 32(sp)
+; RV64-i32-NEXT: fld fa4, 40(sp)
+; RV64-i32-NEXT: fld fa3, 48(sp)
+; RV64-i32-NEXT: fld fa2, 56(sp)
+; RV64-i32-NEXT: fcvt.w.d a4, fa5, rmm
+; RV64-i32-NEXT: fcvt.w.d a5, fa4, rmm
+; RV64-i32-NEXT: fcvt.w.d a6, fa3, rmm
+; RV64-i32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV64-i32-NEXT: vmv.v.x v8, a0
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a1
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a2
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a3
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a4
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a5
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a6
+; RV64-i32-NEXT: fcvt.w.d a0, fa2, rmm
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT: addi sp, s0, -128
+; RV64-i32-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
+; RV64-i32-NEXT: ld s0, 112(sp) # 8-byte Folded Reload
+; RV64-i32-NEXT: addi sp, sp, 128
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lround_v8f64:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: addi sp, sp, -192
+; RV64-i64-NEXT: sd ra, 184(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT: sd s0, 176(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT: addi s0, sp, 192
+; RV64-i64-NEXT: andi sp, sp, -64
+; RV64-i64-NEXT: mv a0, sp
+; RV64-i64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV64-i64-NEXT: vse64.v v8, (a0)
+; RV64-i64-NEXT: fld fa5, 56(sp)
+; RV64-i64-NEXT: vfmv.f.s fa4, v8
+; RV64-i64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-i64-NEXT: vslidedown.vi v10, v8, 1
+; RV64-i64-NEXT: fcvt.l.d a0, fa4, rmm
+; RV64-i64-NEXT: fcvt.l.d a1, fa5, rmm
+; RV64-i64-NEXT: sd a1, 120(sp)
+; RV64-i64-NEXT: fld fa5, 48(sp)
+; RV64-i64-NEXT: vfmv.f.s fa4, v10
+; RV64-i64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV64-i64-NEXT: vslidedown.vi v10, v8, 3
+; RV64-i64-NEXT: fcvt.l.d a1, fa4, rmm
+; RV64-i64-NEXT: fcvt.l.d a2, fa5, rmm
+; RV64-i64-NEXT: sd a2, 112(sp)
+; RV64-i64-NEXT: fld fa5, 40(sp)
+; RV64-i64-NEXT: vfmv.f.s fa4, v10
+; RV64-i64-NEXT: fcvt.l.d a2, fa4, rmm
+; RV64-i64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-i64-NEXT: fcvt.l.d a3, fa5, rmm
+; RV64-i64-NEXT: vfmv.f.s fa5, v8
+; RV64-i64-NEXT: sd a3, 104(sp)
+; RV64-i64-NEXT: fcvt.l.d a3, fa5, rmm
+; RV64-i64-NEXT: fld fa5, 32(sp)
+; RV64-i64-NEXT: sd a0, 64(sp)
+; RV64-i64-NEXT: sd a1, 72(sp)
+; RV64-i64-NEXT: sd a3, 80(sp)
+; RV64-i64-NEXT: sd a2, 88(sp)
+; RV64-i64-NEXT: fcvt.l.d a0, fa5, rmm
+; RV64-i64-NEXT: sd a0, 96(sp)
+; RV64-i64-NEXT: addi a0, sp, 64
+; RV64-i64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV64-i64-NEXT: vle64.v v8, (a0)
+; RV64-i64-NEXT: addi sp, s0, -192
+; RV64-i64-NEXT: ld ra, 184(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT: ld s0, 176(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT: addi sp, sp, 192
+; RV64-i64-NEXT: ret
+ %a = call <8 x iXLen> @llvm.lround.v8iXLen.v8f64(<8 x double> %x)
+ ret <8 x iXLen> %a
+}
+declare <8 x iXLen> @llvm.lround.v8iXLen.v8f64(<8 x double>)