[llvm] [CodeGen/RISCV] Add fixed-vector [l]lround tests (PR #145926)

Ramkumar Ramachandra via llvm-commits llvm-commits at lists.llvm.org
Thu Jun 26 10:05:15 PDT 2025


https://github.com/artagnon updated https://github.com/llvm/llvm-project/pull/145926

From 9ed6e0c0ff9b2912375d91f2411a36f45704027d Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <ramkumar.ramachandra at codasip.com>
Date: Thu, 26 Jun 2025 17:41:19 +0100
Subject: [PATCH 1/2] [CodeGen/RISCV] Add fixed-vector [l]lround tests

In preparation for unifying the codegen of [l]lrint and [l]lround by
making the latter go through custom lowering for vector codegen, add
tests showing the current unrolled codegen of [l]lround on fixed
vectors.
---
 .../RISCV/rvv/fixed-vectors-llround.ll        | 1814 +++++++++++++++++
 .../CodeGen/RISCV/rvv/fixed-vectors-lround.ll | 1682 +++++++++++++++
 2 files changed, 3496 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll
new file mode 100644
index 0000000000000..acc05a50455fb
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll
@@ -0,0 +1,1814 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v,+f,+d,+zvfh -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+f,+d,+zvfh -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV64
+
+define <1 x i64> @llround_v1f16(<1 x half> %x) {
+; RV32-LABEL: llround_v1f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 0(sp)
+; RV32-NEXT:    sw a1, 4(sp)
+; RV32-NEXT:    mv a0, sp
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v8, (a0), zero
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: llround_v1f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; RV64-NEXT:    vmv.s.x v8, a0
+; RV64-NEXT:    ret
+  %a = call <1 x i64> @llvm.llround.v1i64.v1f16(<1 x half> %x)
+  ret <1 x i64> %a
+}
+declare <1 x i64> @llvm.llround.v1i64.v1f16(<1 x half>)
+
+define <2 x i64> @llround_v2f16(<2 x half> %x) {
+; RV32-LABEL: llround_v2f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -32
+; RV32-NEXT:    .cfi_def_cfa_offset 32
+; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 2 * vlenb
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vmv.v.x v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    csrr a2, vlenb
+; RV32-NEXT:    add a2, sp, a2
+; RV32-NEXT:    addi a2, a2, 16
+; RV32-NEXT:    vl1r.v v8, (a2) # vscale x 8-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 32
+; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: llround_v2f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v9
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-NEXT:    ret
+  %a = call <2 x i64> @llvm.llround.v2i64.v2f16(<2 x half> %x)
+  ret <2 x i64> %a
+}
+declare <2 x i64> @llvm.llround.v2i64.v2f16(<2 x half>)
+
+define <3 x i64> @llround_v3f16(<3 x half> %x) {
+; RV32-LABEL: llround_v3f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -32
+; RV32-NEXT:    .cfi_def_cfa_offset 32
+; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a1, a0, 1
+; RV32-NEXT:    add a0, a1, a0
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 3 * vlenb
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vmv.v.x v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 2
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a1, a0, 1
+; RV32-NEXT:    add a0, a1, a0
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 32
+; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: llround_v3f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    vslidedown.vi v10, v8, 2
+; RV64-NEXT:    vslidedown.vi v11, v8, 3
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v9
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v10
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; RV64-NEXT:    vfmv.f.s fa5, v11
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-NEXT:    ret
+  %a = call <3 x i64> @llvm.llround.v3i64.v3f16(<3 x half> %x)
+  ret <3 x i64> %a
+}
+declare <3 x i64> @llvm.llround.v3i64.v3f16(<3 x half>)
+
+define <4 x i64> @llround_v4f16(<4 x half> %x) {
+; RV32-LABEL: llround_v4f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -32
+; RV32-NEXT:    .cfi_def_cfa_offset 32
+; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a1, a0, 1
+; RV32-NEXT:    add a0, a1, a0
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 3 * vlenb
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vmv.v.x v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 2
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a1, a0, 1
+; RV32-NEXT:    add a0, a1, a0
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 32
+; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: llround_v4f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    vslidedown.vi v10, v8, 2
+; RV64-NEXT:    vslidedown.vi v11, v8, 3
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v9
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v10
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; RV64-NEXT:    vfmv.f.s fa5, v11
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-NEXT:    ret
+  %a = call <4 x i64> @llvm.llround.v4i64.v4f16(<4 x half> %x)
+  ret <4 x i64> %a
+}
+declare <4 x i64> @llvm.llround.v4i64.v4f16(<4 x half>)
+
+define <8 x i64> @llround_v8f16(<8 x half> %x) {
+; RV32-LABEL: llround_v8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -208
+; RV32-NEXT:    .cfi_def_cfa_offset 208
+; RV32-NEXT:    sw ra, 204(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 200(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
+; RV32-NEXT:    addi s0, sp, 208
+; RV32-NEXT:    .cfi_def_cfa s0, 0
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    andi sp, sp, -64
+; RV32-NEXT:    addi a0, sp, 192
+; RV32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 64(sp)
+; RV32-NEXT:    sw a1, 68(sp)
+; RV32-NEXT:    addi a0, sp, 192
+; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 7
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 120(sp)
+; RV32-NEXT:    sw a1, 124(sp)
+; RV32-NEXT:    addi a0, sp, 192
+; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 6
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 112(sp)
+; RV32-NEXT:    sw a1, 116(sp)
+; RV32-NEXT:    addi a0, sp, 192
+; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 5
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 104(sp)
+; RV32-NEXT:    sw a1, 108(sp)
+; RV32-NEXT:    addi a0, sp, 192
+; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 4
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 96(sp)
+; RV32-NEXT:    sw a1, 100(sp)
+; RV32-NEXT:    addi a0, sp, 192
+; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 88(sp)
+; RV32-NEXT:    sw a1, 92(sp)
+; RV32-NEXT:    addi a0, sp, 192
+; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 2
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 80(sp)
+; RV32-NEXT:    sw a1, 84(sp)
+; RV32-NEXT:    addi a0, sp, 192
+; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 72(sp)
+; RV32-NEXT:    sw a1, 76(sp)
+; RV32-NEXT:    addi a0, sp, 64
+; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT:    vle32.v v8, (a0)
+; RV32-NEXT:    addi sp, s0, -208
+; RV32-NEXT:    .cfi_def_cfa sp, 208
+; RV32-NEXT:    lw ra, 204(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 200(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
+; RV32-NEXT:    addi sp, sp, 208
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: llround_v8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -128
+; RV64-NEXT:    .cfi_def_cfa_offset 128
+; RV64-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    .cfi_offset s0, -16
+; RV64-NEXT:    addi s0, sp, 128
+; RV64-NEXT:    .cfi_def_cfa s0, 0
+; RV64-NEXT:    andi sp, sp, -64
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    vslidedown.vi v9, v8, 7
+; RV64-NEXT:    vslidedown.vi v10, v8, 6
+; RV64-NEXT:    vslidedown.vi v11, v8, 5
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v9
+; RV64-NEXT:    vslidedown.vi v9, v8, 4
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v10
+; RV64-NEXT:    vslidedown.vi v10, v8, 3
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s a2, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v11
+; RV64-NEXT:    vslidedown.vi v11, v8, 2
+; RV64-NEXT:    vslidedown.vi v8, v8, 1
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s a3, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v9
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s a4, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v10
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s a5, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v11
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s a6, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    sd a4, 32(sp)
+; RV64-NEXT:    sd a3, 40(sp)
+; RV64-NEXT:    sd a2, 48(sp)
+; RV64-NEXT:    sd a1, 56(sp)
+; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-NEXT:    sd a0, 0(sp)
+; RV64-NEXT:    sd a1, 8(sp)
+; RV64-NEXT:    sd a6, 16(sp)
+; RV64-NEXT:    sd a5, 24(sp)
+; RV64-NEXT:    mv a0, sp
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    addi sp, s0, -128
+; RV64-NEXT:    .cfi_def_cfa sp, 128
+; RV64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
+; RV64-NEXT:    addi sp, sp, 128
+; RV64-NEXT:    .cfi_def_cfa_offset 0
+; RV64-NEXT:    ret
+  %a = call <8 x i64> @llvm.llround.v8i64.v8f16(<8 x half> %x)
+  ret <8 x i64> %a
+}
+declare <8 x i64> @llvm.llround.v8i64.v8f16(<8 x half>)
+
+define <16 x i64> @llround_v16f16(<16 x half> %x) {
+; RV32-LABEL: llround_v16f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -400
+; RV32-NEXT:    .cfi_def_cfa_offset 400
+; RV32-NEXT:    sw ra, 396(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 392(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
+; RV32-NEXT:    addi s0, sp, 400
+; RV32-NEXT:    .cfi_def_cfa s0, 0
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    andi sp, sp, -128
+; RV32-NEXT:    addi a0, sp, 384
+; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 128(sp)
+; RV32-NEXT:    sw a1, 132(sp)
+; RV32-NEXT:    addi a0, sp, 384
+; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 15
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 248(sp)
+; RV32-NEXT:    sw a1, 252(sp)
+; RV32-NEXT:    addi a0, sp, 384
+; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 14
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 240(sp)
+; RV32-NEXT:    sw a1, 244(sp)
+; RV32-NEXT:    addi a0, sp, 384
+; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 13
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 232(sp)
+; RV32-NEXT:    sw a1, 236(sp)
+; RV32-NEXT:    addi a0, sp, 384
+; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 12
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 224(sp)
+; RV32-NEXT:    sw a1, 228(sp)
+; RV32-NEXT:    addi a0, sp, 384
+; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 11
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 216(sp)
+; RV32-NEXT:    sw a1, 220(sp)
+; RV32-NEXT:    addi a0, sp, 384
+; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 10
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 208(sp)
+; RV32-NEXT:    sw a1, 212(sp)
+; RV32-NEXT:    addi a0, sp, 384
+; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 9
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 200(sp)
+; RV32-NEXT:    sw a1, 204(sp)
+; RV32-NEXT:    addi a0, sp, 384
+; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 8
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 192(sp)
+; RV32-NEXT:    sw a1, 196(sp)
+; RV32-NEXT:    addi a0, sp, 384
+; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 7
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 184(sp)
+; RV32-NEXT:    sw a1, 188(sp)
+; RV32-NEXT:    addi a0, sp, 384
+; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 6
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 176(sp)
+; RV32-NEXT:    sw a1, 180(sp)
+; RV32-NEXT:    addi a0, sp, 384
+; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 5
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 168(sp)
+; RV32-NEXT:    sw a1, 172(sp)
+; RV32-NEXT:    addi a0, sp, 384
+; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 4
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 160(sp)
+; RV32-NEXT:    sw a1, 164(sp)
+; RV32-NEXT:    addi a0, sp, 384
+; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 152(sp)
+; RV32-NEXT:    sw a1, 156(sp)
+; RV32-NEXT:    addi a0, sp, 384
+; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 2
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 144(sp)
+; RV32-NEXT:    sw a1, 148(sp)
+; RV32-NEXT:    addi a0, sp, 384
+; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa0, fa5
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 136(sp)
+; RV32-NEXT:    sw a1, 140(sp)
+; RV32-NEXT:    li a0, 32
+; RV32-NEXT:    addi a1, sp, 128
+; RV32-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; RV32-NEXT:    vle32.v v8, (a1)
+; RV32-NEXT:    addi sp, s0, -400
+; RV32-NEXT:    .cfi_def_cfa sp, 400
+; RV32-NEXT:    lw ra, 396(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 392(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
+; RV32-NEXT:    addi sp, sp, 400
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: llround_v16f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -256
+; RV64-NEXT:    .cfi_def_cfa_offset 256
+; RV64-NEXT:    sd ra, 248(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 240(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    .cfi_offset s0, -16
+; RV64-NEXT:    addi s0, sp, 256
+; RV64-NEXT:    .cfi_def_cfa s0, 0
+; RV64-NEXT:    andi sp, sp, -128
+; RV64-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    vslidedown.vi v10, v8, 15
+; RV64-NEXT:    vslidedown.vi v12, v8, 14
+; RV64-NEXT:    vslidedown.vi v14, v8, 13
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v10
+; RV64-NEXT:    vslidedown.vi v10, v8, 12
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v12
+; RV64-NEXT:    vslidedown.vi v12, v8, 11
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s a2, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v14
+; RV64-NEXT:    vslidedown.vi v14, v8, 10
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s a3, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v10
+; RV64-NEXT:    vslidedown.vi v10, v8, 9
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s a5, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v12
+; RV64-NEXT:    vslidedown.vi v12, v8, 8
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s a4, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v14
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 7
+; RV64-NEXT:    vslidedown.vi v11, v8, 6
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s a6, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v10
+; RV64-NEXT:    vslidedown.vi v10, v8, 5
+; RV64-NEXT:    vslidedown.vi v13, v8, 4
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s a7, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v12
+; RV64-NEXT:    vslidedown.vi v12, v8, 3
+; RV64-NEXT:    vslidedown.vi v14, v8, 2
+; RV64-NEXT:    vslidedown.vi v8, v8, 1
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s t0, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v9
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s t1, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v11
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s t2, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v10
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s t3, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v13
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    fcvt.l.s t4, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v12
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    sd a5, 96(sp)
+; RV64-NEXT:    sd a3, 104(sp)
+; RV64-NEXT:    sd a2, 112(sp)
+; RV64-NEXT:    sd a1, 120(sp)
+; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v14
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    sd t0, 64(sp)
+; RV64-NEXT:    sd a7, 72(sp)
+; RV64-NEXT:    sd a6, 80(sp)
+; RV64-NEXT:    sd a4, 88(sp)
+; RV64-NEXT:    fcvt.l.s a2, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-NEXT:    sd t4, 32(sp)
+; RV64-NEXT:    sd t3, 40(sp)
+; RV64-NEXT:    sd t2, 48(sp)
+; RV64-NEXT:    sd t1, 56(sp)
+; RV64-NEXT:    fcvt.l.s a3, fa5, rmm
+; RV64-NEXT:    sd a0, 0(sp)
+; RV64-NEXT:    sd a3, 8(sp)
+; RV64-NEXT:    sd a2, 16(sp)
+; RV64-NEXT:    sd a1, 24(sp)
+; RV64-NEXT:    mv a0, sp
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    addi sp, s0, -256
+; RV64-NEXT:    .cfi_def_cfa sp, 256
+; RV64-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
+; RV64-NEXT:    addi sp, sp, 256
+; RV64-NEXT:    .cfi_def_cfa_offset 0
+; RV64-NEXT:    ret
+  %a = call <16 x i64> @llvm.llround.v16i64.v16f16(<16 x half> %x)
+  ret <16 x i64> %a
+}
+declare <16 x i64> @llvm.llround.v16i64.v16f16(<16 x half>)
+
+define <1 x i64> @llround_v1i64_v1f32(<1 x float> %x) {
+; RV32-LABEL: llround_v1i64_v1f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 0(sp)
+; RV32-NEXT:    sw a1, 4(sp)
+; RV32-NEXT:    mv a0, sp
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v8, (a0), zero
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: llround_v1i64_v1f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT:    vmv.s.x v8, a0
+; RV64-NEXT:    ret
+  %a = call <1 x i64> @llvm.llround.v1i64.v1f32(<1 x float> %x)
+  ret <1 x i64> %a
+}
+declare <1 x i64> @llvm.llround.v1i64.v1f32(<1 x float>)
+
+define <2 x i64> @llround_v2i64_v2f32(<2 x float> %x) {
+; RV32-LABEL: llround_v2i64_v2f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -32
+; RV32-NEXT:    .cfi_def_cfa_offset 32
+; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 2 * vlenb
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vmv.v.x v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    csrr a2, vlenb
+; RV32-NEXT:    add a2, sp, a2
+; RV32-NEXT:    addi a2, a2, 16
+; RV32-NEXT:    vl1r.v v8, (a2) # vscale x 8-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 32
+; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: llround_v2i64_v2f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v9
+; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-NEXT:    ret
+  %a = call <2 x i64> @llvm.llround.v2i64.v2f32(<2 x float> %x)
+  ret <2 x i64> %a
+}
+declare <2 x i64> @llvm.llround.v2i64.v2f32(<2 x float>)
+
+define <3 x i64> @llround_v3i64_v3f32(<3 x float> %x) {
+; RV32-LABEL: llround_v3i64_v3f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -32
+; RV32-NEXT:    .cfi_def_cfa_offset 32
+; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a1, a0, 1
+; RV32-NEXT:    add a0, a1, a0
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 3 * vlenb
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vmv.v.x v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 2
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a1, a0, 1
+; RV32-NEXT:    add a0, a1, a0
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 32
+; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: llround_v3i64_v3f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    vslidedown.vi v10, v8, 2
+; RV64-NEXT:    vslidedown.vi v11, v8, 3
+; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v9
+; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v10
+; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV64-NEXT:    vfmv.f.s fa5, v11
+; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-NEXT:    ret
+  %a = call <3 x i64> @llvm.llround.v3i64.v3f32(<3 x float> %x)
+  ret <3 x i64> %a
+}
+declare <3 x i64> @llvm.llround.v3i64.v3f32(<3 x float>)
+
+define <4 x i64> @llround_v4i64_v4f32(<4 x float> %x) {
+; RV32-LABEL: llround_v4i64_v4f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -32
+; RV32-NEXT:    .cfi_def_cfa_offset 32
+; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a1, a0, 1
+; RV32-NEXT:    add a0, a1, a0
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 3 * vlenb
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vmv.v.x v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 2
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a1, a0, 1
+; RV32-NEXT:    add a0, a1, a0
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 32
+; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: llround_v4i64_v4f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    vslidedown.vi v10, v8, 2
+; RV64-NEXT:    vslidedown.vi v11, v8, 3
+; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v9
+; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v10
+; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV64-NEXT:    vfmv.f.s fa5, v11
+; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-NEXT:    ret
+  %a = call <4 x i64> @llvm.llround.v4i64.v4f32(<4 x float> %x)
+  ret <4 x i64> %a
+}
+declare <4 x i64> @llvm.llround.v4i64.v4f32(<4 x float>)
+
+define <8 x i64> @llround_v8i64_v8f32(<8 x float> %x) {
+; RV32-LABEL: llround_v8i64_v8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -208
+; RV32-NEXT:    .cfi_def_cfa_offset 208
+; RV32-NEXT:    sw ra, 204(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 200(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
+; RV32-NEXT:    addi s0, sp, 208
+; RV32-NEXT:    .cfi_def_cfa s0, 0
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    andi sp, sp, -64
+; RV32-NEXT:    addi a0, sp, 192
+; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 64(sp)
+; RV32-NEXT:    sw a1, 68(sp)
+; RV32-NEXT:    addi a0, sp, 192
+; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 7
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 120(sp)
+; RV32-NEXT:    sw a1, 124(sp)
+; RV32-NEXT:    addi a0, sp, 192
+; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 6
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 112(sp)
+; RV32-NEXT:    sw a1, 116(sp)
+; RV32-NEXT:    addi a0, sp, 192
+; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 5
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 104(sp)
+; RV32-NEXT:    sw a1, 108(sp)
+; RV32-NEXT:    addi a0, sp, 192
+; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 4
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 96(sp)
+; RV32-NEXT:    sw a1, 100(sp)
+; RV32-NEXT:    addi a0, sp, 192
+; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 88(sp)
+; RV32-NEXT:    sw a1, 92(sp)
+; RV32-NEXT:    addi a0, sp, 192
+; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 2
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 80(sp)
+; RV32-NEXT:    sw a1, 84(sp)
+; RV32-NEXT:    addi a0, sp, 192
+; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 72(sp)
+; RV32-NEXT:    sw a1, 76(sp)
+; RV32-NEXT:    addi a0, sp, 64
+; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT:    vle32.v v8, (a0)
+; RV32-NEXT:    addi sp, s0, -208
+; RV32-NEXT:    .cfi_def_cfa sp, 208
+; RV32-NEXT:    lw ra, 204(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 200(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
+; RV32-NEXT:    addi sp, sp, 208
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: llround_v8i64_v8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -128
+; RV64-NEXT:    .cfi_def_cfa_offset 128
+; RV64-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    .cfi_offset s0, -16
+; RV64-NEXT:    addi s0, sp, 128
+; RV64-NEXT:    .cfi_def_cfa s0, 0
+; RV64-NEXT:    andi sp, sp, -64
+; RV64-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    vslidedown.vi v10, v8, 7
+; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v10
+; RV64-NEXT:    vslidedown.vi v10, v8, 6
+; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v10
+; RV64-NEXT:    vslidedown.vi v10, v8, 5
+; RV64-NEXT:    fcvt.l.s a2, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v10
+; RV64-NEXT:    vslidedown.vi v10, v8, 4
+; RV64-NEXT:    fcvt.l.s a3, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v10
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 3
+; RV64-NEXT:    vslidedown.vi v10, v8, 2
+; RV64-NEXT:    vslidedown.vi v8, v8, 1
+; RV64-NEXT:    fcvt.l.s a4, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v9
+; RV64-NEXT:    fcvt.l.s a5, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v10
+; RV64-NEXT:    fcvt.l.s a6, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    sd a4, 32(sp)
+; RV64-NEXT:    sd a3, 40(sp)
+; RV64-NEXT:    sd a2, 48(sp)
+; RV64-NEXT:    sd a1, 56(sp)
+; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-NEXT:    sd a0, 0(sp)
+; RV64-NEXT:    sd a1, 8(sp)
+; RV64-NEXT:    sd a6, 16(sp)
+; RV64-NEXT:    sd a5, 24(sp)
+; RV64-NEXT:    mv a0, sp
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    addi sp, s0, -128
+; RV64-NEXT:    .cfi_def_cfa sp, 128
+; RV64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
+; RV64-NEXT:    addi sp, sp, 128
+; RV64-NEXT:    .cfi_def_cfa_offset 0
+; RV64-NEXT:    ret
+  %a = call <8 x i64> @llvm.llround.v8i64.v8f32(<8 x float> %x)
+  ret <8 x i64> %a
+}
+declare <8 x i64> @llvm.llround.v8i64.v8f32(<8 x float>)
+
+define <16 x i64> @llround_v16i64_v16f32(<16 x float> %x) {
+; RV32-LABEL: llround_v16i64_v16f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -400
+; RV32-NEXT:    .cfi_def_cfa_offset 400
+; RV32-NEXT:    sw ra, 396(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 392(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
+; RV32-NEXT:    addi s0, sp, 400
+; RV32-NEXT:    .cfi_def_cfa s0, 0
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 2
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    andi sp, sp, -128
+; RV32-NEXT:    addi a0, sp, 384
+; RV32-NEXT:    vs4r.v v8, (a0) # vscale x 32-byte Folded Spill
+; RV32-NEXT:    addi a0, sp, 64
+; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT:    vse32.v v8, (a0)
+; RV32-NEXT:    flw fa0, 124(sp)
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 248(sp)
+; RV32-NEXT:    sw a1, 252(sp)
+; RV32-NEXT:    flw fa0, 120(sp)
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 240(sp)
+; RV32-NEXT:    sw a1, 244(sp)
+; RV32-NEXT:    flw fa0, 116(sp)
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 232(sp)
+; RV32-NEXT:    sw a1, 236(sp)
+; RV32-NEXT:    flw fa0, 112(sp)
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 224(sp)
+; RV32-NEXT:    sw a1, 228(sp)
+; RV32-NEXT:    flw fa0, 108(sp)
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 216(sp)
+; RV32-NEXT:    sw a1, 220(sp)
+; RV32-NEXT:    flw fa0, 104(sp)
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 208(sp)
+; RV32-NEXT:    sw a1, 212(sp)
+; RV32-NEXT:    flw fa0, 100(sp)
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 200(sp)
+; RV32-NEXT:    sw a1, 204(sp)
+; RV32-NEXT:    flw fa0, 96(sp)
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 192(sp)
+; RV32-NEXT:    sw a1, 196(sp)
+; RV32-NEXT:    addi a0, sp, 384
+; RV32-NEXT:    vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 128(sp)
+; RV32-NEXT:    sw a1, 132(sp)
+; RV32-NEXT:    addi a0, sp, 384
+; RV32-NEXT:    vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 152(sp)
+; RV32-NEXT:    sw a1, 156(sp)
+; RV32-NEXT:    addi a0, sp, 384
+; RV32-NEXT:    vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 2
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 144(sp)
+; RV32-NEXT:    sw a1, 148(sp)
+; RV32-NEXT:    addi a0, sp, 384
+; RV32-NEXT:    vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 136(sp)
+; RV32-NEXT:    sw a1, 140(sp)
+; RV32-NEXT:    addi a0, sp, 384
+; RV32-NEXT:    vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 7
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 184(sp)
+; RV32-NEXT:    sw a1, 188(sp)
+; RV32-NEXT:    addi a0, sp, 384
+; RV32-NEXT:    vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 6
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 176(sp)
+; RV32-NEXT:    sw a1, 180(sp)
+; RV32-NEXT:    addi a0, sp, 384
+; RV32-NEXT:    vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 5
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 168(sp)
+; RV32-NEXT:    sw a1, 172(sp)
+; RV32-NEXT:    addi a0, sp, 384
+; RV32-NEXT:    vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 4
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llroundf
+; RV32-NEXT:    sw a0, 160(sp)
+; RV32-NEXT:    sw a1, 164(sp)
+; RV32-NEXT:    li a0, 32
+; RV32-NEXT:    addi a1, sp, 128
+; RV32-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; RV32-NEXT:    vle32.v v8, (a1)
+; RV32-NEXT:    addi sp, s0, -400
+; RV32-NEXT:    .cfi_def_cfa sp, 400
+; RV32-NEXT:    lw ra, 396(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 392(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
+; RV32-NEXT:    addi sp, sp, 400
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: llround_v16i64_v16f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -384
+; RV64-NEXT:    .cfi_def_cfa_offset 384
+; RV64-NEXT:    sd ra, 376(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 368(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    .cfi_offset s0, -16
+; RV64-NEXT:    addi s0, sp, 384
+; RV64-NEXT:    .cfi_def_cfa s0, 0
+; RV64-NEXT:    andi sp, sp, -128
+; RV64-NEXT:    addi a0, sp, 64
+; RV64-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV64-NEXT:    vse32.v v8, (a0)
+; RV64-NEXT:    flw fa5, 124(sp)
+; RV64-NEXT:    vfmv.f.s fa4, v8
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 3
+; RV64-NEXT:    vslidedown.vi v12, v8, 2
+; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-NEXT:    sd a0, 248(sp)
+; RV64-NEXT:    flw fa5, 120(sp)
+; RV64-NEXT:    vslidedown.vi v13, v8, 1
+; RV64-NEXT:    fcvt.l.s a0, fa4, rmm
+; RV64-NEXT:    vfmv.f.s fa4, v10
+; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-NEXT:    sd a1, 240(sp)
+; RV64-NEXT:    flw fa5, 116(sp)
+; RV64-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 7
+; RV64-NEXT:    fcvt.l.s a1, fa4, rmm
+; RV64-NEXT:    vfmv.f.s fa4, v12
+; RV64-NEXT:    fcvt.l.s a2, fa5, rmm
+; RV64-NEXT:    sd a2, 232(sp)
+; RV64-NEXT:    flw fa5, 112(sp)
+; RV64-NEXT:    fcvt.l.s a2, fa4, rmm
+; RV64-NEXT:    vfmv.f.s fa4, v13
+; RV64-NEXT:    vslidedown.vi v12, v8, 6
+; RV64-NEXT:    fcvt.l.s a3, fa5, rmm
+; RV64-NEXT:    sd a3, 224(sp)
+; RV64-NEXT:    flw fa5, 108(sp)
+; RV64-NEXT:    fcvt.l.s a3, fa4, rmm
+; RV64-NEXT:    vfmv.f.s fa4, v10
+; RV64-NEXT:    vslidedown.vi v10, v8, 5
+; RV64-NEXT:    fcvt.l.s a4, fa5, rmm
+; RV64-NEXT:    sd a4, 216(sp)
+; RV64-NEXT:    flw fa5, 104(sp)
+; RV64-NEXT:    fcvt.l.s a4, fa4, rmm
+; RV64-NEXT:    vfmv.f.s fa4, v12
+; RV64-NEXT:    fcvt.l.s a5, fa4, rmm
+; RV64-NEXT:    fcvt.l.s a6, fa5, rmm
+; RV64-NEXT:    sd a6, 208(sp)
+; RV64-NEXT:    flw fa5, 100(sp)
+; RV64-NEXT:    vfmv.f.s fa4, v10
+; RV64-NEXT:    fcvt.l.s a6, fa4, rmm
+; RV64-NEXT:    vslidedown.vi v8, v8, 4
+; RV64-NEXT:    fcvt.l.s a7, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    sd a7, 200(sp)
+; RV64-NEXT:    fcvt.l.s a7, fa5, rmm
+; RV64-NEXT:    flw fa5, 96(sp)
+; RV64-NEXT:    sd a0, 128(sp)
+; RV64-NEXT:    sd a3, 136(sp)
+; RV64-NEXT:    sd a2, 144(sp)
+; RV64-NEXT:    sd a1, 152(sp)
+; RV64-NEXT:    sd a7, 160(sp)
+; RV64-NEXT:    sd a6, 168(sp)
+; RV64-NEXT:    sd a5, 176(sp)
+; RV64-NEXT:    sd a4, 184(sp)
+; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-NEXT:    sd a0, 192(sp)
+; RV64-NEXT:    addi a0, sp, 128
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    addi sp, s0, -384
+; RV64-NEXT:    .cfi_def_cfa sp, 384
+; RV64-NEXT:    ld ra, 376(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 368(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
+; RV64-NEXT:    addi sp, sp, 384
+; RV64-NEXT:    .cfi_def_cfa_offset 0
+; RV64-NEXT:    ret
+  %a = call <16 x i64> @llvm.llround.v16i64.v16f32(<16 x float> %x)
+  ret <16 x i64> %a
+}
+declare <16 x i64> @llvm.llround.v16i64.v16f32(<16 x float>)
+
+define <1 x i64> @llround_v1i64_v1f64(<1 x double> %x) {
+; RV32-LABEL: llround_v1i64_v1f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llround
+; RV32-NEXT:    sw a0, 0(sp)
+; RV32-NEXT:    sw a1, 4(sp)
+; RV32-NEXT:    mv a0, sp
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v8, (a0), zero
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: llround_v1i64_v1f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    fcvt.l.d a0, fa5, rmm
+; RV64-NEXT:    vmv.s.x v8, a0
+; RV64-NEXT:    ret
+  %a = call <1 x i64> @llvm.llround.v1i64.v1f64(<1 x double> %x)
+  ret <1 x i64> %a
+}
+declare <1 x i64> @llvm.llround.v1i64.v1f64(<1 x double>)
+
+define <2 x i64> @llround_v2i64_v2f64(<2 x double> %x) {
+; RV32-LABEL: llround_v2i64_v2f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -32
+; RV32-NEXT:    .cfi_def_cfa_offset 32
+; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 2 * vlenb
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llround
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vmv.v.x v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llround
+; RV32-NEXT:    csrr a2, vlenb
+; RV32-NEXT:    add a2, sp, a2
+; RV32-NEXT:    addi a2, a2, 16
+; RV32-NEXT:    vl1r.v v8, (a2) # vscale x 8-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 32
+; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: llround_v2i64_v2f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    fcvt.l.d a0, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v9
+; RV64-NEXT:    fcvt.l.d a1, fa5, rmm
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-NEXT:    ret
+  %a = call <2 x i64> @llvm.llround.v2i64.v2f64(<2 x double> %x)
+  ret <2 x i64> %a
+}
+declare <2 x i64> @llvm.llround.v2i64.v2f64(<2 x double>)
+
+define <4 x i64> @llround_v4i64_v4f64(<4 x double> %x) {
+; RV32-LABEL: llround_v4i64_v4f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -32
+; RV32-NEXT:    .cfi_def_cfa_offset 32
+; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 2
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 4 * vlenb
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llround
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vmv.v.x v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llround
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 2
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llround
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llround
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 2
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa sp, 32
+; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: llround_v4i64_v4f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v12, v8, 1
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 2
+; RV64-NEXT:    vslidedown.vi v8, v8, 3
+; RV64-NEXT:    fcvt.l.d a0, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v12
+; RV64-NEXT:    fcvt.l.d a1, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v10
+; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT:    vmv.v.x v10, a0
+; RV64-NEXT:    fcvt.l.d a0, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    vslide1down.vx v8, v10, a1
+; RV64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-NEXT:    fcvt.l.d a0, fa5, rmm
+; RV64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-NEXT:    ret
+  %a = call <4 x i64> @llvm.llround.v4i64.v4f64(<4 x double> %x)
+  ret <4 x i64> %a
+}
+declare <4 x i64> @llvm.llround.v4i64.v4f64(<4 x double>)
+
+define <8 x i64> @llround_v8i64_v8f64(<8 x double> %x) {
+; RV32-LABEL: llround_v8i64_v8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -272
+; RV32-NEXT:    .cfi_def_cfa_offset 272
+; RV32-NEXT:    sw ra, 268(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 264(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
+; RV32-NEXT:    addi s0, sp, 272
+; RV32-NEXT:    .cfi_def_cfa s0, 0
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 2
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    andi sp, sp, -64
+; RV32-NEXT:    addi a0, sp, 256
+; RV32-NEXT:    vs4r.v v8, (a0) # vscale x 32-byte Folded Spill
+; RV32-NEXT:    addi a0, sp, 64
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT:    vse64.v v8, (a0)
+; RV32-NEXT:    fld fa0, 120(sp)
+; RV32-NEXT:    call llround
+; RV32-NEXT:    sw a0, 184(sp)
+; RV32-NEXT:    sw a1, 188(sp)
+; RV32-NEXT:    fld fa0, 112(sp)
+; RV32-NEXT:    call llround
+; RV32-NEXT:    sw a0, 176(sp)
+; RV32-NEXT:    sw a1, 180(sp)
+; RV32-NEXT:    fld fa0, 104(sp)
+; RV32-NEXT:    call llround
+; RV32-NEXT:    sw a0, 168(sp)
+; RV32-NEXT:    sw a1, 172(sp)
+; RV32-NEXT:    fld fa0, 96(sp)
+; RV32-NEXT:    call llround
+; RV32-NEXT:    sw a0, 160(sp)
+; RV32-NEXT:    sw a1, 164(sp)
+; RV32-NEXT:    addi a0, sp, 256
+; RV32-NEXT:    vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llround
+; RV32-NEXT:    sw a0, 128(sp)
+; RV32-NEXT:    sw a1, 132(sp)
+; RV32-NEXT:    addi a0, sp, 256
+; RV32-NEXT:    vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llround
+; RV32-NEXT:    sw a0, 136(sp)
+; RV32-NEXT:    sw a1, 140(sp)
+; RV32-NEXT:    addi a0, sp, 256
+; RV32-NEXT:    vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llround
+; RV32-NEXT:    sw a0, 152(sp)
+; RV32-NEXT:    sw a1, 156(sp)
+; RV32-NEXT:    addi a0, sp, 256
+; RV32-NEXT:    vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 2
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:    call llround
+; RV32-NEXT:    sw a0, 144(sp)
+; RV32-NEXT:    sw a1, 148(sp)
+; RV32-NEXT:    addi a0, sp, 128
+; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT:    vle32.v v8, (a0)
+; RV32-NEXT:    addi sp, s0, -272
+; RV32-NEXT:    .cfi_def_cfa sp, 272
+; RV32-NEXT:    lw ra, 268(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 264(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
+; RV32-NEXT:    addi sp, sp, 272
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: llround_v8i64_v8f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -192
+; RV64-NEXT:    .cfi_def_cfa_offset 192
+; RV64-NEXT:    sd ra, 184(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 176(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    .cfi_offset s0, -16
+; RV64-NEXT:    addi s0, sp, 192
+; RV64-NEXT:    .cfi_def_cfa s0, 0
+; RV64-NEXT:    andi sp, sp, -64
+; RV64-NEXT:    mv a0, sp
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vse64.v v8, (a0)
+; RV64-NEXT:    fld fa5, 56(sp)
+; RV64-NEXT:    vfmv.f.s fa4, v8
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 1
+; RV64-NEXT:    fcvt.l.d a0, fa4, rmm
+; RV64-NEXT:    fcvt.l.d a1, fa5, rmm
+; RV64-NEXT:    sd a1, 120(sp)
+; RV64-NEXT:    fld fa5, 48(sp)
+; RV64-NEXT:    vfmv.f.s fa4, v10
+; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 3
+; RV64-NEXT:    fcvt.l.d a1, fa4, rmm
+; RV64-NEXT:    fcvt.l.d a2, fa5, rmm
+; RV64-NEXT:    sd a2, 112(sp)
+; RV64-NEXT:    fld fa5, 40(sp)
+; RV64-NEXT:    vfmv.f.s fa4, v10
+; RV64-NEXT:    fcvt.l.d a2, fa4, rmm
+; RV64-NEXT:    vslidedown.vi v8, v8, 2
+; RV64-NEXT:    fcvt.l.d a3, fa5, rmm
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    sd a3, 104(sp)
+; RV64-NEXT:    fcvt.l.d a3, fa5, rmm
+; RV64-NEXT:    fld fa5, 32(sp)
+; RV64-NEXT:    sd a0, 64(sp)
+; RV64-NEXT:    sd a1, 72(sp)
+; RV64-NEXT:    sd a3, 80(sp)
+; RV64-NEXT:    sd a2, 88(sp)
+; RV64-NEXT:    fcvt.l.d a0, fa5, rmm
+; RV64-NEXT:    sd a0, 96(sp)
+; RV64-NEXT:    addi a0, sp, 64
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    addi sp, s0, -192
+; RV64-NEXT:    .cfi_def_cfa sp, 192
+; RV64-NEXT:    ld ra, 184(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 176(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
+; RV64-NEXT:    addi sp, sp, 192
+; RV64-NEXT:    .cfi_def_cfa_offset 0
+; RV64-NEXT:    ret
+  %a = call <8 x i64> @llvm.llround.v8i64.v8f64(<8 x double> %x)
+  ret <8 x i64> %a
+}
+declare <8 x i64> @llvm.llround.v8i64.v8f64(<8 x double>)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll
new file mode 100644
index 0000000000000..6bda94439ed5c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll
@@ -0,0 +1,1682 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+f,+d,+zvfh \
+; RUN:     -target-abi=ilp32d -verify-machineinstrs | FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d,+zvfh \
+; RUN:     -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64-i32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d,+zvfh \
+; RUN:     -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64-i64
+
+define <1 x iXLen> @lround_v1f16(<1 x half> %x) {
+; RV32-LABEL: lround_v1f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT:    vmv.s.x v8, a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_v1f16:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-i32-NEXT:    vfmv.f.s fa5, v8
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i32-NEXT:    vmv.s.x v8, a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_v1f16:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-i64-NEXT:    vfmv.f.s fa5, v8
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; RV64-i64-NEXT:    vmv.s.x v8, a0
+; RV64-i64-NEXT:    ret
+  %a = call <1 x iXLen> @llvm.lround.v1iXLen.v1f16(<1 x half> %x)
+  ret <1 x iXLen> %a
+}
+declare <1 x iXLen> @llvm.lround.v1iXLen.v1f16(<1 x half>)
+
+define <2 x iXLen> @lround_v2f16(<2 x half> %x) {
+; RV32-LABEL: lround_v2f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 1
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v9
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT:    vmv.v.x v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_v2f16:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-i32-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-i32-NEXT:    vfmv.f.s fa5, v8
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v9
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV64-i32-NEXT:    vmv.v.x v8, a0
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_v2f16:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-i64-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-i64-NEXT:    vfmv.f.s fa5, v8
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v9
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64-i64-NEXT:    vmv.v.x v8, a0
+; RV64-i64-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-i64-NEXT:    ret
+  %a = call <2 x iXLen> @llvm.lround.v2iXLen.v2f16(<2 x half> %x)
+  ret <2 x iXLen> %a
+}
+declare <2 x iXLen> @llvm.lround.v2iXLen.v2f16(<2 x half>)
+
+define <3 x iXLen> @lround_v3f16(<3 x half> %x) {
+; RV32-LABEL: lround_v3f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 1
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    vslidedown.vi v10, v8, 2
+; RV32-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v9
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v10
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vmv.v.x v9, a0
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV32-NEXT:    vslide1down.vx v8, v9, a1
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_v3f16:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV64-i32-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-i32-NEXT:    vfmv.f.s fa5, v8
+; RV64-i32-NEXT:    vslidedown.vi v10, v8, 2
+; RV64-i32-NEXT:    vslidedown.vi v8, v8, 3
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v9
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v10
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64-i32-NEXT:    vmv.v.x v9, a0
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; RV64-i32-NEXT:    vfmv.f.s fa5, v8
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i32-NEXT:    vslide1down.vx v8, v9, a1
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_v3f16:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV64-i64-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-i64-NEXT:    vfmv.f.s fa5, v8
+; RV64-i64-NEXT:    vslidedown.vi v10, v8, 2
+; RV64-i64-NEXT:    vslidedown.vi v11, v8, 3
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v9
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v10
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-i64-NEXT:    vmv.v.x v8, a0
+; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; RV64-i64-NEXT:    vfmv.f.s fa5, v11
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64-i64-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-i64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i64-NEXT:    ret
+  %a = call <3 x iXLen> @llvm.lround.v3iXLen.v3f16(<3 x half> %x)
+  ret <3 x iXLen> %a
+}
+declare <3 x iXLen> @llvm.lround.v3iXLen.v3f16(<3 x half>)
+
+define <4 x iXLen> @lround_v4f16(<4 x half> %x) {
+; RV32-LABEL: lround_v4f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 1
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    vslidedown.vi v10, v8, 2
+; RV32-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v9
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v10
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vmv.v.x v9, a0
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV32-NEXT:    vslide1down.vx v8, v9, a1
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_v4f16:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV64-i32-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-i32-NEXT:    vfmv.f.s fa5, v8
+; RV64-i32-NEXT:    vslidedown.vi v10, v8, 2
+; RV64-i32-NEXT:    vslidedown.vi v8, v8, 3
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v9
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v10
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64-i32-NEXT:    vmv.v.x v9, a0
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; RV64-i32-NEXT:    vfmv.f.s fa5, v8
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i32-NEXT:    vslide1down.vx v8, v9, a1
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_v4f16:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV64-i64-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-i64-NEXT:    vfmv.f.s fa5, v8
+; RV64-i64-NEXT:    vslidedown.vi v10, v8, 2
+; RV64-i64-NEXT:    vslidedown.vi v11, v8, 3
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v9
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v10
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-i64-NEXT:    vmv.v.x v8, a0
+; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; RV64-i64-NEXT:    vfmv.f.s fa5, v11
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64-i64-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-i64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i64-NEXT:    ret
+  %a = call <4 x iXLen> @llvm.lround.v4iXLen.v4f16(<4 x half> %x)
+  ret <4 x iXLen> %a
+}
+declare <4 x iXLen> @llvm.lround.v4iXLen.v4f16(<4 x half>)
+
+define <8 x iXLen> @lround_v8f16(<8 x half> %x) {
+; RV32-LABEL: lround_v8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 1
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    vslidedown.vi v10, v8, 2
+; RV32-NEXT:    vslidedown.vi v12, v8, 3
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v9
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v10
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vmv.v.x v10, a0
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT:    vfmv.f.s fa5, v12
+; RV32-NEXT:    vslidedown.vi v9, v8, 4
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v10, v10, a1
+; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT:    vfmv.f.s fa5, v9
+; RV32-NEXT:    vslidedown.vi v9, v8, 5
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v10, v10, a0
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT:    vfmv.f.s fa5, v9
+; RV32-NEXT:    vslidedown.vi v12, v8, 6
+; RV32-NEXT:    vslidedown.vi v13, v8, 7
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v8, v10, a1
+; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT:    vfmv.f.s fa5, v12
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT:    vfmv.f.s fa5, v13
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_v8f16:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-i32-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-i32-NEXT:    vfmv.f.s fa5, v8
+; RV64-i32-NEXT:    vslidedown.vi v10, v8, 2
+; RV64-i32-NEXT:    vslidedown.vi v12, v8, 3
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v9
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v10
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV64-i32-NEXT:    vmv.v.x v10, a0
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV64-i32-NEXT:    vfmv.f.s fa5, v12
+; RV64-i32-NEXT:    vslidedown.vi v9, v8, 4
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i32-NEXT:    vslide1down.vx v10, v10, a1
+; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV64-i32-NEXT:    vfmv.f.s fa5, v9
+; RV64-i32-NEXT:    vslidedown.vi v9, v8, 5
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i32-NEXT:    vslide1down.vx v10, v10, a0
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV64-i32-NEXT:    vfmv.f.s fa5, v9
+; RV64-i32-NEXT:    vslidedown.vi v12, v8, 6
+; RV64-i32-NEXT:    vslidedown.vi v13, v8, 7
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i32-NEXT:    vslide1down.vx v8, v10, a1
+; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV64-i32-NEXT:    vfmv.f.s fa5, v12
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV64-i32-NEXT:    vfmv.f.s fa5, v13
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_v8f16:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    addi sp, sp, -128
+; RV64-i64-NEXT:    .cfi_def_cfa_offset 128
+; RV64-i64-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT:    .cfi_offset ra, -8
+; RV64-i64-NEXT:    .cfi_offset s0, -16
+; RV64-i64-NEXT:    addi s0, sp, 128
+; RV64-i64-NEXT:    .cfi_def_cfa s0, 0
+; RV64-i64-NEXT:    andi sp, sp, -64
+; RV64-i64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-i64-NEXT:    vfmv.f.s fa5, v8
+; RV64-i64-NEXT:    vslidedown.vi v9, v8, 7
+; RV64-i64-NEXT:    vslidedown.vi v10, v8, 6
+; RV64-i64-NEXT:    vslidedown.vi v11, v8, 5
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v9
+; RV64-i64-NEXT:    vslidedown.vi v9, v8, 4
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v10
+; RV64-i64-NEXT:    vslidedown.vi v10, v8, 3
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s a2, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v11
+; RV64-i64-NEXT:    vslidedown.vi v11, v8, 2
+; RV64-i64-NEXT:    vslidedown.vi v8, v8, 1
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s a3, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v9
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s a4, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v10
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s a5, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v11
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s a6, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v8
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    sd a4, 32(sp)
+; RV64-i64-NEXT:    sd a3, 40(sp)
+; RV64-i64-NEXT:    sd a2, 48(sp)
+; RV64-i64-NEXT:    sd a1, 56(sp)
+; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT:    sd a0, 0(sp)
+; RV64-i64-NEXT:    sd a1, 8(sp)
+; RV64-i64-NEXT:    sd a6, 16(sp)
+; RV64-i64-NEXT:    sd a5, 24(sp)
+; RV64-i64-NEXT:    mv a0, sp
+; RV64-i64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-i64-NEXT:    vle64.v v8, (a0)
+; RV64-i64-NEXT:    addi sp, s0, -128
+; RV64-i64-NEXT:    .cfi_def_cfa sp, 128
+; RV64-i64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT:    .cfi_restore ra
+; RV64-i64-NEXT:    .cfi_restore s0
+; RV64-i64-NEXT:    addi sp, sp, 128
+; RV64-i64-NEXT:    .cfi_def_cfa_offset 0
+; RV64-i64-NEXT:    ret
+  %a = call <8 x iXLen> @llvm.lround.v8iXLen.v8f16(<8 x half> %x)
+  ret <8 x iXLen> %a
+}
+declare <8 x iXLen> @llvm.lround.v8iXLen.v8f16(<8 x half>)
+
+define <16 x iXLen> @lround_v16f16(<16 x half> %x) {
+; RV32-LABEL: lround_v16f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -128
+; RV32-NEXT:    .cfi_def_cfa_offset 128
+; RV32-NEXT:    sw ra, 124(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 120(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
+; RV32-NEXT:    addi s0, sp, 128
+; RV32-NEXT:    .cfi_def_cfa s0, 0
+; RV32-NEXT:    andi sp, sp, -64
+; RV32-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    vslidedown.vi v10, v8, 15
+; RV32-NEXT:    vslidedown.vi v12, v8, 14
+; RV32-NEXT:    vslidedown.vi v14, v8, 13
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v10
+; RV32-NEXT:    vslidedown.vi v10, v8, 12
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v12
+; RV32-NEXT:    vslidedown.vi v12, v8, 11
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    fcvt.w.s a2, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v14
+; RV32-NEXT:    vslidedown.vi v14, v8, 10
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    fcvt.w.s a3, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v10
+; RV32-NEXT:    vslidedown.vi v10, v8, 9
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    fcvt.w.s a5, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v12
+; RV32-NEXT:    vslidedown.vi v12, v8, 8
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    fcvt.w.s a4, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v14
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 7
+; RV32-NEXT:    vslidedown.vi v11, v8, 6
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    fcvt.w.s a6, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v10
+; RV32-NEXT:    vslidedown.vi v10, v8, 5
+; RV32-NEXT:    vslidedown.vi v13, v8, 4
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    fcvt.w.s a7, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v12
+; RV32-NEXT:    vslidedown.vi v12, v8, 3
+; RV32-NEXT:    vslidedown.vi v14, v8, 2
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    fcvt.w.s t0, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v9
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    fcvt.w.s t1, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v11
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    fcvt.w.s t2, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v10
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    fcvt.w.s t3, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v13
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    fcvt.w.s t4, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v12
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    sw a5, 48(sp)
+; RV32-NEXT:    sw a3, 52(sp)
+; RV32-NEXT:    sw a2, 56(sp)
+; RV32-NEXT:    sw a1, 60(sp)
+; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v14
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    sw t0, 32(sp)
+; RV32-NEXT:    sw a7, 36(sp)
+; RV32-NEXT:    sw a6, 40(sp)
+; RV32-NEXT:    sw a4, 44(sp)
+; RV32-NEXT:    fcvt.w.s a2, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    sw t4, 16(sp)
+; RV32-NEXT:    sw t3, 20(sp)
+; RV32-NEXT:    sw t2, 24(sp)
+; RV32-NEXT:    sw t1, 28(sp)
+; RV32-NEXT:    fcvt.w.s a3, fa5, rmm
+; RV32-NEXT:    sw a0, 0(sp)
+; RV32-NEXT:    sw a3, 4(sp)
+; RV32-NEXT:    sw a2, 8(sp)
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    mv a0, sp
+; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT:    vle32.v v8, (a0)
+; RV32-NEXT:    addi sp, s0, -128
+; RV32-NEXT:    .cfi_def_cfa sp, 128
+; RV32-NEXT:    lw ra, 124(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 120(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
+; RV32-NEXT:    addi sp, sp, 128
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_v16f16:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    addi sp, sp, -128
+; RV64-i32-NEXT:    .cfi_def_cfa_offset 128
+; RV64-i32-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
+; RV64-i32-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
+; RV64-i32-NEXT:    .cfi_offset ra, -8
+; RV64-i32-NEXT:    .cfi_offset s0, -16
+; RV64-i32-NEXT:    addi s0, sp, 128
+; RV64-i32-NEXT:    .cfi_def_cfa s0, 0
+; RV64-i32-NEXT:    andi sp, sp, -64
+; RV64-i32-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
+; RV64-i32-NEXT:    vfmv.f.s fa5, v8
+; RV64-i32-NEXT:    vslidedown.vi v10, v8, 15
+; RV64-i32-NEXT:    vslidedown.vi v12, v8, 14
+; RV64-i32-NEXT:    vslidedown.vi v14, v8, 13
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v10
+; RV64-i32-NEXT:    vslidedown.vi v10, v8, 12
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v12
+; RV64-i32-NEXT:    vslidedown.vi v12, v8, 11
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    fcvt.w.s a2, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v14
+; RV64-i32-NEXT:    vslidedown.vi v14, v8, 10
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    fcvt.w.s a3, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v10
+; RV64-i32-NEXT:    vslidedown.vi v10, v8, 9
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    fcvt.w.s a5, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v12
+; RV64-i32-NEXT:    vslidedown.vi v12, v8, 8
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    fcvt.w.s a4, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v14
+; RV64-i32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-i32-NEXT:    vslidedown.vi v9, v8, 7
+; RV64-i32-NEXT:    vslidedown.vi v11, v8, 6
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    fcvt.w.s a6, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v10
+; RV64-i32-NEXT:    vslidedown.vi v10, v8, 5
+; RV64-i32-NEXT:    vslidedown.vi v13, v8, 4
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    fcvt.w.s a7, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v12
+; RV64-i32-NEXT:    vslidedown.vi v12, v8, 3
+; RV64-i32-NEXT:    vslidedown.vi v14, v8, 2
+; RV64-i32-NEXT:    vslidedown.vi v8, v8, 1
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    fcvt.w.s t0, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v9
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    fcvt.w.s t1, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v11
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    fcvt.w.s t2, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v10
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    fcvt.w.s t3, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v13
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    fcvt.w.s t4, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v12
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    sw a5, 48(sp)
+; RV64-i32-NEXT:    sw a3, 52(sp)
+; RV64-i32-NEXT:    sw a2, 56(sp)
+; RV64-i32-NEXT:    sw a1, 60(sp)
+; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v14
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    sw t0, 32(sp)
+; RV64-i32-NEXT:    sw a7, 36(sp)
+; RV64-i32-NEXT:    sw a6, 40(sp)
+; RV64-i32-NEXT:    sw a4, 44(sp)
+; RV64-i32-NEXT:    fcvt.w.s a2, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v8
+; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    sw t4, 16(sp)
+; RV64-i32-NEXT:    sw t3, 20(sp)
+; RV64-i32-NEXT:    sw t2, 24(sp)
+; RV64-i32-NEXT:    sw t1, 28(sp)
+; RV64-i32-NEXT:    fcvt.w.s a3, fa5, rmm
+; RV64-i32-NEXT:    sw a0, 0(sp)
+; RV64-i32-NEXT:    sw a3, 4(sp)
+; RV64-i32-NEXT:    sw a2, 8(sp)
+; RV64-i32-NEXT:    sw a1, 12(sp)
+; RV64-i32-NEXT:    mv a0, sp
+; RV64-i32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV64-i32-NEXT:    vle32.v v8, (a0)
+; RV64-i32-NEXT:    addi sp, s0, -128
+; RV64-i32-NEXT:    .cfi_def_cfa sp, 128
+; RV64-i32-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
+; RV64-i32-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
+; RV64-i32-NEXT:    .cfi_restore ra
+; RV64-i32-NEXT:    .cfi_restore s0
+; RV64-i32-NEXT:    addi sp, sp, 128
+; RV64-i32-NEXT:    .cfi_def_cfa_offset 0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_v16f16:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    addi sp, sp, -256
+; RV64-i64-NEXT:    .cfi_def_cfa_offset 256
+; RV64-i64-NEXT:    sd ra, 248(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT:    sd s0, 240(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT:    .cfi_offset ra, -8
+; RV64-i64-NEXT:    .cfi_offset s0, -16
+; RV64-i64-NEXT:    addi s0, sp, 256
+; RV64-i64-NEXT:    .cfi_def_cfa s0, 0
+; RV64-i64-NEXT:    andi sp, sp, -128
+; RV64-i64-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
+; RV64-i64-NEXT:    vfmv.f.s fa5, v8
+; RV64-i64-NEXT:    vslidedown.vi v10, v8, 15
+; RV64-i64-NEXT:    vslidedown.vi v12, v8, 14
+; RV64-i64-NEXT:    vslidedown.vi v14, v8, 13
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v10
+; RV64-i64-NEXT:    vslidedown.vi v10, v8, 12
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v12
+; RV64-i64-NEXT:    vslidedown.vi v12, v8, 11
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s a2, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v14
+; RV64-i64-NEXT:    vslidedown.vi v14, v8, 10
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s a3, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v10
+; RV64-i64-NEXT:    vslidedown.vi v10, v8, 9
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s a5, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v12
+; RV64-i64-NEXT:    vslidedown.vi v12, v8, 8
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s a4, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v14
+; RV64-i64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-i64-NEXT:    vslidedown.vi v9, v8, 7
+; RV64-i64-NEXT:    vslidedown.vi v11, v8, 6
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s a6, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v10
+; RV64-i64-NEXT:    vslidedown.vi v10, v8, 5
+; RV64-i64-NEXT:    vslidedown.vi v13, v8, 4
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s a7, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v12
+; RV64-i64-NEXT:    vslidedown.vi v12, v8, 3
+; RV64-i64-NEXT:    vslidedown.vi v14, v8, 2
+; RV64-i64-NEXT:    vslidedown.vi v8, v8, 1
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s t0, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v9
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s t1, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v11
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s t2, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v10
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s t3, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v13
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    fcvt.l.s t4, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v12
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    sd a5, 96(sp)
+; RV64-i64-NEXT:    sd a3, 104(sp)
+; RV64-i64-NEXT:    sd a2, 112(sp)
+; RV64-i64-NEXT:    sd a1, 120(sp)
+; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v14
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    sd t0, 64(sp)
+; RV64-i64-NEXT:    sd a7, 72(sp)
+; RV64-i64-NEXT:    sd a6, 80(sp)
+; RV64-i64-NEXT:    sd a4, 88(sp)
+; RV64-i64-NEXT:    fcvt.l.s a2, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v8
+; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i64-NEXT:    sd t4, 32(sp)
+; RV64-i64-NEXT:    sd t3, 40(sp)
+; RV64-i64-NEXT:    sd t2, 48(sp)
+; RV64-i64-NEXT:    sd t1, 56(sp)
+; RV64-i64-NEXT:    fcvt.l.s a3, fa5, rmm
+; RV64-i64-NEXT:    sd a0, 0(sp)
+; RV64-i64-NEXT:    sd a3, 8(sp)
+; RV64-i64-NEXT:    sd a2, 16(sp)
+; RV64-i64-NEXT:    sd a1, 24(sp)
+; RV64-i64-NEXT:    mv a0, sp
+; RV64-i64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64-i64-NEXT:    vle64.v v8, (a0)
+; RV64-i64-NEXT:    addi sp, s0, -256
+; RV64-i64-NEXT:    .cfi_def_cfa sp, 256
+; RV64-i64-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT:    .cfi_restore ra
+; RV64-i64-NEXT:    .cfi_restore s0
+; RV64-i64-NEXT:    addi sp, sp, 256
+; RV64-i64-NEXT:    .cfi_def_cfa_offset 0
+; RV64-i64-NEXT:    ret
+  %a = call <16 x iXLen> @llvm.lround.v16iXLen.v16f16(<16 x half> %x)
+  ret <16 x iXLen> %a
+}
+declare <16 x iXLen> @llvm.lround.v16iXLen.v16f16(<16 x half>)
+
+define <1 x iXLen> @lround_v1f32(<1 x float> %x) {
+; RV32-LABEL: lround_v1f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vmv.s.x v8, a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_v1f32:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i32-NEXT:    vfmv.f.s fa5, v8
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vmv.s.x v8, a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_v1f32:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i64-NEXT:    vfmv.f.s fa5, v8
+; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64-i64-NEXT:    vmv.s.x v8, a0
+; RV64-i64-NEXT:    ret
+  %a = call <1 x iXLen> @llvm.lround.v1iXLen.v1f32(<1 x float> %x)
+  ret <1 x iXLen> %a
+}
+declare <1 x iXLen> @llvm.lround.v1iXLen.v1f32(<1 x float>)
+
+define <2 x iXLen> @lround_v2f32(<2 x float> %x) {
+; RV32-LABEL: lround_v2f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 1
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v9
+; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV32-NEXT:    vmv.v.x v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_v2f32:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV64-i32-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-i32-NEXT:    vfmv.f.s fa5, v8
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v9
+; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT:    vmv.v.x v8, a0
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_v2f32:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-i64-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-i64-NEXT:    vfmv.f.s fa5, v8
+; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v9
+; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64-i64-NEXT:    vmv.v.x v8, a0
+; RV64-i64-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-i64-NEXT:    ret
+  %a = call <2 x iXLen> @llvm.lround.v2iXLen.v2f32(<2 x float> %x)
+  ret <2 x iXLen> %a
+}
+declare <2 x iXLen> @llvm.lround.v2iXLen.v2f32(<2 x float>)
+
+define <3 x iXLen> @lround_v3f32(<3 x float> %x) {
+; RV32-LABEL: lround_v3f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 1
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    vslidedown.vi v10, v8, 2
+; RV32-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v9
+; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v10
+; RV32-NEXT:    vmv.v.x v9, a0
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    vslide1down.vx v8, v9, a1
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_v3f32:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64-i32-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-i32-NEXT:    vfmv.f.s fa5, v8
+; RV64-i32-NEXT:    vslidedown.vi v10, v8, 2
+; RV64-i32-NEXT:    vslidedown.vi v8, v8, 3
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v9
+; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v10
+; RV64-i32-NEXT:    vmv.v.x v9, a0
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v8
+; RV64-i32-NEXT:    vslide1down.vx v8, v9, a1
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_v3f32:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i64-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-i64-NEXT:    vfmv.f.s fa5, v8
+; RV64-i64-NEXT:    vslidedown.vi v10, v8, 2
+; RV64-i64-NEXT:    vslidedown.vi v11, v8, 3
+; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v9
+; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v10
+; RV64-i64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-i64-NEXT:    vmv.v.x v8, a0
+; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i64-NEXT:    vfmv.f.s fa5, v11
+; RV64-i64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64-i64-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-i64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i64-NEXT:    ret
+  %a = call <3 x iXLen> @llvm.lround.v3iXLen.v3f32(<3 x float> %x)
+  ret <3 x iXLen> %a
+}
+declare <3 x iXLen> @llvm.lround.v3iXLen.v3f32(<3 x float>)
+
+define <4 x iXLen> @lround_v4f32(<4 x float> %x) {
+; RV32-LABEL: lround_v4f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 1
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    vslidedown.vi v10, v8, 2
+; RV32-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v9
+; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v10
+; RV32-NEXT:    vmv.v.x v9, a0
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    vslide1down.vx v8, v9, a1
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_v4f32:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64-i32-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-i32-NEXT:    vfmv.f.s fa5, v8
+; RV64-i32-NEXT:    vslidedown.vi v10, v8, 2
+; RV64-i32-NEXT:    vslidedown.vi v8, v8, 3
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v9
+; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v10
+; RV64-i32-NEXT:    vmv.v.x v9, a0
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v8
+; RV64-i32-NEXT:    vslide1down.vx v8, v9, a1
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_v4f32:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i64-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-i64-NEXT:    vfmv.f.s fa5, v8
+; RV64-i64-NEXT:    vslidedown.vi v10, v8, 2
+; RV64-i64-NEXT:    vslidedown.vi v11, v8, 3
+; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v9
+; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v10
+; RV64-i64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-i64-NEXT:    vmv.v.x v8, a0
+; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i64-NEXT:    vfmv.f.s fa5, v11
+; RV64-i64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64-i64-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-i64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i64-NEXT:    ret
+  %a = call <4 x iXLen> @llvm.lround.v4iXLen.v4f32(<4 x float> %x)
+  ret <4 x iXLen> %a
+}
+declare <4 x iXLen> @llvm.lround.v4iXLen.v4f32(<4 x float>)
+
+define <8 x iXLen> @lround_v8f32(<8 x float> %x) {
+; RV32-LABEL: lround_v8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 1
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    vslidedown.vi v11, v8, 2
+; RV32-NEXT:    vslidedown.vi v12, v8, 3
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v10
+; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v11
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vmv.v.x v10, a0
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v12
+; RV32-NEXT:    vslidedown.vi v12, v8, 4
+; RV32-NEXT:    vslide1down.vx v10, v10, a1
+; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v12
+; RV32-NEXT:    vslidedown.vi v12, v8, 5
+; RV32-NEXT:    vslide1down.vx v10, v10, a0
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v12
+; RV32-NEXT:    vslidedown.vi v12, v8, 6
+; RV32-NEXT:    vslidedown.vi v8, v8, 7
+; RV32-NEXT:    vslide1down.vx v10, v10, a1
+; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v12
+; RV32-NEXT:    vslide1down.vx v10, v10, a0
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    vslide1down.vx v8, v10, a1
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_v8f32:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i32-NEXT:    vslidedown.vi v10, v8, 1
+; RV64-i32-NEXT:    vfmv.f.s fa5, v8
+; RV64-i32-NEXT:    vslidedown.vi v11, v8, 2
+; RV64-i32-NEXT:    vslidedown.vi v12, v8, 3
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v10
+; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v11
+; RV64-i32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV64-i32-NEXT:    vmv.v.x v10, a0
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v12
+; RV64-i32-NEXT:    vslidedown.vi v12, v8, 4
+; RV64-i32-NEXT:    vslide1down.vx v10, v10, a1
+; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v12
+; RV64-i32-NEXT:    vslidedown.vi v12, v8, 5
+; RV64-i32-NEXT:    vslide1down.vx v10, v10, a0
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v12
+; RV64-i32-NEXT:    vslidedown.vi v12, v8, 6
+; RV64-i32-NEXT:    vslidedown.vi v8, v8, 7
+; RV64-i32-NEXT:    vslide1down.vx v10, v10, a1
+; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v12
+; RV64-i32-NEXT:    vslide1down.vx v10, v10, a0
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v8
+; RV64-i32-NEXT:    vslide1down.vx v8, v10, a1
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_v8f32:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    addi sp, sp, -128
+; RV64-i64-NEXT:    .cfi_def_cfa_offset 128
+; RV64-i64-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT:    .cfi_offset ra, -8
+; RV64-i64-NEXT:    .cfi_offset s0, -16
+; RV64-i64-NEXT:    addi s0, sp, 128
+; RV64-i64-NEXT:    .cfi_def_cfa s0, 0
+; RV64-i64-NEXT:    andi sp, sp, -64
+; RV64-i64-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV64-i64-NEXT:    vfmv.f.s fa5, v8
+; RV64-i64-NEXT:    vslidedown.vi v10, v8, 7
+; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v10
+; RV64-i64-NEXT:    vslidedown.vi v10, v8, 6
+; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v10
+; RV64-i64-NEXT:    vslidedown.vi v10, v8, 5
+; RV64-i64-NEXT:    fcvt.l.s a2, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v10
+; RV64-i64-NEXT:    vslidedown.vi v10, v8, 4
+; RV64-i64-NEXT:    fcvt.l.s a3, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v10
+; RV64-i64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i64-NEXT:    vslidedown.vi v9, v8, 3
+; RV64-i64-NEXT:    vslidedown.vi v10, v8, 2
+; RV64-i64-NEXT:    vslidedown.vi v8, v8, 1
+; RV64-i64-NEXT:    fcvt.l.s a4, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v9
+; RV64-i64-NEXT:    fcvt.l.s a5, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v10
+; RV64-i64-NEXT:    fcvt.l.s a6, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v8
+; RV64-i64-NEXT:    sd a4, 32(sp)
+; RV64-i64-NEXT:    sd a3, 40(sp)
+; RV64-i64-NEXT:    sd a2, 48(sp)
+; RV64-i64-NEXT:    sd a1, 56(sp)
+; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT:    sd a0, 0(sp)
+; RV64-i64-NEXT:    sd a1, 8(sp)
+; RV64-i64-NEXT:    sd a6, 16(sp)
+; RV64-i64-NEXT:    sd a5, 24(sp)
+; RV64-i64-NEXT:    mv a0, sp
+; RV64-i64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-i64-NEXT:    vle64.v v8, (a0)
+; RV64-i64-NEXT:    addi sp, s0, -128
+; RV64-i64-NEXT:    .cfi_def_cfa sp, 128
+; RV64-i64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT:    .cfi_restore ra
+; RV64-i64-NEXT:    .cfi_restore s0
+; RV64-i64-NEXT:    addi sp, sp, 128
+; RV64-i64-NEXT:    .cfi_def_cfa_offset 0
+; RV64-i64-NEXT:    ret
+  %a = call <8 x iXLen> @llvm.lround.v8iXLen.v8f32(<8 x float> %x)
+  ret <8 x iXLen> %a
+}
+declare <8 x iXLen> @llvm.lround.v8iXLen.v8f32(<8 x float>)
+
+define <16 x iXLen> @lround_v16f32(<16 x float> %x) {
+; RV32-LABEL: lround_v16f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -192
+; RV32-NEXT:    .cfi_def_cfa_offset 192
+; RV32-NEXT:    sw ra, 188(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 184(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
+; RV32-NEXT:    addi s0, sp, 192
+; RV32-NEXT:    .cfi_def_cfa s0, 0
+; RV32-NEXT:    andi sp, sp, -64
+; RV32-NEXT:    mv a0, sp
+; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT:    vse32.v v8, (a0)
+; RV32-NEXT:    flw fa5, 60(sp)
+; RV32-NEXT:    vfmv.f.s fa4, v8
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 3
+; RV32-NEXT:    vslidedown.vi v11, v8, 2
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    sw a0, 124(sp)
+; RV32-NEXT:    flw fa5, 56(sp)
+; RV32-NEXT:    fcvt.w.s a0, fa4, rmm
+; RV32-NEXT:    vfmv.f.s fa4, v10
+; RV32-NEXT:    vslidedown.vi v10, v8, 1
+; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV32-NEXT:    sw a1, 120(sp)
+; RV32-NEXT:    flw fa5, 52(sp)
+; RV32-NEXT:    fcvt.w.s a1, fa4, rmm
+; RV32-NEXT:    vfmv.f.s fa4, v11
+; RV32-NEXT:    fcvt.w.s a2, fa4, rmm
+; RV32-NEXT:    fcvt.w.s a3, fa5, rmm
+; RV32-NEXT:    sw a3, 116(sp)
+; RV32-NEXT:    flw fa5, 48(sp)
+; RV32-NEXT:    vfmv.f.s fa4, v10
+; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 7
+; RV32-NEXT:    fcvt.w.s a3, fa4, rmm
+; RV32-NEXT:    fcvt.w.s a4, fa5, rmm
+; RV32-NEXT:    sw a4, 112(sp)
+; RV32-NEXT:    flw fa5, 44(sp)
+; RV32-NEXT:    vfmv.f.s fa4, v10
+; RV32-NEXT:    vslidedown.vi v10, v8, 6
+; RV32-NEXT:    fcvt.w.s a4, fa4, rmm
+; RV32-NEXT:    fcvt.w.s a5, fa5, rmm
+; RV32-NEXT:    sw a5, 108(sp)
+; RV32-NEXT:    flw fa5, 40(sp)
+; RV32-NEXT:    vfmv.f.s fa4, v10
+; RV32-NEXT:    vslidedown.vi v10, v8, 5
+; RV32-NEXT:    fcvt.w.s a5, fa4, rmm
+; RV32-NEXT:    fcvt.w.s a6, fa5, rmm
+; RV32-NEXT:    sw a6, 104(sp)
+; RV32-NEXT:    flw fa5, 36(sp)
+; RV32-NEXT:    vfmv.f.s fa4, v10
+; RV32-NEXT:    fcvt.w.s a6, fa4, rmm
+; RV32-NEXT:    vslidedown.vi v8, v8, 4
+; RV32-NEXT:    fcvt.w.s a7, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    sw a7, 100(sp)
+; RV32-NEXT:    fcvt.w.s a7, fa5, rmm
+; RV32-NEXT:    flw fa5, 32(sp)
+; RV32-NEXT:    sw a0, 64(sp)
+; RV32-NEXT:    sw a3, 68(sp)
+; RV32-NEXT:    sw a2, 72(sp)
+; RV32-NEXT:    sw a1, 76(sp)
+; RV32-NEXT:    sw a7, 80(sp)
+; RV32-NEXT:    sw a6, 84(sp)
+; RV32-NEXT:    sw a5, 88(sp)
+; RV32-NEXT:    sw a4, 92(sp)
+; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32-NEXT:    sw a0, 96(sp)
+; RV32-NEXT:    addi a0, sp, 64
+; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT:    vle32.v v8, (a0)
+; RV32-NEXT:    addi sp, s0, -192
+; RV32-NEXT:    .cfi_def_cfa sp, 192
+; RV32-NEXT:    lw ra, 188(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 184(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
+; RV32-NEXT:    addi sp, sp, 192
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_v16f32:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    addi sp, sp, -192
+; RV64-i32-NEXT:    .cfi_def_cfa_offset 192
+; RV64-i32-NEXT:    sd ra, 184(sp) # 8-byte Folded Spill
+; RV64-i32-NEXT:    sd s0, 176(sp) # 8-byte Folded Spill
+; RV64-i32-NEXT:    .cfi_offset ra, -8
+; RV64-i32-NEXT:    .cfi_offset s0, -16
+; RV64-i32-NEXT:    addi s0, sp, 192
+; RV64-i32-NEXT:    .cfi_def_cfa s0, 0
+; RV64-i32-NEXT:    andi sp, sp, -64
+; RV64-i32-NEXT:    mv a0, sp
+; RV64-i32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV64-i32-NEXT:    vse32.v v8, (a0)
+; RV64-i32-NEXT:    flw fa5, 60(sp)
+; RV64-i32-NEXT:    vfmv.f.s fa4, v8
+; RV64-i32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i32-NEXT:    vslidedown.vi v10, v8, 3
+; RV64-i32-NEXT:    vslidedown.vi v11, v8, 2
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    sw a0, 124(sp)
+; RV64-i32-NEXT:    flw fa5, 56(sp)
+; RV64-i32-NEXT:    fcvt.w.s a0, fa4, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa4, v10
+; RV64-i32-NEXT:    vslidedown.vi v10, v8, 1
+; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
+; RV64-i32-NEXT:    sw a1, 120(sp)
+; RV64-i32-NEXT:    flw fa5, 52(sp)
+; RV64-i32-NEXT:    fcvt.w.s a1, fa4, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa4, v11
+; RV64-i32-NEXT:    fcvt.w.s a2, fa4, rmm
+; RV64-i32-NEXT:    fcvt.w.s a3, fa5, rmm
+; RV64-i32-NEXT:    sw a3, 116(sp)
+; RV64-i32-NEXT:    flw fa5, 48(sp)
+; RV64-i32-NEXT:    vfmv.f.s fa4, v10
+; RV64-i32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV64-i32-NEXT:    vslidedown.vi v10, v8, 7
+; RV64-i32-NEXT:    fcvt.w.s a3, fa4, rmm
+; RV64-i32-NEXT:    fcvt.w.s a4, fa5, rmm
+; RV64-i32-NEXT:    sw a4, 112(sp)
+; RV64-i32-NEXT:    flw fa5, 44(sp)
+; RV64-i32-NEXT:    vfmv.f.s fa4, v10
+; RV64-i32-NEXT:    vslidedown.vi v10, v8, 6
+; RV64-i32-NEXT:    fcvt.w.s a4, fa4, rmm
+; RV64-i32-NEXT:    fcvt.w.s a5, fa5, rmm
+; RV64-i32-NEXT:    sw a5, 108(sp)
+; RV64-i32-NEXT:    flw fa5, 40(sp)
+; RV64-i32-NEXT:    vfmv.f.s fa4, v10
+; RV64-i32-NEXT:    vslidedown.vi v10, v8, 5
+; RV64-i32-NEXT:    fcvt.w.s a5, fa4, rmm
+; RV64-i32-NEXT:    fcvt.w.s a6, fa5, rmm
+; RV64-i32-NEXT:    sw a6, 104(sp)
+; RV64-i32-NEXT:    flw fa5, 36(sp)
+; RV64-i32-NEXT:    vfmv.f.s fa4, v10
+; RV64-i32-NEXT:    fcvt.w.s a6, fa4, rmm
+; RV64-i32-NEXT:    vslidedown.vi v8, v8, 4
+; RV64-i32-NEXT:    fcvt.w.s a7, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v8
+; RV64-i32-NEXT:    sw a7, 100(sp)
+; RV64-i32-NEXT:    fcvt.w.s a7, fa5, rmm
+; RV64-i32-NEXT:    flw fa5, 32(sp)
+; RV64-i32-NEXT:    sw a0, 64(sp)
+; RV64-i32-NEXT:    sw a3, 68(sp)
+; RV64-i32-NEXT:    sw a2, 72(sp)
+; RV64-i32-NEXT:    sw a1, 76(sp)
+; RV64-i32-NEXT:    sw a7, 80(sp)
+; RV64-i32-NEXT:    sw a6, 84(sp)
+; RV64-i32-NEXT:    sw a5, 88(sp)
+; RV64-i32-NEXT:    sw a4, 92(sp)
+; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64-i32-NEXT:    sw a0, 96(sp)
+; RV64-i32-NEXT:    addi a0, sp, 64
+; RV64-i32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV64-i32-NEXT:    vle32.v v8, (a0)
+; RV64-i32-NEXT:    addi sp, s0, -192
+; RV64-i32-NEXT:    .cfi_def_cfa sp, 192
+; RV64-i32-NEXT:    ld ra, 184(sp) # 8-byte Folded Reload
+; RV64-i32-NEXT:    ld s0, 176(sp) # 8-byte Folded Reload
+; RV64-i32-NEXT:    .cfi_restore ra
+; RV64-i32-NEXT:    .cfi_restore s0
+; RV64-i32-NEXT:    addi sp, sp, 192
+; RV64-i32-NEXT:    .cfi_def_cfa_offset 0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_v16f32:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    addi sp, sp, -384
+; RV64-i64-NEXT:    .cfi_def_cfa_offset 384
+; RV64-i64-NEXT:    sd ra, 376(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT:    sd s0, 368(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT:    .cfi_offset ra, -8
+; RV64-i64-NEXT:    .cfi_offset s0, -16
+; RV64-i64-NEXT:    addi s0, sp, 384
+; RV64-i64-NEXT:    .cfi_def_cfa s0, 0
+; RV64-i64-NEXT:    andi sp, sp, -128
+; RV64-i64-NEXT:    addi a0, sp, 64
+; RV64-i64-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV64-i64-NEXT:    vse32.v v8, (a0)
+; RV64-i64-NEXT:    flw fa5, 124(sp)
+; RV64-i64-NEXT:    vfmv.f.s fa4, v8
+; RV64-i64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i64-NEXT:    vslidedown.vi v10, v8, 3
+; RV64-i64-NEXT:    vslidedown.vi v12, v8, 2
+; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT:    sd a0, 248(sp)
+; RV64-i64-NEXT:    flw fa5, 120(sp)
+; RV64-i64-NEXT:    vslidedown.vi v13, v8, 1
+; RV64-i64-NEXT:    fcvt.l.s a0, fa4, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa4, v10
+; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
+; RV64-i64-NEXT:    sd a1, 240(sp)
+; RV64-i64-NEXT:    flw fa5, 116(sp)
+; RV64-i64-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV64-i64-NEXT:    vslidedown.vi v10, v8, 7
+; RV64-i64-NEXT:    fcvt.l.s a1, fa4, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa4, v12
+; RV64-i64-NEXT:    fcvt.l.s a2, fa5, rmm
+; RV64-i64-NEXT:    sd a2, 232(sp)
+; RV64-i64-NEXT:    flw fa5, 112(sp)
+; RV64-i64-NEXT:    fcvt.l.s a2, fa4, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa4, v13
+; RV64-i64-NEXT:    vslidedown.vi v12, v8, 6
+; RV64-i64-NEXT:    fcvt.l.s a3, fa5, rmm
+; RV64-i64-NEXT:    sd a3, 224(sp)
+; RV64-i64-NEXT:    flw fa5, 108(sp)
+; RV64-i64-NEXT:    fcvt.l.s a3, fa4, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa4, v10
+; RV64-i64-NEXT:    vslidedown.vi v10, v8, 5
+; RV64-i64-NEXT:    fcvt.l.s a4, fa5, rmm
+; RV64-i64-NEXT:    sd a4, 216(sp)
+; RV64-i64-NEXT:    flw fa5, 104(sp)
+; RV64-i64-NEXT:    fcvt.l.s a4, fa4, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa4, v12
+; RV64-i64-NEXT:    fcvt.l.s a5, fa4, rmm
+; RV64-i64-NEXT:    fcvt.l.s a6, fa5, rmm
+; RV64-i64-NEXT:    sd a6, 208(sp)
+; RV64-i64-NEXT:    flw fa5, 100(sp)
+; RV64-i64-NEXT:    vfmv.f.s fa4, v10
+; RV64-i64-NEXT:    fcvt.l.s a6, fa4, rmm
+; RV64-i64-NEXT:    vslidedown.vi v8, v8, 4
+; RV64-i64-NEXT:    fcvt.l.s a7, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v8
+; RV64-i64-NEXT:    sd a7, 200(sp)
+; RV64-i64-NEXT:    fcvt.l.s a7, fa5, rmm
+; RV64-i64-NEXT:    flw fa5, 96(sp)
+; RV64-i64-NEXT:    sd a0, 128(sp)
+; RV64-i64-NEXT:    sd a3, 136(sp)
+; RV64-i64-NEXT:    sd a2, 144(sp)
+; RV64-i64-NEXT:    sd a1, 152(sp)
+; RV64-i64-NEXT:    sd a7, 160(sp)
+; RV64-i64-NEXT:    sd a6, 168(sp)
+; RV64-i64-NEXT:    sd a5, 176(sp)
+; RV64-i64-NEXT:    sd a4, 184(sp)
+; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
+; RV64-i64-NEXT:    sd a0, 192(sp)
+; RV64-i64-NEXT:    addi a0, sp, 128
+; RV64-i64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64-i64-NEXT:    vle64.v v8, (a0)
+; RV64-i64-NEXT:    addi sp, s0, -384
+; RV64-i64-NEXT:    .cfi_def_cfa sp, 384
+; RV64-i64-NEXT:    ld ra, 376(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT:    ld s0, 368(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT:    .cfi_restore ra
+; RV64-i64-NEXT:    .cfi_restore s0
+; RV64-i64-NEXT:    addi sp, sp, 384
+; RV64-i64-NEXT:    .cfi_def_cfa_offset 0
+; RV64-i64-NEXT:    ret
+  %a = call <16 x iXLen> @llvm.lround.v16iXLen.v16f32(<16 x float> %x)
+  ret <16 x iXLen> %a
+}
+declare <16 x iXLen> @llvm.lround.v16iXLen.v16f32(<16 x float>)
+
+define <1 x iXLen> @lround_v1f64(<1 x double> %x) {
+; RV32-LABEL: lround_v1f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.w.d a0, fa5, rmm
+; RV32-NEXT:    vmv.s.x v8, a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_v1f64:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-i32-NEXT:    vfmv.f.s fa5, v8
+; RV64-i32-NEXT:    fcvt.w.d a0, fa5, rmm
+; RV64-i32-NEXT:    vmv.s.x v8, a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_v1f64:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-i64-NEXT:    vfmv.f.s fa5, v8
+; RV64-i64-NEXT:    fcvt.l.d a0, fa5, rmm
+; RV64-i64-NEXT:    vmv.s.x v8, a0
+; RV64-i64-NEXT:    ret
+  %a = call <1 x iXLen> @llvm.lround.v1iXLen.v1f64(<1 x double> %x)
+  ret <1 x iXLen> %a
+}
+declare <1 x iXLen> @llvm.lround.v1iXLen.v1f64(<1 x double>)
+
+define <2 x iXLen> @lround_v2f64(<2 x double> %x) {
+; RV32-LABEL: lround_v2f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 1
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fcvt.w.d a0, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v9
+; RV32-NEXT:    fcvt.w.d a1, fa5, rmm
+; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT:    vmv.v.x v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_v2f64:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-i32-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-i32-NEXT:    vfmv.f.s fa5, v8
+; RV64-i32-NEXT:    fcvt.w.d a0, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v9
+; RV64-i32-NEXT:    fcvt.w.d a1, fa5, rmm
+; RV64-i32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV64-i32-NEXT:    vmv.v.x v8, a0
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_v2f64:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64-i64-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-i64-NEXT:    vfmv.f.s fa5, v8
+; RV64-i64-NEXT:    fcvt.l.d a0, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v9
+; RV64-i64-NEXT:    fcvt.l.d a1, fa5, rmm
+; RV64-i64-NEXT:    vmv.v.x v8, a0
+; RV64-i64-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-i64-NEXT:    ret
+  %a = call <2 x iXLen> @llvm.lround.v2iXLen.v2f64(<2 x double> %x)
+  ret <2 x iXLen> %a
+}
+declare <2 x iXLen> @llvm.lround.v2iXLen.v2f64(<2 x double>)
+
+define <4 x iXLen> @lround_v4f64(<4 x double> %x) {
+; RV32-LABEL: lround_v4f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v12, v8, 1
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 2
+; RV32-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-NEXT:    fcvt.w.d a0, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v12
+; RV32-NEXT:    fcvt.w.d a1, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v10
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vmv.v.x v9, a0
+; RV32-NEXT:    fcvt.w.d a0, fa5, rmm
+; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV32-NEXT:    vslide1down.vx v8, v9, a1
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    fcvt.w.d a0, fa5, rmm
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_v4f64:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-i32-NEXT:    vslidedown.vi v12, v8, 1
+; RV64-i32-NEXT:    vfmv.f.s fa5, v8
+; RV64-i32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV64-i32-NEXT:    vslidedown.vi v10, v8, 2
+; RV64-i32-NEXT:    vslidedown.vi v8, v8, 3
+; RV64-i32-NEXT:    fcvt.w.d a0, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v12
+; RV64-i32-NEXT:    fcvt.w.d a1, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v10
+; RV64-i32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64-i32-NEXT:    vmv.v.x v9, a0
+; RV64-i32-NEXT:    fcvt.w.d a0, fa5, rmm
+; RV64-i32-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64-i32-NEXT:    vfmv.f.s fa5, v8
+; RV64-i32-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i32-NEXT:    vslide1down.vx v8, v9, a1
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT:    fcvt.w.d a0, fa5, rmm
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_v4f64:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-i64-NEXT:    vslidedown.vi v12, v8, 1
+; RV64-i64-NEXT:    vfmv.f.s fa5, v8
+; RV64-i64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV64-i64-NEXT:    vslidedown.vi v10, v8, 2
+; RV64-i64-NEXT:    vslidedown.vi v8, v8, 3
+; RV64-i64-NEXT:    fcvt.l.d a0, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v12
+; RV64-i64-NEXT:    fcvt.l.d a1, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v10
+; RV64-i64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-i64-NEXT:    vmv.v.x v10, a0
+; RV64-i64-NEXT:    fcvt.l.d a0, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v8
+; RV64-i64-NEXT:    vslide1down.vx v8, v10, a1
+; RV64-i64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i64-NEXT:    fcvt.l.d a0, fa5, rmm
+; RV64-i64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i64-NEXT:    ret
+  %a = call <4 x iXLen> @llvm.lround.v4iXLen.v4f64(<4 x double> %x)
+  ret <4 x iXLen> %a
+}
+declare <4 x iXLen> @llvm.lround.v4iXLen.v4f64(<4 x double>)
+
+define <8 x iXLen> @lround_v8f64(<8 x double> %x) {
+; RV32-LABEL: lround_v8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -128
+; RV32-NEXT:    .cfi_def_cfa_offset 128
+; RV32-NEXT:    sw ra, 124(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 120(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
+; RV32-NEXT:    addi s0, sp, 128
+; RV32-NEXT:    .cfi_def_cfa s0, 0
+; RV32-NEXT:    andi sp, sp, -64
+; RV32-NEXT:    mv a0, sp
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v14, v8, 1
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v12, v8, 2
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT:    vse64.v v8, (a0)
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-NEXT:    vfmv.f.s fa4, v14
+; RV32-NEXT:    fcvt.w.d a0, fa5, rmm
+; RV32-NEXT:    vfmv.f.s fa5, v12
+; RV32-NEXT:    vfmv.f.s fa3, v8
+; RV32-NEXT:    fcvt.w.d a1, fa4, rmm
+; RV32-NEXT:    fcvt.w.d a2, fa5, rmm
+; RV32-NEXT:    fcvt.w.d a3, fa3, rmm
+; RV32-NEXT:    fld fa5, 32(sp)
+; RV32-NEXT:    fld fa4, 40(sp)
+; RV32-NEXT:    fld fa3, 48(sp)
+; RV32-NEXT:    fld fa2, 56(sp)
+; RV32-NEXT:    fcvt.w.d a4, fa5, rmm
+; RV32-NEXT:    fcvt.w.d a5, fa4, rmm
+; RV32-NEXT:    fcvt.w.d a6, fa3, rmm
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vmv.v.x v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    vslide1down.vx v8, v8, a2
+; RV32-NEXT:    vslide1down.vx v8, v8, a3
+; RV32-NEXT:    vslide1down.vx v8, v8, a4
+; RV32-NEXT:    vslide1down.vx v8, v8, a5
+; RV32-NEXT:    vslide1down.vx v8, v8, a6
+; RV32-NEXT:    fcvt.w.d a0, fa2, rmm
+; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    addi sp, s0, -128
+; RV32-NEXT:    .cfi_def_cfa sp, 128
+; RV32-NEXT:    lw ra, 124(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 120(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
+; RV32-NEXT:    addi sp, sp, 128
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_v8f64:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    addi sp, sp, -128
+; RV64-i32-NEXT:    .cfi_def_cfa_offset 128
+; RV64-i32-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
+; RV64-i32-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
+; RV64-i32-NEXT:    .cfi_offset ra, -8
+; RV64-i32-NEXT:    .cfi_offset s0, -16
+; RV64-i32-NEXT:    addi s0, sp, 128
+; RV64-i32-NEXT:    .cfi_def_cfa s0, 0
+; RV64-i32-NEXT:    andi sp, sp, -64
+; RV64-i32-NEXT:    mv a0, sp
+; RV64-i32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-i32-NEXT:    vslidedown.vi v14, v8, 1
+; RV64-i32-NEXT:    vfmv.f.s fa5, v8
+; RV64-i32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV64-i32-NEXT:    vslidedown.vi v12, v8, 2
+; RV64-i32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-i32-NEXT:    vse64.v v8, (a0)
+; RV64-i32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV64-i32-NEXT:    vslidedown.vi v8, v8, 3
+; RV64-i32-NEXT:    vfmv.f.s fa4, v14
+; RV64-i32-NEXT:    fcvt.w.d a0, fa5, rmm
+; RV64-i32-NEXT:    vfmv.f.s fa5, v12
+; RV64-i32-NEXT:    vfmv.f.s fa3, v8
+; RV64-i32-NEXT:    fcvt.w.d a1, fa4, rmm
+; RV64-i32-NEXT:    fcvt.w.d a2, fa5, rmm
+; RV64-i32-NEXT:    fcvt.w.d a3, fa3, rmm
+; RV64-i32-NEXT:    fld fa5, 32(sp)
+; RV64-i32-NEXT:    fld fa4, 40(sp)
+; RV64-i32-NEXT:    fld fa3, 48(sp)
+; RV64-i32-NEXT:    fld fa2, 56(sp)
+; RV64-i32-NEXT:    fcvt.w.d a4, fa5, rmm
+; RV64-i32-NEXT:    fcvt.w.d a5, fa4, rmm
+; RV64-i32-NEXT:    fcvt.w.d a6, fa3, rmm
+; RV64-i32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV64-i32-NEXT:    vmv.v.x v8, a0
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a2
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a3
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a4
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a5
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a6
+; RV64-i32-NEXT:    fcvt.w.d a0, fa2, rmm
+; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT:    addi sp, s0, -128
+; RV64-i32-NEXT:    .cfi_def_cfa sp, 128
+; RV64-i32-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
+; RV64-i32-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
+; RV64-i32-NEXT:    .cfi_restore ra
+; RV64-i32-NEXT:    .cfi_restore s0
+; RV64-i32-NEXT:    addi sp, sp, 128
+; RV64-i32-NEXT:    .cfi_def_cfa_offset 0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_v8f64:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    addi sp, sp, -192
+; RV64-i64-NEXT:    .cfi_def_cfa_offset 192
+; RV64-i64-NEXT:    sd ra, 184(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT:    sd s0, 176(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT:    .cfi_offset ra, -8
+; RV64-i64-NEXT:    .cfi_offset s0, -16
+; RV64-i64-NEXT:    addi s0, sp, 192
+; RV64-i64-NEXT:    .cfi_def_cfa s0, 0
+; RV64-i64-NEXT:    andi sp, sp, -64
+; RV64-i64-NEXT:    mv a0, sp
+; RV64-i64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-i64-NEXT:    vse64.v v8, (a0)
+; RV64-i64-NEXT:    fld fa5, 56(sp)
+; RV64-i64-NEXT:    vfmv.f.s fa4, v8
+; RV64-i64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-i64-NEXT:    vslidedown.vi v10, v8, 1
+; RV64-i64-NEXT:    fcvt.l.d a0, fa4, rmm
+; RV64-i64-NEXT:    fcvt.l.d a1, fa5, rmm
+; RV64-i64-NEXT:    sd a1, 120(sp)
+; RV64-i64-NEXT:    fld fa5, 48(sp)
+; RV64-i64-NEXT:    vfmv.f.s fa4, v10
+; RV64-i64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV64-i64-NEXT:    vslidedown.vi v10, v8, 3
+; RV64-i64-NEXT:    fcvt.l.d a1, fa4, rmm
+; RV64-i64-NEXT:    fcvt.l.d a2, fa5, rmm
+; RV64-i64-NEXT:    sd a2, 112(sp)
+; RV64-i64-NEXT:    fld fa5, 40(sp)
+; RV64-i64-NEXT:    vfmv.f.s fa4, v10
+; RV64-i64-NEXT:    fcvt.l.d a2, fa4, rmm
+; RV64-i64-NEXT:    vslidedown.vi v8, v8, 2
+; RV64-i64-NEXT:    fcvt.l.d a3, fa5, rmm
+; RV64-i64-NEXT:    vfmv.f.s fa5, v8
+; RV64-i64-NEXT:    sd a3, 104(sp)
+; RV64-i64-NEXT:    fcvt.l.d a3, fa5, rmm
+; RV64-i64-NEXT:    fld fa5, 32(sp)
+; RV64-i64-NEXT:    sd a0, 64(sp)
+; RV64-i64-NEXT:    sd a1, 72(sp)
+; RV64-i64-NEXT:    sd a3, 80(sp)
+; RV64-i64-NEXT:    sd a2, 88(sp)
+; RV64-i64-NEXT:    fcvt.l.d a0, fa5, rmm
+; RV64-i64-NEXT:    sd a0, 96(sp)
+; RV64-i64-NEXT:    addi a0, sp, 64
+; RV64-i64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-i64-NEXT:    vle64.v v8, (a0)
+; RV64-i64-NEXT:    addi sp, s0, -192
+; RV64-i64-NEXT:    .cfi_def_cfa sp, 192
+; RV64-i64-NEXT:    ld ra, 184(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT:    ld s0, 176(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT:    .cfi_restore ra
+; RV64-i64-NEXT:    .cfi_restore s0
+; RV64-i64-NEXT:    addi sp, sp, 192
+; RV64-i64-NEXT:    .cfi_def_cfa_offset 0
+; RV64-i64-NEXT:    ret
+  %a = call <8 x iXLen> @llvm.lround.v8iXLen.v8f64(<8 x double> %x)
+  ret <8 x iXLen> %a
+}
+declare <8 x iXLen> @llvm.lround.v8iXLen.v8f64(<8 x double>)

>From 6f8a8caa1dbd326e1e108ce32b4eed763c379046 Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <ramkumar.ramachandra at codasip.com>
Date: Thu, 26 Jun 2025 18:04:20 +0100
Subject: [PATCH 2/2] [CodeGen/RISCV] Add nounwind attribute

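Marking the test functions nounwind tells codegen they cannot unwind,
so llc stops emitting the .cfi_* directives and the autogenerated check
lines shrink accordingly. As a minimal sketch of the per-function
change (shown on the one-element f16 case; only the attribute is new,
the body is the existing test):

  define <1 x i64> @llround_v1f16(<1 x half> %x) nounwind {
    %a = call <1 x i64> @llvm.llround.v1i64.v1f16(<1 x half> %x)
    ret <1 x i64> %a
  }
  declare <1 x i64> @llvm.llround.v1i64.v1f16(<1 x half>)
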
---
 .../RISCV/rvv/fixed-vectors-llround.ll        | 172 ++----------------
 .../CodeGen/RISCV/rvv/fixed-vectors-lround.ll | 120 ++----------
 2 files changed, 32 insertions(+), 260 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll
index acc05a50455fb..b8ca7fd71cb93 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll
@@ -4,13 +4,11 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+f,+d,+zvfh -target-abi=lp64d \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV64
 
-define <1 x i64> @llround_v1f16(<1 x half> %x) {
+define <1 x i64> @llround_v1f16(<1 x half> %x) nounwind {
 ; RV32-LABEL: llround_v1f16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV32-NEXT:    vfmv.f.s fa5, v8
 ; RV32-NEXT:    fcvt.s.h fa0, fa5
@@ -21,9 +19,7 @@ define <1 x i64> @llround_v1f16(<1 x half> %x) {
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v8, (a0), zero
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v1f16:
@@ -40,17 +36,14 @@ define <1 x i64> @llround_v1f16(<1 x half> %x) {
 }
 declare <1 x i64> @llvm.llround.v1i64.v1f16(<1 x half>)
 
-define <2 x i64> @llround_v2f16(<2 x half> %x) {
+define <2 x i64> @llround_v2f16(<2 x half> %x) nounwind {
 ; RV32-LABEL: llround_v2f16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    .cfi_def_cfa_offset 32
 ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 1
 ; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 2 * vlenb
 ; RV32-NEXT:    addi a0, sp, 16
 ; RV32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
 ; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
@@ -81,11 +74,8 @@ define <2 x i64> @llround_v2f16(<2 x half> %x) {
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 1
 ; RV32-NEXT:    add sp, sp, a0
-; RV32-NEXT:    .cfi_def_cfa sp, 32
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
-; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v2f16:
@@ -107,18 +97,15 @@ define <2 x i64> @llround_v2f16(<2 x half> %x) {
 }
 declare <2 x i64> @llvm.llround.v2i64.v2f16(<2 x half>)
 
-define <3 x i64> @llround_v3f16(<3 x half> %x) {
+define <3 x i64> @llround_v3f16(<3 x half> %x) nounwind {
 ; RV32-LABEL: llround_v3f16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    .cfi_def_cfa_offset 32
 ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a1, a0, 1
 ; RV32-NEXT:    add a0, a1, a0
 ; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 3 * vlenb
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 1
 ; RV32-NEXT:    add a0, sp, a0
@@ -186,11 +173,8 @@ define <3 x i64> @llround_v3f16(<3 x half> %x) {
 ; RV32-NEXT:    slli a1, a0, 1
 ; RV32-NEXT:    add a0, a1, a0
 ; RV32-NEXT:    add sp, sp, a0
-; RV32-NEXT:    .cfi_def_cfa sp, 32
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
-; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v3f16:
@@ -224,18 +208,15 @@ define <3 x i64> @llround_v3f16(<3 x half> %x) {
 }
 declare <3 x i64> @llvm.llround.v3i64.v3f16(<3 x half>)
 
-define <4 x i64> @llround_v4f16(<4 x half> %x) {
+define <4 x i64> @llround_v4f16(<4 x half> %x) nounwind {
 ; RV32-LABEL: llround_v4f16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    .cfi_def_cfa_offset 32
 ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a1, a0, 1
 ; RV32-NEXT:    add a0, a1, a0
 ; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 3 * vlenb
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 1
 ; RV32-NEXT:    add a0, sp, a0
@@ -303,11 +284,8 @@ define <4 x i64> @llround_v4f16(<4 x half> %x) {
 ; RV32-NEXT:    slli a1, a0, 1
 ; RV32-NEXT:    add a0, a1, a0
 ; RV32-NEXT:    add sp, sp, a0
-; RV32-NEXT:    .cfi_def_cfa sp, 32
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
-; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v4f16:
@@ -341,17 +319,13 @@ define <4 x i64> @llround_v4f16(<4 x half> %x) {
 }
 declare <4 x i64> @llvm.llround.v4i64.v4f16(<4 x half>)
 
-define <8 x i64> @llround_v8f16(<8 x half> %x) {
+define <8 x i64> @llround_v8f16(<8 x half> %x) nounwind {
 ; RV32-LABEL: llround_v8f16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -208
-; RV32-NEXT:    .cfi_def_cfa_offset 208
 ; RV32-NEXT:    sw ra, 204(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sw s0, 200(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    .cfi_offset s0, -8
 ; RV32-NEXT:    addi s0, sp, 208
-; RV32-NEXT:    .cfi_def_cfa s0, 0
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    sub sp, sp, a0
 ; RV32-NEXT:    andi sp, sp, -64
@@ -430,25 +404,17 @@ define <8 x i64> @llround_v8f16(<8 x half> %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; RV32-NEXT:    vle32.v v8, (a0)
 ; RV32-NEXT:    addi sp, s0, -208
-; RV32-NEXT:    .cfi_def_cfa sp, 208
 ; RV32-NEXT:    lw ra, 204(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 200(sp) # 4-byte Folded Reload
-; RV32-NEXT:    .cfi_restore ra
-; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 208
-; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v8f16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    addi sp, sp, -128
-; RV64-NEXT:    .cfi_def_cfa_offset 128
 ; RV64-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
-; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    .cfi_offset s0, -16
 ; RV64-NEXT:    addi s0, sp, 128
-; RV64-NEXT:    .cfi_def_cfa s0, 0
 ; RV64-NEXT:    andi sp, sp, -64
 ; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV64-NEXT:    vfmv.f.s fa5, v8
@@ -494,30 +460,22 @@ define <8 x i64> @llround_v8f16(<8 x half> %x) {
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    addi sp, s0, -128
-; RV64-NEXT:    .cfi_def_cfa sp, 128
 ; RV64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
-; RV64-NEXT:    .cfi_restore ra
-; RV64-NEXT:    .cfi_restore s0
 ; RV64-NEXT:    addi sp, sp, 128
-; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %a = call <8 x i64> @llvm.llround.v8i64.v8f16(<8 x half> %x)
   ret <8 x i64> %a
 }
 declare <8 x i64> @llvm.llround.v8i64.v8f16(<8 x half>)
 
-define <16 x i64> @llround_v16f16(<16 x half> %x) {
+define <16 x i64> @llround_v16f16(<16 x half> %x) nounwind {
 ; RV32-LABEL: llround_v16f16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -400
-; RV32-NEXT:    .cfi_def_cfa_offset 400
 ; RV32-NEXT:    sw ra, 396(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sw s0, 392(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    .cfi_offset s0, -8
 ; RV32-NEXT:    addi s0, sp, 400
-; RV32-NEXT:    .cfi_def_cfa s0, 0
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 1
 ; RV32-NEXT:    sub sp, sp, a0
@@ -670,25 +628,17 @@ define <16 x i64> @llround_v16f16(<16 x half> %x) {
 ; RV32-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; RV32-NEXT:    vle32.v v8, (a1)
 ; RV32-NEXT:    addi sp, s0, -400
-; RV32-NEXT:    .cfi_def_cfa sp, 400
 ; RV32-NEXT:    lw ra, 396(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 392(sp) # 4-byte Folded Reload
-; RV32-NEXT:    .cfi_restore ra
-; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 400
-; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v16f16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    addi sp, sp, -256
-; RV64-NEXT:    .cfi_def_cfa_offset 256
 ; RV64-NEXT:    sd ra, 248(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    sd s0, 240(sp) # 8-byte Folded Spill
-; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    .cfi_offset s0, -16
 ; RV64-NEXT:    addi s0, sp, 256
-; RV64-NEXT:    .cfi_def_cfa s0, 0
 ; RV64-NEXT:    andi sp, sp, -128
 ; RV64-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
 ; RV64-NEXT:    vfmv.f.s fa5, v8
@@ -775,26 +725,20 @@ define <16 x i64> @llround_v16f16(<16 x half> %x) {
 ; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    addi sp, s0, -256
-; RV64-NEXT:    .cfi_def_cfa sp, 256
 ; RV64-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
-; RV64-NEXT:    .cfi_restore ra
-; RV64-NEXT:    .cfi_restore s0
 ; RV64-NEXT:    addi sp, sp, 256
-; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %a = call <16 x i64> @llvm.llround.v16i64.v16f16(<16 x half> %x)
   ret <16 x i64> %a
 }
 declare <16 x i64> @llvm.llround.v16i64.v16f16(<16 x half>)
 
-define <1 x i64> @llround_v1i64_v1f32(<1 x float> %x) {
+define <1 x i64> @llround_v1i64_v1f32(<1 x float> %x) nounwind {
 ; RV32-LABEL: llround_v1i64_v1f32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32-NEXT:    vfmv.f.s fa0, v8
 ; RV32-NEXT:    call llroundf
@@ -804,9 +748,7 @@ define <1 x i64> @llround_v1i64_v1f32(<1 x float> %x) {
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v8, (a0), zero
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v1i64_v1f32:
@@ -822,17 +764,14 @@ define <1 x i64> @llround_v1i64_v1f32(<1 x float> %x) {
 }
 declare <1 x i64> @llvm.llround.v1i64.v1f32(<1 x float>)
 
-define <2 x i64> @llround_v2i64_v2f32(<2 x float> %x) {
+define <2 x i64> @llround_v2i64_v2f32(<2 x float> %x) nounwind {
 ; RV32-LABEL: llround_v2i64_v2f32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    .cfi_def_cfa_offset 32
 ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 1
 ; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 2 * vlenb
 ; RV32-NEXT:    addi a0, sp, 16
 ; RV32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
@@ -861,11 +800,8 @@ define <2 x i64> @llround_v2i64_v2f32(<2 x float> %x) {
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 1
 ; RV32-NEXT:    add sp, sp, a0
-; RV32-NEXT:    .cfi_def_cfa sp, 32
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
-; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v2i64_v2f32:
@@ -885,18 +821,15 @@ define <2 x i64> @llround_v2i64_v2f32(<2 x float> %x) {
 }
 declare <2 x i64> @llvm.llround.v2i64.v2f32(<2 x float>)
 
-define <3 x i64> @llround_v3i64_v3f32(<3 x float> %x) {
+define <3 x i64> @llround_v3i64_v3f32(<3 x float> %x) nounwind {
 ; RV32-LABEL: llround_v3i64_v3f32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    .cfi_def_cfa_offset 32
 ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a1, a0, 1
 ; RV32-NEXT:    add a0, a1, a0
 ; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 3 * vlenb
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 1
 ; RV32-NEXT:    add a0, sp, a0
@@ -960,11 +893,8 @@ define <3 x i64> @llround_v3i64_v3f32(<3 x float> %x) {
 ; RV32-NEXT:    slli a1, a0, 1
 ; RV32-NEXT:    add a0, a1, a0
 ; RV32-NEXT:    add sp, sp, a0
-; RV32-NEXT:    .cfi_def_cfa sp, 32
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
-; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v3i64_v3f32:
@@ -994,18 +924,15 @@ define <3 x i64> @llround_v3i64_v3f32(<3 x float> %x) {
 }
 declare <3 x i64> @llvm.llround.v3i64.v3f32(<3 x float>)
 
-define <4 x i64> @llround_v4i64_v4f32(<4 x float> %x) {
+define <4 x i64> @llround_v4i64_v4f32(<4 x float> %x) nounwind {
 ; RV32-LABEL: llround_v4i64_v4f32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    .cfi_def_cfa_offset 32
 ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a1, a0, 1
 ; RV32-NEXT:    add a0, a1, a0
 ; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 3 * vlenb
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 1
 ; RV32-NEXT:    add a0, sp, a0
@@ -1069,11 +996,8 @@ define <4 x i64> @llround_v4i64_v4f32(<4 x float> %x) {
 ; RV32-NEXT:    slli a1, a0, 1
 ; RV32-NEXT:    add a0, a1, a0
 ; RV32-NEXT:    add sp, sp, a0
-; RV32-NEXT:    .cfi_def_cfa sp, 32
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
-; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v4i64_v4f32:
@@ -1103,17 +1027,13 @@ define <4 x i64> @llround_v4i64_v4f32(<4 x float> %x) {
 }
 declare <4 x i64> @llvm.llround.v4i64.v4f32(<4 x float>)
 
-define <8 x i64> @llround_v8i64_v8f32(<8 x float> %x) {
+define <8 x i64> @llround_v8i64_v8f32(<8 x float> %x) nounwind {
 ; RV32-LABEL: llround_v8i64_v8f32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -208
-; RV32-NEXT:    .cfi_def_cfa_offset 208
 ; RV32-NEXT:    sw ra, 204(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sw s0, 200(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    .cfi_offset s0, -8
 ; RV32-NEXT:    addi s0, sp, 208
-; RV32-NEXT:    .cfi_def_cfa s0, 0
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 1
 ; RV32-NEXT:    sub sp, sp, a0
@@ -1185,25 +1105,17 @@ define <8 x i64> @llround_v8i64_v8f32(<8 x float> %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; RV32-NEXT:    vle32.v v8, (a0)
 ; RV32-NEXT:    addi sp, s0, -208
-; RV32-NEXT:    .cfi_def_cfa sp, 208
 ; RV32-NEXT:    lw ra, 204(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 200(sp) # 4-byte Folded Reload
-; RV32-NEXT:    .cfi_restore ra
-; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 208
-; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v8i64_v8f32:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    addi sp, sp, -128
-; RV64-NEXT:    .cfi_def_cfa_offset 128
 ; RV64-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
-; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    .cfi_offset s0, -16
 ; RV64-NEXT:    addi s0, sp, 128
-; RV64-NEXT:    .cfi_def_cfa s0, 0
 ; RV64-NEXT:    andi sp, sp, -64
 ; RV64-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
 ; RV64-NEXT:    vfmv.f.s fa5, v8
@@ -1242,30 +1154,22 @@ define <8 x i64> @llround_v8i64_v8f32(<8 x float> %x) {
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    addi sp, s0, -128
-; RV64-NEXT:    .cfi_def_cfa sp, 128
 ; RV64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
-; RV64-NEXT:    .cfi_restore ra
-; RV64-NEXT:    .cfi_restore s0
 ; RV64-NEXT:    addi sp, sp, 128
-; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %a = call <8 x i64> @llvm.llround.v8i64.v8f32(<8 x float> %x)
   ret <8 x i64> %a
 }
 declare <8 x i64> @llvm.llround.v8i64.v8f32(<8 x float>)
 
-define <16 x i64> @llround_v16i64_v16f32(<16 x float> %x) {
+define <16 x i64> @llround_v16i64_v16f32(<16 x float> %x) nounwind {
 ; RV32-LABEL: llround_v16i64_v16f32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -400
-; RV32-NEXT:    .cfi_def_cfa_offset 400
 ; RV32-NEXT:    sw ra, 396(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sw s0, 392(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    .cfi_offset s0, -8
 ; RV32-NEXT:    addi s0, sp, 400
-; RV32-NEXT:    .cfi_def_cfa s0, 0
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 2
 ; RV32-NEXT:    sub sp, sp, a0
@@ -1375,25 +1279,17 @@ define <16 x i64> @llround_v16i64_v16f32(<16 x float> %x) {
 ; RV32-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; RV32-NEXT:    vle32.v v8, (a1)
 ; RV32-NEXT:    addi sp, s0, -400
-; RV32-NEXT:    .cfi_def_cfa sp, 400
 ; RV32-NEXT:    lw ra, 396(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 392(sp) # 4-byte Folded Reload
-; RV32-NEXT:    .cfi_restore ra
-; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 400
-; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v16i64_v16f32:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    addi sp, sp, -384
-; RV64-NEXT:    .cfi_def_cfa_offset 384
 ; RV64-NEXT:    sd ra, 376(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    sd s0, 368(sp) # 8-byte Folded Spill
-; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    .cfi_offset s0, -16
 ; RV64-NEXT:    addi s0, sp, 384
-; RV64-NEXT:    .cfi_def_cfa s0, 0
 ; RV64-NEXT:    andi sp, sp, -128
 ; RV64-NEXT:    addi a0, sp, 64
 ; RV64-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -1459,26 +1355,20 @@ define <16 x i64> @llround_v16i64_v16f32(<16 x float> %x) {
 ; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    addi sp, s0, -384
-; RV64-NEXT:    .cfi_def_cfa sp, 384
 ; RV64-NEXT:    ld ra, 376(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 368(sp) # 8-byte Folded Reload
-; RV64-NEXT:    .cfi_restore ra
-; RV64-NEXT:    .cfi_restore s0
 ; RV64-NEXT:    addi sp, sp, 384
-; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %a = call <16 x i64> @llvm.llround.v16i64.v16f32(<16 x float> %x)
   ret <16 x i64> %a
 }
 declare <16 x i64> @llvm.llround.v16i64.v16f32(<16 x float>)
 
-define <1 x i64> @llround_v1i64_v1f64(<1 x double> %x) {
+define <1 x i64> @llround_v1i64_v1f64(<1 x double> %x) nounwind {
 ; RV32-LABEL: llround_v1i64_v1f64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vfmv.f.s fa0, v8
 ; RV32-NEXT:    call llround
@@ -1488,9 +1378,7 @@ define <1 x i64> @llround_v1i64_v1f64(<1 x double> %x) {
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v8, (a0), zero
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v1i64_v1f64:
@@ -1505,17 +1393,14 @@ define <1 x i64> @llround_v1i64_v1f64(<1 x double> %x) {
 }
 declare <1 x i64> @llvm.llround.v1i64.v1f64(<1 x double>)
 
-define <2 x i64> @llround_v2i64_v2f64(<2 x double> %x) {
+define <2 x i64> @llround_v2i64_v2f64(<2 x double> %x) nounwind {
 ; RV32-LABEL: llround_v2i64_v2f64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    .cfi_def_cfa_offset 32
 ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 1
 ; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 2 * vlenb
 ; RV32-NEXT:    addi a0, sp, 16
 ; RV32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
@@ -1544,11 +1429,8 @@ define <2 x i64> @llround_v2i64_v2f64(<2 x double> %x) {
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 1
 ; RV32-NEXT:    add sp, sp, a0
-; RV32-NEXT:    .cfi_def_cfa sp, 32
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
-; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v2i64_v2f64:
@@ -1567,17 +1449,14 @@ define <2 x i64> @llround_v2i64_v2f64(<2 x double> %x) {
 }
 declare <2 x i64> @llvm.llround.v2i64.v2f64(<2 x double>)
 
-define <4 x i64> @llround_v4i64_v4f64(<4 x double> %x) {
+define <4 x i64> @llround_v4i64_v4f64(<4 x double> %x) nounwind {
 ; RV32-LABEL: llround_v4i64_v4f64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    .cfi_def_cfa_offset 32
 ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 2
 ; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 4 * vlenb
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 1
 ; RV32-NEXT:    add a0, sp, a0
@@ -1640,11 +1519,8 @@ define <4 x i64> @llround_v4i64_v4f64(<4 x double> %x) {
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 2
 ; RV32-NEXT:    add sp, sp, a0
-; RV32-NEXT:    .cfi_def_cfa sp, 32
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
-; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v4i64_v4f64:
@@ -1673,17 +1549,13 @@ define <4 x i64> @llround_v4i64_v4f64(<4 x double> %x) {
 }
 declare <4 x i64> @llvm.llround.v4i64.v4f64(<4 x double>)
 
-define <8 x i64> @llround_v8i64_v8f64(<8 x double> %x) {
+define <8 x i64> @llround_v8i64_v8f64(<8 x double> %x) nounwind {
 ; RV32-LABEL: llround_v8i64_v8f64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -272
-; RV32-NEXT:    .cfi_def_cfa_offset 272
 ; RV32-NEXT:    sw ra, 268(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sw s0, 264(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    .cfi_offset s0, -8
 ; RV32-NEXT:    addi s0, sp, 272
-; RV32-NEXT:    .cfi_def_cfa s0, 0
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 2
 ; RV32-NEXT:    sub sp, sp, a0
@@ -1744,25 +1616,17 @@ define <8 x i64> @llround_v8i64_v8f64(<8 x double> %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; RV32-NEXT:    vle32.v v8, (a0)
 ; RV32-NEXT:    addi sp, s0, -272
-; RV32-NEXT:    .cfi_def_cfa sp, 272
 ; RV32-NEXT:    lw ra, 268(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 264(sp) # 4-byte Folded Reload
-; RV32-NEXT:    .cfi_restore ra
-; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 272
-; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v8i64_v8f64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    addi sp, sp, -192
-; RV64-NEXT:    .cfi_def_cfa_offset 192
 ; RV64-NEXT:    sd ra, 184(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    sd s0, 176(sp) # 8-byte Folded Spill
-; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    .cfi_offset s0, -16
 ; RV64-NEXT:    addi s0, sp, 192
-; RV64-NEXT:    .cfi_def_cfa s0, 0
 ; RV64-NEXT:    andi sp, sp, -64
 ; RV64-NEXT:    mv a0, sp
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
@@ -1800,13 +1664,9 @@ define <8 x i64> @llround_v8i64_v8f64(<8 x double> %x) {
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    addi sp, s0, -192
-; RV64-NEXT:    .cfi_def_cfa sp, 192
 ; RV64-NEXT:    ld ra, 184(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 176(sp) # 8-byte Folded Reload
-; RV64-NEXT:    .cfi_restore ra
-; RV64-NEXT:    .cfi_restore s0
 ; RV64-NEXT:    addi sp, sp, 192
-; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
   %a = call <8 x i64> @llvm.llround.v8i64.v8f64(<8 x double> %x)
   ret <8 x i64> %a
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll
index 6bda94439ed5c..8289a8b8f833a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll
@@ -6,7 +6,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d,+zvfh \
 ; RUN:     -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64-i64
 
-define <1 x iXLen> @lround_v1f16(<1 x half> %x) {
+define <1 x iXLen> @lround_v1f16(<1 x half> %x) nounwind {
 ; RV32-LABEL: lround_v1f16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
@@ -41,7 +41,7 @@ define <1 x iXLen> @lround_v1f16(<1 x half> %x) {
 }
 declare <1 x iXLen> @llvm.lround.v1iXLen.v1f16(<1 x half>)
 
-define <2 x iXLen> @lround_v2f16(<2 x half> %x) {
+define <2 x iXLen> @lround_v2f16(<2 x half> %x) nounwind {
 ; RV32-LABEL: lround_v2f16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -91,7 +91,7 @@ define <2 x iXLen> @lround_v2f16(<2 x half> %x) {
 }
 declare <2 x iXLen> @llvm.lround.v2iXLen.v2f16(<2 x half>)
 
-define <3 x iXLen> @lround_v3f16(<3 x half> %x) {
+define <3 x iXLen> @lround_v3f16(<3 x half> %x) nounwind {
 ; RV32-LABEL: lround_v3f16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
@@ -177,7 +177,7 @@ define <3 x iXLen> @lround_v3f16(<3 x half> %x) {
 }
 declare <3 x iXLen> @llvm.lround.v3iXLen.v3f16(<3 x half>)
 
-define <4 x iXLen> @lround_v4f16(<4 x half> %x) {
+define <4 x iXLen> @lround_v4f16(<4 x half> %x) nounwind {
 ; RV32-LABEL: lround_v4f16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
@@ -263,7 +263,7 @@ define <4 x iXLen> @lround_v4f16(<4 x half> %x) {
 }
 declare <4 x iXLen> @llvm.lround.v4iXLen.v4f16(<4 x half>)
 
-define <8 x iXLen> @lround_v8f16(<8 x half> %x) {
+define <8 x iXLen> @lround_v8f16(<8 x half> %x) nounwind {
 ; RV32-LABEL: lround_v8f16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
@@ -377,13 +377,9 @@ define <8 x iXLen> @lround_v8f16(<8 x half> %x) {
 ; RV64-i64-LABEL: lround_v8f16:
 ; RV64-i64:       # %bb.0:
 ; RV64-i64-NEXT:    addi sp, sp, -128
-; RV64-i64-NEXT:    .cfi_def_cfa_offset 128
 ; RV64-i64-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
 ; RV64-i64-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
-; RV64-i64-NEXT:    .cfi_offset ra, -8
-; RV64-i64-NEXT:    .cfi_offset s0, -16
 ; RV64-i64-NEXT:    addi s0, sp, 128
-; RV64-i64-NEXT:    .cfi_def_cfa s0, 0
 ; RV64-i64-NEXT:    andi sp, sp, -64
 ; RV64-i64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV64-i64-NEXT:    vfmv.f.s fa5, v8
@@ -429,30 +425,22 @@ define <8 x iXLen> @lround_v8f16(<8 x half> %x) {
 ; RV64-i64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-i64-NEXT:    vle64.v v8, (a0)
 ; RV64-i64-NEXT:    addi sp, s0, -128
-; RV64-i64-NEXT:    .cfi_def_cfa sp, 128
 ; RV64-i64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64-i64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
-; RV64-i64-NEXT:    .cfi_restore ra
-; RV64-i64-NEXT:    .cfi_restore s0
 ; RV64-i64-NEXT:    addi sp, sp, 128
-; RV64-i64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-i64-NEXT:    ret
   %a = call <8 x iXLen> @llvm.lround.v8iXLen.v8f16(<8 x half> %x)
   ret <8 x iXLen> %a
 }
 declare <8 x iXLen> @llvm.lround.v8iXLen.v8f16(<8 x half>)
 
-define <16 x iXLen> @lround_v16f16(<16 x half> %x) {
+define <16 x iXLen> @lround_v16f16(<16 x half> %x) nounwind {
 ; RV32-LABEL: lround_v16f16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -128
-; RV32-NEXT:    .cfi_def_cfa_offset 128
 ; RV32-NEXT:    sw ra, 124(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sw s0, 120(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    .cfi_offset s0, -8
 ; RV32-NEXT:    addi s0, sp, 128
-; RV32-NEXT:    .cfi_def_cfa s0, 0
 ; RV32-NEXT:    andi sp, sp, -64
 ; RV32-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
 ; RV32-NEXT:    vfmv.f.s fa5, v8
@@ -539,25 +527,17 @@ define <16 x iXLen> @lround_v16f16(<16 x half> %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; RV32-NEXT:    vle32.v v8, (a0)
 ; RV32-NEXT:    addi sp, s0, -128
-; RV32-NEXT:    .cfi_def_cfa sp, 128
 ; RV32-NEXT:    lw ra, 124(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 120(sp) # 4-byte Folded Reload
-; RV32-NEXT:    .cfi_restore ra
-; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 128
-; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-i32-LABEL: lround_v16f16:
 ; RV64-i32:       # %bb.0:
 ; RV64-i32-NEXT:    addi sp, sp, -128
-; RV64-i32-NEXT:    .cfi_def_cfa_offset 128
 ; RV64-i32-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
 ; RV64-i32-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
-; RV64-i32-NEXT:    .cfi_offset ra, -8
-; RV64-i32-NEXT:    .cfi_offset s0, -16
 ; RV64-i32-NEXT:    addi s0, sp, 128
-; RV64-i32-NEXT:    .cfi_def_cfa s0, 0
 ; RV64-i32-NEXT:    andi sp, sp, -64
 ; RV64-i32-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
 ; RV64-i32-NEXT:    vfmv.f.s fa5, v8
@@ -644,25 +624,17 @@ define <16 x iXLen> @lround_v16f16(<16 x half> %x) {
 ; RV64-i32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; RV64-i32-NEXT:    vle32.v v8, (a0)
 ; RV64-i32-NEXT:    addi sp, s0, -128
-; RV64-i32-NEXT:    .cfi_def_cfa sp, 128
 ; RV64-i32-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64-i32-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
-; RV64-i32-NEXT:    .cfi_restore ra
-; RV64-i32-NEXT:    .cfi_restore s0
 ; RV64-i32-NEXT:    addi sp, sp, 128
-; RV64-i32-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-i32-NEXT:    ret
 ;
 ; RV64-i64-LABEL: lround_v16f16:
 ; RV64-i64:       # %bb.0:
 ; RV64-i64-NEXT:    addi sp, sp, -256
-; RV64-i64-NEXT:    .cfi_def_cfa_offset 256
 ; RV64-i64-NEXT:    sd ra, 248(sp) # 8-byte Folded Spill
 ; RV64-i64-NEXT:    sd s0, 240(sp) # 8-byte Folded Spill
-; RV64-i64-NEXT:    .cfi_offset ra, -8
-; RV64-i64-NEXT:    .cfi_offset s0, -16
 ; RV64-i64-NEXT:    addi s0, sp, 256
-; RV64-i64-NEXT:    .cfi_def_cfa s0, 0
 ; RV64-i64-NEXT:    andi sp, sp, -128
 ; RV64-i64-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
 ; RV64-i64-NEXT:    vfmv.f.s fa5, v8
@@ -749,20 +721,16 @@ define <16 x iXLen> @lround_v16f16(<16 x half> %x) {
 ; RV64-i64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64-i64-NEXT:    vle64.v v8, (a0)
 ; RV64-i64-NEXT:    addi sp, s0, -256
-; RV64-i64-NEXT:    .cfi_def_cfa sp, 256
 ; RV64-i64-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
 ; RV64-i64-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
-; RV64-i64-NEXT:    .cfi_restore ra
-; RV64-i64-NEXT:    .cfi_restore s0
 ; RV64-i64-NEXT:    addi sp, sp, 256
-; RV64-i64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-i64-NEXT:    ret
   %a = call <16 x iXLen> @llvm.lround.v16iXLen.v16f16(<16 x half> %x)
   ret <16 x iXLen> %a
 }
 declare <16 x iXLen> @llvm.lround.v16iXLen.v16f16(<16 x half>)
 
-define <1 x iXLen> @lround_v1f32(<1 x float> %x) {
+define <1 x iXLen> @lround_v1f32(<1 x float> %x) nounwind {
 ; RV32-LABEL: lround_v1f32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
@@ -792,7 +760,7 @@ define <1 x iXLen> @lround_v1f32(<1 x float> %x) {
 }
 declare <1 x iXLen> @llvm.lround.v1iXLen.v1f32(<1 x float>)
 
-define <2 x iXLen> @lround_v2f32(<2 x float> %x) {
+define <2 x iXLen> @lround_v2f32(<2 x float> %x) nounwind {
 ; RV32-LABEL: lround_v2f32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -834,7 +802,7 @@ define <2 x iXLen> @lround_v2f32(<2 x float> %x) {
 }
 declare <2 x iXLen> @llvm.lround.v2iXLen.v2f32(<2 x float>)
 
-define <3 x iXLen> @lround_v3f32(<3 x float> %x) {
+define <3 x iXLen> @lround_v3f32(<3 x float> %x) nounwind {
 ; RV32-LABEL: lround_v3f32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -902,7 +870,7 @@ define <3 x iXLen> @lround_v3f32(<3 x float> %x) {
 }
 declare <3 x iXLen> @llvm.lround.v3iXLen.v3f32(<3 x float>)
 
-define <4 x iXLen> @lround_v4f32(<4 x float> %x) {
+define <4 x iXLen> @lround_v4f32(<4 x float> %x) nounwind {
 ; RV32-LABEL: lround_v4f32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -970,7 +938,7 @@ define <4 x iXLen> @lround_v4f32(<4 x float> %x) {
 }
 declare <4 x iXLen> @llvm.lround.v4iXLen.v4f32(<4 x float>)
 
-define <8 x iXLen> @lround_v8f32(<8 x float> %x) {
+define <8 x iXLen> @lround_v8f32(<8 x float> %x) nounwind {
 ; RV32-LABEL: lround_v8f32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
@@ -1048,13 +1016,9 @@ define <8 x iXLen> @lround_v8f32(<8 x float> %x) {
 ; RV64-i64-LABEL: lround_v8f32:
 ; RV64-i64:       # %bb.0:
 ; RV64-i64-NEXT:    addi sp, sp, -128
-; RV64-i64-NEXT:    .cfi_def_cfa_offset 128
 ; RV64-i64-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
 ; RV64-i64-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
-; RV64-i64-NEXT:    .cfi_offset ra, -8
-; RV64-i64-NEXT:    .cfi_offset s0, -16
 ; RV64-i64-NEXT:    addi s0, sp, 128
-; RV64-i64-NEXT:    .cfi_def_cfa s0, 0
 ; RV64-i64-NEXT:    andi sp, sp, -64
 ; RV64-i64-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
 ; RV64-i64-NEXT:    vfmv.f.s fa5, v8
@@ -1093,30 +1057,22 @@ define <8 x iXLen> @lround_v8f32(<8 x float> %x) {
 ; RV64-i64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-i64-NEXT:    vle64.v v8, (a0)
 ; RV64-i64-NEXT:    addi sp, s0, -128
-; RV64-i64-NEXT:    .cfi_def_cfa sp, 128
 ; RV64-i64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64-i64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
-; RV64-i64-NEXT:    .cfi_restore ra
-; RV64-i64-NEXT:    .cfi_restore s0
 ; RV64-i64-NEXT:    addi sp, sp, 128
-; RV64-i64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-i64-NEXT:    ret
   %a = call <8 x iXLen> @llvm.lround.v8iXLen.v8f32(<8 x float> %x)
   ret <8 x iXLen> %a
 }
 declare <8 x iXLen> @llvm.lround.v8iXLen.v8f32(<8 x float>)
 
-define <16 x iXLen> @lround_v16f32(<16 x float> %x) {
+define <16 x iXLen> @lround_v16f32(<16 x float> %x) nounwind {
 ; RV32-LABEL: lround_v16f32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -192
-; RV32-NEXT:    .cfi_def_cfa_offset 192
 ; RV32-NEXT:    sw ra, 188(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sw s0, 184(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    .cfi_offset s0, -8
 ; RV32-NEXT:    addi s0, sp, 192
-; RV32-NEXT:    .cfi_def_cfa s0, 0
 ; RV32-NEXT:    andi sp, sp, -64
 ; RV32-NEXT:    mv a0, sp
 ; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -1182,25 +1138,17 @@ define <16 x iXLen> @lround_v16f32(<16 x float> %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; RV32-NEXT:    vle32.v v8, (a0)
 ; RV32-NEXT:    addi sp, s0, -192
-; RV32-NEXT:    .cfi_def_cfa sp, 192
 ; RV32-NEXT:    lw ra, 188(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 184(sp) # 4-byte Folded Reload
-; RV32-NEXT:    .cfi_restore ra
-; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 192
-; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-i32-LABEL: lround_v16f32:
 ; RV64-i32:       # %bb.0:
 ; RV64-i32-NEXT:    addi sp, sp, -192
-; RV64-i32-NEXT:    .cfi_def_cfa_offset 192
 ; RV64-i32-NEXT:    sd ra, 184(sp) # 8-byte Folded Spill
 ; RV64-i32-NEXT:    sd s0, 176(sp) # 8-byte Folded Spill
-; RV64-i32-NEXT:    .cfi_offset ra, -8
-; RV64-i32-NEXT:    .cfi_offset s0, -16
 ; RV64-i32-NEXT:    addi s0, sp, 192
-; RV64-i32-NEXT:    .cfi_def_cfa s0, 0
 ; RV64-i32-NEXT:    andi sp, sp, -64
 ; RV64-i32-NEXT:    mv a0, sp
 ; RV64-i32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -1266,25 +1214,17 @@ define <16 x iXLen> @lround_v16f32(<16 x float> %x) {
 ; RV64-i32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; RV64-i32-NEXT:    vle32.v v8, (a0)
 ; RV64-i32-NEXT:    addi sp, s0, -192
-; RV64-i32-NEXT:    .cfi_def_cfa sp, 192
 ; RV64-i32-NEXT:    ld ra, 184(sp) # 8-byte Folded Reload
 ; RV64-i32-NEXT:    ld s0, 176(sp) # 8-byte Folded Reload
-; RV64-i32-NEXT:    .cfi_restore ra
-; RV64-i32-NEXT:    .cfi_restore s0
 ; RV64-i32-NEXT:    addi sp, sp, 192
-; RV64-i32-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-i32-NEXT:    ret
 ;
 ; RV64-i64-LABEL: lround_v16f32:
 ; RV64-i64:       # %bb.0:
 ; RV64-i64-NEXT:    addi sp, sp, -384
-; RV64-i64-NEXT:    .cfi_def_cfa_offset 384
 ; RV64-i64-NEXT:    sd ra, 376(sp) # 8-byte Folded Spill
 ; RV64-i64-NEXT:    sd s0, 368(sp) # 8-byte Folded Spill
-; RV64-i64-NEXT:    .cfi_offset ra, -8
-; RV64-i64-NEXT:    .cfi_offset s0, -16
 ; RV64-i64-NEXT:    addi s0, sp, 384
-; RV64-i64-NEXT:    .cfi_def_cfa s0, 0
 ; RV64-i64-NEXT:    andi sp, sp, -128
 ; RV64-i64-NEXT:    addi a0, sp, 64
 ; RV64-i64-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -1350,20 +1290,16 @@ define <16 x iXLen> @lround_v16f32(<16 x float> %x) {
 ; RV64-i64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64-i64-NEXT:    vle64.v v8, (a0)
 ; RV64-i64-NEXT:    addi sp, s0, -384
-; RV64-i64-NEXT:    .cfi_def_cfa sp, 384
 ; RV64-i64-NEXT:    ld ra, 376(sp) # 8-byte Folded Reload
 ; RV64-i64-NEXT:    ld s0, 368(sp) # 8-byte Folded Reload
-; RV64-i64-NEXT:    .cfi_restore ra
-; RV64-i64-NEXT:    .cfi_restore s0
 ; RV64-i64-NEXT:    addi sp, sp, 384
-; RV64-i64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-i64-NEXT:    ret
   %a = call <16 x iXLen> @llvm.lround.v16iXLen.v16f32(<16 x float> %x)
   ret <16 x iXLen> %a
 }
 declare <16 x iXLen> @llvm.lround.v16iXLen.v16f32(<16 x float>)
 
-define <1 x iXLen> @lround_v1f64(<1 x double> %x) {
+define <1 x iXLen> @lround_v1f64(<1 x double> %x) nounwind {
 ; RV32-LABEL: lround_v1f64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
@@ -1392,7 +1328,7 @@ define <1 x iXLen> @lround_v1f64(<1 x double> %x) {
 }
 declare <1 x iXLen> @llvm.lround.v1iXLen.v1f64(<1 x double>)
 
-define <2 x iXLen> @lround_v2f64(<2 x double> %x) {
+define <2 x iXLen> @lround_v2f64(<2 x double> %x) nounwind {
 ; RV32-LABEL: lround_v2f64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
@@ -1435,7 +1371,7 @@ define <2 x iXLen> @lround_v2f64(<2 x double> %x) {
 }
 declare <2 x iXLen> @llvm.lround.v2iXLen.v2f64(<2 x double>)
 
-define <4 x iXLen> @lround_v4f64(<4 x double> %x) {
+define <4 x iXLen> @lround_v4f64(<4 x double> %x) nounwind {
 ; RV32-LABEL: lround_v4f64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
@@ -1510,17 +1446,13 @@ define <4 x iXLen> @lround_v4f64(<4 x double> %x) {
 }
 declare <4 x iXLen> @llvm.lround.v4iXLen.v4f64(<4 x double>)
 
-define <8 x iXLen> @lround_v8f64(<8 x double> %x) {
+define <8 x iXLen> @lround_v8f64(<8 x double> %x) nounwind {
 ; RV32-LABEL: lround_v8f64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -128
-; RV32-NEXT:    .cfi_def_cfa_offset 128
 ; RV32-NEXT:    sw ra, 124(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sw s0, 120(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    .cfi_offset s0, -8
 ; RV32-NEXT:    addi s0, sp, 128
-; RV32-NEXT:    .cfi_def_cfa s0, 0
 ; RV32-NEXT:    andi sp, sp, -64
 ; RV32-NEXT:    mv a0, sp
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
@@ -1557,25 +1489,17 @@ define <8 x iXLen> @lround_v8f64(<8 x double> %x) {
 ; RV32-NEXT:    fcvt.w.d a0, fa2, rmm
 ; RV32-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, s0, -128
-; RV32-NEXT:    .cfi_def_cfa sp, 128
 ; RV32-NEXT:    lw ra, 124(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 120(sp) # 4-byte Folded Reload
-; RV32-NEXT:    .cfi_restore ra
-; RV32-NEXT:    .cfi_restore s0
 ; RV32-NEXT:    addi sp, sp, 128
-; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-i32-LABEL: lround_v8f64:
 ; RV64-i32:       # %bb.0:
 ; RV64-i32-NEXT:    addi sp, sp, -128
-; RV64-i32-NEXT:    .cfi_def_cfa_offset 128
 ; RV64-i32-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
 ; RV64-i32-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
-; RV64-i32-NEXT:    .cfi_offset ra, -8
-; RV64-i32-NEXT:    .cfi_offset s0, -16
 ; RV64-i32-NEXT:    addi s0, sp, 128
-; RV64-i32-NEXT:    .cfi_def_cfa s0, 0
 ; RV64-i32-NEXT:    andi sp, sp, -64
 ; RV64-i32-NEXT:    mv a0, sp
 ; RV64-i32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
@@ -1612,25 +1536,17 @@ define <8 x iXLen> @lround_v8f64(<8 x double> %x) {
 ; RV64-i32-NEXT:    fcvt.w.d a0, fa2, rmm
 ; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-i32-NEXT:    addi sp, s0, -128
-; RV64-i32-NEXT:    .cfi_def_cfa sp, 128
 ; RV64-i32-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64-i32-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
-; RV64-i32-NEXT:    .cfi_restore ra
-; RV64-i32-NEXT:    .cfi_restore s0
 ; RV64-i32-NEXT:    addi sp, sp, 128
-; RV64-i32-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-i32-NEXT:    ret
 ;
 ; RV64-i64-LABEL: lround_v8f64:
 ; RV64-i64:       # %bb.0:
 ; RV64-i64-NEXT:    addi sp, sp, -192
-; RV64-i64-NEXT:    .cfi_def_cfa_offset 192
 ; RV64-i64-NEXT:    sd ra, 184(sp) # 8-byte Folded Spill
 ; RV64-i64-NEXT:    sd s0, 176(sp) # 8-byte Folded Spill
-; RV64-i64-NEXT:    .cfi_offset ra, -8
-; RV64-i64-NEXT:    .cfi_offset s0, -16
 ; RV64-i64-NEXT:    addi s0, sp, 192
-; RV64-i64-NEXT:    .cfi_def_cfa s0, 0
 ; RV64-i64-NEXT:    andi sp, sp, -64
 ; RV64-i64-NEXT:    mv a0, sp
 ; RV64-i64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
@@ -1668,13 +1584,9 @@ define <8 x iXLen> @lround_v8f64(<8 x double> %x) {
 ; RV64-i64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-i64-NEXT:    vle64.v v8, (a0)
 ; RV64-i64-NEXT:    addi sp, s0, -192
-; RV64-i64-NEXT:    .cfi_def_cfa sp, 192
 ; RV64-i64-NEXT:    ld ra, 184(sp) # 8-byte Folded Reload
 ; RV64-i64-NEXT:    ld s0, 176(sp) # 8-byte Folded Reload
-; RV64-i64-NEXT:    .cfi_restore ra
-; RV64-i64-NEXT:    .cfi_restore s0
 ; RV64-i64-NEXT:    addi sp, sp, 192
-; RV64-i64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-i64-NEXT:    ret
   %a = call <8 x iXLen> @llvm.lround.v8iXLen.v8f64(<8 x double> %x)
   ret <8 x iXLen> %a