[llvm] [AArch64] Lower llvm.lrint to SVE2p2 FRINT*X instructions (PR #187319)
Jacob Crawley via llvm-commits
llvm-commits at lists.llvm.org
Thu Apr 2 01:32:38 PDT 2026
https://github.com/jacob-crawley updated https://github.com/llvm/llvm-project/pull/187319
>From 9a2a72382e522ff67a3a6c8fc63b27733b5ba508 Mon Sep 17 00:00:00 2001
From: Jacob Crawley <jacob.crawley at arm.com>
Date: Wed, 18 Mar 2026 14:45:18 +0000
Subject: [PATCH 1/4] [AArch64] Lower llvm.lrint to SVE2p2 FRINT*X instructions
Simplifies the codegen for the llvm.lrint() and llvm.llrint() intrinsics
by using the SVE2p2 FRINT32X/FRINT64X instructions to perform the
rounding, followed by an additional FCVTZS instruction to convert the
floating-point result to an integer.
---
.../Target/AArch64/AArch64ISelLowering.cpp | 14 +
llvm/test/CodeGen/AArch64/sve2p2-llrint.ll | 834 +++++++++++
llvm/test/CodeGen/AArch64/sve2p2-lrint.ll | 1276 +++++++++++++++++
3 files changed, 2124 insertions(+)
create mode 100644 llvm/test/CodeGen/AArch64/sve2p2-llrint.ll
create mode 100644 llvm/test/CodeGen/AArch64/sve2p2-lrint.ll
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 38db1ac4a2fb9..9185ab02426fb 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5297,6 +5297,20 @@ SDValue AArch64TargetLowering::LowerVectorXRINT(SDValue Op,
EVT CastVT = VT.changeVectorElementType(
*DAG.getContext(), Src.getValueType().getVectorElementType());
+ unsigned IntBits = VT.getScalarSizeInBits();
+ unsigned FPBits = CastVT.getScalarSizeInBits();
+
+ // Use FRINT32X/FRINT64X if SVE2p2 is available.
+ if (Subtarget->isSVEorStreamingSVEAvailable() && (Subtarget->hasSVE2p2()) &&
+ (FPBits == 32 || FPBits == 64)) {
+ unsigned FrintOp = (IntBits == 32) ? AArch64ISD::FRINT32_MERGE_PASSTHRU
+ : AArch64ISD::FRINT64_MERGE_PASSTHRU;
+ SDValue Pg = getPredicateForVector(DAG, DL, CastVT);
+ SDValue Passthru = DAG.getUNDEF(CastVT);
+ SDValue FOp = DAG.getNode(FrintOp, DL, CastVT, Pg, Src, Passthru);
+ return DAG.getNode(ISD::FP_TO_SINT, DL, VT, FOp);
+ }
+
// Round the floating-point value into a floating-point register with the
// current rounding mode.
SDValue FOp = DAG.getNode(ISD::FRINT, DL, CastVT, Src);
diff --git a/llvm/test/CodeGen/AArch64/sve2p2-llrint.ll b/llvm/test/CodeGen/AArch64/sve2p2-llrint.ll
new file mode 100644
index 0000000000000..6a10635d130b8
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve2p2-llrint.ll
@@ -0,0 +1,834 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2p2 | FileCheck %s
+
+define <vscale x 1 x i64> @llrint_v1i64_v1f16(<vscale x 1 x half> %x) {
+; CHECK-LABEL: llrint_v1i64_v1f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z1.h, #-1025 // =0xfffffffffffffbff
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov w8, #31743 // =0x7bff
+; CHECK-NEXT: mov z2.d, #0x8000000000000000
+; CHECK-NEXT: frintx z0.h, p0/z, z0.h
+; CHECK-NEXT: fcmge p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT: mov z1.h, w8
+; CHECK-NEXT: fcvtzs z2.d, p1/m, z0.h
+; CHECK-NEXT: fcmgt p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT: mov z1.d, #0x7fffffffffffffff
+; CHECK-NEXT: fcmuo p0.h, p0/z, z0.h, z0.h
+; CHECK-NEXT: sel z0.d, p1, z1.d, z2.d
+; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0
+; CHECK-NEXT: ret
+ %a = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f16(<vscale x 1 x half> %x)
+ ret <vscale x 1 x i64> %a
+}
+declare <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f16(<vscale x 1 x half>)
+
+define <vscale x 2 x i64> @llrint_v2i64_v2f16(<vscale x 2 x half> %x) {
+; CHECK-LABEL: llrint_v2i64_v2f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z1.h, #-1025 // =0xfffffffffffffbff
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov w8, #31743 // =0x7bff
+; CHECK-NEXT: mov z2.d, #0x8000000000000000
+; CHECK-NEXT: frintx z0.h, p0/z, z0.h
+; CHECK-NEXT: fcmge p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT: mov z1.h, w8
+; CHECK-NEXT: fcvtzs z2.d, p1/m, z0.h
+; CHECK-NEXT: fcmgt p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT: mov z1.d, #0x7fffffffffffffff
+; CHECK-NEXT: fcmuo p0.h, p0/z, z0.h, z0.h
+; CHECK-NEXT: sel z0.d, p1, z1.d, z2.d
+; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0
+; CHECK-NEXT: ret
+ %a = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f16(<vscale x 2 x half> %x)
+ ret <vscale x 2 x i64> %a
+}
+declare <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f16(<vscale x 2 x half>)
+
+define <vscale x 4 x i64> @llrint_v4i64_v4f16(<vscale x 4 x half> %x) {
+; CHECK-LABEL: llrint_v4i64_v4f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uunpklo z1.d, z0.s
+; CHECK-NEXT: uunpkhi z0.d, z0.s
+; CHECK-NEXT: mov w8, #31743 // =0x7bff
+; CHECK-NEXT: mov z2.h, #-1025 // =0xfffffffffffffbff
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov z3.d, #0x8000000000000000
+; CHECK-NEXT: mov z4.d, #0x8000000000000000
+; CHECK-NEXT: mov z5.d, #0x7fffffffffffffff
+; CHECK-NEXT: frintx z1.h, p0/z, z1.h
+; CHECK-NEXT: frintx z0.h, p0/z, z0.h
+; CHECK-NEXT: fcmge p1.h, p0/z, z1.h, z2.h
+; CHECK-NEXT: fcmge p2.h, p0/z, z0.h, z2.h
+; CHECK-NEXT: mov z2.h, w8
+; CHECK-NEXT: fcmuo p3.h, p0/z, z1.h, z1.h
+; CHECK-NEXT: fcvtzs z4.d, p1/m, z1.h
+; CHECK-NEXT: fcmgt p1.h, p0/z, z1.h, z2.h
+; CHECK-NEXT: fcvtzs z3.d, p2/m, z0.h
+; CHECK-NEXT: fcmgt p2.h, p0/z, z0.h, z2.h
+; CHECK-NEXT: fcmuo p0.h, p0/z, z0.h, z0.h
+; CHECK-NEXT: sel z0.d, p1, z5.d, z4.d
+; CHECK-NEXT: sel z1.d, p2, z5.d, z3.d
+; CHECK-NEXT: mov z0.d, p3/m, #0 // =0x0
+; CHECK-NEXT: mov z1.d, p0/m, #0 // =0x0
+; CHECK-NEXT: ret
+ %a = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f16(<vscale x 4 x half> %x)
+ ret <vscale x 4 x i64> %a
+}
+declare <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f16(<vscale x 4 x half>)
+
+define <vscale x 8 x i64> @llrint_v8i64_v8f16(<vscale x 8 x half> %x) {
+; CHECK-LABEL: llrint_v8i64_v8f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Spill
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: uunpklo z1.s, z0.h
+; CHECK-NEXT: uunpkhi z0.s, z0.h
+; CHECK-NEXT: mov w8, #31743 // =0x7bff
+; CHECK-NEXT: mov z4.h, #-1025 // =0xfffffffffffffbff
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov z5.d, #0x8000000000000000
+; CHECK-NEXT: mov z6.d, #0x8000000000000000
+; CHECK-NEXT: mov z7.d, #0x8000000000000000
+; CHECK-NEXT: mov z24.h, w8
+; CHECK-NEXT: mov z25.d, #0x8000000000000000
+; CHECK-NEXT: uunpklo z2.d, z1.s
+; CHECK-NEXT: uunpkhi z1.d, z1.s
+; CHECK-NEXT: uunpklo z3.d, z0.s
+; CHECK-NEXT: uunpkhi z0.d, z0.s
+; CHECK-NEXT: frintx z2.h, p0/z, z2.h
+; CHECK-NEXT: frintx z1.h, p0/z, z1.h
+; CHECK-NEXT: frintx z3.h, p0/z, z3.h
+; CHECK-NEXT: fcmge p1.h, p0/z, z2.h, z4.h
+; CHECK-NEXT: frintx z26.h, p0/z, z0.h
+; CHECK-NEXT: fcmge p2.h, p0/z, z1.h, z4.h
+; CHECK-NEXT: fcmge p3.h, p0/z, z3.h, z4.h
+; CHECK-NEXT: fcmgt p4.h, p0/z, z1.h, z24.h
+; CHECK-NEXT: fcvtzs z5.d, p1/m, z2.h
+; CHECK-NEXT: fcmge p1.h, p0/z, z26.h, z4.h
+; CHECK-NEXT: fcvtzs z6.d, p2/m, z1.h
+; CHECK-NEXT: mov z4.d, #0x7fffffffffffffff
+; CHECK-NEXT: fcmgt p2.h, p0/z, z2.h, z24.h
+; CHECK-NEXT: fcvtzs z7.d, p3/m, z3.h
+; CHECK-NEXT: fcmgt p5.h, p0/z, z3.h, z24.h
+; CHECK-NEXT: fcmgt p6.h, p0/z, z26.h, z24.h
+; CHECK-NEXT: fcvtzs z25.d, p1/m, z26.h
+; CHECK-NEXT: fcmuo p3.h, p0/z, z2.h, z2.h
+; CHECK-NEXT: sel z0.d, p2, z4.d, z5.d
+; CHECK-NEXT: fcmuo p1.h, p0/z, z1.h, z1.h
+; CHECK-NEXT: sel z1.d, p4, z4.d, z6.d
+; CHECK-NEXT: sel z2.d, p5, z4.d, z7.d
+; CHECK-NEXT: fcmuo p2.h, p0/z, z3.h, z3.h
+; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Reload
+; CHECK-NEXT: fcmuo p0.h, p0/z, z26.h, z26.h
+; CHECK-NEXT: sel z3.d, p6, z4.d, z25.d
+; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Reload
+; CHECK-NEXT: mov z0.d, p3/m, #0 // =0x0
+; CHECK-NEXT: mov z1.d, p1/m, #0 // =0x0
+; CHECK-NEXT: mov z2.d, p2/m, #0 // =0x0
+; CHECK-NEXT: mov z3.d, p0/m, #0 // =0x0
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %a = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f16(<vscale x 8 x half> %x)
+ ret <vscale x 8 x i64> %a
+}
+declare <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f16(<vscale x 8 x half>)
+
+define <vscale x 16 x i64> @llrint_v16i64_v16f16(<vscale x 16 x half> %x) {
+; CHECK-LABEL: llrint_v16i64_v16f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p10, [sp, #1, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p9, [sp, #2, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p8, [sp, #3, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Spill
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: uunpkhi z3.s, z0.h
+; CHECK-NEXT: uunpklo z2.s, z0.h
+; CHECK-NEXT: mov w8, #31743 // =0x7bff
+; CHECK-NEXT: uunpklo z7.s, z1.h
+; CHECK-NEXT: mov z0.h, #-1025 // =0xfffffffffffffbff
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: uunpkhi z1.s, z1.h
+; CHECK-NEXT: mov z4.d, #0x8000000000000000
+; CHECK-NEXT: mov z6.d, #0x8000000000000000
+; CHECK-NEXT: mov z5.d, #0x8000000000000000
+; CHECK-NEXT: mov z29.h, w8
+; CHECK-NEXT: mov z30.d, #0x8000000000000000
+; CHECK-NEXT: uunpklo z26.d, z3.s
+; CHECK-NEXT: uunpklo z24.d, z2.s
+; CHECK-NEXT: uunpkhi z25.d, z2.s
+; CHECK-NEXT: uunpklo z28.d, z7.s
+; CHECK-NEXT: uunpkhi z27.d, z3.s
+; CHECK-NEXT: uunpkhi z7.d, z7.s
+; CHECK-NEXT: mov z2.d, #0x8000000000000000
+; CHECK-NEXT: mov z3.d, #0x8000000000000000
+; CHECK-NEXT: uunpklo z31.d, z1.s
+; CHECK-NEXT: uunpkhi z1.d, z1.s
+; CHECK-NEXT: frintx z26.h, p0/z, z26.h
+; CHECK-NEXT: frintx z24.h, p0/z, z24.h
+; CHECK-NEXT: frintx z25.h, p0/z, z25.h
+; CHECK-NEXT: frintx z28.h, p0/z, z28.h
+; CHECK-NEXT: frintx z27.h, p0/z, z27.h
+; CHECK-NEXT: frintx z7.h, p0/z, z7.h
+; CHECK-NEXT: fcmge p4.h, p0/z, z26.h, z0.h
+; CHECK-NEXT: fcmge p2.h, p0/z, z24.h, z0.h
+; CHECK-NEXT: fcmge p3.h, p0/z, z25.h, z0.h
+; CHECK-NEXT: fcmge p1.h, p0/z, z28.h, z0.h
+; CHECK-NEXT: fcmge p5.h, p0/z, z27.h, z0.h
+; CHECK-NEXT: fcvtzs z4.d, p4/m, z26.h
+; CHECK-NEXT: fcvtzs z2.d, p2/m, z24.h
+; CHECK-NEXT: fcmge p4.h, p0/z, z7.h, z0.h
+; CHECK-NEXT: fcvtzs z3.d, p3/m, z25.h
+; CHECK-NEXT: fcvtzs z6.d, p1/m, z28.h
+; CHECK-NEXT: fcmgt p3.h, p0/z, z24.h, z29.h
+; CHECK-NEXT: fcvtzs z5.d, p5/m, z27.h
+; CHECK-NEXT: fcmuo p1.h, p0/z, z24.h, z24.h
+; CHECK-NEXT: frintx z24.h, p0/z, z31.h
+; CHECK-NEXT: mov z31.d, #0x8000000000000000
+; CHECK-NEXT: fcvtzs z30.d, p4/m, z7.h
+; CHECK-NEXT: fcmgt p5.h, p0/z, z25.h, z29.h
+; CHECK-NEXT: fcmuo p2.h, p0/z, z25.h, z25.h
+; CHECK-NEXT: mov z25.d, #0x8000000000000000
+; CHECK-NEXT: fcmge p4.h, p0/z, z24.h, z0.h
+; CHECK-NEXT: fcmgt p6.h, p0/z, z26.h, z29.h
+; CHECK-NEXT: fcmuo p7.h, p0/z, z26.h, z26.h
+; CHECK-NEXT: mov z26.d, #0x7fffffffffffffff
+; CHECK-NEXT: fcmgt p8.h, p0/z, z27.h, z29.h
+; CHECK-NEXT: fcvtzs z25.d, p4/m, z24.h
+; CHECK-NEXT: fcmuo p10.h, p0/z, z27.h, z27.h
+; CHECK-NEXT: frintx z27.h, p0/z, z1.h
+; CHECK-NEXT: sel z1.d, p5, z26.d, z3.d
+; CHECK-NEXT: sel z3.d, p8, z26.d, z5.d
+; CHECK-NEXT: fcmge p4.h, p0/z, z27.h, z0.h
+; CHECK-NEXT: sel z0.d, p3, z26.d, z2.d
+; CHECK-NEXT: sel z2.d, p6, z26.d, z4.d
+; CHECK-NEXT: mov z1.d, p2/m, #0 // =0x0
+; CHECK-NEXT: mov z3.d, p10/m, #0 // =0x0
+; CHECK-NEXT: ldr p10, [sp, #1, mul vl] // 2-byte Reload
+; CHECK-NEXT: fcmgt p9.h, p0/z, z28.h, z29.h
+; CHECK-NEXT: mov z2.d, p7/m, #0 // =0x0
+; CHECK-NEXT: ldr p7, [sp, #4, mul vl] // 2-byte Reload
+; CHECK-NEXT: fcvtzs z31.d, p4/m, z27.h
+; CHECK-NEXT: mov z0.d, p1/m, #0 // =0x0
+; CHECK-NEXT: fcmgt p5.h, p0/z, z7.h, z29.h
+; CHECK-NEXT: fcmgt p6.h, p0/z, z24.h, z29.h
+; CHECK-NEXT: sel z4.d, p9, z26.d, z6.d
+; CHECK-NEXT: fcmgt p4.h, p0/z, z27.h, z29.h
+; CHECK-NEXT: fcmuo p8.h, p0/z, z7.h, z7.h
+; CHECK-NEXT: sel z5.d, p5, z26.d, z30.d
+; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Reload
+; CHECK-NEXT: sel z6.d, p6, z26.d, z25.d
+; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT: fcmuo p9.h, p0/z, z24.h, z24.h
+; CHECK-NEXT: fcmuo p3.h, p0/z, z28.h, z28.h
+; CHECK-NEXT: sel z7.d, p4, z26.d, z31.d
+; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Reload
+; CHECK-NEXT: mov z5.d, p8/m, #0 // =0x0
+; CHECK-NEXT: ldr p8, [sp, #3, mul vl] // 2-byte Reload
+; CHECK-NEXT: fcmuo p0.h, p0/z, z27.h, z27.h
+; CHECK-NEXT: mov z6.d, p9/m, #0 // =0x0
+; CHECK-NEXT: ldr p9, [sp, #2, mul vl] // 2-byte Reload
+; CHECK-NEXT: mov z4.d, p3/m, #0 // =0x0
+; CHECK-NEXT: mov z7.d, p0/m, #0 // =0x0
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %a = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f16(<vscale x 16 x half> %x)
+ ret <vscale x 16 x i64> %a
+}
+declare <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f16(<vscale x 16 x half>)
+
+define <vscale x 32 x i64> @llrint_v32i64_v32f16(<vscale x 32 x half> %x) {
+; CHECK-LABEL: llrint_v32i64_v32f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-17
+; CHECK-NEXT: str p10, [sp, #1, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p9, [sp, #2, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p8, [sp, #3, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Spill
+; CHECK-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16
+; CHECK-NEXT: uunpklo z4.s, z0.h
+; CHECK-NEXT: uunpkhi z6.s, z0.h
+; CHECK-NEXT: mov w9, #31743 // =0x7bff
+; CHECK-NEXT: uunpklo z7.s, z1.h
+; CHECK-NEXT: mov z27.h, #-1025 // =0xfffffffffffffbff
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: uunpkhi z31.s, z1.h
+; CHECK-NEXT: mov z0.d, #0x8000000000000000
+; CHECK-NEXT: mov z30.d, #0x8000000000000000
+; CHECK-NEXT: uunpkhi z17.s, z2.h
+; CHECK-NEXT: mov z9.d, #0x8000000000000000
+; CHECK-NEXT: uunpklo z18.s, z3.h
+; CHECK-NEXT: uunpklo z24.d, z4.s
+; CHECK-NEXT: uunpkhi z25.d, z4.s
+; CHECK-NEXT: uunpkhi z28.d, z6.s
+; CHECK-NEXT: uunpklo z29.d, z7.s
+; CHECK-NEXT: uunpkhi z8.d, z7.s
+; CHECK-NEXT: uunpklo z26.d, z6.s
+; CHECK-NEXT: mov z4.d, #0x8000000000000000
+; CHECK-NEXT: uunpklo z12.d, z31.s
+; CHECK-NEXT: uunpkhi z13.d, z31.s
+; CHECK-NEXT: uunpkhi z3.s, z3.h
+; CHECK-NEXT: uunpkhi z19.d, z18.s
+; CHECK-NEXT: mov z5.d, #0x8000000000000000
+; CHECK-NEXT: frintx z1.h, p0/z, z24.h
+; CHECK-NEXT: frintx z7.h, p0/z, z25.h
+; CHECK-NEXT: frintx z25.h, p0/z, z28.h
+; CHECK-NEXT: frintx z11.h, p0/z, z29.h
+; CHECK-NEXT: frintx z28.h, p0/z, z8.h
+; CHECK-NEXT: frintx z24.h, p0/z, z26.h
+; CHECK-NEXT: frintx z31.h, p0/z, z12.h
+; CHECK-NEXT: frintx z15.h, p0/z, z13.h
+; CHECK-NEXT: uunpklo z8.s, z2.h
+; CHECK-NEXT: frintx z19.h, p0/z, z19.h
+; CHECK-NEXT: mov z26.h, w9
+; CHECK-NEXT: mov z12.d, #0x8000000000000000
+; CHECK-NEXT: mov z13.d, #0x8000000000000000
+; CHECK-NEXT: uunpklo z20.d, z3.s
+; CHECK-NEXT: mov z6.d, #0x8000000000000000
+; CHECK-NEXT: uunpkhi z3.d, z3.s
+; CHECK-NEXT: mov z29.d, #0x7fffffffffffffff
+; CHECK-NEXT: uunpklo z18.d, z18.s
+; CHECK-NEXT: uunpkhi z16.d, z8.s
+; CHECK-NEXT: mov z22.d, #0x8000000000000000
+; CHECK-NEXT: mov z23.d, #0x8000000000000000
+; CHECK-NEXT: mov z10.d, #0x8000000000000000
+; CHECK-NEXT: mov z21.d, #0x8000000000000000
+; CHECK-NEXT: frintx z20.h, p0/z, z20.h
+; CHECK-NEXT: fcmge p3.h, p0/z, z1.h, z27.h
+; CHECK-NEXT: frintx z2.h, p0/z, z16.h
+; CHECK-NEXT: uunpklo z16.d, z17.s
+; CHECK-NEXT: uunpkhi z17.d, z17.s
+; CHECK-NEXT: fcmge p4.h, p0/z, z7.h, z27.h
+; CHECK-NEXT: fcmgt p9.h, p0/z, z11.h, z26.h
+; CHECK-NEXT: fcvtzs z0.d, p3/m, z1.h
+; CHECK-NEXT: frintx z16.h, p0/z, z16.h
+; CHECK-NEXT: frintx z17.h, p0/z, z17.h
+; CHECK-NEXT: fcmge p3.h, p0/z, z11.h, z27.h
+; CHECK-NEXT: fcvtzs z4.d, p4/m, z7.h
+; CHECK-NEXT: fcmge p4.h, p0/z, z28.h, z27.h
+; CHECK-NEXT: fcmuo p8.h, p0/z, z11.h, z11.h
+; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: frintx z0.h, p0/z, z3.h
+; CHECK-NEXT: fcmge p6.h, p0/z, z15.h, z27.h
+; CHECK-NEXT: fcvtzs z30.d, p3/m, z11.h
+; CHECK-NEXT: uunpklo z11.d, z8.s
+; CHECK-NEXT: mov z8.d, #0x8000000000000000
+; CHECK-NEXT: fcvtzs z9.d, p4/m, z28.h
+; CHECK-NEXT: fcmge p4.h, p0/z, z31.h, z27.h
+; CHECK-NEXT: fcmge p7.h, p0/z, z2.h, z27.h
+; CHECK-NEXT: fcvtzs z12.d, p6/m, z15.h
+; CHECK-NEXT: frintx z14.h, p0/z, z11.h
+; CHECK-NEXT: mov z11.d, #0x8000000000000000
+; CHECK-NEXT: sel z3.d, p9, z29.d, z30.d
+; CHECK-NEXT: fcmge p5.h, p0/z, z14.h, z27.h
+; CHECK-NEXT: fcvtzs z8.d, p4/m, z31.h
+; CHECK-NEXT: fcmge p1.h, p0/z, z24.h, z27.h
+; CHECK-NEXT: mov z3.d, p8/m, #0 // =0x0
+; CHECK-NEXT: fcvtzs z11.d, p7/m, z2.h
+; CHECK-NEXT: fcmge p2.h, p0/z, z25.h, z27.h
+; CHECK-NEXT: fcvtzs z13.d, p5/m, z14.h
+; CHECK-NEXT: fcmgt p6.h, p0/z, z15.h, z26.h
+; CHECK-NEXT: fcvtzs z5.d, p1/m, z24.h
+; CHECK-NEXT: fcmuo p4.h, p0/z, z15.h, z15.h
+; CHECK-NEXT: mov z15.d, #0x8000000000000000
+; CHECK-NEXT: fcvtzs z6.d, p2/m, z25.h
+; CHECK-NEXT: fcmge p7.h, p0/z, z16.h, z27.h
+; CHECK-NEXT: fcmgt p3.h, p0/z, z28.h, z26.h
+; CHECK-NEXT: fcmgt p10.h, p0/z, z14.h, z26.h
+; CHECK-NEXT: fcmge p1.h, p0/z, z19.h, z27.h
+; CHECK-NEXT: fcmge p2.h, p0/z, z20.h, z27.h
+; CHECK-NEXT: fcvtzs z15.d, p7/m, z16.h
+; CHECK-NEXT: sel z30.d, p3, z29.d, z9.d
+; CHECK-NEXT: sel z9.d, p6, z29.d, z12.d
+; CHECK-NEXT: sel z12.d, p10, z29.d, z13.d
+; CHECK-NEXT: frintx z13.h, p0/z, z18.h
+; CHECK-NEXT: fcmge p5.h, p0/z, z17.h, z27.h
+; CHECK-NEXT: fcvtzs z22.d, p1/m, z19.h
+; CHECK-NEXT: fcvtzs z23.d, p2/m, z20.h
+; CHECK-NEXT: mov z9.d, p4/m, #0 // =0x0
+; CHECK-NEXT: fcmge p7.h, p0/z, z0.h, z27.h
+; CHECK-NEXT: fcmuo p1.h, p0/z, z14.h, z14.h
+; CHECK-NEXT: str z9, [x8, #7, mul vl]
+; CHECK-NEXT: fcvtzs z21.d, p5/m, z17.h
+; CHECK-NEXT: fcmge p2.h, p0/z, z13.h, z27.h
+; CHECK-NEXT: mov z27.d, #0x8000000000000000
+; CHECK-NEXT: fcvtzs z10.d, p7/m, z0.h
+; CHECK-NEXT: fcmgt p5.h, p0/z, z0.h, z26.h
+; CHECK-NEXT: mov z12.d, p1/m, #0 // =0x0
+; CHECK-NEXT: fcmgt p6.h, p0/z, z20.h, z26.h
+; CHECK-NEXT: fcvtzs z27.d, p2/m, z13.h
+; CHECK-NEXT: fcmgt p3.h, p0/z, z19.h, z26.h
+; CHECK-NEXT: str z12, [x8, #8, mul vl]
+; CHECK-NEXT: fcmuo p1.h, p0/z, z0.h, z0.h
+; CHECK-NEXT: sel z0.d, p5, z29.d, z10.d
+; CHECK-NEXT: sel z10.d, p6, z29.d, z23.d
+; CHECK-NEXT: fcmuo p2.h, p0/z, z20.h, z20.h
+; CHECK-NEXT: fcmuo p7.h, p0/z, z19.h, z19.h
+; CHECK-NEXT: fcmgt p5.h, p0/z, z2.h, z26.h
+; CHECK-NEXT: mov z0.d, p1/m, #0 // =0x0
+; CHECK-NEXT: fcmuo p6.h, p0/z, z2.h, z2.h
+; CHECK-NEXT: sel z2.d, p3, z29.d, z22.d
+; CHECK-NEXT: mov z10.d, p2/m, #0 // =0x0
+; CHECK-NEXT: str z0, [x8, #15, mul vl]
+; CHECK-NEXT: fcmgt p3.h, p0/z, z13.h, z26.h
+; CHECK-NEXT: mov z2.d, p7/m, #0 // =0x0
+; CHECK-NEXT: sel z0.d, p5, z29.d, z11.d
+; CHECK-NEXT: str z10, [x8, #14, mul vl]
+; CHECK-NEXT: fcmgt p1.h, p0/z, z17.h, z26.h
+; CHECK-NEXT: fcmgt p2.h, p0/z, z16.h, z26.h
+; CHECK-NEXT: str z2, [x8, #13, mul vl]
+; CHECK-NEXT: mov z0.d, p6/m, #0 // =0x0
+; CHECK-NEXT: fcmuo p7.h, p0/z, z13.h, z13.h
+; CHECK-NEXT: mov z27.d, p3/m, z29.d
+; CHECK-NEXT: fcmuo p5.h, p0/z, z17.h, z17.h
+; CHECK-NEXT: str z0, [x8, #9, mul vl]
+; CHECK-NEXT: sel z2.d, p1, z29.d, z21.d
+; CHECK-NEXT: fcmuo p3.h, p0/z, z16.h, z16.h
+; CHECK-NEXT: sel z10.d, p2, z29.d, z15.d
+; CHECK-NEXT: mov z27.d, p7/m, #0 // =0x0
+; CHECK-NEXT: fcmgt p1.h, p0/z, z7.h, z26.h
+; CHECK-NEXT: mov z2.d, p5/m, #0 // =0x0
+; CHECK-NEXT: fcmgt p2.h, p0/z, z24.h, z26.h
+; CHECK-NEXT: str z27, [x8, #12, mul vl]
+; CHECK-NEXT: mov z10.d, p3/m, #0 // =0x0
+; CHECK-NEXT: fcmuo p3.h, p0/z, z28.h, z28.h
+; CHECK-NEXT: str z2, [x8, #11, mul vl]
+; CHECK-NEXT: fcmgt p7.h, p0/z, z31.h, z26.h
+; CHECK-NEXT: mov z4.d, p1/m, z29.d
+; CHECK-NEXT: str z10, [x8, #10, mul vl]
+; CHECK-NEXT: mov z5.d, p2/m, z29.d
+; CHECK-NEXT: fcmgt p5.h, p0/z, z25.h, z26.h
+; CHECK-NEXT: fcmgt p1.h, p0/z, z1.h, z26.h
+; CHECK-NEXT: mov z30.d, p3/m, #0 // =0x0
+; CHECK-NEXT: sel z2.d, p7, z29.d, z8.d
+; CHECK-NEXT: fcmuo p6.h, p0/z, z31.h, z31.h
+; CHECK-NEXT: fcmuo p2.h, p0/z, z25.h, z25.h
+; CHECK-NEXT: sel z0.d, p5, z29.d, z6.d
+; CHECK-NEXT: fcmuo p3.h, p0/z, z24.h, z24.h
+; CHECK-NEXT: fcmuo p4.h, p0/z, z7.h, z7.h
+; CHECK-NEXT: mov z2.d, p6/m, #0 // =0x0
+; CHECK-NEXT: fcmuo p0.h, p0/z, z1.h, z1.h
+; CHECK-NEXT: ldr z1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: str z30, [x8, #5, mul vl]
+; CHECK-NEXT: mov z0.d, p2/m, #0 // =0x0
+; CHECK-NEXT: str z3, [x8, #4, mul vl]
+; CHECK-NEXT: mov z5.d, p3/m, #0 // =0x0
+; CHECK-NEXT: mov z4.d, p4/m, #0 // =0x0
+; CHECK-NEXT: str z2, [x8, #6, mul vl]
+; CHECK-NEXT: mov z1.d, p1/m, z29.d
+; CHECK-NEXT: str z0, [x8, #3, mul vl]
+; CHECK-NEXT: str z5, [x8, #2, mul vl]
+; CHECK-NEXT: mov z1.d, p0/m, #0 // =0x0
+; CHECK-NEXT: str z4, [x8, #1, mul vl]
+; CHECK-NEXT: str z1, [x8]
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr p10, [sp, #1, mul vl] // 2-byte Reload
+; CHECK-NEXT: ldr p9, [sp, #2, mul vl] // 2-byte Reload
+; CHECK-NEXT: ldr p8, [sp, #3, mul vl] // 2-byte Reload
+; CHECK-NEXT: ldr p7, [sp, #4, mul vl] // 2-byte Reload
+; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Reload
+; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Reload
+; CHECK-NEXT: addvl sp, sp, #17
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %a = call <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv32f16(<vscale x 32 x half> %x)
+ ret <vscale x 32 x i64> %a
+}
+declare <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv32f16(<vscale x 32 x half>)
+
+define <vscale x 1 x i64> @llrint_v1i64_v1f32(<vscale x 1 x float> %x) {
+; CHECK-LABEL: llrint_v1i64_v1f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: frint64x z0.s, p0/z, z0.s
+; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.s
+; CHECK-NEXT: ret
+ %a = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f32(<vscale x 1 x float> %x)
+ ret <vscale x 1 x i64> %a
+}
+declare <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f32(<vscale x 1 x float>)
+
+define <vscale x 2 x i64> @llrint_v2i64_v2f32(<vscale x 2 x float> %x) {
+; CHECK-LABEL: llrint_v2i64_v2f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: frint64x z0.s, p0/z, z0.s
+; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.s
+; CHECK-NEXT: ret
+ %a = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f32(<vscale x 2 x float> %x)
+ ret <vscale x 2 x i64> %a
+}
+declare <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f32(<vscale x 2 x float>)
+
+define <vscale x 4 x i64> @llrint_v4i64_v4f32(<vscale x 4 x float> %x) {
+; CHECK-LABEL: llrint_v4i64_v4f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uunpklo z1.d, z0.s
+; CHECK-NEXT: uunpkhi z0.d, z0.s
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: frint64x z1.s, p0/z, z1.s
+; CHECK-NEXT: frint64x z2.s, p0/z, z0.s
+; CHECK-NEXT: movprfx z0, z1
+; CHECK-NEXT: fcvtzs z0.d, p0/m, z1.s
+; CHECK-NEXT: movprfx z1, z2
+; CHECK-NEXT: fcvtzs z1.d, p0/m, z2.s
+; CHECK-NEXT: ret
+ %a = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f32(<vscale x 4 x float> %x)
+ ret <vscale x 4 x i64> %a
+}
+declare <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f32(<vscale x 4 x float>)
+
+define <vscale x 8 x i64> @llrint_v8i64_v8f32(<vscale x 8 x float> %x) {
+; CHECK-LABEL: llrint_v8i64_v8f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uunpklo z2.d, z0.s
+; CHECK-NEXT: uunpkhi z0.d, z0.s
+; CHECK-NEXT: uunpklo z3.d, z1.s
+; CHECK-NEXT: uunpkhi z1.d, z1.s
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: frint64x z2.s, p0/z, z2.s
+; CHECK-NEXT: frint64x z4.s, p0/z, z0.s
+; CHECK-NEXT: frint64x z3.s, p0/z, z3.s
+; CHECK-NEXT: frint64x z5.s, p0/z, z1.s
+; CHECK-NEXT: movprfx z0, z2
+; CHECK-NEXT: fcvtzs z0.d, p0/m, z2.s
+; CHECK-NEXT: movprfx z1, z4
+; CHECK-NEXT: fcvtzs z1.d, p0/m, z4.s
+; CHECK-NEXT: movprfx z2, z3
+; CHECK-NEXT: fcvtzs z2.d, p0/m, z3.s
+; CHECK-NEXT: movprfx z3, z5
+; CHECK-NEXT: fcvtzs z3.d, p0/m, z5.s
+; CHECK-NEXT: ret
+ %a = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f32(<vscale x 8 x float> %x)
+ ret <vscale x 8 x i64> %a
+}
+declare <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f32(<vscale x 8 x float>)
+
+define <vscale x 16 x i64> @llrint_v16i64_v16f32(<vscale x 16 x float> %x) {
+; CHECK-LABEL: llrint_v16i64_v16f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uunpklo z4.d, z1.s
+; CHECK-NEXT: uunpklo z5.d, z0.s
+; CHECK-NEXT: uunpkhi z0.d, z0.s
+; CHECK-NEXT: uunpkhi z1.d, z1.s
+; CHECK-NEXT: uunpklo z6.d, z2.s
+; CHECK-NEXT: uunpkhi z7.d, z2.s
+; CHECK-NEXT: uunpklo z24.d, z3.s
+; CHECK-NEXT: uunpkhi z25.d, z3.s
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: frint64x z2.s, p0/z, z5.s
+; CHECK-NEXT: frint64x z3.s, p0/z, z0.s
+; CHECK-NEXT: frint64x z4.s, p0/z, z4.s
+; CHECK-NEXT: frint64x z5.s, p0/z, z1.s
+; CHECK-NEXT: movprfx z0, z2
+; CHECK-NEXT: fcvtzs z0.d, p0/m, z2.s
+; CHECK-NEXT: movprfx z1, z3
+; CHECK-NEXT: fcvtzs z1.d, p0/m, z3.s
+; CHECK-NEXT: movprfx z2, z4
+; CHECK-NEXT: fcvtzs z2.d, p0/m, z4.s
+; CHECK-NEXT: movprfx z3, z5
+; CHECK-NEXT: fcvtzs z3.d, p0/m, z5.s
+; CHECK-NEXT: frint64x z4.s, p0/z, z6.s
+; CHECK-NEXT: frint64x z5.s, p0/z, z7.s
+; CHECK-NEXT: frint64x z6.s, p0/z, z24.s
+; CHECK-NEXT: frint64x z7.s, p0/z, z25.s
+; CHECK-NEXT: fcvtzs z4.d, p0/m, z4.s
+; CHECK-NEXT: fcvtzs z5.d, p0/m, z5.s
+; CHECK-NEXT: fcvtzs z6.d, p0/m, z6.s
+; CHECK-NEXT: fcvtzs z7.d, p0/m, z7.s
+; CHECK-NEXT: ret
+ %a = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f32(<vscale x 16 x float> %x)
+ ret <vscale x 16 x i64> %a
+}
+declare <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f32(<vscale x 16 x float>)
+
+define <vscale x 32 x i64> @llrint_v32i64_v32f32(<vscale x 32 x float> %x) {
+; CHECK-LABEL: llrint_v32i64_v32f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uunpkhi z25.d, z7.s
+; CHECK-NEXT: uunpkhi z27.d, z6.s
+; CHECK-NEXT: uunpklo z6.d, z6.s
+; CHECK-NEXT: uunpklo z29.d, z3.s
+; CHECK-NEXT: uunpkhi z30.d, z5.s
+; CHECK-NEXT: uunpklo z5.d, z5.s
+; CHECK-NEXT: uunpkhi z31.d, z4.s
+; CHECK-NEXT: uunpklo z4.d, z4.s
+; CHECK-NEXT: uunpkhi z3.d, z3.s
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: uunpklo z24.d, z0.s
+; CHECK-NEXT: uunpkhi z0.d, z0.s
+; CHECK-NEXT: uunpklo z7.d, z7.s
+; CHECK-NEXT: uunpklo z26.d, z1.s
+; CHECK-NEXT: uunpkhi z1.d, z1.s
+; CHECK-NEXT: frint64x z25.s, p0/z, z25.s
+; CHECK-NEXT: frint64x z6.s, p0/z, z6.s
+; CHECK-NEXT: frint64x z5.s, p0/z, z5.s
+; CHECK-NEXT: frint64x z4.s, p0/z, z4.s
+; CHECK-NEXT: frint64x z3.s, p0/z, z3.s
+; CHECK-NEXT: frint64x z27.s, p0/z, z27.s
+; CHECK-NEXT: uunpklo z28.d, z2.s
+; CHECK-NEXT: frint64x z30.s, p0/z, z30.s
+; CHECK-NEXT: frint64x z0.s, p0/z, z0.s
+; CHECK-NEXT: uunpkhi z2.d, z2.s
+; CHECK-NEXT: fcvtzs z25.d, p0/m, z25.s
+; CHECK-NEXT: frint64x z7.s, p0/z, z7.s
+; CHECK-NEXT: fcvtzs z6.d, p0/m, z6.s
+; CHECK-NEXT: fcvtzs z5.d, p0/m, z5.s
+; CHECK-NEXT: frint64x z1.s, p0/z, z1.s
+; CHECK-NEXT: fcvtzs z4.d, p0/m, z4.s
+; CHECK-NEXT: fcvtzs z3.d, p0/m, z3.s
+; CHECK-NEXT: fcvtzs z7.d, p0/m, z7.s
+; CHECK-NEXT: fcvtzs z27.d, p0/m, z27.s
+; CHECK-NEXT: fcvtzs z30.d, p0/m, z30.s
+; CHECK-NEXT: fcvtzs z1.d, p0/m, z1.s
+; CHECK-NEXT: str z25, [x8, #15, mul vl]
+; CHECK-NEXT: frint64x z25.s, p0/z, z31.s
+; CHECK-NEXT: frint64x z2.s, p0/z, z2.s
+; CHECK-NEXT: str z6, [x8, #12, mul vl]
+; CHECK-NEXT: frint64x z6.s, p0/z, z29.s
+; CHECK-NEXT: fcvtzs z25.d, p0/m, z25.s
+; CHECK-NEXT: str z5, [x8, #10, mul vl]
+; CHECK-NEXT: frint64x z5.s, p0/z, z28.s
+; CHECK-NEXT: fcvtzs z6.d, p0/m, z6.s
+; CHECK-NEXT: str z4, [x8, #8, mul vl]
+; CHECK-NEXT: frint64x z4.s, p0/z, z26.s
+; CHECK-NEXT: fcvtzs z2.d, p0/m, z2.s
+; CHECK-NEXT: str z3, [x8, #7, mul vl]
+; CHECK-NEXT: frint64x z3.s, p0/z, z24.s
+; CHECK-NEXT: fcvtzs z5.d, p0/m, z5.s
+; CHECK-NEXT: fcvtzs z4.d, p0/m, z4.s
+; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.s
+; CHECK-NEXT: fcvtzs z3.d, p0/m, z3.s
+; CHECK-NEXT: str z7, [x8, #14, mul vl]
+; CHECK-NEXT: str z27, [x8, #13, mul vl]
+; CHECK-NEXT: str z30, [x8, #11, mul vl]
+; CHECK-NEXT: str z25, [x8, #9, mul vl]
+; CHECK-NEXT: str z6, [x8, #6, mul vl]
+; CHECK-NEXT: str z2, [x8, #5, mul vl]
+; CHECK-NEXT: str z5, [x8, #4, mul vl]
+; CHECK-NEXT: str z1, [x8, #3, mul vl]
+; CHECK-NEXT: str z4, [x8, #2, mul vl]
+; CHECK-NEXT: str z0, [x8, #1, mul vl]
+; CHECK-NEXT: str z3, [x8]
+; CHECK-NEXT: ret
+ %a = call <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv32f32(<vscale x 32 x float> %x)
+ ret <vscale x 32 x i64> %a
+}
+declare <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv32f32(<vscale x 32 x float>)
+
+define <vscale x 1 x i64> @llrint_v1i64_v1f64(<vscale x 1 x double> %x) { ; f64->i64: single frint64x + zeroing fcvtzs pair (SVE2p2 path from this patch)
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
+; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
+; CHECK-NEXT: ret
+ %a = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f64(<vscale x 1 x double> %x)
+ ret <vscale x 1 x i64> %a
+}
+declare <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f64(<vscale x 1 x double>)
+
+define <vscale x 2 x i64> @llrint_v2i64_v2f64(<vscale x 2 x double> %x) { ; f64->i64: frint64x rounds in-register, fcvtzs converts — no saturating expansion
+; CHECK-LABEL: llrint_v2i64_v2f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
+; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
+; CHECK-NEXT: ret
+ %a = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f64(<vscale x 2 x double> %x)
+ ret <vscale x 2 x i64> %a
+}
+declare <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f64(<vscale x 2 x double>)
+
+define <vscale x 4 x i64> @llrint_v4i64_v4f64(<vscale x 4 x double> %x) { ; two-register split: one frint64x + fcvtzs pair per nxv2f64 half
+; CHECK-LABEL: llrint_v4i64_v4f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
+; CHECK-NEXT: frint64x z1.d, p0/z, z1.d
+; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
+; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
+; CHECK-NEXT: ret
+ %a = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f64(<vscale x 4 x double> %x)
+ ret <vscale x 4 x i64> %a
+}
+declare <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f64(<vscale x 4 x double>)
+
+define <vscale x 8 x i64> @llrint_v8i64_v8f64(<vscale x 8 x double> %x) { ; four-register split, same frint64x + fcvtzs pattern per part
+; CHECK-LABEL: llrint_v8i64_v8f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
+; CHECK-NEXT: frint64x z1.d, p0/z, z1.d
+; CHECK-NEXT: frint64x z2.d, p0/z, z2.d
+; CHECK-NEXT: frint64x z3.d, p0/z, z3.d
+; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
+; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
+; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
+; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
+; CHECK-NEXT: ret
+ %a = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f64(<vscale x 8 x double> %x)
+ ret <vscale x 8 x i64> %a
+}
+declare <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f64(<vscale x 8 x double>)
+
+define <vscale x 16 x i64> @llrint_v16f64(<vscale x 16 x double> %x) { ; all eight argument registers z0-z7: frint64x then fcvtzs on each
+; CHECK-LABEL: llrint_v16f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
+; CHECK-NEXT: frint64x z1.d, p0/z, z1.d
+; CHECK-NEXT: frint64x z2.d, p0/z, z2.d
+; CHECK-NEXT: frint64x z3.d, p0/z, z3.d
+; CHECK-NEXT: frint64x z4.d, p0/z, z4.d
+; CHECK-NEXT: frint64x z5.d, p0/z, z5.d
+; CHECK-NEXT: frint64x z6.d, p0/z, z6.d
+; CHECK-NEXT: frint64x z7.d, p0/z, z7.d
+; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
+; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
+; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
+; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
+; CHECK-NEXT: fcvtzs z4.d, p0/z, z4.d
+; CHECK-NEXT: fcvtzs z5.d, p0/z, z5.d
+; CHECK-NEXT: fcvtzs z6.d, p0/z, z6.d
+; CHECK-NEXT: fcvtzs z7.d, p0/z, z7.d
+; CHECK-NEXT: ret
+ %a = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f64(<vscale x 16 x double> %x)
+ ret <vscale x 16 x i64> %a
+}
+declare <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f64(<vscale x 16 x double>)
+
+define <vscale x 32 x i64> @llrint_v32f64(<vscale x 32 x double> %x) { ; argument/result spill to memory (x0 in, x8 out); each part still lowers to frint64x + fcvtzs
+; CHECK-LABEL: llrint_v32f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z2, [x0, #15, mul vl]
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ldr z5, [x0, #12, mul vl]
+; CHECK-NEXT: ldr z3, [x0, #14, mul vl]
+; CHECK-NEXT: ldr z24, [x0, #9, mul vl]
+; CHECK-NEXT: ldr z1, [x0, #7, mul vl]
+; CHECK-NEXT: ldr z0, [x0, #6, mul vl]
+; CHECK-NEXT: ldr z4, [x0, #13, mul vl]
+; CHECK-NEXT: ldr z6, [x0, #11, mul vl]
+; CHECK-NEXT: frint64x z2.d, p0/z, z2.d
+; CHECK-NEXT: ldr z7, [x0, #10, mul vl]
+; CHECK-NEXT: ldr z25, [x0, #8, mul vl]
+; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
+; CHECK-NEXT: frint64x z3.d, p0/z, z3.d
+; CHECK-NEXT: frint64x z1.d, p0/z, z1.d
+; CHECK-NEXT: ldr z26, [x0, #5, mul vl]
+; CHECK-NEXT: ldr z27, [x0, #4, mul vl]
+; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
+; CHECK-NEXT: ldr z28, [x0, #3, mul vl]
+; CHECK-NEXT: ldr z29, [x0, #2, mul vl]
+; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
+; CHECK-NEXT: ldr z30, [x0, #1, mul vl]
+; CHECK-NEXT: ldr z31, [x0]
+; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
+; CHECK-NEXT: str z2, [x8, #15, mul vl]
+; CHECK-NEXT: frint64x z2.d, p0/z, z5.d
+; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
+; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
+; CHECK-NEXT: frint64x z4.d, p0/z, z4.d
+; CHECK-NEXT: str z3, [x8, #14, mul vl]
+; CHECK-NEXT: str z2, [x8, #12, mul vl]
+; CHECK-NEXT: frint64x z2.d, p0/z, z24.d
+; CHECK-NEXT: frint64x z3.d, p0/z, z6.d
+; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
+; CHECK-NEXT: str z1, [x8, #7, mul vl]
+; CHECK-NEXT: frint64x z1.d, p0/z, z27.d
+; CHECK-NEXT: str z2, [x8, #9, mul vl]
+; CHECK-NEXT: frint64x z2.d, p0/z, z26.d
+; CHECK-NEXT: fcvtzs z4.d, p0/z, z4.d
+; CHECK-NEXT: str z0, [x8, #6, mul vl]
+; CHECK-NEXT: frint64x z0.d, p0/z, z28.d
+; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
+; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
+; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
+; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
+; CHECK-NEXT: str z4, [x8, #13, mul vl]
+; CHECK-NEXT: frint64x z4.d, p0/z, z7.d
+; CHECK-NEXT: str z3, [x8, #11, mul vl]
+; CHECK-NEXT: frint64x z3.d, p0/z, z25.d
+; CHECK-NEXT: fcvtzs z4.d, p0/z, z4.d
+; CHECK-NEXT: str z2, [x8, #5, mul vl]
+; CHECK-NEXT: frint64x z2.d, p0/z, z29.d
+; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
+; CHECK-NEXT: str z1, [x8, #4, mul vl]
+; CHECK-NEXT: frint64x z1.d, p0/z, z30.d
+; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
+; CHECK-NEXT: str z0, [x8, #3, mul vl]
+; CHECK-NEXT: frint64x z0.d, p0/z, z31.d
+; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
+; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
+; CHECK-NEXT: str z4, [x8, #10, mul vl]
+; CHECK-NEXT: str z3, [x8, #8, mul vl]
+; CHECK-NEXT: str z2, [x8, #2, mul vl]
+; CHECK-NEXT: str z1, [x8, #1, mul vl]
+; CHECK-NEXT: str z0, [x8]
+; CHECK-NEXT: ret
+ %a = call <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv32f64(<vscale x 32 x double> %x) ; suffix corrected from .nxv16f64 to match the operand type and the declare below
+ ret <vscale x 32 x i64> %a
+}
+declare <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv32f64(<vscale x 32 x double>)
diff --git a/llvm/test/CodeGen/AArch64/sve2p2-lrint.ll b/llvm/test/CodeGen/AArch64/sve2p2-lrint.ll
new file mode 100644
index 0000000000000..6432abe6977cb
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve2p2-lrint.ll
@@ -0,0 +1,1276 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64 -mattr=+sve2p2 < %s | FileCheck %s
+
+define <vscale x 2 x i32> @lrint_v2_i32_f16(<vscale x 2 x half> %x) { ; f16 source keeps the generic expansion (frintx + saturation clamps); the new FRINT*X path covers f32/f64 only
+; CHECK-LABEL: lrint_v2_i32_f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z1.h, #-1025 // =0xfffffffffffffbff
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov w8, #31743 // =0x7bff
+; CHECK-NEXT: mov z2.d, #0x8000000000000000
+; CHECK-NEXT: frintx z0.h, p0/z, z0.h
+; CHECK-NEXT: fcmge p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT: mov z1.h, w8
+; CHECK-NEXT: fcvtzs z2.d, p1/m, z0.h
+; CHECK-NEXT: fcmgt p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT: mov z1.d, #0x7fffffffffffffff
+; CHECK-NEXT: fcmuo p0.h, p0/z, z0.h, z0.h
+; CHECK-NEXT: sel z0.d, p1, z1.d, z2.d
+; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0
+; CHECK-NEXT: ret
+ %a = call <vscale x 2 x i32> @llvm.lrint.nxv2i32.nxv2f16(<vscale x 2 x half> %x) ; suffix corrected from .nxv4i32.nxv4f16, which clashed with the nxv4 test below
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @lrint_v4_i32_f16(<vscale x 4 x half> %x) { ; f16 source: generic saturating expansion (frintx, range fcm* compares, clamped fcvtzs)
+; CHECK-LABEL: lrint_v4_i32_f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z1.h, #-1025 // =0xfffffffffffffbff
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: mov w8, #31743 // =0x7bff
+; CHECK-NEXT: mov z2.s, #0x80000000
+; CHECK-NEXT: frintx z0.h, p0/z, z0.h
+; CHECK-NEXT: fcmge p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT: mov z1.h, w8
+; CHECK-NEXT: fcvtzs z2.s, p1/m, z0.h
+; CHECK-NEXT: fcmgt p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT: mov z1.s, #0x7fffffff
+; CHECK-NEXT: fcmuo p0.h, p0/z, z0.h, z0.h
+; CHECK-NEXT: sel z0.s, p1, z1.s, z2.s
+; CHECK-NEXT: mov z0.s, p0/m, #0 // =0x0
+; CHECK-NEXT: ret
+ %a = call <vscale x 4 x i32> @llvm.lrint.nxv4i32.nxv4f16(<vscale x 4 x half> %x)
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @lrint_v8_i32_f16(<vscale x 8 x half> %x) { ; unpacks nxv8f16 into two nxv4 halves, then the generic saturating expansion per half
+; CHECK-LABEL: lrint_v8_i32_f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uunpklo z1.s, z0.h
+; CHECK-NEXT: uunpkhi z0.s, z0.h
+; CHECK-NEXT: mov w8, #31743 // =0x7bff
+; CHECK-NEXT: mov z2.h, #-1025 // =0xfffffffffffffbff
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: mov z3.s, #0x80000000
+; CHECK-NEXT: mov z4.s, #0x80000000
+; CHECK-NEXT: mov z5.s, #0x7fffffff
+; CHECK-NEXT: frintx z1.h, p0/z, z1.h
+; CHECK-NEXT: frintx z0.h, p0/z, z0.h
+; CHECK-NEXT: fcmge p1.h, p0/z, z1.h, z2.h
+; CHECK-NEXT: fcmge p2.h, p0/z, z0.h, z2.h
+; CHECK-NEXT: mov z2.h, w8
+; CHECK-NEXT: fcmuo p3.h, p0/z, z1.h, z1.h
+; CHECK-NEXT: fcvtzs z4.s, p1/m, z1.h
+; CHECK-NEXT: fcmgt p1.h, p0/z, z1.h, z2.h
+; CHECK-NEXT: fcvtzs z3.s, p2/m, z0.h
+; CHECK-NEXT: fcmgt p2.h, p0/z, z0.h, z2.h
+; CHECK-NEXT: fcmuo p0.h, p0/z, z0.h, z0.h
+; CHECK-NEXT: sel z0.s, p1, z5.s, z4.s
+; CHECK-NEXT: sel z1.s, p2, z5.s, z3.s
+; CHECK-NEXT: mov z0.s, p3/m, #0 // =0x0
+; CHECK-NEXT: mov z1.s, p0/m, #0 // =0x0
+; CHECK-NEXT: ret
+ %a = call <vscale x 8 x i32> @llvm.lrint.nxv8i32.nxv8f16(<vscale x 8 x half> %x)
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @lrint_v16_i32_f16(<vscale x 16 x half> %x) { ; four-way unpack + saturating expansion; predicate pressure forces p4-p6 spills
+; CHECK-LABEL: lrint_v16_i32_f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Spill
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: uunpklo z2.s, z0.h
+; CHECK-NEXT: mov z4.h, #-1025 // =0xfffffffffffffbff
+; CHECK-NEXT: mov w8, #31743 // =0x7bff
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: uunpkhi z0.s, z0.h
+; CHECK-NEXT: uunpklo z3.s, z1.h
+; CHECK-NEXT: mov z5.s, #0x80000000
+; CHECK-NEXT: mov z24.h, w8
+; CHECK-NEXT: uunpkhi z1.s, z1.h
+; CHECK-NEXT: mov z6.s, #0x80000000
+; CHECK-NEXT: mov z7.s, #0x80000000
+; CHECK-NEXT: mov z26.s, #0x7fffffff
+; CHECK-NEXT: frintx z2.h, p0/z, z2.h
+; CHECK-NEXT: fcmge p1.h, p0/z, z2.h, z4.h
+; CHECK-NEXT: frintx z25.h, p0/z, z0.h
+; CHECK-NEXT: frintx z3.h, p0/z, z3.h
+; CHECK-NEXT: fcmge p2.h, p0/z, z25.h, z4.h
+; CHECK-NEXT: frintx z1.h, p0/z, z1.h
+; CHECK-NEXT: fcmge p3.h, p0/z, z3.h, z4.h
+; CHECK-NEXT: fcmgt p4.h, p0/z, z2.h, z24.h
+; CHECK-NEXT: fcvtzs z5.s, p1/m, z2.h
+; CHECK-NEXT: fcmge p1.h, p0/z, z1.h, z4.h
+; CHECK-NEXT: mov z4.s, #0x80000000
+; CHECK-NEXT: fcvtzs z6.s, p2/m, z25.h
+; CHECK-NEXT: fcvtzs z7.s, p3/m, z3.h
+; CHECK-NEXT: fcmgt p3.h, p0/z, z25.h, z24.h
+; CHECK-NEXT: sel z0.s, p4, z26.s, z5.s
+; CHECK-NEXT: fcmgt p4.h, p0/z, z3.h, z24.h
+; CHECK-NEXT: fcvtzs z4.s, p1/m, z1.h
+; CHECK-NEXT: fcmgt p1.h, p0/z, z1.h, z24.h
+; CHECK-NEXT: fcmuo p5.h, p0/z, z25.h, z25.h
+; CHECK-NEXT: fcmuo p6.h, p0/z, z3.h, z3.h
+; CHECK-NEXT: fcmuo p2.h, p0/z, z2.h, z2.h
+; CHECK-NEXT: sel z2.s, p4, z26.s, z7.s
+; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Reload
+; CHECK-NEXT: sel z3.s, p1, z26.s, z4.s
+; CHECK-NEXT: fcmuo p0.h, p0/z, z1.h, z1.h
+; CHECK-NEXT: sel z1.s, p3, z26.s, z6.s
+; CHECK-NEXT: mov z2.s, p6/m, #0 // =0x0
+; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT: mov z0.s, p2/m, #0 // =0x0
+; CHECK-NEXT: mov z1.s, p5/m, #0 // =0x0
+; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Reload
+; CHECK-NEXT: mov z3.s, p0/m, #0 // =0x0
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %a = call <vscale x 16 x i32> @llvm.lrint.nxv16i32.nxv16f16(<vscale x 16 x half> %x)
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 32 x i32> @lrint_v32_i32_f16(<vscale x 32 x half> %x) { ; widest f16 case: eight-way unpack + saturating expansion; spills z8/z9 and p4-p10
+; CHECK-LABEL: lrint_v32_i32_f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-3
+; CHECK-NEXT: str p10, [sp, #1, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p9, [sp, #2, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p8, [sp, #3, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Spill
+; CHECK-NEXT: str z9, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z8, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; CHECK-NEXT: uunpklo z24.s, z1.h
+; CHECK-NEXT: uunpkhi z1.s, z1.h
+; CHECK-NEXT: mov w8, #31743 // =0x7bff
+; CHECK-NEXT: mov z4.h, #-1025 // =0xfffffffffffffbff
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: uunpklo z6.s, z0.h
+; CHECK-NEXT: uunpkhi z7.s, z0.h
+; CHECK-NEXT: uunpklo z25.s, z2.h
+; CHECK-NEXT: uunpkhi z2.s, z2.h
+; CHECK-NEXT: mov z27.s, #0x80000000
+; CHECK-NEXT: mov z0.s, #0x80000000
+; CHECK-NEXT: mov z26.s, #0x80000000
+; CHECK-NEXT: frintx z1.h, p0/z, z1.h
+; CHECK-NEXT: frintx z24.h, p0/z, z24.h
+; CHECK-NEXT: mov z5.s, #0x80000000
+; CHECK-NEXT: frintx z6.h, p0/z, z6.h
+; CHECK-NEXT: mov z28.s, #0x80000000
+; CHECK-NEXT: mov z29.h, w8
+; CHECK-NEXT: frintx z7.h, p0/z, z7.h
+; CHECK-NEXT: frintx z25.h, p0/z, z25.h
+; CHECK-NEXT: frintx z30.h, p0/z, z2.h
+; CHECK-NEXT: fcmge p4.h, p0/z, z1.h, z4.h
+; CHECK-NEXT: uunpklo z2.s, z3.h
+; CHECK-NEXT: mov z31.s, #0x80000000
+; CHECK-NEXT: mov z9.s, #0x80000000
+; CHECK-NEXT: fcmge p1.h, p0/z, z6.h, z4.h
+; CHECK-NEXT: fcmge p3.h, p0/z, z24.h, z4.h
+; CHECK-NEXT: fcmge p2.h, p0/z, z7.h, z4.h
+; CHECK-NEXT: fcvtzs z27.s, p4/m, z1.h
+; CHECK-NEXT: fcmge p5.h, p0/z, z25.h, z4.h
+; CHECK-NEXT: fcvtzs z0.s, p1/m, z6.h
+; CHECK-NEXT: fcmge p4.h, p0/z, z30.h, z4.h
+; CHECK-NEXT: fcvtzs z26.s, p3/m, z24.h
+; CHECK-NEXT: fcvtzs z5.s, p2/m, z7.h
+; CHECK-NEXT: fcmgt p3.h, p0/z, z6.h, z29.h
+; CHECK-NEXT: fcvtzs z28.s, p5/m, z25.h
+; CHECK-NEXT: fcmuo p1.h, p0/z, z6.h, z6.h
+; CHECK-NEXT: frintx z6.h, p0/z, z2.h
+; CHECK-NEXT: uunpkhi z2.s, z3.h
+; CHECK-NEXT: fcvtzs z31.s, p4/m, z30.h
+; CHECK-NEXT: fcmgt p5.h, p0/z, z7.h, z29.h
+; CHECK-NEXT: fcmuo p2.h, p0/z, z7.h, z7.h
+; CHECK-NEXT: mov z7.s, #0x80000000
+; CHECK-NEXT: frintx z8.h, p0/z, z2.h
+; CHECK-NEXT: fcmge p4.h, p0/z, z6.h, z4.h
+; CHECK-NEXT: fcmgt p6.h, p0/z, z24.h, z29.h
+; CHECK-NEXT: fcmuo p7.h, p0/z, z24.h, z24.h
+; CHECK-NEXT: mov z24.s, #0x7fffffff
+; CHECK-NEXT: fcmgt p8.h, p0/z, z1.h, z29.h
+; CHECK-NEXT: fcvtzs z7.s, p4/m, z6.h
+; CHECK-NEXT: fcmge p4.h, p0/z, z8.h, z4.h
+; CHECK-NEXT: fcmgt p9.h, p0/z, z25.h, z29.h
+; CHECK-NEXT: sel z2.s, p6, z24.s, z26.s
+; CHECK-NEXT: mov z0.s, p3/m, z24.s
+; CHECK-NEXT: sel z3.s, p8, z24.s, z27.s
+; CHECK-NEXT: fcmuo p10.h, p0/z, z1.h, z1.h
+; CHECK-NEXT: sel z1.s, p5, z24.s, z5.s
+; CHECK-NEXT: fcvtzs z9.s, p4/m, z8.h
+; CHECK-NEXT: mov z2.s, p7/m, #0 // =0x0
+; CHECK-NEXT: ldr p7, [sp, #4, mul vl] // 2-byte Reload
+; CHECK-NEXT: sel z4.s, p9, z24.s, z28.s
+; CHECK-NEXT: mov z0.s, p1/m, #0 // =0x0
+; CHECK-NEXT: mov z1.s, p2/m, #0 // =0x0
+; CHECK-NEXT: mov z3.s, p10/m, #0 // =0x0
+; CHECK-NEXT: ldr p10, [sp, #1, mul vl] // 2-byte Reload
+; CHECK-NEXT: fcmgt p5.h, p0/z, z30.h, z29.h
+; CHECK-NEXT: fcmgt p6.h, p0/z, z6.h, z29.h
+; CHECK-NEXT: fcmgt p4.h, p0/z, z8.h, z29.h
+; CHECK-NEXT: fcmuo p8.h, p0/z, z30.h, z30.h
+; CHECK-NEXT: fcmuo p9.h, p0/z, z6.h, z6.h
+; CHECK-NEXT: sel z5.s, p5, z24.s, z31.s
+; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Reload
+; CHECK-NEXT: sel z6.s, p6, z24.s, z7.s
+; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT: fcmuo p3.h, p0/z, z25.h, z25.h
+; CHECK-NEXT: sel z7.s, p4, z24.s, z9.s
+; CHECK-NEXT: ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Reload
+; CHECK-NEXT: mov z5.s, p8/m, #0 // =0x0
+; CHECK-NEXT: fcmuo p0.h, p0/z, z8.h, z8.h
+; CHECK-NEXT: ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr p8, [sp, #3, mul vl] // 2-byte Reload
+; CHECK-NEXT: mov z6.s, p9/m, #0 // =0x0
+; CHECK-NEXT: ldr p9, [sp, #2, mul vl] // 2-byte Reload
+; CHECK-NEXT: mov z4.s, p3/m, #0 // =0x0
+; CHECK-NEXT: mov z7.s, p0/m, #0 // =0x0
+; CHECK-NEXT: addvl sp, sp, #3
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %a = call <vscale x 32 x i32> @llvm.lrint.nxv32i32.nxv32f16(<vscale x 32 x half> %x)
+ ret <vscale x 32 x i32> %a
+}
+
+define <vscale x 2 x i32> @lrint_v2_i32_f32(<vscale x 2 x float> %x) { ; NOTE(review): nxv2i32 appears promoted to 64-bit lanes, hence frint64x/fcvtzs .d — confirm intended
+; CHECK-LABEL: lrint_v2_i32_f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: frint64x z0.s, p0/z, z0.s
+; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.s
+; CHECK-NEXT: ret
+ %a = call <vscale x 2 x i32> @llvm.lrint.nxv2i32.nxv2f32(<vscale x 2 x float> %x) ; suffix corrected from .nxv4i32.nxv4f32, which clashed with the nxv4 test below
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @lrint_v4_i32_f32(<vscale x 4 x float> %x) { ; f32->i32: frint32x rounds, then zeroing fcvtzs converts (the new SVE2p2 path)
+; CHECK-LABEL: lrint_v4_i32_f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: frint32x z0.s, p0/z, z0.s
+; CHECK-NEXT: fcvtzs z0.s, p0/z, z0.s
+; CHECK-NEXT: ret
+ %a = call <vscale x 4 x i32> @llvm.lrint.nxv4i32.nxv4f32(<vscale x 4 x float> %x)
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @lrint_v8_i32_f32(<vscale x 8 x float> %x) { ; two-register split, frint32x + fcvtzs per part
+; CHECK-LABEL: lrint_v8_i32_f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: frint32x z0.s, p0/z, z0.s
+; CHECK-NEXT: frint32x z1.s, p0/z, z1.s
+; CHECK-NEXT: fcvtzs z0.s, p0/z, z0.s
+; CHECK-NEXT: fcvtzs z1.s, p0/z, z1.s
+; CHECK-NEXT: ret
+ %a = call <vscale x 8 x i32> @llvm.lrint.nxv8i32.nxv8f32(<vscale x 8 x float> %x)
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @lrint_v16_i32_f32(<vscale x 16 x float> %x) { ; four-register split, frint32x + fcvtzs per part
+; CHECK-LABEL: lrint_v16_i32_f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: frint32x z0.s, p0/z, z0.s
+; CHECK-NEXT: frint32x z1.s, p0/z, z1.s
+; CHECK-NEXT: frint32x z2.s, p0/z, z2.s
+; CHECK-NEXT: frint32x z3.s, p0/z, z3.s
+; CHECK-NEXT: fcvtzs z0.s, p0/z, z0.s
+; CHECK-NEXT: fcvtzs z1.s, p0/z, z1.s
+; CHECK-NEXT: fcvtzs z2.s, p0/z, z2.s
+; CHECK-NEXT: fcvtzs z3.s, p0/z, z3.s
+; CHECK-NEXT: ret
+ %a = call <vscale x 16 x i32> @llvm.lrint.nxv16i32.nxv16f32(<vscale x 16 x float> %x)
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 32 x i32> @lrint_v32_i32_f32(<vscale x 32 x float> %x) { ; all eight argument registers z0-z7, frint32x + fcvtzs each
+; CHECK-LABEL: lrint_v32_i32_f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: frint32x z0.s, p0/z, z0.s
+; CHECK-NEXT: frint32x z1.s, p0/z, z1.s
+; CHECK-NEXT: frint32x z2.s, p0/z, z2.s
+; CHECK-NEXT: frint32x z3.s, p0/z, z3.s
+; CHECK-NEXT: frint32x z4.s, p0/z, z4.s
+; CHECK-NEXT: frint32x z5.s, p0/z, z5.s
+; CHECK-NEXT: frint32x z6.s, p0/z, z6.s
+; CHECK-NEXT: frint32x z7.s, p0/z, z7.s
+; CHECK-NEXT: fcvtzs z0.s, p0/z, z0.s
+; CHECK-NEXT: fcvtzs z1.s, p0/z, z1.s
+; CHECK-NEXT: fcvtzs z2.s, p0/z, z2.s
+; CHECK-NEXT: fcvtzs z3.s, p0/z, z3.s
+; CHECK-NEXT: fcvtzs z4.s, p0/z, z4.s
+; CHECK-NEXT: fcvtzs z5.s, p0/z, z5.s
+; CHECK-NEXT: fcvtzs z6.s, p0/z, z6.s
+; CHECK-NEXT: fcvtzs z7.s, p0/z, z7.s
+; CHECK-NEXT: ret
+ %a = call <vscale x 32 x i32> @llvm.lrint.nxv32i32.nxv32f32(<vscale x 32 x float> %x)
+ ret <vscale x 32 x i32> %a
+}
+
+define <vscale x 2 x i32> @lrint_v2_i32_f64(<vscale x 2 x double> %x) { ; f64 source with i32 result: frint64x + fcvtzs in 64-bit lanes, result kept unpacked
+; CHECK-LABEL: lrint_v2_i32_f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
+; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
+; CHECK-NEXT: ret
+ %a = call <vscale x 2 x i32> @llvm.lrint.nxv2i32.nxv2f64(<vscale x 2 x double> %x)
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @lrint_v4_i32_f64(<vscale x 4 x double> %x) { ; two f64 parts converted then narrowed to nxv4i32 with uzp1
+; CHECK-LABEL: lrint_v4_i32_f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: frint64x z1.d, p0/z, z1.d
+; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
+; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
+; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
+; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+ %a = call <vscale x 4 x i32> @llvm.lrint.nxv4i32.nxv4f64(<vscale x 4 x double> %x)
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @lrint_v8_i32_f64(<vscale x 8 x double> %x) { ; four f64 parts, pairwise uzp1-narrowed into two i32 result registers
+; CHECK-LABEL: lrint_v8_i32_f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: frint64x z1.d, p0/z, z1.d
+; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
+; CHECK-NEXT: frint64x z3.d, p0/z, z3.d
+; CHECK-NEXT: frint64x z2.d, p0/z, z2.d
+; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
+; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
+; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
+; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
+; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s
+; CHECK-NEXT: uzp1 z1.s, z2.s, z3.s
+; CHECK-NEXT: ret
+ %a = call <vscale x 8 x i32> @llvm.lrint.nxv8i32.nxv8f64(<vscale x 8 x double> %x)
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @lrint_v16_i32_f64(<vscale x 16 x double> %x) { ; eight f64 parts: frint64x + fcvtzs each, then four uzp1 narrows
+; CHECK-LABEL: lrint_v16_i32_f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: frint64x z1.d, p0/z, z1.d
+; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
+; CHECK-NEXT: frint64x z3.d, p0/z, z3.d
+; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
+; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
+; CHECK-NEXT: frint64x z4.d, p0/z, z4.d
+; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s
+; CHECK-NEXT: frint64x z1.d, p0/z, z2.d
+; CHECK-NEXT: frint64x z2.d, p0/z, z5.d
+; CHECK-NEXT: frint64x z5.d, p0/z, z7.d
+; CHECK-NEXT: frint64x z6.d, p0/z, z6.d
+; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
+; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
+; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
+; CHECK-NEXT: fcvtzs z4.d, p0/z, z4.d
+; CHECK-NEXT: fcvtzs z5.d, p0/z, z5.d
+; CHECK-NEXT: fcvtzs z6.d, p0/z, z6.d
+; CHECK-NEXT: uzp1 z1.s, z1.s, z3.s
+; CHECK-NEXT: uzp1 z2.s, z4.s, z2.s
+; CHECK-NEXT: uzp1 z3.s, z6.s, z5.s
+; CHECK-NEXT: ret
+ %a = call <vscale x 16 x i32> @llvm.lrint.nxv16i32.nxv16f64(<vscale x 16 x double> %x)
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 32 x i32> @lrint_v32_i32_f64(<vscale x 32 x double> %x) { ; argument passed indirectly via x0; sixteen frint64x + fcvtzs pairs, narrowed via uzp1
+; CHECK-LABEL: lrint_v32_i32_f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-NEXT: ldr z1, [x0]
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ldr z2, [x0, #3, mul vl]
+; CHECK-NEXT: ldr z3, [x0, #2, mul vl]
+; CHECK-NEXT: ldr z4, [x0, #8, mul vl]
+; CHECK-NEXT: ldr z27, [x0, #9, mul vl]
+; CHECK-NEXT: ldr z28, [x0, #7, mul vl]
+; CHECK-NEXT: ldr z29, [x0, #4, mul vl]
+; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
+; CHECK-NEXT: frint64x z1.d, p0/z, z1.d
+; CHECK-NEXT: ldr z30, [x0, #5, mul vl]
+; CHECK-NEXT: ldr z31, [x0, #6, mul vl]
+; CHECK-NEXT: frint64x z2.d, p0/z, z2.d
+; CHECK-NEXT: frint64x z3.d, p0/z, z3.d
+; CHECK-NEXT: ldr z5, [x0, #14, mul vl]
+; CHECK-NEXT: ldr z6, [x0, #15, mul vl]
+; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
+; CHECK-NEXT: ldr z7, [x0, #12, mul vl]
+; CHECK-NEXT: ldr z24, [x0, #13, mul vl]
+; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
+; CHECK-NEXT: ldr z25, [x0, #10, mul vl]
+; CHECK-NEXT: ldr z26, [x0, #11, mul vl]
+; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
+; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
+; CHECK-NEXT: uzp1 z0.s, z1.s, z0.s
+; CHECK-NEXT: frint64x z28.d, p0/z, z28.d
+; CHECK-NEXT: uzp1 z1.s, z3.s, z2.s
+; CHECK-NEXT: frint64x z2.d, p0/z, z30.d
+; CHECK-NEXT: frint64x z3.d, p0/z, z29.d
+; CHECK-NEXT: frint64x z29.d, p0/z, z31.d
+; CHECK-NEXT: frint64x z27.d, p0/z, z27.d
+; CHECK-NEXT: frint64x z4.d, p0/z, z4.d
+; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
+; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
+; CHECK-NEXT: fcvtzs z28.d, p0/z, z28.d
+; CHECK-NEXT: fcvtzs z29.d, p0/z, z29.d
+; CHECK-NEXT: fcvtzs z27.d, p0/z, z27.d
+; CHECK-NEXT: fcvtzs z4.d, p0/z, z4.d
+; CHECK-NEXT: frint64x z26.d, p0/z, z26.d
+; CHECK-NEXT: frint64x z25.d, p0/z, z25.d
+; CHECK-NEXT: frint64x z24.d, p0/z, z24.d
+; CHECK-NEXT: frint64x z7.d, p0/z, z7.d
+; CHECK-NEXT: frint64x z6.d, p0/z, z6.d
+; CHECK-NEXT: frint64x z5.d, p0/z, z5.d
+; CHECK-NEXT: uzp1 z2.s, z3.s, z2.s
+; CHECK-NEXT: uzp1 z3.s, z29.s, z28.s
+; CHECK-NEXT: uzp1 z4.s, z4.s, z27.s
+; CHECK-NEXT: fcvtzs z26.d, p0/z, z26.d
+; CHECK-NEXT: fcvtzs z25.d, p0/z, z25.d
+; CHECK-NEXT: fcvtzs z24.d, p0/z, z24.d
+; CHECK-NEXT: fcvtzs z7.d, p0/z, z7.d
+; CHECK-NEXT: fcvtzs z27.d, p0/z, z6.d
+; CHECK-NEXT: fcvtzs z28.d, p0/z, z5.d
+; CHECK-NEXT: uzp1 z5.s, z25.s, z26.s
+; CHECK-NEXT: uzp1 z6.s, z7.s, z24.s
+; CHECK-NEXT: uzp1 z7.s, z28.s, z27.s
+; CHECK-NEXT: ret
+ %a = call <vscale x 32 x i32> @llvm.lrint.nxv32i32.nxv32f64(<vscale x 32 x double> %x)
+ ret <vscale x 32 x i32> %a
+}
+
+define <vscale x 1 x i64> @lrint_v1f16(<vscale x 1 x half> %x) { ; f16 source keeps the generic expansion: frintx, range compares against half limits, saturated fcvtzs
+; CHECK-LABEL: lrint_v1f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z1.h, #-1025 // =0xfffffffffffffbff
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov w8, #31743 // =0x7bff
+; CHECK-NEXT: mov z2.d, #0x8000000000000000
+; CHECK-NEXT: frintx z0.h, p0/z, z0.h
+; CHECK-NEXT: fcmge p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT: mov z1.h, w8
+; CHECK-NEXT: fcvtzs z2.d, p1/m, z0.h
+; CHECK-NEXT: fcmgt p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT: mov z1.d, #0x7fffffffffffffff
+; CHECK-NEXT: fcmuo p0.h, p0/z, z0.h, z0.h
+; CHECK-NEXT: sel z0.d, p1, z1.d, z2.d
+; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0
+; CHECK-NEXT: ret
+ %a = call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f16(<vscale x 1 x half> %x)
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @lrint_v2f16(<vscale x 2 x half> %x) { ; same generic saturating expansion as the nxv1 case above
+; CHECK-LABEL: lrint_v2f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z1.h, #-1025 // =0xfffffffffffffbff
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov w8, #31743 // =0x7bff
+; CHECK-NEXT: mov z2.d, #0x8000000000000000
+; CHECK-NEXT: frintx z0.h, p0/z, z0.h
+; CHECK-NEXT: fcmge p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT: mov z1.h, w8
+; CHECK-NEXT: fcvtzs z2.d, p1/m, z0.h
+; CHECK-NEXT: fcmgt p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT: mov z1.d, #0x7fffffffffffffff
+; CHECK-NEXT: fcmuo p0.h, p0/z, z0.h, z0.h
+; CHECK-NEXT: sel z0.d, p1, z1.d, z2.d
+; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0
+; CHECK-NEXT: ret
+ %a = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f16(<vscale x 2 x half> %x)
+ ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @lrint_v4f16(<vscale x 4 x half> %x) { ; unpacks to two nxv2 halves, then the generic saturating expansion per half
+; CHECK-LABEL: lrint_v4f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uunpklo z1.d, z0.s
+; CHECK-NEXT: uunpkhi z0.d, z0.s
+; CHECK-NEXT: mov w8, #31743 // =0x7bff
+; CHECK-NEXT: mov z2.h, #-1025 // =0xfffffffffffffbff
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov z3.d, #0x8000000000000000
+; CHECK-NEXT: mov z4.d, #0x8000000000000000
+; CHECK-NEXT: mov z5.d, #0x7fffffffffffffff
+; CHECK-NEXT: frintx z1.h, p0/z, z1.h
+; CHECK-NEXT: frintx z0.h, p0/z, z0.h
+; CHECK-NEXT: fcmge p1.h, p0/z, z1.h, z2.h
+; CHECK-NEXT: fcmge p2.h, p0/z, z0.h, z2.h
+; CHECK-NEXT: mov z2.h, w8
+; CHECK-NEXT: fcmuo p3.h, p0/z, z1.h, z1.h
+; CHECK-NEXT: fcvtzs z4.d, p1/m, z1.h
+; CHECK-NEXT: fcmgt p1.h, p0/z, z1.h, z2.h
+; CHECK-NEXT: fcvtzs z3.d, p2/m, z0.h
+; CHECK-NEXT: fcmgt p2.h, p0/z, z0.h, z2.h
+; CHECK-NEXT: fcmuo p0.h, p0/z, z0.h, z0.h
+; CHECK-NEXT: sel z0.d, p1, z5.d, z4.d
+; CHECK-NEXT: sel z1.d, p2, z5.d, z3.d
+; CHECK-NEXT: mov z0.d, p3/m, #0 // =0x0
+; CHECK-NEXT: mov z1.d, p0/m, #0 // =0x0
+; CHECK-NEXT: ret
+ %a = call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f16(<vscale x 4 x half> %x)
+ ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @lrint_v8f16(<vscale x 8 x half> %x) { ; four-way unpack + saturating expansion; predicate pressure forces p4-p6 spills
+; CHECK-LABEL: lrint_v8f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Spill
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: uunpklo z1.s, z0.h
+; CHECK-NEXT: uunpkhi z0.s, z0.h
+; CHECK-NEXT: mov w8, #31743 // =0x7bff
+; CHECK-NEXT: mov z4.h, #-1025 // =0xfffffffffffffbff
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov z5.d, #0x8000000000000000
+; CHECK-NEXT: mov z6.d, #0x8000000000000000
+; CHECK-NEXT: mov z7.d, #0x8000000000000000
+; CHECK-NEXT: mov z24.h, w8
+; CHECK-NEXT: mov z25.d, #0x8000000000000000
+; CHECK-NEXT: uunpklo z2.d, z1.s
+; CHECK-NEXT: uunpkhi z1.d, z1.s
+; CHECK-NEXT: uunpklo z3.d, z0.s
+; CHECK-NEXT: uunpkhi z0.d, z0.s
+; CHECK-NEXT: frintx z2.h, p0/z, z2.h
+; CHECK-NEXT: frintx z1.h, p0/z, z1.h
+; CHECK-NEXT: frintx z3.h, p0/z, z3.h
+; CHECK-NEXT: fcmge p1.h, p0/z, z2.h, z4.h
+; CHECK-NEXT: frintx z26.h, p0/z, z0.h
+; CHECK-NEXT: fcmge p2.h, p0/z, z1.h, z4.h
+; CHECK-NEXT: fcmge p3.h, p0/z, z3.h, z4.h
+; CHECK-NEXT: fcmgt p4.h, p0/z, z1.h, z24.h
+; CHECK-NEXT: fcvtzs z5.d, p1/m, z2.h
+; CHECK-NEXT: fcmge p1.h, p0/z, z26.h, z4.h
+; CHECK-NEXT: fcvtzs z6.d, p2/m, z1.h
+; CHECK-NEXT: mov z4.d, #0x7fffffffffffffff
+; CHECK-NEXT: fcmgt p2.h, p0/z, z2.h, z24.h
+; CHECK-NEXT: fcvtzs z7.d, p3/m, z3.h
+; CHECK-NEXT: fcmgt p5.h, p0/z, z3.h, z24.h
+; CHECK-NEXT: fcmgt p6.h, p0/z, z26.h, z24.h
+; CHECK-NEXT: fcvtzs z25.d, p1/m, z26.h
+; CHECK-NEXT: fcmuo p3.h, p0/z, z2.h, z2.h
+; CHECK-NEXT: sel z0.d, p2, z4.d, z5.d
+; CHECK-NEXT: fcmuo p1.h, p0/z, z1.h, z1.h
+; CHECK-NEXT: sel z1.d, p4, z4.d, z6.d
+; CHECK-NEXT: sel z2.d, p5, z4.d, z7.d
+; CHECK-NEXT: fcmuo p2.h, p0/z, z3.h, z3.h
+; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Reload
+; CHECK-NEXT: fcmuo p0.h, p0/z, z26.h, z26.h
+; CHECK-NEXT: sel z3.d, p6, z4.d, z25.d
+; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Reload
+; CHECK-NEXT: mov z0.d, p3/m, #0 // =0x0
+; CHECK-NEXT: mov z1.d, p1/m, #0 // =0x0
+; CHECK-NEXT: mov z2.d, p2/m, #0 // =0x0
+; CHECK-NEXT: mov z3.d, p0/m, #0 // =0x0
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %a = call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f16(<vscale x 8 x half> %x)
+ ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 16 x i64> @lrint_v16f16(<vscale x 16 x half> %x) {
+; CHECK-LABEL: lrint_v16f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p10, [sp, #1, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p9, [sp, #2, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p8, [sp, #3, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Spill
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: uunpkhi z3.s, z0.h
+; CHECK-NEXT: uunpklo z2.s, z0.h
+; CHECK-NEXT: mov w8, #31743 // =0x7bff
+; CHECK-NEXT: uunpklo z7.s, z1.h
+; CHECK-NEXT: mov z0.h, #-1025 // =0xfffffffffffffbff
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: uunpkhi z1.s, z1.h
+; CHECK-NEXT: mov z4.d, #0x8000000000000000
+; CHECK-NEXT: mov z6.d, #0x8000000000000000
+; CHECK-NEXT: mov z5.d, #0x8000000000000000
+; CHECK-NEXT: mov z29.h, w8
+; CHECK-NEXT: mov z30.d, #0x8000000000000000
+; CHECK-NEXT: uunpklo z26.d, z3.s
+; CHECK-NEXT: uunpklo z24.d, z2.s
+; CHECK-NEXT: uunpkhi z25.d, z2.s
+; CHECK-NEXT: uunpklo z28.d, z7.s
+; CHECK-NEXT: uunpkhi z27.d, z3.s
+; CHECK-NEXT: uunpkhi z7.d, z7.s
+; CHECK-NEXT: mov z2.d, #0x8000000000000000
+; CHECK-NEXT: mov z3.d, #0x8000000000000000
+; CHECK-NEXT: uunpklo z31.d, z1.s
+; CHECK-NEXT: uunpkhi z1.d, z1.s
+; CHECK-NEXT: frintx z26.h, p0/z, z26.h
+; CHECK-NEXT: frintx z24.h, p0/z, z24.h
+; CHECK-NEXT: frintx z25.h, p0/z, z25.h
+; CHECK-NEXT: frintx z28.h, p0/z, z28.h
+; CHECK-NEXT: frintx z27.h, p0/z, z27.h
+; CHECK-NEXT: frintx z7.h, p0/z, z7.h
+; CHECK-NEXT: fcmge p4.h, p0/z, z26.h, z0.h
+; CHECK-NEXT: fcmge p2.h, p0/z, z24.h, z0.h
+; CHECK-NEXT: fcmge p3.h, p0/z, z25.h, z0.h
+; CHECK-NEXT: fcmge p1.h, p0/z, z28.h, z0.h
+; CHECK-NEXT: fcmge p5.h, p0/z, z27.h, z0.h
+; CHECK-NEXT: fcvtzs z4.d, p4/m, z26.h
+; CHECK-NEXT: fcvtzs z2.d, p2/m, z24.h
+; CHECK-NEXT: fcmge p4.h, p0/z, z7.h, z0.h
+; CHECK-NEXT: fcvtzs z3.d, p3/m, z25.h
+; CHECK-NEXT: fcvtzs z6.d, p1/m, z28.h
+; CHECK-NEXT: fcmgt p3.h, p0/z, z24.h, z29.h
+; CHECK-NEXT: fcvtzs z5.d, p5/m, z27.h
+; CHECK-NEXT: fcmuo p1.h, p0/z, z24.h, z24.h
+; CHECK-NEXT: frintx z24.h, p0/z, z31.h
+; CHECK-NEXT: mov z31.d, #0x8000000000000000
+; CHECK-NEXT: fcvtzs z30.d, p4/m, z7.h
+; CHECK-NEXT: fcmgt p5.h, p0/z, z25.h, z29.h
+; CHECK-NEXT: fcmuo p2.h, p0/z, z25.h, z25.h
+; CHECK-NEXT: mov z25.d, #0x8000000000000000
+; CHECK-NEXT: fcmge p4.h, p0/z, z24.h, z0.h
+; CHECK-NEXT: fcmgt p6.h, p0/z, z26.h, z29.h
+; CHECK-NEXT: fcmuo p7.h, p0/z, z26.h, z26.h
+; CHECK-NEXT: mov z26.d, #0x7fffffffffffffff
+; CHECK-NEXT: fcmgt p8.h, p0/z, z27.h, z29.h
+; CHECK-NEXT: fcvtzs z25.d, p4/m, z24.h
+; CHECK-NEXT: fcmuo p10.h, p0/z, z27.h, z27.h
+; CHECK-NEXT: frintx z27.h, p0/z, z1.h
+; CHECK-NEXT: sel z1.d, p5, z26.d, z3.d
+; CHECK-NEXT: sel z3.d, p8, z26.d, z5.d
+; CHECK-NEXT: fcmge p4.h, p0/z, z27.h, z0.h
+; CHECK-NEXT: sel z0.d, p3, z26.d, z2.d
+; CHECK-NEXT: sel z2.d, p6, z26.d, z4.d
+; CHECK-NEXT: mov z1.d, p2/m, #0 // =0x0
+; CHECK-NEXT: mov z3.d, p10/m, #0 // =0x0
+; CHECK-NEXT: ldr p10, [sp, #1, mul vl] // 2-byte Reload
+; CHECK-NEXT: fcmgt p9.h, p0/z, z28.h, z29.h
+; CHECK-NEXT: mov z2.d, p7/m, #0 // =0x0
+; CHECK-NEXT: ldr p7, [sp, #4, mul vl] // 2-byte Reload
+; CHECK-NEXT: fcvtzs z31.d, p4/m, z27.h
+; CHECK-NEXT: mov z0.d, p1/m, #0 // =0x0
+; CHECK-NEXT: fcmgt p5.h, p0/z, z7.h, z29.h
+; CHECK-NEXT: fcmgt p6.h, p0/z, z24.h, z29.h
+; CHECK-NEXT: sel z4.d, p9, z26.d, z6.d
+; CHECK-NEXT: fcmgt p4.h, p0/z, z27.h, z29.h
+; CHECK-NEXT: fcmuo p8.h, p0/z, z7.h, z7.h
+; CHECK-NEXT: sel z5.d, p5, z26.d, z30.d
+; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Reload
+; CHECK-NEXT: sel z6.d, p6, z26.d, z25.d
+; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT: fcmuo p9.h, p0/z, z24.h, z24.h
+; CHECK-NEXT: fcmuo p3.h, p0/z, z28.h, z28.h
+; CHECK-NEXT: sel z7.d, p4, z26.d, z31.d
+; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Reload
+; CHECK-NEXT: mov z5.d, p8/m, #0 // =0x0
+; CHECK-NEXT: ldr p8, [sp, #3, mul vl] // 2-byte Reload
+; CHECK-NEXT: fcmuo p0.h, p0/z, z27.h, z27.h
+; CHECK-NEXT: mov z6.d, p9/m, #0 // =0x0
+; CHECK-NEXT: ldr p9, [sp, #2, mul vl] // 2-byte Reload
+; CHECK-NEXT: mov z4.d, p3/m, #0 // =0x0
+; CHECK-NEXT: mov z7.d, p0/m, #0 // =0x0
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %a = call <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16f16(<vscale x 16 x half> %x)
+ ret <vscale x 16 x i64> %a
+}
+
+define <vscale x 32 x i64> @lrint_v32f16(<vscale x 32 x half> %x) {
+; CHECK-LABEL: lrint_v32f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-17
+; CHECK-NEXT: str p10, [sp, #1, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p9, [sp, #2, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p8, [sp, #3, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Spill
+; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Spill
+; CHECK-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16
+; CHECK-NEXT: uunpklo z4.s, z0.h
+; CHECK-NEXT: uunpkhi z6.s, z0.h
+; CHECK-NEXT: mov w9, #31743 // =0x7bff
+; CHECK-NEXT: uunpklo z7.s, z1.h
+; CHECK-NEXT: mov z27.h, #-1025 // =0xfffffffffffffbff
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: uunpkhi z31.s, z1.h
+; CHECK-NEXT: mov z0.d, #0x8000000000000000
+; CHECK-NEXT: mov z30.d, #0x8000000000000000
+; CHECK-NEXT: uunpkhi z17.s, z2.h
+; CHECK-NEXT: mov z9.d, #0x8000000000000000
+; CHECK-NEXT: uunpklo z18.s, z3.h
+; CHECK-NEXT: uunpklo z24.d, z4.s
+; CHECK-NEXT: uunpkhi z25.d, z4.s
+; CHECK-NEXT: uunpkhi z28.d, z6.s
+; CHECK-NEXT: uunpklo z29.d, z7.s
+; CHECK-NEXT: uunpkhi z8.d, z7.s
+; CHECK-NEXT: uunpklo z26.d, z6.s
+; CHECK-NEXT: mov z4.d, #0x8000000000000000
+; CHECK-NEXT: uunpklo z12.d, z31.s
+; CHECK-NEXT: uunpkhi z13.d, z31.s
+; CHECK-NEXT: uunpkhi z3.s, z3.h
+; CHECK-NEXT: uunpkhi z19.d, z18.s
+; CHECK-NEXT: mov z5.d, #0x8000000000000000
+; CHECK-NEXT: frintx z1.h, p0/z, z24.h
+; CHECK-NEXT: frintx z7.h, p0/z, z25.h
+; CHECK-NEXT: frintx z25.h, p0/z, z28.h
+; CHECK-NEXT: frintx z11.h, p0/z, z29.h
+; CHECK-NEXT: frintx z28.h, p0/z, z8.h
+; CHECK-NEXT: frintx z24.h, p0/z, z26.h
+; CHECK-NEXT: frintx z31.h, p0/z, z12.h
+; CHECK-NEXT: frintx z15.h, p0/z, z13.h
+; CHECK-NEXT: uunpklo z8.s, z2.h
+; CHECK-NEXT: frintx z19.h, p0/z, z19.h
+; CHECK-NEXT: mov z26.h, w9
+; CHECK-NEXT: mov z12.d, #0x8000000000000000
+; CHECK-NEXT: mov z13.d, #0x8000000000000000
+; CHECK-NEXT: uunpklo z20.d, z3.s
+; CHECK-NEXT: mov z6.d, #0x8000000000000000
+; CHECK-NEXT: uunpkhi z3.d, z3.s
+; CHECK-NEXT: mov z29.d, #0x7fffffffffffffff
+; CHECK-NEXT: uunpklo z18.d, z18.s
+; CHECK-NEXT: uunpkhi z16.d, z8.s
+; CHECK-NEXT: mov z22.d, #0x8000000000000000
+; CHECK-NEXT: mov z23.d, #0x8000000000000000
+; CHECK-NEXT: mov z10.d, #0x8000000000000000
+; CHECK-NEXT: mov z21.d, #0x8000000000000000
+; CHECK-NEXT: frintx z20.h, p0/z, z20.h
+; CHECK-NEXT: fcmge p3.h, p0/z, z1.h, z27.h
+; CHECK-NEXT: frintx z2.h, p0/z, z16.h
+; CHECK-NEXT: uunpklo z16.d, z17.s
+; CHECK-NEXT: uunpkhi z17.d, z17.s
+; CHECK-NEXT: fcmge p4.h, p0/z, z7.h, z27.h
+; CHECK-NEXT: fcmgt p9.h, p0/z, z11.h, z26.h
+; CHECK-NEXT: fcvtzs z0.d, p3/m, z1.h
+; CHECK-NEXT: frintx z16.h, p0/z, z16.h
+; CHECK-NEXT: frintx z17.h, p0/z, z17.h
+; CHECK-NEXT: fcmge p3.h, p0/z, z11.h, z27.h
+; CHECK-NEXT: fcvtzs z4.d, p4/m, z7.h
+; CHECK-NEXT: fcmge p4.h, p0/z, z28.h, z27.h
+; CHECK-NEXT: fcmuo p8.h, p0/z, z11.h, z11.h
+; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: frintx z0.h, p0/z, z3.h
+; CHECK-NEXT: fcmge p6.h, p0/z, z15.h, z27.h
+; CHECK-NEXT: fcvtzs z30.d, p3/m, z11.h
+; CHECK-NEXT: uunpklo z11.d, z8.s
+; CHECK-NEXT: mov z8.d, #0x8000000000000000
+; CHECK-NEXT: fcvtzs z9.d, p4/m, z28.h
+; CHECK-NEXT: fcmge p4.h, p0/z, z31.h, z27.h
+; CHECK-NEXT: fcmge p7.h, p0/z, z2.h, z27.h
+; CHECK-NEXT: fcvtzs z12.d, p6/m, z15.h
+; CHECK-NEXT: frintx z14.h, p0/z, z11.h
+; CHECK-NEXT: mov z11.d, #0x8000000000000000
+; CHECK-NEXT: sel z3.d, p9, z29.d, z30.d
+; CHECK-NEXT: fcmge p5.h, p0/z, z14.h, z27.h
+; CHECK-NEXT: fcvtzs z8.d, p4/m, z31.h
+; CHECK-NEXT: fcmge p1.h, p0/z, z24.h, z27.h
+; CHECK-NEXT: mov z3.d, p8/m, #0 // =0x0
+; CHECK-NEXT: fcvtzs z11.d, p7/m, z2.h
+; CHECK-NEXT: fcmge p2.h, p0/z, z25.h, z27.h
+; CHECK-NEXT: fcvtzs z13.d, p5/m, z14.h
+; CHECK-NEXT: fcmgt p6.h, p0/z, z15.h, z26.h
+; CHECK-NEXT: fcvtzs z5.d, p1/m, z24.h
+; CHECK-NEXT: fcmuo p4.h, p0/z, z15.h, z15.h
+; CHECK-NEXT: mov z15.d, #0x8000000000000000
+; CHECK-NEXT: fcvtzs z6.d, p2/m, z25.h
+; CHECK-NEXT: fcmge p7.h, p0/z, z16.h, z27.h
+; CHECK-NEXT: fcmgt p3.h, p0/z, z28.h, z26.h
+; CHECK-NEXT: fcmgt p10.h, p0/z, z14.h, z26.h
+; CHECK-NEXT: fcmge p1.h, p0/z, z19.h, z27.h
+; CHECK-NEXT: fcmge p2.h, p0/z, z20.h, z27.h
+; CHECK-NEXT: fcvtzs z15.d, p7/m, z16.h
+; CHECK-NEXT: sel z30.d, p3, z29.d, z9.d
+; CHECK-NEXT: sel z9.d, p6, z29.d, z12.d
+; CHECK-NEXT: sel z12.d, p10, z29.d, z13.d
+; CHECK-NEXT: frintx z13.h, p0/z, z18.h
+; CHECK-NEXT: fcmge p5.h, p0/z, z17.h, z27.h
+; CHECK-NEXT: fcvtzs z22.d, p1/m, z19.h
+; CHECK-NEXT: fcvtzs z23.d, p2/m, z20.h
+; CHECK-NEXT: mov z9.d, p4/m, #0 // =0x0
+; CHECK-NEXT: fcmge p7.h, p0/z, z0.h, z27.h
+; CHECK-NEXT: fcmuo p1.h, p0/z, z14.h, z14.h
+; CHECK-NEXT: str z9, [x8, #7, mul vl]
+; CHECK-NEXT: fcvtzs z21.d, p5/m, z17.h
+; CHECK-NEXT: fcmge p2.h, p0/z, z13.h, z27.h
+; CHECK-NEXT: mov z27.d, #0x8000000000000000
+; CHECK-NEXT: fcvtzs z10.d, p7/m, z0.h
+; CHECK-NEXT: fcmgt p5.h, p0/z, z0.h, z26.h
+; CHECK-NEXT: mov z12.d, p1/m, #0 // =0x0
+; CHECK-NEXT: fcmgt p6.h, p0/z, z20.h, z26.h
+; CHECK-NEXT: fcvtzs z27.d, p2/m, z13.h
+; CHECK-NEXT: fcmgt p3.h, p0/z, z19.h, z26.h
+; CHECK-NEXT: str z12, [x8, #8, mul vl]
+; CHECK-NEXT: fcmuo p1.h, p0/z, z0.h, z0.h
+; CHECK-NEXT: sel z0.d, p5, z29.d, z10.d
+; CHECK-NEXT: sel z10.d, p6, z29.d, z23.d
+; CHECK-NEXT: fcmuo p2.h, p0/z, z20.h, z20.h
+; CHECK-NEXT: fcmuo p7.h, p0/z, z19.h, z19.h
+; CHECK-NEXT: fcmgt p5.h, p0/z, z2.h, z26.h
+; CHECK-NEXT: mov z0.d, p1/m, #0 // =0x0
+; CHECK-NEXT: fcmuo p6.h, p0/z, z2.h, z2.h
+; CHECK-NEXT: sel z2.d, p3, z29.d, z22.d
+; CHECK-NEXT: mov z10.d, p2/m, #0 // =0x0
+; CHECK-NEXT: str z0, [x8, #15, mul vl]
+; CHECK-NEXT: fcmgt p3.h, p0/z, z13.h, z26.h
+; CHECK-NEXT: mov z2.d, p7/m, #0 // =0x0
+; CHECK-NEXT: sel z0.d, p5, z29.d, z11.d
+; CHECK-NEXT: str z10, [x8, #14, mul vl]
+; CHECK-NEXT: fcmgt p1.h, p0/z, z17.h, z26.h
+; CHECK-NEXT: fcmgt p2.h, p0/z, z16.h, z26.h
+; CHECK-NEXT: str z2, [x8, #13, mul vl]
+; CHECK-NEXT: mov z0.d, p6/m, #0 // =0x0
+; CHECK-NEXT: fcmuo p7.h, p0/z, z13.h, z13.h
+; CHECK-NEXT: mov z27.d, p3/m, z29.d
+; CHECK-NEXT: fcmuo p5.h, p0/z, z17.h, z17.h
+; CHECK-NEXT: str z0, [x8, #9, mul vl]
+; CHECK-NEXT: sel z2.d, p1, z29.d, z21.d
+; CHECK-NEXT: fcmuo p3.h, p0/z, z16.h, z16.h
+; CHECK-NEXT: sel z10.d, p2, z29.d, z15.d
+; CHECK-NEXT: mov z27.d, p7/m, #0 // =0x0
+; CHECK-NEXT: fcmgt p1.h, p0/z, z7.h, z26.h
+; CHECK-NEXT: mov z2.d, p5/m, #0 // =0x0
+; CHECK-NEXT: fcmgt p2.h, p0/z, z24.h, z26.h
+; CHECK-NEXT: str z27, [x8, #12, mul vl]
+; CHECK-NEXT: mov z10.d, p3/m, #0 // =0x0
+; CHECK-NEXT: fcmuo p3.h, p0/z, z28.h, z28.h
+; CHECK-NEXT: str z2, [x8, #11, mul vl]
+; CHECK-NEXT: fcmgt p7.h, p0/z, z31.h, z26.h
+; CHECK-NEXT: mov z4.d, p1/m, z29.d
+; CHECK-NEXT: str z10, [x8, #10, mul vl]
+; CHECK-NEXT: mov z5.d, p2/m, z29.d
+; CHECK-NEXT: fcmgt p5.h, p0/z, z25.h, z26.h
+; CHECK-NEXT: fcmgt p1.h, p0/z, z1.h, z26.h
+; CHECK-NEXT: mov z30.d, p3/m, #0 // =0x0
+; CHECK-NEXT: sel z2.d, p7, z29.d, z8.d
+; CHECK-NEXT: fcmuo p6.h, p0/z, z31.h, z31.h
+; CHECK-NEXT: fcmuo p2.h, p0/z, z25.h, z25.h
+; CHECK-NEXT: sel z0.d, p5, z29.d, z6.d
+; CHECK-NEXT: fcmuo p3.h, p0/z, z24.h, z24.h
+; CHECK-NEXT: fcmuo p4.h, p0/z, z7.h, z7.h
+; CHECK-NEXT: mov z2.d, p6/m, #0 // =0x0
+; CHECK-NEXT: fcmuo p0.h, p0/z, z1.h, z1.h
+; CHECK-NEXT: ldr z1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: str z30, [x8, #5, mul vl]
+; CHECK-NEXT: mov z0.d, p2/m, #0 // =0x0
+; CHECK-NEXT: str z3, [x8, #4, mul vl]
+; CHECK-NEXT: mov z5.d, p3/m, #0 // =0x0
+; CHECK-NEXT: mov z4.d, p4/m, #0 // =0x0
+; CHECK-NEXT: str z2, [x8, #6, mul vl]
+; CHECK-NEXT: mov z1.d, p1/m, z29.d
+; CHECK-NEXT: str z0, [x8, #3, mul vl]
+; CHECK-NEXT: str z5, [x8, #2, mul vl]
+; CHECK-NEXT: mov z1.d, p0/m, #0 // =0x0
+; CHECK-NEXT: str z4, [x8, #1, mul vl]
+; CHECK-NEXT: str z1, [x8]
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr p10, [sp, #1, mul vl] // 2-byte Reload
+; CHECK-NEXT: ldr p9, [sp, #2, mul vl] // 2-byte Reload
+; CHECK-NEXT: ldr p8, [sp, #3, mul vl] // 2-byte Reload
+; CHECK-NEXT: ldr p7, [sp, #4, mul vl] // 2-byte Reload
+; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Reload
+; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Reload
+; CHECK-NEXT: addvl sp, sp, #17
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %a = call <vscale x 32 x i64> @llvm.lrint.nxv32i64.nxv32f16(<vscale x 32 x half> %x)
+ ret <vscale x 32 x i64> %a
+}
+
+define <vscale x 1 x i64> @lrint_v1f32(<vscale x 1 x float> %x) {
+; CHECK-LABEL: lrint_v1f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: frint64x z0.s, p0/z, z0.s
+; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.s
+; CHECK-NEXT: ret
+ %a = call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f32(<vscale x 1 x float> %x)
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @lrint_v2f32(<vscale x 2 x float> %x) {
+; CHECK-LABEL: lrint_v2f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: frint64x z0.s, p0/z, z0.s
+; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.s
+; CHECK-NEXT: ret
+ %a = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f32(<vscale x 2 x float> %x)
+ ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @lrint_v4f32(<vscale x 4 x float> %x) {
+; CHECK-LABEL: lrint_v4f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uunpklo z1.d, z0.s
+; CHECK-NEXT: uunpkhi z0.d, z0.s
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: frint64x z1.s, p0/z, z1.s
+; CHECK-NEXT: frint64x z2.s, p0/z, z0.s
+; CHECK-NEXT: movprfx z0, z1
+; CHECK-NEXT: fcvtzs z0.d, p0/m, z1.s
+; CHECK-NEXT: movprfx z1, z2
+; CHECK-NEXT: fcvtzs z1.d, p0/m, z2.s
+; CHECK-NEXT: ret
+ %a = call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f32(<vscale x 4 x float> %x)
+ ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @lrint_v8f32(<vscale x 8 x float> %x) {
+; CHECK-LABEL: lrint_v8f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uunpklo z2.d, z0.s
+; CHECK-NEXT: uunpkhi z0.d, z0.s
+; CHECK-NEXT: uunpklo z3.d, z1.s
+; CHECK-NEXT: uunpkhi z1.d, z1.s
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: frint64x z2.s, p0/z, z2.s
+; CHECK-NEXT: frint64x z4.s, p0/z, z0.s
+; CHECK-NEXT: frint64x z3.s, p0/z, z3.s
+; CHECK-NEXT: frint64x z5.s, p0/z, z1.s
+; CHECK-NEXT: movprfx z0, z2
+; CHECK-NEXT: fcvtzs z0.d, p0/m, z2.s
+; CHECK-NEXT: movprfx z1, z4
+; CHECK-NEXT: fcvtzs z1.d, p0/m, z4.s
+; CHECK-NEXT: movprfx z2, z3
+; CHECK-NEXT: fcvtzs z2.d, p0/m, z3.s
+; CHECK-NEXT: movprfx z3, z5
+; CHECK-NEXT: fcvtzs z3.d, p0/m, z5.s
+; CHECK-NEXT: ret
+ %a = call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f32(<vscale x 8 x float> %x)
+ ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 16 x i64> @lrint_v16f32(<vscale x 16 x float> %x) {
+; CHECK-LABEL: lrint_v16f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uunpklo z4.d, z1.s
+; CHECK-NEXT: uunpklo z5.d, z0.s
+; CHECK-NEXT: uunpkhi z0.d, z0.s
+; CHECK-NEXT: uunpkhi z1.d, z1.s
+; CHECK-NEXT: uunpklo z6.d, z2.s
+; CHECK-NEXT: uunpkhi z7.d, z2.s
+; CHECK-NEXT: uunpklo z24.d, z3.s
+; CHECK-NEXT: uunpkhi z25.d, z3.s
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: frint64x z2.s, p0/z, z5.s
+; CHECK-NEXT: frint64x z3.s, p0/z, z0.s
+; CHECK-NEXT: frint64x z4.s, p0/z, z4.s
+; CHECK-NEXT: frint64x z5.s, p0/z, z1.s
+; CHECK-NEXT: movprfx z0, z2
+; CHECK-NEXT: fcvtzs z0.d, p0/m, z2.s
+; CHECK-NEXT: movprfx z1, z3
+; CHECK-NEXT: fcvtzs z1.d, p0/m, z3.s
+; CHECK-NEXT: movprfx z2, z4
+; CHECK-NEXT: fcvtzs z2.d, p0/m, z4.s
+; CHECK-NEXT: movprfx z3, z5
+; CHECK-NEXT: fcvtzs z3.d, p0/m, z5.s
+; CHECK-NEXT: frint64x z4.s, p0/z, z6.s
+; CHECK-NEXT: frint64x z5.s, p0/z, z7.s
+; CHECK-NEXT: frint64x z6.s, p0/z, z24.s
+; CHECK-NEXT: frint64x z7.s, p0/z, z25.s
+; CHECK-NEXT: fcvtzs z4.d, p0/m, z4.s
+; CHECK-NEXT: fcvtzs z5.d, p0/m, z5.s
+; CHECK-NEXT: fcvtzs z6.d, p0/m, z6.s
+; CHECK-NEXT: fcvtzs z7.d, p0/m, z7.s
+; CHECK-NEXT: ret
+ %a = call <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16f32(<vscale x 16 x float> %x)
+ ret <vscale x 16 x i64> %a
+}
+
+define <vscale x 32 x i64> @lrint_v32f32(<vscale x 32 x float> %x) {
+; CHECK-LABEL: lrint_v32f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uunpkhi z25.d, z7.s
+; CHECK-NEXT: uunpkhi z27.d, z6.s
+; CHECK-NEXT: uunpklo z6.d, z6.s
+; CHECK-NEXT: uunpklo z29.d, z3.s
+; CHECK-NEXT: uunpkhi z30.d, z5.s
+; CHECK-NEXT: uunpklo z5.d, z5.s
+; CHECK-NEXT: uunpkhi z31.d, z4.s
+; CHECK-NEXT: uunpklo z4.d, z4.s
+; CHECK-NEXT: uunpkhi z3.d, z3.s
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: uunpklo z24.d, z0.s
+; CHECK-NEXT: uunpkhi z0.d, z0.s
+; CHECK-NEXT: uunpklo z7.d, z7.s
+; CHECK-NEXT: uunpklo z26.d, z1.s
+; CHECK-NEXT: uunpkhi z1.d, z1.s
+; CHECK-NEXT: frint64x z25.s, p0/z, z25.s
+; CHECK-NEXT: frint64x z6.s, p0/z, z6.s
+; CHECK-NEXT: frint64x z5.s, p0/z, z5.s
+; CHECK-NEXT: frint64x z4.s, p0/z, z4.s
+; CHECK-NEXT: frint64x z3.s, p0/z, z3.s
+; CHECK-NEXT: frint64x z27.s, p0/z, z27.s
+; CHECK-NEXT: uunpklo z28.d, z2.s
+; CHECK-NEXT: frint64x z30.s, p0/z, z30.s
+; CHECK-NEXT: frint64x z0.s, p0/z, z0.s
+; CHECK-NEXT: uunpkhi z2.d, z2.s
+; CHECK-NEXT: fcvtzs z25.d, p0/m, z25.s
+; CHECK-NEXT: frint64x z7.s, p0/z, z7.s
+; CHECK-NEXT: fcvtzs z6.d, p0/m, z6.s
+; CHECK-NEXT: fcvtzs z5.d, p0/m, z5.s
+; CHECK-NEXT: frint64x z1.s, p0/z, z1.s
+; CHECK-NEXT: fcvtzs z4.d, p0/m, z4.s
+; CHECK-NEXT: fcvtzs z3.d, p0/m, z3.s
+; CHECK-NEXT: fcvtzs z7.d, p0/m, z7.s
+; CHECK-NEXT: fcvtzs z27.d, p0/m, z27.s
+; CHECK-NEXT: fcvtzs z30.d, p0/m, z30.s
+; CHECK-NEXT: fcvtzs z1.d, p0/m, z1.s
+; CHECK-NEXT: str z25, [x8, #15, mul vl]
+; CHECK-NEXT: frint64x z25.s, p0/z, z31.s
+; CHECK-NEXT: frint64x z2.s, p0/z, z2.s
+; CHECK-NEXT: str z6, [x8, #12, mul vl]
+; CHECK-NEXT: frint64x z6.s, p0/z, z29.s
+; CHECK-NEXT: fcvtzs z25.d, p0/m, z25.s
+; CHECK-NEXT: str z5, [x8, #10, mul vl]
+; CHECK-NEXT: frint64x z5.s, p0/z, z28.s
+; CHECK-NEXT: fcvtzs z6.d, p0/m, z6.s
+; CHECK-NEXT: str z4, [x8, #8, mul vl]
+; CHECK-NEXT: frint64x z4.s, p0/z, z26.s
+; CHECK-NEXT: fcvtzs z2.d, p0/m, z2.s
+; CHECK-NEXT: str z3, [x8, #7, mul vl]
+; CHECK-NEXT: frint64x z3.s, p0/z, z24.s
+; CHECK-NEXT: fcvtzs z5.d, p0/m, z5.s
+; CHECK-NEXT: fcvtzs z4.d, p0/m, z4.s
+; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.s
+; CHECK-NEXT: fcvtzs z3.d, p0/m, z3.s
+; CHECK-NEXT: str z7, [x8, #14, mul vl]
+; CHECK-NEXT: str z27, [x8, #13, mul vl]
+; CHECK-NEXT: str z30, [x8, #11, mul vl]
+; CHECK-NEXT: str z25, [x8, #9, mul vl]
+; CHECK-NEXT: str z6, [x8, #6, mul vl]
+; CHECK-NEXT: str z2, [x8, #5, mul vl]
+; CHECK-NEXT: str z5, [x8, #4, mul vl]
+; CHECK-NEXT: str z1, [x8, #3, mul vl]
+; CHECK-NEXT: str z4, [x8, #2, mul vl]
+; CHECK-NEXT: str z0, [x8, #1, mul vl]
+; CHECK-NEXT: str z3, [x8]
+; CHECK-NEXT: ret
+ %a = call <vscale x 32 x i64> @llvm.lrint.nxv32i64.nxv32f32(<vscale x 32 x float> %x)
+ ret <vscale x 32 x i64> %a
+}
+
+define <vscale x 1 x i64> @lrint_v1f64(<vscale x 1 x double> %x) {
+; CHECK-LABEL: lrint_v1f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
+; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
+; CHECK-NEXT: ret
+ %a = call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f64(<vscale x 1 x double> %x)
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @lrint_v2f64(<vscale x 2 x double> %x) {
+; CHECK-LABEL: lrint_v2f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
+; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
+; CHECK-NEXT: ret
+ %a = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f64(<vscale x 2 x double> %x)
+ ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @lrint_v4f64(<vscale x 4 x double> %x) {
+; CHECK-LABEL: lrint_v4f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
+; CHECK-NEXT: frint64x z1.d, p0/z, z1.d
+; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
+; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
+; CHECK-NEXT: ret
+ %a = call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f64(<vscale x 4 x double> %x)
+ ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @lrint_v8f64(<vscale x 8 x double> %x) {
+; CHECK-LABEL: lrint_v8f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
+; CHECK-NEXT: frint64x z1.d, p0/z, z1.d
+; CHECK-NEXT: frint64x z2.d, p0/z, z2.d
+; CHECK-NEXT: frint64x z3.d, p0/z, z3.d
+; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
+; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
+; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
+; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
+; CHECK-NEXT: ret
+ %a = call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f64(<vscale x 8 x double> %x)
+ ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 16 x i64> @lrint_v16f64(<vscale x 16 x double> %x) {
+; CHECK-LABEL: lrint_v16f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
+; CHECK-NEXT: frint64x z1.d, p0/z, z1.d
+; CHECK-NEXT: frint64x z2.d, p0/z, z2.d
+; CHECK-NEXT: frint64x z3.d, p0/z, z3.d
+; CHECK-NEXT: frint64x z4.d, p0/z, z4.d
+; CHECK-NEXT: frint64x z5.d, p0/z, z5.d
+; CHECK-NEXT: frint64x z6.d, p0/z, z6.d
+; CHECK-NEXT: frint64x z7.d, p0/z, z7.d
+; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
+; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
+; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
+; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
+; CHECK-NEXT: fcvtzs z4.d, p0/z, z4.d
+; CHECK-NEXT: fcvtzs z5.d, p0/z, z5.d
+; CHECK-NEXT: fcvtzs z6.d, p0/z, z6.d
+; CHECK-NEXT: fcvtzs z7.d, p0/z, z7.d
+; CHECK-NEXT: ret
+ %a = call <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16f64(<vscale x 16 x double> %x)
+ ret <vscale x 16 x i64> %a
+}
+
+define <vscale x 32 x i64> @lrint_v32f64(<vscale x 32 x double> %x) {
+; CHECK-LABEL: lrint_v32f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z2, [x0, #15, mul vl]
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ldr z5, [x0, #12, mul vl]
+; CHECK-NEXT: ldr z3, [x0, #14, mul vl]
+; CHECK-NEXT: ldr z24, [x0, #9, mul vl]
+; CHECK-NEXT: ldr z1, [x0, #7, mul vl]
+; CHECK-NEXT: ldr z0, [x0, #6, mul vl]
+; CHECK-NEXT: ldr z4, [x0, #13, mul vl]
+; CHECK-NEXT: ldr z6, [x0, #11, mul vl]
+; CHECK-NEXT: frint64x z2.d, p0/z, z2.d
+; CHECK-NEXT: ldr z7, [x0, #10, mul vl]
+; CHECK-NEXT: ldr z25, [x0, #8, mul vl]
+; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
+; CHECK-NEXT: frint64x z3.d, p0/z, z3.d
+; CHECK-NEXT: frint64x z1.d, p0/z, z1.d
+; CHECK-NEXT: ldr z26, [x0, #5, mul vl]
+; CHECK-NEXT: ldr z27, [x0, #4, mul vl]
+; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
+; CHECK-NEXT: ldr z28, [x0, #3, mul vl]
+; CHECK-NEXT: ldr z29, [x0, #2, mul vl]
+; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
+; CHECK-NEXT: ldr z30, [x0, #1, mul vl]
+; CHECK-NEXT: ldr z31, [x0]
+; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
+; CHECK-NEXT: str z2, [x8, #15, mul vl]
+; CHECK-NEXT: frint64x z2.d, p0/z, z5.d
+; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
+; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
+; CHECK-NEXT: frint64x z4.d, p0/z, z4.d
+; CHECK-NEXT: str z3, [x8, #14, mul vl]
+; CHECK-NEXT: str z2, [x8, #12, mul vl]
+; CHECK-NEXT: frint64x z2.d, p0/z, z24.d
+; CHECK-NEXT: frint64x z3.d, p0/z, z6.d
+; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
+; CHECK-NEXT: str z1, [x8, #7, mul vl]
+; CHECK-NEXT: frint64x z1.d, p0/z, z27.d
+; CHECK-NEXT: str z2, [x8, #9, mul vl]
+; CHECK-NEXT: frint64x z2.d, p0/z, z26.d
+; CHECK-NEXT: fcvtzs z4.d, p0/z, z4.d
+; CHECK-NEXT: str z0, [x8, #6, mul vl]
+; CHECK-NEXT: frint64x z0.d, p0/z, z28.d
+; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
+; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
+; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
+; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
+; CHECK-NEXT: str z4, [x8, #13, mul vl]
+; CHECK-NEXT: frint64x z4.d, p0/z, z7.d
+; CHECK-NEXT: str z3, [x8, #11, mul vl]
+; CHECK-NEXT: frint64x z3.d, p0/z, z25.d
+; CHECK-NEXT: fcvtzs z4.d, p0/z, z4.d
+; CHECK-NEXT: str z2, [x8, #5, mul vl]
+; CHECK-NEXT: frint64x z2.d, p0/z, z29.d
+; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
+; CHECK-NEXT: str z1, [x8, #4, mul vl]
+; CHECK-NEXT: frint64x z1.d, p0/z, z30.d
+; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
+; CHECK-NEXT: str z0, [x8, #3, mul vl]
+; CHECK-NEXT: frint64x z0.d, p0/z, z31.d
+; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
+; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
+; CHECK-NEXT: str z4, [x8, #10, mul vl]
+; CHECK-NEXT: str z3, [x8, #8, mul vl]
+; CHECK-NEXT: str z2, [x8, #2, mul vl]
+; CHECK-NEXT: str z1, [x8, #1, mul vl]
+; CHECK-NEXT: str z0, [x8]
+; CHECK-NEXT: ret
+  %a = call <vscale x 32 x i64> @llvm.lrint.nxv32i64.nxv32f64(<vscale x 32 x double> %x)
+ ret <vscale x 32 x i64> %a
+}
>From d4f04a3ac71c56236843cf4d79856877b029de1f Mon Sep 17 00:00:00 2001
From: Jacob Crawley <jacob.crawley at arm.com>
Date: Thu, 19 Mar 2026 15:55:04 +0000
Subject: [PATCH 2/4] Delete tests with illegal types and combine into one file
---
.../Target/AArch64/AArch64ISelLowering.cpp | 3 +-
llvm/test/CodeGen/AArch64/sve2p2-llrint.ll | 834 -----------
llvm/test/CodeGen/AArch64/sve2p2-lrint.ll | 1236 +----------------
3 files changed, 44 insertions(+), 2029 deletions(-)
delete mode 100644 llvm/test/CodeGen/AArch64/sve2p2-llrint.ll
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 9185ab02426fb..fd12a8b9ae8be 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5301,8 +5301,9 @@ SDValue AArch64TargetLowering::LowerVectorXRINT(SDValue Op,
unsigned FPBits = CastVT.getScalarSizeInBits();
// Use FRINT32X/FRINT64X if Sve2p2 is available
- if (Subtarget->isSVEorStreamingSVEAvailable() && (Subtarget->hasSVE2p2()) &&
+ if (Subtarget->isSVEorStreamingSVEAvailable() && Subtarget->hasSVE2p2() &&
(FPBits == 32 || FPBits == 64)) {
+ assert((IntBits == 32 || IntBits == 64) && "Expected a 32 or 64-bit integer result type");
unsigned FrintOp = (IntBits == 32) ? AArch64ISD::FRINT32_MERGE_PASSTHRU
: AArch64ISD::FRINT64_MERGE_PASSTHRU;
SDValue Pg = getPredicateForVector(DAG, DL, CastVT);
diff --git a/llvm/test/CodeGen/AArch64/sve2p2-llrint.ll b/llvm/test/CodeGen/AArch64/sve2p2-llrint.ll
deleted file mode 100644
index 6a10635d130b8..0000000000000
--- a/llvm/test/CodeGen/AArch64/sve2p2-llrint.ll
+++ /dev/null
@@ -1,834 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2p2 | FileCheck %s
-
-define <vscale x 1 x i64> @llrint_v1i64_v1f16(<vscale x 1 x half> %x) {
-; CHECK-LABEL: llrint_v1i64_v1f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov z1.h, #-1025 // =0xfffffffffffffbff
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: mov w8, #31743 // =0x7bff
-; CHECK-NEXT: mov z2.d, #0x8000000000000000
-; CHECK-NEXT: frintx z0.h, p0/z, z0.h
-; CHECK-NEXT: fcmge p1.h, p0/z, z0.h, z1.h
-; CHECK-NEXT: mov z1.h, w8
-; CHECK-NEXT: fcvtzs z2.d, p1/m, z0.h
-; CHECK-NEXT: fcmgt p1.h, p0/z, z0.h, z1.h
-; CHECK-NEXT: mov z1.d, #0x7fffffffffffffff
-; CHECK-NEXT: fcmuo p0.h, p0/z, z0.h, z0.h
-; CHECK-NEXT: sel z0.d, p1, z1.d, z2.d
-; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0
-; CHECK-NEXT: ret
- %a = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f16(<vscale x 1 x half> %x)
- ret <vscale x 1 x i64> %a
-}
-declare <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f16(<vscale x 1 x half>)
-
-define <vscale x 2 x i64> @llrint_v1i64_v2f16(<vscale x 2 x half> %x) {
-; CHECK-LABEL: llrint_v1i64_v2f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov z1.h, #-1025 // =0xfffffffffffffbff
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: mov w8, #31743 // =0x7bff
-; CHECK-NEXT: mov z2.d, #0x8000000000000000
-; CHECK-NEXT: frintx z0.h, p0/z, z0.h
-; CHECK-NEXT: fcmge p1.h, p0/z, z0.h, z1.h
-; CHECK-NEXT: mov z1.h, w8
-; CHECK-NEXT: fcvtzs z2.d, p1/m, z0.h
-; CHECK-NEXT: fcmgt p1.h, p0/z, z0.h, z1.h
-; CHECK-NEXT: mov z1.d, #0x7fffffffffffffff
-; CHECK-NEXT: fcmuo p0.h, p0/z, z0.h, z0.h
-; CHECK-NEXT: sel z0.d, p1, z1.d, z2.d
-; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0
-; CHECK-NEXT: ret
- %a = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f16(<vscale x 2 x half> %x)
- ret <vscale x 2 x i64> %a
-}
-declare <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f16(<vscale x 2 x half>)
-
-define <vscale x 4 x i64> @llrint_v4i64_v4f16(<vscale x 4 x half> %x) {
-; CHECK-LABEL: llrint_v4i64_v4f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: uunpklo z1.d, z0.s
-; CHECK-NEXT: uunpkhi z0.d, z0.s
-; CHECK-NEXT: mov w8, #31743 // =0x7bff
-; CHECK-NEXT: mov z2.h, #-1025 // =0xfffffffffffffbff
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: mov z3.d, #0x8000000000000000
-; CHECK-NEXT: mov z4.d, #0x8000000000000000
-; CHECK-NEXT: mov z5.d, #0x7fffffffffffffff
-; CHECK-NEXT: frintx z1.h, p0/z, z1.h
-; CHECK-NEXT: frintx z0.h, p0/z, z0.h
-; CHECK-NEXT: fcmge p1.h, p0/z, z1.h, z2.h
-; CHECK-NEXT: fcmge p2.h, p0/z, z0.h, z2.h
-; CHECK-NEXT: mov z2.h, w8
-; CHECK-NEXT: fcmuo p3.h, p0/z, z1.h, z1.h
-; CHECK-NEXT: fcvtzs z4.d, p1/m, z1.h
-; CHECK-NEXT: fcmgt p1.h, p0/z, z1.h, z2.h
-; CHECK-NEXT: fcvtzs z3.d, p2/m, z0.h
-; CHECK-NEXT: fcmgt p2.h, p0/z, z0.h, z2.h
-; CHECK-NEXT: fcmuo p0.h, p0/z, z0.h, z0.h
-; CHECK-NEXT: sel z0.d, p1, z5.d, z4.d
-; CHECK-NEXT: sel z1.d, p2, z5.d, z3.d
-; CHECK-NEXT: mov z0.d, p3/m, #0 // =0x0
-; CHECK-NEXT: mov z1.d, p0/m, #0 // =0x0
-; CHECK-NEXT: ret
- %a = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f16(<vscale x 4 x half> %x)
- ret <vscale x 4 x i64> %a
-}
-declare <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f16(<vscale x 4 x half>)
-
-define <vscale x 8 x i64> @llrint_v8i64_v8f16(<vscale x 8 x half> %x) {
-; CHECK-LABEL: llrint_v8i64_v8f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
-; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: uunpklo z1.s, z0.h
-; CHECK-NEXT: uunpkhi z0.s, z0.h
-; CHECK-NEXT: mov w8, #31743 // =0x7bff
-; CHECK-NEXT: mov z4.h, #-1025 // =0xfffffffffffffbff
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: mov z5.d, #0x8000000000000000
-; CHECK-NEXT: mov z6.d, #0x8000000000000000
-; CHECK-NEXT: mov z7.d, #0x8000000000000000
-; CHECK-NEXT: mov z24.h, w8
-; CHECK-NEXT: mov z25.d, #0x8000000000000000
-; CHECK-NEXT: uunpklo z2.d, z1.s
-; CHECK-NEXT: uunpkhi z1.d, z1.s
-; CHECK-NEXT: uunpklo z3.d, z0.s
-; CHECK-NEXT: uunpkhi z0.d, z0.s
-; CHECK-NEXT: frintx z2.h, p0/z, z2.h
-; CHECK-NEXT: frintx z1.h, p0/z, z1.h
-; CHECK-NEXT: frintx z3.h, p0/z, z3.h
-; CHECK-NEXT: fcmge p1.h, p0/z, z2.h, z4.h
-; CHECK-NEXT: frintx z26.h, p0/z, z0.h
-; CHECK-NEXT: fcmge p2.h, p0/z, z1.h, z4.h
-; CHECK-NEXT: fcmge p3.h, p0/z, z3.h, z4.h
-; CHECK-NEXT: fcmgt p4.h, p0/z, z1.h, z24.h
-; CHECK-NEXT: fcvtzs z5.d, p1/m, z2.h
-; CHECK-NEXT: fcmge p1.h, p0/z, z26.h, z4.h
-; CHECK-NEXT: fcvtzs z6.d, p2/m, z1.h
-; CHECK-NEXT: mov z4.d, #0x7fffffffffffffff
-; CHECK-NEXT: fcmgt p2.h, p0/z, z2.h, z24.h
-; CHECK-NEXT: fcvtzs z7.d, p3/m, z3.h
-; CHECK-NEXT: fcmgt p5.h, p0/z, z3.h, z24.h
-; CHECK-NEXT: fcmgt p6.h, p0/z, z26.h, z24.h
-; CHECK-NEXT: fcvtzs z25.d, p1/m, z26.h
-; CHECK-NEXT: fcmuo p3.h, p0/z, z2.h, z2.h
-; CHECK-NEXT: sel z0.d, p2, z4.d, z5.d
-; CHECK-NEXT: fcmuo p1.h, p0/z, z1.h, z1.h
-; CHECK-NEXT: sel z1.d, p4, z4.d, z6.d
-; CHECK-NEXT: sel z2.d, p5, z4.d, z7.d
-; CHECK-NEXT: fcmuo p2.h, p0/z, z3.h, z3.h
-; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT: fcmuo p0.h, p0/z, z26.h, z26.h
-; CHECK-NEXT: sel z3.d, p6, z4.d, z25.d
-; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT: mov z0.d, p3/m, #0 // =0x0
-; CHECK-NEXT: mov z1.d, p1/m, #0 // =0x0
-; CHECK-NEXT: mov z2.d, p2/m, #0 // =0x0
-; CHECK-NEXT: mov z3.d, p0/m, #0 // =0x0
-; CHECK-NEXT: addvl sp, sp, #1
-; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT: ret
- %a = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f16(<vscale x 8 x half> %x)
- ret <vscale x 8 x i64> %a
-}
-declare <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f16(<vscale x 8 x half>)
-
-define <vscale x 16 x i64> @llrint_v16i64_v16f16(<vscale x 16 x half> %x) {
-; CHECK-LABEL: llrint_v16i64_v16f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: str p10, [sp, #1, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p9, [sp, #2, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p8, [sp, #3, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
-; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: uunpkhi z3.s, z0.h
-; CHECK-NEXT: uunpklo z2.s, z0.h
-; CHECK-NEXT: mov w8, #31743 // =0x7bff
-; CHECK-NEXT: uunpklo z7.s, z1.h
-; CHECK-NEXT: mov z0.h, #-1025 // =0xfffffffffffffbff
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: uunpkhi z1.s, z1.h
-; CHECK-NEXT: mov z4.d, #0x8000000000000000
-; CHECK-NEXT: mov z6.d, #0x8000000000000000
-; CHECK-NEXT: mov z5.d, #0x8000000000000000
-; CHECK-NEXT: mov z29.h, w8
-; CHECK-NEXT: mov z30.d, #0x8000000000000000
-; CHECK-NEXT: uunpklo z26.d, z3.s
-; CHECK-NEXT: uunpklo z24.d, z2.s
-; CHECK-NEXT: uunpkhi z25.d, z2.s
-; CHECK-NEXT: uunpklo z28.d, z7.s
-; CHECK-NEXT: uunpkhi z27.d, z3.s
-; CHECK-NEXT: uunpkhi z7.d, z7.s
-; CHECK-NEXT: mov z2.d, #0x8000000000000000
-; CHECK-NEXT: mov z3.d, #0x8000000000000000
-; CHECK-NEXT: uunpklo z31.d, z1.s
-; CHECK-NEXT: uunpkhi z1.d, z1.s
-; CHECK-NEXT: frintx z26.h, p0/z, z26.h
-; CHECK-NEXT: frintx z24.h, p0/z, z24.h
-; CHECK-NEXT: frintx z25.h, p0/z, z25.h
-; CHECK-NEXT: frintx z28.h, p0/z, z28.h
-; CHECK-NEXT: frintx z27.h, p0/z, z27.h
-; CHECK-NEXT: frintx z7.h, p0/z, z7.h
-; CHECK-NEXT: fcmge p4.h, p0/z, z26.h, z0.h
-; CHECK-NEXT: fcmge p2.h, p0/z, z24.h, z0.h
-; CHECK-NEXT: fcmge p3.h, p0/z, z25.h, z0.h
-; CHECK-NEXT: fcmge p1.h, p0/z, z28.h, z0.h
-; CHECK-NEXT: fcmge p5.h, p0/z, z27.h, z0.h
-; CHECK-NEXT: fcvtzs z4.d, p4/m, z26.h
-; CHECK-NEXT: fcvtzs z2.d, p2/m, z24.h
-; CHECK-NEXT: fcmge p4.h, p0/z, z7.h, z0.h
-; CHECK-NEXT: fcvtzs z3.d, p3/m, z25.h
-; CHECK-NEXT: fcvtzs z6.d, p1/m, z28.h
-; CHECK-NEXT: fcmgt p3.h, p0/z, z24.h, z29.h
-; CHECK-NEXT: fcvtzs z5.d, p5/m, z27.h
-; CHECK-NEXT: fcmuo p1.h, p0/z, z24.h, z24.h
-; CHECK-NEXT: frintx z24.h, p0/z, z31.h
-; CHECK-NEXT: mov z31.d, #0x8000000000000000
-; CHECK-NEXT: fcvtzs z30.d, p4/m, z7.h
-; CHECK-NEXT: fcmgt p5.h, p0/z, z25.h, z29.h
-; CHECK-NEXT: fcmuo p2.h, p0/z, z25.h, z25.h
-; CHECK-NEXT: mov z25.d, #0x8000000000000000
-; CHECK-NEXT: fcmge p4.h, p0/z, z24.h, z0.h
-; CHECK-NEXT: fcmgt p6.h, p0/z, z26.h, z29.h
-; CHECK-NEXT: fcmuo p7.h, p0/z, z26.h, z26.h
-; CHECK-NEXT: mov z26.d, #0x7fffffffffffffff
-; CHECK-NEXT: fcmgt p8.h, p0/z, z27.h, z29.h
-; CHECK-NEXT: fcvtzs z25.d, p4/m, z24.h
-; CHECK-NEXT: fcmuo p10.h, p0/z, z27.h, z27.h
-; CHECK-NEXT: frintx z27.h, p0/z, z1.h
-; CHECK-NEXT: sel z1.d, p5, z26.d, z3.d
-; CHECK-NEXT: sel z3.d, p8, z26.d, z5.d
-; CHECK-NEXT: fcmge p4.h, p0/z, z27.h, z0.h
-; CHECK-NEXT: sel z0.d, p3, z26.d, z2.d
-; CHECK-NEXT: sel z2.d, p6, z26.d, z4.d
-; CHECK-NEXT: mov z1.d, p2/m, #0 // =0x0
-; CHECK-NEXT: mov z3.d, p10/m, #0 // =0x0
-; CHECK-NEXT: ldr p10, [sp, #1, mul vl] // 2-byte Reload
-; CHECK-NEXT: fcmgt p9.h, p0/z, z28.h, z29.h
-; CHECK-NEXT: mov z2.d, p7/m, #0 // =0x0
-; CHECK-NEXT: ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT: fcvtzs z31.d, p4/m, z27.h
-; CHECK-NEXT: mov z0.d, p1/m, #0 // =0x0
-; CHECK-NEXT: fcmgt p5.h, p0/z, z7.h, z29.h
-; CHECK-NEXT: fcmgt p6.h, p0/z, z24.h, z29.h
-; CHECK-NEXT: sel z4.d, p9, z26.d, z6.d
-; CHECK-NEXT: fcmgt p4.h, p0/z, z27.h, z29.h
-; CHECK-NEXT: fcmuo p8.h, p0/z, z7.h, z7.h
-; CHECK-NEXT: sel z5.d, p5, z26.d, z30.d
-; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT: sel z6.d, p6, z26.d, z25.d
-; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT: fcmuo p9.h, p0/z, z24.h, z24.h
-; CHECK-NEXT: fcmuo p3.h, p0/z, z28.h, z28.h
-; CHECK-NEXT: sel z7.d, p4, z26.d, z31.d
-; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT: mov z5.d, p8/m, #0 // =0x0
-; CHECK-NEXT: ldr p8, [sp, #3, mul vl] // 2-byte Reload
-; CHECK-NEXT: fcmuo p0.h, p0/z, z27.h, z27.h
-; CHECK-NEXT: mov z6.d, p9/m, #0 // =0x0
-; CHECK-NEXT: ldr p9, [sp, #2, mul vl] // 2-byte Reload
-; CHECK-NEXT: mov z4.d, p3/m, #0 // =0x0
-; CHECK-NEXT: mov z7.d, p0/m, #0 // =0x0
-; CHECK-NEXT: addvl sp, sp, #1
-; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT: ret
- %a = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f16(<vscale x 16 x half> %x)
- ret <vscale x 16 x i64> %a
-}
-declare <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f16(<vscale x 16 x half>)
-
-define <vscale x 32 x i64> @llrint_v32i64_v32f16(<vscale x 32 x half> %x) {
-; CHECK-LABEL: llrint_v32i64_v32f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-17
-; CHECK-NEXT: str p10, [sp, #1, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p9, [sp, #2, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p8, [sp, #3, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Spill
-; CHECK-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG
-; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
-; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16
-; CHECK-NEXT: uunpklo z4.s, z0.h
-; CHECK-NEXT: uunpkhi z6.s, z0.h
-; CHECK-NEXT: mov w9, #31743 // =0x7bff
-; CHECK-NEXT: uunpklo z7.s, z1.h
-; CHECK-NEXT: mov z27.h, #-1025 // =0xfffffffffffffbff
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: uunpkhi z31.s, z1.h
-; CHECK-NEXT: mov z0.d, #0x8000000000000000
-; CHECK-NEXT: mov z30.d, #0x8000000000000000
-; CHECK-NEXT: uunpkhi z17.s, z2.h
-; CHECK-NEXT: mov z9.d, #0x8000000000000000
-; CHECK-NEXT: uunpklo z18.s, z3.h
-; CHECK-NEXT: uunpklo z24.d, z4.s
-; CHECK-NEXT: uunpkhi z25.d, z4.s
-; CHECK-NEXT: uunpkhi z28.d, z6.s
-; CHECK-NEXT: uunpklo z29.d, z7.s
-; CHECK-NEXT: uunpkhi z8.d, z7.s
-; CHECK-NEXT: uunpklo z26.d, z6.s
-; CHECK-NEXT: mov z4.d, #0x8000000000000000
-; CHECK-NEXT: uunpklo z12.d, z31.s
-; CHECK-NEXT: uunpkhi z13.d, z31.s
-; CHECK-NEXT: uunpkhi z3.s, z3.h
-; CHECK-NEXT: uunpkhi z19.d, z18.s
-; CHECK-NEXT: mov z5.d, #0x8000000000000000
-; CHECK-NEXT: frintx z1.h, p0/z, z24.h
-; CHECK-NEXT: frintx z7.h, p0/z, z25.h
-; CHECK-NEXT: frintx z25.h, p0/z, z28.h
-; CHECK-NEXT: frintx z11.h, p0/z, z29.h
-; CHECK-NEXT: frintx z28.h, p0/z, z8.h
-; CHECK-NEXT: frintx z24.h, p0/z, z26.h
-; CHECK-NEXT: frintx z31.h, p0/z, z12.h
-; CHECK-NEXT: frintx z15.h, p0/z, z13.h
-; CHECK-NEXT: uunpklo z8.s, z2.h
-; CHECK-NEXT: frintx z19.h, p0/z, z19.h
-; CHECK-NEXT: mov z26.h, w9
-; CHECK-NEXT: mov z12.d, #0x8000000000000000
-; CHECK-NEXT: mov z13.d, #0x8000000000000000
-; CHECK-NEXT: uunpklo z20.d, z3.s
-; CHECK-NEXT: mov z6.d, #0x8000000000000000
-; CHECK-NEXT: uunpkhi z3.d, z3.s
-; CHECK-NEXT: mov z29.d, #0x7fffffffffffffff
-; CHECK-NEXT: uunpklo z18.d, z18.s
-; CHECK-NEXT: uunpkhi z16.d, z8.s
-; CHECK-NEXT: mov z22.d, #0x8000000000000000
-; CHECK-NEXT: mov z23.d, #0x8000000000000000
-; CHECK-NEXT: mov z10.d, #0x8000000000000000
-; CHECK-NEXT: mov z21.d, #0x8000000000000000
-; CHECK-NEXT: frintx z20.h, p0/z, z20.h
-; CHECK-NEXT: fcmge p3.h, p0/z, z1.h, z27.h
-; CHECK-NEXT: frintx z2.h, p0/z, z16.h
-; CHECK-NEXT: uunpklo z16.d, z17.s
-; CHECK-NEXT: uunpkhi z17.d, z17.s
-; CHECK-NEXT: fcmge p4.h, p0/z, z7.h, z27.h
-; CHECK-NEXT: fcmgt p9.h, p0/z, z11.h, z26.h
-; CHECK-NEXT: fcvtzs z0.d, p3/m, z1.h
-; CHECK-NEXT: frintx z16.h, p0/z, z16.h
-; CHECK-NEXT: frintx z17.h, p0/z, z17.h
-; CHECK-NEXT: fcmge p3.h, p0/z, z11.h, z27.h
-; CHECK-NEXT: fcvtzs z4.d, p4/m, z7.h
-; CHECK-NEXT: fcmge p4.h, p0/z, z28.h, z27.h
-; CHECK-NEXT: fcmuo p8.h, p0/z, z11.h, z11.h
-; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: frintx z0.h, p0/z, z3.h
-; CHECK-NEXT: fcmge p6.h, p0/z, z15.h, z27.h
-; CHECK-NEXT: fcvtzs z30.d, p3/m, z11.h
-; CHECK-NEXT: uunpklo z11.d, z8.s
-; CHECK-NEXT: mov z8.d, #0x8000000000000000
-; CHECK-NEXT: fcvtzs z9.d, p4/m, z28.h
-; CHECK-NEXT: fcmge p4.h, p0/z, z31.h, z27.h
-; CHECK-NEXT: fcmge p7.h, p0/z, z2.h, z27.h
-; CHECK-NEXT: fcvtzs z12.d, p6/m, z15.h
-; CHECK-NEXT: frintx z14.h, p0/z, z11.h
-; CHECK-NEXT: mov z11.d, #0x8000000000000000
-; CHECK-NEXT: sel z3.d, p9, z29.d, z30.d
-; CHECK-NEXT: fcmge p5.h, p0/z, z14.h, z27.h
-; CHECK-NEXT: fcvtzs z8.d, p4/m, z31.h
-; CHECK-NEXT: fcmge p1.h, p0/z, z24.h, z27.h
-; CHECK-NEXT: mov z3.d, p8/m, #0 // =0x0
-; CHECK-NEXT: fcvtzs z11.d, p7/m, z2.h
-; CHECK-NEXT: fcmge p2.h, p0/z, z25.h, z27.h
-; CHECK-NEXT: fcvtzs z13.d, p5/m, z14.h
-; CHECK-NEXT: fcmgt p6.h, p0/z, z15.h, z26.h
-; CHECK-NEXT: fcvtzs z5.d, p1/m, z24.h
-; CHECK-NEXT: fcmuo p4.h, p0/z, z15.h, z15.h
-; CHECK-NEXT: mov z15.d, #0x8000000000000000
-; CHECK-NEXT: fcvtzs z6.d, p2/m, z25.h
-; CHECK-NEXT: fcmge p7.h, p0/z, z16.h, z27.h
-; CHECK-NEXT: fcmgt p3.h, p0/z, z28.h, z26.h
-; CHECK-NEXT: fcmgt p10.h, p0/z, z14.h, z26.h
-; CHECK-NEXT: fcmge p1.h, p0/z, z19.h, z27.h
-; CHECK-NEXT: fcmge p2.h, p0/z, z20.h, z27.h
-; CHECK-NEXT: fcvtzs z15.d, p7/m, z16.h
-; CHECK-NEXT: sel z30.d, p3, z29.d, z9.d
-; CHECK-NEXT: sel z9.d, p6, z29.d, z12.d
-; CHECK-NEXT: sel z12.d, p10, z29.d, z13.d
-; CHECK-NEXT: frintx z13.h, p0/z, z18.h
-; CHECK-NEXT: fcmge p5.h, p0/z, z17.h, z27.h
-; CHECK-NEXT: fcvtzs z22.d, p1/m, z19.h
-; CHECK-NEXT: fcvtzs z23.d, p2/m, z20.h
-; CHECK-NEXT: mov z9.d, p4/m, #0 // =0x0
-; CHECK-NEXT: fcmge p7.h, p0/z, z0.h, z27.h
-; CHECK-NEXT: fcmuo p1.h, p0/z, z14.h, z14.h
-; CHECK-NEXT: str z9, [x8, #7, mul vl]
-; CHECK-NEXT: fcvtzs z21.d, p5/m, z17.h
-; CHECK-NEXT: fcmge p2.h, p0/z, z13.h, z27.h
-; CHECK-NEXT: mov z27.d, #0x8000000000000000
-; CHECK-NEXT: fcvtzs z10.d, p7/m, z0.h
-; CHECK-NEXT: fcmgt p5.h, p0/z, z0.h, z26.h
-; CHECK-NEXT: mov z12.d, p1/m, #0 // =0x0
-; CHECK-NEXT: fcmgt p6.h, p0/z, z20.h, z26.h
-; CHECK-NEXT: fcvtzs z27.d, p2/m, z13.h
-; CHECK-NEXT: fcmgt p3.h, p0/z, z19.h, z26.h
-; CHECK-NEXT: str z12, [x8, #8, mul vl]
-; CHECK-NEXT: fcmuo p1.h, p0/z, z0.h, z0.h
-; CHECK-NEXT: sel z0.d, p5, z29.d, z10.d
-; CHECK-NEXT: sel z10.d, p6, z29.d, z23.d
-; CHECK-NEXT: fcmuo p2.h, p0/z, z20.h, z20.h
-; CHECK-NEXT: fcmuo p7.h, p0/z, z19.h, z19.h
-; CHECK-NEXT: fcmgt p5.h, p0/z, z2.h, z26.h
-; CHECK-NEXT: mov z0.d, p1/m, #0 // =0x0
-; CHECK-NEXT: fcmuo p6.h, p0/z, z2.h, z2.h
-; CHECK-NEXT: sel z2.d, p3, z29.d, z22.d
-; CHECK-NEXT: mov z10.d, p2/m, #0 // =0x0
-; CHECK-NEXT: str z0, [x8, #15, mul vl]
-; CHECK-NEXT: fcmgt p3.h, p0/z, z13.h, z26.h
-; CHECK-NEXT: mov z2.d, p7/m, #0 // =0x0
-; CHECK-NEXT: sel z0.d, p5, z29.d, z11.d
-; CHECK-NEXT: str z10, [x8, #14, mul vl]
-; CHECK-NEXT: fcmgt p1.h, p0/z, z17.h, z26.h
-; CHECK-NEXT: fcmgt p2.h, p0/z, z16.h, z26.h
-; CHECK-NEXT: str z2, [x8, #13, mul vl]
-; CHECK-NEXT: mov z0.d, p6/m, #0 // =0x0
-; CHECK-NEXT: fcmuo p7.h, p0/z, z13.h, z13.h
-; CHECK-NEXT: mov z27.d, p3/m, z29.d
-; CHECK-NEXT: fcmuo p5.h, p0/z, z17.h, z17.h
-; CHECK-NEXT: str z0, [x8, #9, mul vl]
-; CHECK-NEXT: sel z2.d, p1, z29.d, z21.d
-; CHECK-NEXT: fcmuo p3.h, p0/z, z16.h, z16.h
-; CHECK-NEXT: sel z10.d, p2, z29.d, z15.d
-; CHECK-NEXT: mov z27.d, p7/m, #0 // =0x0
-; CHECK-NEXT: fcmgt p1.h, p0/z, z7.h, z26.h
-; CHECK-NEXT: mov z2.d, p5/m, #0 // =0x0
-; CHECK-NEXT: fcmgt p2.h, p0/z, z24.h, z26.h
-; CHECK-NEXT: str z27, [x8, #12, mul vl]
-; CHECK-NEXT: mov z10.d, p3/m, #0 // =0x0
-; CHECK-NEXT: fcmuo p3.h, p0/z, z28.h, z28.h
-; CHECK-NEXT: str z2, [x8, #11, mul vl]
-; CHECK-NEXT: fcmgt p7.h, p0/z, z31.h, z26.h
-; CHECK-NEXT: mov z4.d, p1/m, z29.d
-; CHECK-NEXT: str z10, [x8, #10, mul vl]
-; CHECK-NEXT: mov z5.d, p2/m, z29.d
-; CHECK-NEXT: fcmgt p5.h, p0/z, z25.h, z26.h
-; CHECK-NEXT: fcmgt p1.h, p0/z, z1.h, z26.h
-; CHECK-NEXT: mov z30.d, p3/m, #0 // =0x0
-; CHECK-NEXT: sel z2.d, p7, z29.d, z8.d
-; CHECK-NEXT: fcmuo p6.h, p0/z, z31.h, z31.h
-; CHECK-NEXT: fcmuo p2.h, p0/z, z25.h, z25.h
-; CHECK-NEXT: sel z0.d, p5, z29.d, z6.d
-; CHECK-NEXT: fcmuo p3.h, p0/z, z24.h, z24.h
-; CHECK-NEXT: fcmuo p4.h, p0/z, z7.h, z7.h
-; CHECK-NEXT: mov z2.d, p6/m, #0 // =0x0
-; CHECK-NEXT: fcmuo p0.h, p0/z, z1.h, z1.h
-; CHECK-NEXT: ldr z1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: str z30, [x8, #5, mul vl]
-; CHECK-NEXT: mov z0.d, p2/m, #0 // =0x0
-; CHECK-NEXT: str z3, [x8, #4, mul vl]
-; CHECK-NEXT: mov z5.d, p3/m, #0 // =0x0
-; CHECK-NEXT: mov z4.d, p4/m, #0 // =0x0
-; CHECK-NEXT: str z2, [x8, #6, mul vl]
-; CHECK-NEXT: mov z1.d, p1/m, z29.d
-; CHECK-NEXT: str z0, [x8, #3, mul vl]
-; CHECK-NEXT: str z5, [x8, #2, mul vl]
-; CHECK-NEXT: mov z1.d, p0/m, #0 // =0x0
-; CHECK-NEXT: str z4, [x8, #1, mul vl]
-; CHECK-NEXT: str z1, [x8]
-; CHECK-NEXT: addvl sp, sp, #1
-; CHECK-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p10, [sp, #1, mul vl] // 2-byte Reload
-; CHECK-NEXT: ldr p9, [sp, #2, mul vl] // 2-byte Reload
-; CHECK-NEXT: ldr p8, [sp, #3, mul vl] // 2-byte Reload
-; CHECK-NEXT: ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT: addvl sp, sp, #17
-; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT: ret
- %a = call <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv32f16(<vscale x 32 x half> %x)
- ret <vscale x 32 x i64> %a
-}
-declare <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv32f16(<vscale x 32 x half>)
-
-define <vscale x 1 x i64> @llrint_v1i64_v1f32(<vscale x 1 x float> %x) {
-; CHECK-LABEL: llrint_v1i64_v1f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: frint64x z0.s, p0/z, z0.s
-; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.s
-; CHECK-NEXT: ret
- %a = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f32(<vscale x 1 x float> %x)
- ret <vscale x 1 x i64> %a
-}
-declare <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f32(<vscale x 1 x float>)
-
-define <vscale x 2 x i64> @llrint_v2i64_v2f32(<vscale x 2 x float> %x) {
-; CHECK-LABEL: llrint_v2i64_v2f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: frint64x z0.s, p0/z, z0.s
-; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.s
-; CHECK-NEXT: ret
- %a = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f32(<vscale x 2 x float> %x)
- ret <vscale x 2 x i64> %a
-}
-declare <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f32(<vscale x 2 x float>)
-
-define <vscale x 4 x i64> @llrint_v4i64_v4f32(<vscale x 4 x float> %x) {
-; CHECK-LABEL: llrint_v4i64_v4f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: uunpklo z1.d, z0.s
-; CHECK-NEXT: uunpkhi z0.d, z0.s
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: frint64x z1.s, p0/z, z1.s
-; CHECK-NEXT: frint64x z2.s, p0/z, z0.s
-; CHECK-NEXT: movprfx z0, z1
-; CHECK-NEXT: fcvtzs z0.d, p0/m, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fcvtzs z1.d, p0/m, z2.s
-; CHECK-NEXT: ret
- %a = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f32(<vscale x 4 x float> %x)
- ret <vscale x 4 x i64> %a
-}
-declare <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f32(<vscale x 4 x float>)
-
-define <vscale x 8 x i64> @llrint_v8i64_v8f32(<vscale x 8 x float> %x) {
-; CHECK-LABEL: llrint_v8i64_v8f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: uunpklo z2.d, z0.s
-; CHECK-NEXT: uunpkhi z0.d, z0.s
-; CHECK-NEXT: uunpklo z3.d, z1.s
-; CHECK-NEXT: uunpkhi z1.d, z1.s
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: frint64x z2.s, p0/z, z2.s
-; CHECK-NEXT: frint64x z4.s, p0/z, z0.s
-; CHECK-NEXT: frint64x z3.s, p0/z, z3.s
-; CHECK-NEXT: frint64x z5.s, p0/z, z1.s
-; CHECK-NEXT: movprfx z0, z2
-; CHECK-NEXT: fcvtzs z0.d, p0/m, z2.s
-; CHECK-NEXT: movprfx z1, z4
-; CHECK-NEXT: fcvtzs z1.d, p0/m, z4.s
-; CHECK-NEXT: movprfx z2, z3
-; CHECK-NEXT: fcvtzs z2.d, p0/m, z3.s
-; CHECK-NEXT: movprfx z3, z5
-; CHECK-NEXT: fcvtzs z3.d, p0/m, z5.s
-; CHECK-NEXT: ret
- %a = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f32(<vscale x 8 x float> %x)
- ret <vscale x 8 x i64> %a
-}
-declare <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f32(<vscale x 8 x float>)
-
-define <vscale x 16 x i64> @llrint_v16i64_v16f32(<vscale x 16 x float> %x) {
-; CHECK-LABEL: llrint_v16i64_v16f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: uunpklo z4.d, z1.s
-; CHECK-NEXT: uunpklo z5.d, z0.s
-; CHECK-NEXT: uunpkhi z0.d, z0.s
-; CHECK-NEXT: uunpkhi z1.d, z1.s
-; CHECK-NEXT: uunpklo z6.d, z2.s
-; CHECK-NEXT: uunpkhi z7.d, z2.s
-; CHECK-NEXT: uunpklo z24.d, z3.s
-; CHECK-NEXT: uunpkhi z25.d, z3.s
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: frint64x z2.s, p0/z, z5.s
-; CHECK-NEXT: frint64x z3.s, p0/z, z0.s
-; CHECK-NEXT: frint64x z4.s, p0/z, z4.s
-; CHECK-NEXT: frint64x z5.s, p0/z, z1.s
-; CHECK-NEXT: movprfx z0, z2
-; CHECK-NEXT: fcvtzs z0.d, p0/m, z2.s
-; CHECK-NEXT: movprfx z1, z3
-; CHECK-NEXT: fcvtzs z1.d, p0/m, z3.s
-; CHECK-NEXT: movprfx z2, z4
-; CHECK-NEXT: fcvtzs z2.d, p0/m, z4.s
-; CHECK-NEXT: movprfx z3, z5
-; CHECK-NEXT: fcvtzs z3.d, p0/m, z5.s
-; CHECK-NEXT: frint64x z4.s, p0/z, z6.s
-; CHECK-NEXT: frint64x z5.s, p0/z, z7.s
-; CHECK-NEXT: frint64x z6.s, p0/z, z24.s
-; CHECK-NEXT: frint64x z7.s, p0/z, z25.s
-; CHECK-NEXT: fcvtzs z4.d, p0/m, z4.s
-; CHECK-NEXT: fcvtzs z5.d, p0/m, z5.s
-; CHECK-NEXT: fcvtzs z6.d, p0/m, z6.s
-; CHECK-NEXT: fcvtzs z7.d, p0/m, z7.s
-; CHECK-NEXT: ret
- %a = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f32(<vscale x 16 x float> %x)
- ret <vscale x 16 x i64> %a
-}
-declare <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f32(<vscale x 16 x float>)
-
-define <vscale x 32 x i64> @llrint_v32i64_v32f32(<vscale x 32 x float> %x) {
-; CHECK-LABEL: llrint_v32i64_v32f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: uunpkhi z25.d, z7.s
-; CHECK-NEXT: uunpkhi z27.d, z6.s
-; CHECK-NEXT: uunpklo z6.d, z6.s
-; CHECK-NEXT: uunpklo z29.d, z3.s
-; CHECK-NEXT: uunpkhi z30.d, z5.s
-; CHECK-NEXT: uunpklo z5.d, z5.s
-; CHECK-NEXT: uunpkhi z31.d, z4.s
-; CHECK-NEXT: uunpklo z4.d, z4.s
-; CHECK-NEXT: uunpkhi z3.d, z3.s
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: uunpklo z24.d, z0.s
-; CHECK-NEXT: uunpkhi z0.d, z0.s
-; CHECK-NEXT: uunpklo z7.d, z7.s
-; CHECK-NEXT: uunpklo z26.d, z1.s
-; CHECK-NEXT: uunpkhi z1.d, z1.s
-; CHECK-NEXT: frint64x z25.s, p0/z, z25.s
-; CHECK-NEXT: frint64x z6.s, p0/z, z6.s
-; CHECK-NEXT: frint64x z5.s, p0/z, z5.s
-; CHECK-NEXT: frint64x z4.s, p0/z, z4.s
-; CHECK-NEXT: frint64x z3.s, p0/z, z3.s
-; CHECK-NEXT: frint64x z27.s, p0/z, z27.s
-; CHECK-NEXT: uunpklo z28.d, z2.s
-; CHECK-NEXT: frint64x z30.s, p0/z, z30.s
-; CHECK-NEXT: frint64x z0.s, p0/z, z0.s
-; CHECK-NEXT: uunpkhi z2.d, z2.s
-; CHECK-NEXT: fcvtzs z25.d, p0/m, z25.s
-; CHECK-NEXT: frint64x z7.s, p0/z, z7.s
-; CHECK-NEXT: fcvtzs z6.d, p0/m, z6.s
-; CHECK-NEXT: fcvtzs z5.d, p0/m, z5.s
-; CHECK-NEXT: frint64x z1.s, p0/z, z1.s
-; CHECK-NEXT: fcvtzs z4.d, p0/m, z4.s
-; CHECK-NEXT: fcvtzs z3.d, p0/m, z3.s
-; CHECK-NEXT: fcvtzs z7.d, p0/m, z7.s
-; CHECK-NEXT: fcvtzs z27.d, p0/m, z27.s
-; CHECK-NEXT: fcvtzs z30.d, p0/m, z30.s
-; CHECK-NEXT: fcvtzs z1.d, p0/m, z1.s
-; CHECK-NEXT: str z25, [x8, #15, mul vl]
-; CHECK-NEXT: frint64x z25.s, p0/z, z31.s
-; CHECK-NEXT: frint64x z2.s, p0/z, z2.s
-; CHECK-NEXT: str z6, [x8, #12, mul vl]
-; CHECK-NEXT: frint64x z6.s, p0/z, z29.s
-; CHECK-NEXT: fcvtzs z25.d, p0/m, z25.s
-; CHECK-NEXT: str z5, [x8, #10, mul vl]
-; CHECK-NEXT: frint64x z5.s, p0/z, z28.s
-; CHECK-NEXT: fcvtzs z6.d, p0/m, z6.s
-; CHECK-NEXT: str z4, [x8, #8, mul vl]
-; CHECK-NEXT: frint64x z4.s, p0/z, z26.s
-; CHECK-NEXT: fcvtzs z2.d, p0/m, z2.s
-; CHECK-NEXT: str z3, [x8, #7, mul vl]
-; CHECK-NEXT: frint64x z3.s, p0/z, z24.s
-; CHECK-NEXT: fcvtzs z5.d, p0/m, z5.s
-; CHECK-NEXT: fcvtzs z4.d, p0/m, z4.s
-; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.s
-; CHECK-NEXT: fcvtzs z3.d, p0/m, z3.s
-; CHECK-NEXT: str z7, [x8, #14, mul vl]
-; CHECK-NEXT: str z27, [x8, #13, mul vl]
-; CHECK-NEXT: str z30, [x8, #11, mul vl]
-; CHECK-NEXT: str z25, [x8, #9, mul vl]
-; CHECK-NEXT: str z6, [x8, #6, mul vl]
-; CHECK-NEXT: str z2, [x8, #5, mul vl]
-; CHECK-NEXT: str z5, [x8, #4, mul vl]
-; CHECK-NEXT: str z1, [x8, #3, mul vl]
-; CHECK-NEXT: str z4, [x8, #2, mul vl]
-; CHECK-NEXT: str z0, [x8, #1, mul vl]
-; CHECK-NEXT: str z3, [x8]
-; CHECK-NEXT: ret
- %a = call <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv32f32(<vscale x 32 x float> %x)
- ret <vscale x 32 x i64> %a
-}
-declare <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv32f32(<vscale x 32 x float>)
-
-define <vscale x 1 x i64> @llrint_v1i64_v1f64(<vscale x 1 x double> %x) {
-; CHECK-LABEL: llrint_v1i64_v1f64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
-; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
-; CHECK-NEXT: ret
- %a = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f64(<vscale x 1 x double> %x)
- ret <vscale x 1 x i64> %a
-}
-declare <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f64(<vscale x 1 x double>)
-
-define <vscale x 2 x i64> @llrint_v2i64_v2f64(<vscale x 2 x double> %x) {
-; CHECK-LABEL: llrint_v2i64_v2f64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
-; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
-; CHECK-NEXT: ret
- %a = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f64(<vscale x 2 x double> %x)
- ret <vscale x 2 x i64> %a
-}
-declare <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f64(<vscale x 2 x double>)
-
-define <vscale x 4 x i64> @llrint_v4i64_v4f64(<vscale x 4 x double> %x) {
-; CHECK-LABEL: llrint_v4i64_v4f64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
-; CHECK-NEXT: frint64x z1.d, p0/z, z1.d
-; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
-; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
-; CHECK-NEXT: ret
- %a = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f64(<vscale x 4 x double> %x)
- ret <vscale x 4 x i64> %a
-}
-declare <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f64(<vscale x 4 x double>)
-
-define <vscale x 8 x i64> @llrint_v8i64_v8f64(<vscale x 8 x double> %x) {
-; CHECK-LABEL: llrint_v8i64_v8f64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
-; CHECK-NEXT: frint64x z1.d, p0/z, z1.d
-; CHECK-NEXT: frint64x z2.d, p0/z, z2.d
-; CHECK-NEXT: frint64x z3.d, p0/z, z3.d
-; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
-; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
-; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
-; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
-; CHECK-NEXT: ret
- %a = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f64(<vscale x 8 x double> %x)
- ret <vscale x 8 x i64> %a
-}
-declare <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f64(<vscale x 8 x double>)
-
-define <vscale x 16 x i64> @llrint_v16f64(<vscale x 16 x double> %x) {
-; CHECK-LABEL: llrint_v16f64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
-; CHECK-NEXT: frint64x z1.d, p0/z, z1.d
-; CHECK-NEXT: frint64x z2.d, p0/z, z2.d
-; CHECK-NEXT: frint64x z3.d, p0/z, z3.d
-; CHECK-NEXT: frint64x z4.d, p0/z, z4.d
-; CHECK-NEXT: frint64x z5.d, p0/z, z5.d
-; CHECK-NEXT: frint64x z6.d, p0/z, z6.d
-; CHECK-NEXT: frint64x z7.d, p0/z, z7.d
-; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
-; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
-; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
-; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
-; CHECK-NEXT: fcvtzs z4.d, p0/z, z4.d
-; CHECK-NEXT: fcvtzs z5.d, p0/z, z5.d
-; CHECK-NEXT: fcvtzs z6.d, p0/z, z6.d
-; CHECK-NEXT: fcvtzs z7.d, p0/z, z7.d
-; CHECK-NEXT: ret
- %a = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f64(<vscale x 16 x double> %x)
- ret <vscale x 16 x i64> %a
-}
-declare <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f64(<vscale x 16 x double>)
-
-define <vscale x 32 x i64> @llrint_v32f64(<vscale x 32 x double> %x) {
-; CHECK-LABEL: llrint_v32f64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ldr z2, [x0, #15, mul vl]
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: ldr z5, [x0, #12, mul vl]
-; CHECK-NEXT: ldr z3, [x0, #14, mul vl]
-; CHECK-NEXT: ldr z24, [x0, #9, mul vl]
-; CHECK-NEXT: ldr z1, [x0, #7, mul vl]
-; CHECK-NEXT: ldr z0, [x0, #6, mul vl]
-; CHECK-NEXT: ldr z4, [x0, #13, mul vl]
-; CHECK-NEXT: ldr z6, [x0, #11, mul vl]
-; CHECK-NEXT: frint64x z2.d, p0/z, z2.d
-; CHECK-NEXT: ldr z7, [x0, #10, mul vl]
-; CHECK-NEXT: ldr z25, [x0, #8, mul vl]
-; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
-; CHECK-NEXT: frint64x z3.d, p0/z, z3.d
-; CHECK-NEXT: frint64x z1.d, p0/z, z1.d
-; CHECK-NEXT: ldr z26, [x0, #5, mul vl]
-; CHECK-NEXT: ldr z27, [x0, #4, mul vl]
-; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
-; CHECK-NEXT: ldr z28, [x0, #3, mul vl]
-; CHECK-NEXT: ldr z29, [x0, #2, mul vl]
-; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
-; CHECK-NEXT: ldr z30, [x0, #1, mul vl]
-; CHECK-NEXT: ldr z31, [x0]
-; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
-; CHECK-NEXT: str z2, [x8, #15, mul vl]
-; CHECK-NEXT: frint64x z2.d, p0/z, z5.d
-; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
-; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
-; CHECK-NEXT: frint64x z4.d, p0/z, z4.d
-; CHECK-NEXT: str z3, [x8, #14, mul vl]
-; CHECK-NEXT: str z2, [x8, #12, mul vl]
-; CHECK-NEXT: frint64x z2.d, p0/z, z24.d
-; CHECK-NEXT: frint64x z3.d, p0/z, z6.d
-; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
-; CHECK-NEXT: str z1, [x8, #7, mul vl]
-; CHECK-NEXT: frint64x z1.d, p0/z, z27.d
-; CHECK-NEXT: str z2, [x8, #9, mul vl]
-; CHECK-NEXT: frint64x z2.d, p0/z, z26.d
-; CHECK-NEXT: fcvtzs z4.d, p0/z, z4.d
-; CHECK-NEXT: str z0, [x8, #6, mul vl]
-; CHECK-NEXT: frint64x z0.d, p0/z, z28.d
-; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
-; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
-; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
-; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
-; CHECK-NEXT: str z4, [x8, #13, mul vl]
-; CHECK-NEXT: frint64x z4.d, p0/z, z7.d
-; CHECK-NEXT: str z3, [x8, #11, mul vl]
-; CHECK-NEXT: frint64x z3.d, p0/z, z25.d
-; CHECK-NEXT: fcvtzs z4.d, p0/z, z4.d
-; CHECK-NEXT: str z2, [x8, #5, mul vl]
-; CHECK-NEXT: frint64x z2.d, p0/z, z29.d
-; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
-; CHECK-NEXT: str z1, [x8, #4, mul vl]
-; CHECK-NEXT: frint64x z1.d, p0/z, z30.d
-; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
-; CHECK-NEXT: str z0, [x8, #3, mul vl]
-; CHECK-NEXT: frint64x z0.d, p0/z, z31.d
-; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
-; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
-; CHECK-NEXT: str z4, [x8, #10, mul vl]
-; CHECK-NEXT: str z3, [x8, #8, mul vl]
-; CHECK-NEXT: str z2, [x8, #2, mul vl]
-; CHECK-NEXT: str z1, [x8, #1, mul vl]
-; CHECK-NEXT: str z0, [x8]
-; CHECK-NEXT: ret
- %a = call <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv16f64(<vscale x 32 x double> %x)
- ret <vscale x 32 x i64> %a
-}
-declare <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv32f64(<vscale x 32 x double>)
diff --git a/llvm/test/CodeGen/AArch64/sve2p2-lrint.ll b/llvm/test/CodeGen/AArch64/sve2p2-lrint.ll
index 6432abe6977cb..5456b09e9992f 100644
--- a/llvm/test/CodeGen/AArch64/sve2p2-lrint.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p2-lrint.ll
@@ -1,262 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64 -mattr=+sve2p2 < %s | FileCheck %s
-define <vscale x 2 x i32> @lrint_v2_i32_f16(<vscale x 2 x half> %x) {
-; CHECK-LABEL: lrint_v2_i32_f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov z1.h, #-1025 // =0xfffffffffffffbff
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: mov w8, #31743 // =0x7bff
-; CHECK-NEXT: mov z2.d, #0x8000000000000000
-; CHECK-NEXT: frintx z0.h, p0/z, z0.h
-; CHECK-NEXT: fcmge p1.h, p0/z, z0.h, z1.h
-; CHECK-NEXT: mov z1.h, w8
-; CHECK-NEXT: fcvtzs z2.d, p1/m, z0.h
-; CHECK-NEXT: fcmgt p1.h, p0/z, z0.h, z1.h
-; CHECK-NEXT: mov z1.d, #0x7fffffffffffffff
-; CHECK-NEXT: fcmuo p0.h, p0/z, z0.h, z0.h
-; CHECK-NEXT: sel z0.d, p1, z1.d, z2.d
-; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0
-; CHECK-NEXT: ret
- %a = call <vscale x 2 x i32> @llvm.lrint.nxv4i32.nxv4f16(<vscale x 2 x half> %x)
- ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @lrint_v4_i32_f16(<vscale x 4 x half> %x) {
-; CHECK-LABEL: lrint_v4_i32_f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov z1.h, #-1025 // =0xfffffffffffffbff
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: mov w8, #31743 // =0x7bff
-; CHECK-NEXT: mov z2.s, #0x80000000
-; CHECK-NEXT: frintx z0.h, p0/z, z0.h
-; CHECK-NEXT: fcmge p1.h, p0/z, z0.h, z1.h
-; CHECK-NEXT: mov z1.h, w8
-; CHECK-NEXT: fcvtzs z2.s, p1/m, z0.h
-; CHECK-NEXT: fcmgt p1.h, p0/z, z0.h, z1.h
-; CHECK-NEXT: mov z1.s, #0x7fffffff
-; CHECK-NEXT: fcmuo p0.h, p0/z, z0.h, z0.h
-; CHECK-NEXT: sel z0.s, p1, z1.s, z2.s
-; CHECK-NEXT: mov z0.s, p0/m, #0 // =0x0
-; CHECK-NEXT: ret
- %a = call <vscale x 4 x i32> @llvm.lrint.nxv4i32.nxv4f16(<vscale x 4 x half> %x)
- ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @lrint_v8_i32_f16(<vscale x 8 x half> %x) {
-; CHECK-LABEL: lrint_v8_i32_f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: uunpklo z1.s, z0.h
-; CHECK-NEXT: uunpkhi z0.s, z0.h
-; CHECK-NEXT: mov w8, #31743 // =0x7bff
-; CHECK-NEXT: mov z2.h, #-1025 // =0xfffffffffffffbff
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: mov z3.s, #0x80000000
-; CHECK-NEXT: mov z4.s, #0x80000000
-; CHECK-NEXT: mov z5.s, #0x7fffffff
-; CHECK-NEXT: frintx z1.h, p0/z, z1.h
-; CHECK-NEXT: frintx z0.h, p0/z, z0.h
-; CHECK-NEXT: fcmge p1.h, p0/z, z1.h, z2.h
-; CHECK-NEXT: fcmge p2.h, p0/z, z0.h, z2.h
-; CHECK-NEXT: mov z2.h, w8
-; CHECK-NEXT: fcmuo p3.h, p0/z, z1.h, z1.h
-; CHECK-NEXT: fcvtzs z4.s, p1/m, z1.h
-; CHECK-NEXT: fcmgt p1.h, p0/z, z1.h, z2.h
-; CHECK-NEXT: fcvtzs z3.s, p2/m, z0.h
-; CHECK-NEXT: fcmgt p2.h, p0/z, z0.h, z2.h
-; CHECK-NEXT: fcmuo p0.h, p0/z, z0.h, z0.h
-; CHECK-NEXT: sel z0.s, p1, z5.s, z4.s
-; CHECK-NEXT: sel z1.s, p2, z5.s, z3.s
-; CHECK-NEXT: mov z0.s, p3/m, #0 // =0x0
-; CHECK-NEXT: mov z1.s, p0/m, #0 // =0x0
-; CHECK-NEXT: ret
- %a = call <vscale x 8 x i32> @llvm.lrint.nxv8i32.nxv8f16(<vscale x 8 x half> %x)
- ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @lrint_v16_i32_f16(<vscale x 16 x half> %x) {
-; CHECK-LABEL: lrint_v16_i32_f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
-; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: uunpklo z2.s, z0.h
-; CHECK-NEXT: mov z4.h, #-1025 // =0xfffffffffffffbff
-; CHECK-NEXT: mov w8, #31743 // =0x7bff
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: uunpkhi z0.s, z0.h
-; CHECK-NEXT: uunpklo z3.s, z1.h
-; CHECK-NEXT: mov z5.s, #0x80000000
-; CHECK-NEXT: mov z24.h, w8
-; CHECK-NEXT: uunpkhi z1.s, z1.h
-; CHECK-NEXT: mov z6.s, #0x80000000
-; CHECK-NEXT: mov z7.s, #0x80000000
-; CHECK-NEXT: mov z26.s, #0x7fffffff
-; CHECK-NEXT: frintx z2.h, p0/z, z2.h
-; CHECK-NEXT: fcmge p1.h, p0/z, z2.h, z4.h
-; CHECK-NEXT: frintx z25.h, p0/z, z0.h
-; CHECK-NEXT: frintx z3.h, p0/z, z3.h
-; CHECK-NEXT: fcmge p2.h, p0/z, z25.h, z4.h
-; CHECK-NEXT: frintx z1.h, p0/z, z1.h
-; CHECK-NEXT: fcmge p3.h, p0/z, z3.h, z4.h
-; CHECK-NEXT: fcmgt p4.h, p0/z, z2.h, z24.h
-; CHECK-NEXT: fcvtzs z5.s, p1/m, z2.h
-; CHECK-NEXT: fcmge p1.h, p0/z, z1.h, z4.h
-; CHECK-NEXT: mov z4.s, #0x80000000
-; CHECK-NEXT: fcvtzs z6.s, p2/m, z25.h
-; CHECK-NEXT: fcvtzs z7.s, p3/m, z3.h
-; CHECK-NEXT: fcmgt p3.h, p0/z, z25.h, z24.h
-; CHECK-NEXT: sel z0.s, p4, z26.s, z5.s
-; CHECK-NEXT: fcmgt p4.h, p0/z, z3.h, z24.h
-; CHECK-NEXT: fcvtzs z4.s, p1/m, z1.h
-; CHECK-NEXT: fcmgt p1.h, p0/z, z1.h, z24.h
-; CHECK-NEXT: fcmuo p5.h, p0/z, z25.h, z25.h
-; CHECK-NEXT: fcmuo p6.h, p0/z, z3.h, z3.h
-; CHECK-NEXT: fcmuo p2.h, p0/z, z2.h, z2.h
-; CHECK-NEXT: sel z2.s, p4, z26.s, z7.s
-; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT: sel z3.s, p1, z26.s, z4.s
-; CHECK-NEXT: fcmuo p0.h, p0/z, z1.h, z1.h
-; CHECK-NEXT: sel z1.s, p3, z26.s, z6.s
-; CHECK-NEXT: mov z2.s, p6/m, #0 // =0x0
-; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT: mov z0.s, p2/m, #0 // =0x0
-; CHECK-NEXT: mov z1.s, p5/m, #0 // =0x0
-; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT: mov z3.s, p0/m, #0 // =0x0
-; CHECK-NEXT: addvl sp, sp, #1
-; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT: ret
- %a = call <vscale x 16 x i32> @llvm.lrint.nxv16i32.nxv16f16(<vscale x 16 x half> %x)
- ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 32 x i32> @lrint_v32_i32_f16(<vscale x 32 x half> %x) {
-; CHECK-LABEL: lrint_v32_i32_f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-3
-; CHECK-NEXT: str p10, [sp, #1, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p9, [sp, #2, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p8, [sp, #3, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Spill
-; CHECK-NEXT: str z9, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z8, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
-; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
-; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
-; CHECK-NEXT: uunpklo z24.s, z1.h
-; CHECK-NEXT: uunpkhi z1.s, z1.h
-; CHECK-NEXT: mov w8, #31743 // =0x7bff
-; CHECK-NEXT: mov z4.h, #-1025 // =0xfffffffffffffbff
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: uunpklo z6.s, z0.h
-; CHECK-NEXT: uunpkhi z7.s, z0.h
-; CHECK-NEXT: uunpklo z25.s, z2.h
-; CHECK-NEXT: uunpkhi z2.s, z2.h
-; CHECK-NEXT: mov z27.s, #0x80000000
-; CHECK-NEXT: mov z0.s, #0x80000000
-; CHECK-NEXT: mov z26.s, #0x80000000
-; CHECK-NEXT: frintx z1.h, p0/z, z1.h
-; CHECK-NEXT: frintx z24.h, p0/z, z24.h
-; CHECK-NEXT: mov z5.s, #0x80000000
-; CHECK-NEXT: frintx z6.h, p0/z, z6.h
-; CHECK-NEXT: mov z28.s, #0x80000000
-; CHECK-NEXT: mov z29.h, w8
-; CHECK-NEXT: frintx z7.h, p0/z, z7.h
-; CHECK-NEXT: frintx z25.h, p0/z, z25.h
-; CHECK-NEXT: frintx z30.h, p0/z, z2.h
-; CHECK-NEXT: fcmge p4.h, p0/z, z1.h, z4.h
-; CHECK-NEXT: uunpklo z2.s, z3.h
-; CHECK-NEXT: mov z31.s, #0x80000000
-; CHECK-NEXT: mov z9.s, #0x80000000
-; CHECK-NEXT: fcmge p1.h, p0/z, z6.h, z4.h
-; CHECK-NEXT: fcmge p3.h, p0/z, z24.h, z4.h
-; CHECK-NEXT: fcmge p2.h, p0/z, z7.h, z4.h
-; CHECK-NEXT: fcvtzs z27.s, p4/m, z1.h
-; CHECK-NEXT: fcmge p5.h, p0/z, z25.h, z4.h
-; CHECK-NEXT: fcvtzs z0.s, p1/m, z6.h
-; CHECK-NEXT: fcmge p4.h, p0/z, z30.h, z4.h
-; CHECK-NEXT: fcvtzs z26.s, p3/m, z24.h
-; CHECK-NEXT: fcvtzs z5.s, p2/m, z7.h
-; CHECK-NEXT: fcmgt p3.h, p0/z, z6.h, z29.h
-; CHECK-NEXT: fcvtzs z28.s, p5/m, z25.h
-; CHECK-NEXT: fcmuo p1.h, p0/z, z6.h, z6.h
-; CHECK-NEXT: frintx z6.h, p0/z, z2.h
-; CHECK-NEXT: uunpkhi z2.s, z3.h
-; CHECK-NEXT: fcvtzs z31.s, p4/m, z30.h
-; CHECK-NEXT: fcmgt p5.h, p0/z, z7.h, z29.h
-; CHECK-NEXT: fcmuo p2.h, p0/z, z7.h, z7.h
-; CHECK-NEXT: mov z7.s, #0x80000000
-; CHECK-NEXT: frintx z8.h, p0/z, z2.h
-; CHECK-NEXT: fcmge p4.h, p0/z, z6.h, z4.h
-; CHECK-NEXT: fcmgt p6.h, p0/z, z24.h, z29.h
-; CHECK-NEXT: fcmuo p7.h, p0/z, z24.h, z24.h
-; CHECK-NEXT: mov z24.s, #0x7fffffff
-; CHECK-NEXT: fcmgt p8.h, p0/z, z1.h, z29.h
-; CHECK-NEXT: fcvtzs z7.s, p4/m, z6.h
-; CHECK-NEXT: fcmge p4.h, p0/z, z8.h, z4.h
-; CHECK-NEXT: fcmgt p9.h, p0/z, z25.h, z29.h
-; CHECK-NEXT: sel z2.s, p6, z24.s, z26.s
-; CHECK-NEXT: mov z0.s, p3/m, z24.s
-; CHECK-NEXT: sel z3.s, p8, z24.s, z27.s
-; CHECK-NEXT: fcmuo p10.h, p0/z, z1.h, z1.h
-; CHECK-NEXT: sel z1.s, p5, z24.s, z5.s
-; CHECK-NEXT: fcvtzs z9.s, p4/m, z8.h
-; CHECK-NEXT: mov z2.s, p7/m, #0 // =0x0
-; CHECK-NEXT: ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT: sel z4.s, p9, z24.s, z28.s
-; CHECK-NEXT: mov z0.s, p1/m, #0 // =0x0
-; CHECK-NEXT: mov z1.s, p2/m, #0 // =0x0
-; CHECK-NEXT: mov z3.s, p10/m, #0 // =0x0
-; CHECK-NEXT: ldr p10, [sp, #1, mul vl] // 2-byte Reload
-; CHECK-NEXT: fcmgt p5.h, p0/z, z30.h, z29.h
-; CHECK-NEXT: fcmgt p6.h, p0/z, z6.h, z29.h
-; CHECK-NEXT: fcmgt p4.h, p0/z, z8.h, z29.h
-; CHECK-NEXT: fcmuo p8.h, p0/z, z30.h, z30.h
-; CHECK-NEXT: fcmuo p9.h, p0/z, z6.h, z6.h
-; CHECK-NEXT: sel z5.s, p5, z24.s, z31.s
-; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT: sel z6.s, p6, z24.s, z7.s
-; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT: fcmuo p3.h, p0/z, z25.h, z25.h
-; CHECK-NEXT: sel z7.s, p4, z24.s, z9.s
-; CHECK-NEXT: ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT: mov z5.s, p8/m, #0 // =0x0
-; CHECK-NEXT: fcmuo p0.h, p0/z, z8.h, z8.h
-; CHECK-NEXT: ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p8, [sp, #3, mul vl] // 2-byte Reload
-; CHECK-NEXT: mov z6.s, p9/m, #0 // =0x0
-; CHECK-NEXT: ldr p9, [sp, #2, mul vl] // 2-byte Reload
-; CHECK-NEXT: mov z4.s, p3/m, #0 // =0x0
-; CHECK-NEXT: mov z7.s, p0/m, #0 // =0x0
-; CHECK-NEXT: addvl sp, sp, #3
-; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT: ret
- %a = call <vscale x 32 x i32> @llvm.lrint.nxv32i32.nxv32f16(<vscale x 32 x half> %x)
- ret <vscale x 32 x i32> %a
-}
-
-define <vscale x 2 x i32> @lrint_v2_i32_f32(<vscale x 2 x float> %x) {
-; CHECK-LABEL: lrint_v2_i32_f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: frint64x z0.s, p0/z, z0.s
-; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.s
-; CHECK-NEXT: ret
- %a = call <vscale x 2 x i32> @llvm.lrint.nxv4i32.nxv4f32(<vscale x 2 x float> %x)
- ret <vscale x 2 x i32> %a
-}
-
define <vscale x 4 x i32> @lrint_v4_i32_f32(<vscale x 4 x float> %x) {
; CHECK-LABEL: lrint_v4_i32_f32:
; CHECK: // %bb.0:
@@ -268,61 +12,6 @@ define <vscale x 4 x i32> @lrint_v4_i32_f32(<vscale x 4 x float> %x) {
ret <vscale x 4 x i32> %a
}
-define <vscale x 8 x i32> @lrint_v8_i32_f32(<vscale x 8 x float> %x) {
-; CHECK-LABEL: lrint_v8_i32_f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: frint32x z0.s, p0/z, z0.s
-; CHECK-NEXT: frint32x z1.s, p0/z, z1.s
-; CHECK-NEXT: fcvtzs z0.s, p0/z, z0.s
-; CHECK-NEXT: fcvtzs z1.s, p0/z, z1.s
-; CHECK-NEXT: ret
- %a = call <vscale x 8 x i32> @llvm.lrint.nxv8i32.nxv8f32(<vscale x 8 x float> %x)
- ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @lrint_v16_i32_f32(<vscale x 16 x float> %x) {
-; CHECK-LABEL: lrint_v16_i32_f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: frint32x z0.s, p0/z, z0.s
-; CHECK-NEXT: frint32x z1.s, p0/z, z1.s
-; CHECK-NEXT: frint32x z2.s, p0/z, z2.s
-; CHECK-NEXT: frint32x z3.s, p0/z, z3.s
-; CHECK-NEXT: fcvtzs z0.s, p0/z, z0.s
-; CHECK-NEXT: fcvtzs z1.s, p0/z, z1.s
-; CHECK-NEXT: fcvtzs z2.s, p0/z, z2.s
-; CHECK-NEXT: fcvtzs z3.s, p0/z, z3.s
-; CHECK-NEXT: ret
- %a = call <vscale x 16 x i32> @llvm.lrint.nxv16i32.nxv16f32(<vscale x 16 x float> %x)
- ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 32 x i32> @lrint_v32_i32_f32(<vscale x 32 x float> %x) {
-; CHECK-LABEL: lrint_v32_i32_f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: frint32x z0.s, p0/z, z0.s
-; CHECK-NEXT: frint32x z1.s, p0/z, z1.s
-; CHECK-NEXT: frint32x z2.s, p0/z, z2.s
-; CHECK-NEXT: frint32x z3.s, p0/z, z3.s
-; CHECK-NEXT: frint32x z4.s, p0/z, z4.s
-; CHECK-NEXT: frint32x z5.s, p0/z, z5.s
-; CHECK-NEXT: frint32x z6.s, p0/z, z6.s
-; CHECK-NEXT: frint32x z7.s, p0/z, z7.s
-; CHECK-NEXT: fcvtzs z0.s, p0/z, z0.s
-; CHECK-NEXT: fcvtzs z1.s, p0/z, z1.s
-; CHECK-NEXT: fcvtzs z2.s, p0/z, z2.s
-; CHECK-NEXT: fcvtzs z3.s, p0/z, z3.s
-; CHECK-NEXT: fcvtzs z4.s, p0/z, z4.s
-; CHECK-NEXT: fcvtzs z5.s, p0/z, z5.s
-; CHECK-NEXT: fcvtzs z6.s, p0/z, z6.s
-; CHECK-NEXT: fcvtzs z7.s, p0/z, z7.s
-; CHECK-NEXT: ret
- %a = call <vscale x 32 x i32> @llvm.lrint.nxv32i32.nxv32f32(<vscale x 32 x float> %x)
- ret <vscale x 32 x i32> %a
-}
-
define <vscale x 2 x i32> @lrint_v2_i32_f64(<vscale x 2 x double> %x) {
; CHECK-LABEL: lrint_v2_i32_f64:
; CHECK: // %bb.0:
@@ -334,647 +23,30 @@ define <vscale x 2 x i32> @lrint_v2_i32_f64(<vscale x 2 x double> %x) {
ret <vscale x 2 x i32> %a
}
-define <vscale x 4 x i32> @lrint_v4_i32_f64(<vscale x 4 x double> %x) {
-; CHECK-LABEL: lrint_v4_i32_f64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: frint64x z1.d, p0/z, z1.d
-; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
-; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
-; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
-; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s
-; CHECK-NEXT: ret
- %a = call <vscale x 4 x i32> @llvm.lrint.nxv4i32.nxv4f64(<vscale x 4 x double> %x)
- ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @lrint_v8_i32_f64(<vscale x 8 x double> %x) {
-; CHECK-LABEL: lrint_v8_i32_f64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: frint64x z1.d, p0/z, z1.d
-; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
-; CHECK-NEXT: frint64x z3.d, p0/z, z3.d
-; CHECK-NEXT: frint64x z2.d, p0/z, z2.d
-; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
-; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
-; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
-; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
-; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s
-; CHECK-NEXT: uzp1 z1.s, z2.s, z3.s
-; CHECK-NEXT: ret
- %a = call <vscale x 8 x i32> @llvm.lrint.nxv8i32.nxv8f64(<vscale x 8 x double> %x)
- ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @lrint_v16_i32_f64(<vscale x 16 x double> %x) {
-; CHECK-LABEL: lrint_v16_i32_f64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: frint64x z1.d, p0/z, z1.d
-; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
-; CHECK-NEXT: frint64x z3.d, p0/z, z3.d
-; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
-; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
-; CHECK-NEXT: frint64x z4.d, p0/z, z4.d
-; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s
-; CHECK-NEXT: frint64x z1.d, p0/z, z2.d
-; CHECK-NEXT: frint64x z2.d, p0/z, z5.d
-; CHECK-NEXT: frint64x z5.d, p0/z, z7.d
-; CHECK-NEXT: frint64x z6.d, p0/z, z6.d
-; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
-; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
-; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
-; CHECK-NEXT: fcvtzs z4.d, p0/z, z4.d
-; CHECK-NEXT: fcvtzs z5.d, p0/z, z5.d
-; CHECK-NEXT: fcvtzs z6.d, p0/z, z6.d
-; CHECK-NEXT: uzp1 z1.s, z1.s, z3.s
-; CHECK-NEXT: uzp1 z2.s, z4.s, z2.s
-; CHECK-NEXT: uzp1 z3.s, z6.s, z5.s
-; CHECK-NEXT: ret
- %a = call <vscale x 16 x i32> @llvm.lrint.nxv16i32.nxv16f64(<vscale x 16 x double> %x)
- ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 32 x i32> @lrint_v32_i32_f64(<vscale x 32 x double> %x) {
-; CHECK-LABEL: lrint_v32_i32_f64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ldr z0, [x0, #1, mul vl]
-; CHECK-NEXT: ldr z1, [x0]
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: ldr z2, [x0, #3, mul vl]
-; CHECK-NEXT: ldr z3, [x0, #2, mul vl]
-; CHECK-NEXT: ldr z4, [x0, #8, mul vl]
-; CHECK-NEXT: ldr z27, [x0, #9, mul vl]
-; CHECK-NEXT: ldr z28, [x0, #7, mul vl]
-; CHECK-NEXT: ldr z29, [x0, #4, mul vl]
-; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
-; CHECK-NEXT: frint64x z1.d, p0/z, z1.d
-; CHECK-NEXT: ldr z30, [x0, #5, mul vl]
-; CHECK-NEXT: ldr z31, [x0, #6, mul vl]
-; CHECK-NEXT: frint64x z2.d, p0/z, z2.d
-; CHECK-NEXT: frint64x z3.d, p0/z, z3.d
-; CHECK-NEXT: ldr z5, [x0, #14, mul vl]
-; CHECK-NEXT: ldr z6, [x0, #15, mul vl]
-; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
-; CHECK-NEXT: ldr z7, [x0, #12, mul vl]
-; CHECK-NEXT: ldr z24, [x0, #13, mul vl]
-; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
-; CHECK-NEXT: ldr z25, [x0, #10, mul vl]
-; CHECK-NEXT: ldr z26, [x0, #11, mul vl]
-; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
-; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
-; CHECK-NEXT: uzp1 z0.s, z1.s, z0.s
-; CHECK-NEXT: frint64x z28.d, p0/z, z28.d
-; CHECK-NEXT: uzp1 z1.s, z3.s, z2.s
-; CHECK-NEXT: frint64x z2.d, p0/z, z30.d
-; CHECK-NEXT: frint64x z3.d, p0/z, z29.d
-; CHECK-NEXT: frint64x z29.d, p0/z, z31.d
-; CHECK-NEXT: frint64x z27.d, p0/z, z27.d
-; CHECK-NEXT: frint64x z4.d, p0/z, z4.d
-; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
-; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
-; CHECK-NEXT: fcvtzs z28.d, p0/z, z28.d
-; CHECK-NEXT: fcvtzs z29.d, p0/z, z29.d
-; CHECK-NEXT: fcvtzs z27.d, p0/z, z27.d
-; CHECK-NEXT: fcvtzs z4.d, p0/z, z4.d
-; CHECK-NEXT: frint64x z26.d, p0/z, z26.d
-; CHECK-NEXT: frint64x z25.d, p0/z, z25.d
-; CHECK-NEXT: frint64x z24.d, p0/z, z24.d
-; CHECK-NEXT: frint64x z7.d, p0/z, z7.d
-; CHECK-NEXT: frint64x z6.d, p0/z, z6.d
-; CHECK-NEXT: frint64x z5.d, p0/z, z5.d
-; CHECK-NEXT: uzp1 z2.s, z3.s, z2.s
-; CHECK-NEXT: uzp1 z3.s, z29.s, z28.s
-; CHECK-NEXT: uzp1 z4.s, z4.s, z27.s
-; CHECK-NEXT: fcvtzs z26.d, p0/z, z26.d
-; CHECK-NEXT: fcvtzs z25.d, p0/z, z25.d
-; CHECK-NEXT: fcvtzs z24.d, p0/z, z24.d
-; CHECK-NEXT: fcvtzs z7.d, p0/z, z7.d
-; CHECK-NEXT: fcvtzs z27.d, p0/z, z6.d
-; CHECK-NEXT: fcvtzs z28.d, p0/z, z5.d
-; CHECK-NEXT: uzp1 z5.s, z25.s, z26.s
-; CHECK-NEXT: uzp1 z6.s, z7.s, z24.s
-; CHECK-NEXT: uzp1 z7.s, z28.s, z27.s
-; CHECK-NEXT: ret
- %a = call <vscale x 32 x i32> @llvm.lrint.nxv32i32.nxv32f64(<vscale x 32 x double> %x)
- ret <vscale x 32 x i32> %a
-}
-
-define <vscale x 1 x i64> @lrint_v1f16(<vscale x 1 x half> %x) {
-; CHECK-LABEL: lrint_v1f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov z1.h, #-1025 // =0xfffffffffffffbff
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: mov w8, #31743 // =0x7bff
-; CHECK-NEXT: mov z2.d, #0x8000000000000000
-; CHECK-NEXT: frintx z0.h, p0/z, z0.h
-; CHECK-NEXT: fcmge p1.h, p0/z, z0.h, z1.h
-; CHECK-NEXT: mov z1.h, w8
-; CHECK-NEXT: fcvtzs z2.d, p1/m, z0.h
-; CHECK-NEXT: fcmgt p1.h, p0/z, z0.h, z1.h
-; CHECK-NEXT: mov z1.d, #0x7fffffffffffffff
-; CHECK-NEXT: fcmuo p0.h, p0/z, z0.h, z0.h
-; CHECK-NEXT: sel z0.d, p1, z1.d, z2.d
-; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0
-; CHECK-NEXT: ret
- %a = call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f16(<vscale x 1 x half> %x)
- ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @lrint_v2f16(<vscale x 2 x half> %x) {
-; CHECK-LABEL: lrint_v2f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov z1.h, #-1025 // =0xfffffffffffffbff
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: mov w8, #31743 // =0x7bff
-; CHECK-NEXT: mov z2.d, #0x8000000000000000
-; CHECK-NEXT: frintx z0.h, p0/z, z0.h
-; CHECK-NEXT: fcmge p1.h, p0/z, z0.h, z1.h
-; CHECK-NEXT: mov z1.h, w8
-; CHECK-NEXT: fcvtzs z2.d, p1/m, z0.h
-; CHECK-NEXT: fcmgt p1.h, p0/z, z0.h, z1.h
-; CHECK-NEXT: mov z1.d, #0x7fffffffffffffff
-; CHECK-NEXT: fcmuo p0.h, p0/z, z0.h, z0.h
-; CHECK-NEXT: sel z0.d, p1, z1.d, z2.d
-; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0
-; CHECK-NEXT: ret
- %a = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f16(<vscale x 2 x half> %x)
- ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @lrint_v4f16(<vscale x 4 x half> %x) {
-; CHECK-LABEL: lrint_v4f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: uunpklo z1.d, z0.s
-; CHECK-NEXT: uunpkhi z0.d, z0.s
-; CHECK-NEXT: mov w8, #31743 // =0x7bff
-; CHECK-NEXT: mov z2.h, #-1025 // =0xfffffffffffffbff
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: mov z3.d, #0x8000000000000000
-; CHECK-NEXT: mov z4.d, #0x8000000000000000
-; CHECK-NEXT: mov z5.d, #0x7fffffffffffffff
-; CHECK-NEXT: frintx z1.h, p0/z, z1.h
-; CHECK-NEXT: frintx z0.h, p0/z, z0.h
-; CHECK-NEXT: fcmge p1.h, p0/z, z1.h, z2.h
-; CHECK-NEXT: fcmge p2.h, p0/z, z0.h, z2.h
-; CHECK-NEXT: mov z2.h, w8
-; CHECK-NEXT: fcmuo p3.h, p0/z, z1.h, z1.h
-; CHECK-NEXT: fcvtzs z4.d, p1/m, z1.h
-; CHECK-NEXT: fcmgt p1.h, p0/z, z1.h, z2.h
-; CHECK-NEXT: fcvtzs z3.d, p2/m, z0.h
-; CHECK-NEXT: fcmgt p2.h, p0/z, z0.h, z2.h
-; CHECK-NEXT: fcmuo p0.h, p0/z, z0.h, z0.h
-; CHECK-NEXT: sel z0.d, p1, z5.d, z4.d
-; CHECK-NEXT: sel z1.d, p2, z5.d, z3.d
-; CHECK-NEXT: mov z0.d, p3/m, #0 // =0x0
-; CHECK-NEXT: mov z1.d, p0/m, #0 // =0x0
-; CHECK-NEXT: ret
- %a = call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f16(<vscale x 4 x half> %x)
- ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @lrint_v8f16(<vscale x 8 x half> %x) {
-; CHECK-LABEL: lrint_v8f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
-; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: uunpklo z1.s, z0.h
-; CHECK-NEXT: uunpkhi z0.s, z0.h
-; CHECK-NEXT: mov w8, #31743 // =0x7bff
-; CHECK-NEXT: mov z4.h, #-1025 // =0xfffffffffffffbff
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: mov z5.d, #0x8000000000000000
-; CHECK-NEXT: mov z6.d, #0x8000000000000000
-; CHECK-NEXT: mov z7.d, #0x8000000000000000
-; CHECK-NEXT: mov z24.h, w8
-; CHECK-NEXT: mov z25.d, #0x8000000000000000
-; CHECK-NEXT: uunpklo z2.d, z1.s
-; CHECK-NEXT: uunpkhi z1.d, z1.s
-; CHECK-NEXT: uunpklo z3.d, z0.s
-; CHECK-NEXT: uunpkhi z0.d, z0.s
-; CHECK-NEXT: frintx z2.h, p0/z, z2.h
-; CHECK-NEXT: frintx z1.h, p0/z, z1.h
-; CHECK-NEXT: frintx z3.h, p0/z, z3.h
-; CHECK-NEXT: fcmge p1.h, p0/z, z2.h, z4.h
-; CHECK-NEXT: frintx z26.h, p0/z, z0.h
-; CHECK-NEXT: fcmge p2.h, p0/z, z1.h, z4.h
-; CHECK-NEXT: fcmge p3.h, p0/z, z3.h, z4.h
-; CHECK-NEXT: fcmgt p4.h, p0/z, z1.h, z24.h
-; CHECK-NEXT: fcvtzs z5.d, p1/m, z2.h
-; CHECK-NEXT: fcmge p1.h, p0/z, z26.h, z4.h
-; CHECK-NEXT: fcvtzs z6.d, p2/m, z1.h
-; CHECK-NEXT: mov z4.d, #0x7fffffffffffffff
-; CHECK-NEXT: fcmgt p2.h, p0/z, z2.h, z24.h
-; CHECK-NEXT: fcvtzs z7.d, p3/m, z3.h
-; CHECK-NEXT: fcmgt p5.h, p0/z, z3.h, z24.h
-; CHECK-NEXT: fcmgt p6.h, p0/z, z26.h, z24.h
-; CHECK-NEXT: fcvtzs z25.d, p1/m, z26.h
-; CHECK-NEXT: fcmuo p3.h, p0/z, z2.h, z2.h
-; CHECK-NEXT: sel z0.d, p2, z4.d, z5.d
-; CHECK-NEXT: fcmuo p1.h, p0/z, z1.h, z1.h
-; CHECK-NEXT: sel z1.d, p4, z4.d, z6.d
-; CHECK-NEXT: sel z2.d, p5, z4.d, z7.d
-; CHECK-NEXT: fcmuo p2.h, p0/z, z3.h, z3.h
-; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT: fcmuo p0.h, p0/z, z26.h, z26.h
-; CHECK-NEXT: sel z3.d, p6, z4.d, z25.d
-; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT: mov z0.d, p3/m, #0 // =0x0
-; CHECK-NEXT: mov z1.d, p1/m, #0 // =0x0
-; CHECK-NEXT: mov z2.d, p2/m, #0 // =0x0
-; CHECK-NEXT: mov z3.d, p0/m, #0 // =0x0
-; CHECK-NEXT: addvl sp, sp, #1
-; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT: ret
- %a = call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f16(<vscale x 8 x half> %x)
- ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 16 x i64> @lrint_v16f16(<vscale x 16 x half> %x) {
-; CHECK-LABEL: lrint_v16f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: str p10, [sp, #1, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p9, [sp, #2, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p8, [sp, #3, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
-; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: uunpkhi z3.s, z0.h
-; CHECK-NEXT: uunpklo z2.s, z0.h
-; CHECK-NEXT: mov w8, #31743 // =0x7bff
-; CHECK-NEXT: uunpklo z7.s, z1.h
-; CHECK-NEXT: mov z0.h, #-1025 // =0xfffffffffffffbff
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: uunpkhi z1.s, z1.h
-; CHECK-NEXT: mov z4.d, #0x8000000000000000
-; CHECK-NEXT: mov z6.d, #0x8000000000000000
-; CHECK-NEXT: mov z5.d, #0x8000000000000000
-; CHECK-NEXT: mov z29.h, w8
-; CHECK-NEXT: mov z30.d, #0x8000000000000000
-; CHECK-NEXT: uunpklo z26.d, z3.s
-; CHECK-NEXT: uunpklo z24.d, z2.s
-; CHECK-NEXT: uunpkhi z25.d, z2.s
-; CHECK-NEXT: uunpklo z28.d, z7.s
-; CHECK-NEXT: uunpkhi z27.d, z3.s
-; CHECK-NEXT: uunpkhi z7.d, z7.s
-; CHECK-NEXT: mov z2.d, #0x8000000000000000
-; CHECK-NEXT: mov z3.d, #0x8000000000000000
-; CHECK-NEXT: uunpklo z31.d, z1.s
-; CHECK-NEXT: uunpkhi z1.d, z1.s
-; CHECK-NEXT: frintx z26.h, p0/z, z26.h
-; CHECK-NEXT: frintx z24.h, p0/z, z24.h
-; CHECK-NEXT: frintx z25.h, p0/z, z25.h
-; CHECK-NEXT: frintx z28.h, p0/z, z28.h
-; CHECK-NEXT: frintx z27.h, p0/z, z27.h
-; CHECK-NEXT: frintx z7.h, p0/z, z7.h
-; CHECK-NEXT: fcmge p4.h, p0/z, z26.h, z0.h
-; CHECK-NEXT: fcmge p2.h, p0/z, z24.h, z0.h
-; CHECK-NEXT: fcmge p3.h, p0/z, z25.h, z0.h
-; CHECK-NEXT: fcmge p1.h, p0/z, z28.h, z0.h
-; CHECK-NEXT: fcmge p5.h, p0/z, z27.h, z0.h
-; CHECK-NEXT: fcvtzs z4.d, p4/m, z26.h
-; CHECK-NEXT: fcvtzs z2.d, p2/m, z24.h
-; CHECK-NEXT: fcmge p4.h, p0/z, z7.h, z0.h
-; CHECK-NEXT: fcvtzs z3.d, p3/m, z25.h
-; CHECK-NEXT: fcvtzs z6.d, p1/m, z28.h
-; CHECK-NEXT: fcmgt p3.h, p0/z, z24.h, z29.h
-; CHECK-NEXT: fcvtzs z5.d, p5/m, z27.h
-; CHECK-NEXT: fcmuo p1.h, p0/z, z24.h, z24.h
-; CHECK-NEXT: frintx z24.h, p0/z, z31.h
-; CHECK-NEXT: mov z31.d, #0x8000000000000000
-; CHECK-NEXT: fcvtzs z30.d, p4/m, z7.h
-; CHECK-NEXT: fcmgt p5.h, p0/z, z25.h, z29.h
-; CHECK-NEXT: fcmuo p2.h, p0/z, z25.h, z25.h
-; CHECK-NEXT: mov z25.d, #0x8000000000000000
-; CHECK-NEXT: fcmge p4.h, p0/z, z24.h, z0.h
-; CHECK-NEXT: fcmgt p6.h, p0/z, z26.h, z29.h
-; CHECK-NEXT: fcmuo p7.h, p0/z, z26.h, z26.h
-; CHECK-NEXT: mov z26.d, #0x7fffffffffffffff
-; CHECK-NEXT: fcmgt p8.h, p0/z, z27.h, z29.h
-; CHECK-NEXT: fcvtzs z25.d, p4/m, z24.h
-; CHECK-NEXT: fcmuo p10.h, p0/z, z27.h, z27.h
-; CHECK-NEXT: frintx z27.h, p0/z, z1.h
-; CHECK-NEXT: sel z1.d, p5, z26.d, z3.d
-; CHECK-NEXT: sel z3.d, p8, z26.d, z5.d
-; CHECK-NEXT: fcmge p4.h, p0/z, z27.h, z0.h
-; CHECK-NEXT: sel z0.d, p3, z26.d, z2.d
-; CHECK-NEXT: sel z2.d, p6, z26.d, z4.d
-; CHECK-NEXT: mov z1.d, p2/m, #0 // =0x0
-; CHECK-NEXT: mov z3.d, p10/m, #0 // =0x0
-; CHECK-NEXT: ldr p10, [sp, #1, mul vl] // 2-byte Reload
-; CHECK-NEXT: fcmgt p9.h, p0/z, z28.h, z29.h
-; CHECK-NEXT: mov z2.d, p7/m, #0 // =0x0
-; CHECK-NEXT: ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT: fcvtzs z31.d, p4/m, z27.h
-; CHECK-NEXT: mov z0.d, p1/m, #0 // =0x0
-; CHECK-NEXT: fcmgt p5.h, p0/z, z7.h, z29.h
-; CHECK-NEXT: fcmgt p6.h, p0/z, z24.h, z29.h
-; CHECK-NEXT: sel z4.d, p9, z26.d, z6.d
-; CHECK-NEXT: fcmgt p4.h, p0/z, z27.h, z29.h
-; CHECK-NEXT: fcmuo p8.h, p0/z, z7.h, z7.h
-; CHECK-NEXT: sel z5.d, p5, z26.d, z30.d
-; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT: sel z6.d, p6, z26.d, z25.d
-; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT: fcmuo p9.h, p0/z, z24.h, z24.h
-; CHECK-NEXT: fcmuo p3.h, p0/z, z28.h, z28.h
-; CHECK-NEXT: sel z7.d, p4, z26.d, z31.d
-; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT: mov z5.d, p8/m, #0 // =0x0
-; CHECK-NEXT: ldr p8, [sp, #3, mul vl] // 2-byte Reload
-; CHECK-NEXT: fcmuo p0.h, p0/z, z27.h, z27.h
-; CHECK-NEXT: mov z6.d, p9/m, #0 // =0x0
-; CHECK-NEXT: ldr p9, [sp, #2, mul vl] // 2-byte Reload
-; CHECK-NEXT: mov z4.d, p3/m, #0 // =0x0
-; CHECK-NEXT: mov z7.d, p0/m, #0 // =0x0
-; CHECK-NEXT: addvl sp, sp, #1
-; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT: ret
- %a = call <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16f16(<vscale x 16 x half> %x)
- ret <vscale x 16 x i64> %a
-}
-
-define <vscale x 32 x i64> @lrint_v32f16(<vscale x 32 x half> %x) {
-; CHECK-LABEL: lrint_v32f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-17
-; CHECK-NEXT: str p10, [sp, #1, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p9, [sp, #2, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p8, [sp, #3, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Spill
-; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Spill
-; CHECK-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG
-; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
-; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16
-; CHECK-NEXT: uunpklo z4.s, z0.h
-; CHECK-NEXT: uunpkhi z6.s, z0.h
-; CHECK-NEXT: mov w9, #31743 // =0x7bff
-; CHECK-NEXT: uunpklo z7.s, z1.h
-; CHECK-NEXT: mov z27.h, #-1025 // =0xfffffffffffffbff
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: uunpkhi z31.s, z1.h
-; CHECK-NEXT: mov z0.d, #0x8000000000000000
-; CHECK-NEXT: mov z30.d, #0x8000000000000000
-; CHECK-NEXT: uunpkhi z17.s, z2.h
-; CHECK-NEXT: mov z9.d, #0x8000000000000000
-; CHECK-NEXT: uunpklo z18.s, z3.h
-; CHECK-NEXT: uunpklo z24.d, z4.s
-; CHECK-NEXT: uunpkhi z25.d, z4.s
-; CHECK-NEXT: uunpkhi z28.d, z6.s
-; CHECK-NEXT: uunpklo z29.d, z7.s
-; CHECK-NEXT: uunpkhi z8.d, z7.s
-; CHECK-NEXT: uunpklo z26.d, z6.s
-; CHECK-NEXT: mov z4.d, #0x8000000000000000
-; CHECK-NEXT: uunpklo z12.d, z31.s
-; CHECK-NEXT: uunpkhi z13.d, z31.s
-; CHECK-NEXT: uunpkhi z3.s, z3.h
-; CHECK-NEXT: uunpkhi z19.d, z18.s
-; CHECK-NEXT: mov z5.d, #0x8000000000000000
-; CHECK-NEXT: frintx z1.h, p0/z, z24.h
-; CHECK-NEXT: frintx z7.h, p0/z, z25.h
-; CHECK-NEXT: frintx z25.h, p0/z, z28.h
-; CHECK-NEXT: frintx z11.h, p0/z, z29.h
-; CHECK-NEXT: frintx z28.h, p0/z, z8.h
-; CHECK-NEXT: frintx z24.h, p0/z, z26.h
-; CHECK-NEXT: frintx z31.h, p0/z, z12.h
-; CHECK-NEXT: frintx z15.h, p0/z, z13.h
-; CHECK-NEXT: uunpklo z8.s, z2.h
-; CHECK-NEXT: frintx z19.h, p0/z, z19.h
-; CHECK-NEXT: mov z26.h, w9
-; CHECK-NEXT: mov z12.d, #0x8000000000000000
-; CHECK-NEXT: mov z13.d, #0x8000000000000000
-; CHECK-NEXT: uunpklo z20.d, z3.s
-; CHECK-NEXT: mov z6.d, #0x8000000000000000
-; CHECK-NEXT: uunpkhi z3.d, z3.s
-; CHECK-NEXT: mov z29.d, #0x7fffffffffffffff
-; CHECK-NEXT: uunpklo z18.d, z18.s
-; CHECK-NEXT: uunpkhi z16.d, z8.s
-; CHECK-NEXT: mov z22.d, #0x8000000000000000
-; CHECK-NEXT: mov z23.d, #0x8000000000000000
-; CHECK-NEXT: mov z10.d, #0x8000000000000000
-; CHECK-NEXT: mov z21.d, #0x8000000000000000
-; CHECK-NEXT: frintx z20.h, p0/z, z20.h
-; CHECK-NEXT: fcmge p3.h, p0/z, z1.h, z27.h
-; CHECK-NEXT: frintx z2.h, p0/z, z16.h
-; CHECK-NEXT: uunpklo z16.d, z17.s
-; CHECK-NEXT: uunpkhi z17.d, z17.s
-; CHECK-NEXT: fcmge p4.h, p0/z, z7.h, z27.h
-; CHECK-NEXT: fcmgt p9.h, p0/z, z11.h, z26.h
-; CHECK-NEXT: fcvtzs z0.d, p3/m, z1.h
-; CHECK-NEXT: frintx z16.h, p0/z, z16.h
-; CHECK-NEXT: frintx z17.h, p0/z, z17.h
-; CHECK-NEXT: fcmge p3.h, p0/z, z11.h, z27.h
-; CHECK-NEXT: fcvtzs z4.d, p4/m, z7.h
-; CHECK-NEXT: fcmge p4.h, p0/z, z28.h, z27.h
-; CHECK-NEXT: fcmuo p8.h, p0/z, z11.h, z11.h
-; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: frintx z0.h, p0/z, z3.h
-; CHECK-NEXT: fcmge p6.h, p0/z, z15.h, z27.h
-; CHECK-NEXT: fcvtzs z30.d, p3/m, z11.h
-; CHECK-NEXT: uunpklo z11.d, z8.s
-; CHECK-NEXT: mov z8.d, #0x8000000000000000
-; CHECK-NEXT: fcvtzs z9.d, p4/m, z28.h
-; CHECK-NEXT: fcmge p4.h, p0/z, z31.h, z27.h
-; CHECK-NEXT: fcmge p7.h, p0/z, z2.h, z27.h
-; CHECK-NEXT: fcvtzs z12.d, p6/m, z15.h
-; CHECK-NEXT: frintx z14.h, p0/z, z11.h
-; CHECK-NEXT: mov z11.d, #0x8000000000000000
-; CHECK-NEXT: sel z3.d, p9, z29.d, z30.d
-; CHECK-NEXT: fcmge p5.h, p0/z, z14.h, z27.h
-; CHECK-NEXT: fcvtzs z8.d, p4/m, z31.h
-; CHECK-NEXT: fcmge p1.h, p0/z, z24.h, z27.h
-; CHECK-NEXT: mov z3.d, p8/m, #0 // =0x0
-; CHECK-NEXT: fcvtzs z11.d, p7/m, z2.h
-; CHECK-NEXT: fcmge p2.h, p0/z, z25.h, z27.h
-; CHECK-NEXT: fcvtzs z13.d, p5/m, z14.h
-; CHECK-NEXT: fcmgt p6.h, p0/z, z15.h, z26.h
-; CHECK-NEXT: fcvtzs z5.d, p1/m, z24.h
-; CHECK-NEXT: fcmuo p4.h, p0/z, z15.h, z15.h
-; CHECK-NEXT: mov z15.d, #0x8000000000000000
-; CHECK-NEXT: fcvtzs z6.d, p2/m, z25.h
-; CHECK-NEXT: fcmge p7.h, p0/z, z16.h, z27.h
-; CHECK-NEXT: fcmgt p3.h, p0/z, z28.h, z26.h
-; CHECK-NEXT: fcmgt p10.h, p0/z, z14.h, z26.h
-; CHECK-NEXT: fcmge p1.h, p0/z, z19.h, z27.h
-; CHECK-NEXT: fcmge p2.h, p0/z, z20.h, z27.h
-; CHECK-NEXT: fcvtzs z15.d, p7/m, z16.h
-; CHECK-NEXT: sel z30.d, p3, z29.d, z9.d
-; CHECK-NEXT: sel z9.d, p6, z29.d, z12.d
-; CHECK-NEXT: sel z12.d, p10, z29.d, z13.d
-; CHECK-NEXT: frintx z13.h, p0/z, z18.h
-; CHECK-NEXT: fcmge p5.h, p0/z, z17.h, z27.h
-; CHECK-NEXT: fcvtzs z22.d, p1/m, z19.h
-; CHECK-NEXT: fcvtzs z23.d, p2/m, z20.h
-; CHECK-NEXT: mov z9.d, p4/m, #0 // =0x0
-; CHECK-NEXT: fcmge p7.h, p0/z, z0.h, z27.h
-; CHECK-NEXT: fcmuo p1.h, p0/z, z14.h, z14.h
-; CHECK-NEXT: str z9, [x8, #7, mul vl]
-; CHECK-NEXT: fcvtzs z21.d, p5/m, z17.h
-; CHECK-NEXT: fcmge p2.h, p0/z, z13.h, z27.h
-; CHECK-NEXT: mov z27.d, #0x8000000000000000
-; CHECK-NEXT: fcvtzs z10.d, p7/m, z0.h
-; CHECK-NEXT: fcmgt p5.h, p0/z, z0.h, z26.h
-; CHECK-NEXT: mov z12.d, p1/m, #0 // =0x0
-; CHECK-NEXT: fcmgt p6.h, p0/z, z20.h, z26.h
-; CHECK-NEXT: fcvtzs z27.d, p2/m, z13.h
-; CHECK-NEXT: fcmgt p3.h, p0/z, z19.h, z26.h
-; CHECK-NEXT: str z12, [x8, #8, mul vl]
-; CHECK-NEXT: fcmuo p1.h, p0/z, z0.h, z0.h
-; CHECK-NEXT: sel z0.d, p5, z29.d, z10.d
-; CHECK-NEXT: sel z10.d, p6, z29.d, z23.d
-; CHECK-NEXT: fcmuo p2.h, p0/z, z20.h, z20.h
-; CHECK-NEXT: fcmuo p7.h, p0/z, z19.h, z19.h
-; CHECK-NEXT: fcmgt p5.h, p0/z, z2.h, z26.h
-; CHECK-NEXT: mov z0.d, p1/m, #0 // =0x0
-; CHECK-NEXT: fcmuo p6.h, p0/z, z2.h, z2.h
-; CHECK-NEXT: sel z2.d, p3, z29.d, z22.d
-; CHECK-NEXT: mov z10.d, p2/m, #0 // =0x0
-; CHECK-NEXT: str z0, [x8, #15, mul vl]
-; CHECK-NEXT: fcmgt p3.h, p0/z, z13.h, z26.h
-; CHECK-NEXT: mov z2.d, p7/m, #0 // =0x0
-; CHECK-NEXT: sel z0.d, p5, z29.d, z11.d
-; CHECK-NEXT: str z10, [x8, #14, mul vl]
-; CHECK-NEXT: fcmgt p1.h, p0/z, z17.h, z26.h
-; CHECK-NEXT: fcmgt p2.h, p0/z, z16.h, z26.h
-; CHECK-NEXT: str z2, [x8, #13, mul vl]
-; CHECK-NEXT: mov z0.d, p6/m, #0 // =0x0
-; CHECK-NEXT: fcmuo p7.h, p0/z, z13.h, z13.h
-; CHECK-NEXT: mov z27.d, p3/m, z29.d
-; CHECK-NEXT: fcmuo p5.h, p0/z, z17.h, z17.h
-; CHECK-NEXT: str z0, [x8, #9, mul vl]
-; CHECK-NEXT: sel z2.d, p1, z29.d, z21.d
-; CHECK-NEXT: fcmuo p3.h, p0/z, z16.h, z16.h
-; CHECK-NEXT: sel z10.d, p2, z29.d, z15.d
-; CHECK-NEXT: mov z27.d, p7/m, #0 // =0x0
-; CHECK-NEXT: fcmgt p1.h, p0/z, z7.h, z26.h
-; CHECK-NEXT: mov z2.d, p5/m, #0 // =0x0
-; CHECK-NEXT: fcmgt p2.h, p0/z, z24.h, z26.h
-; CHECK-NEXT: str z27, [x8, #12, mul vl]
-; CHECK-NEXT: mov z10.d, p3/m, #0 // =0x0
-; CHECK-NEXT: fcmuo p3.h, p0/z, z28.h, z28.h
-; CHECK-NEXT: str z2, [x8, #11, mul vl]
-; CHECK-NEXT: fcmgt p7.h, p0/z, z31.h, z26.h
-; CHECK-NEXT: mov z4.d, p1/m, z29.d
-; CHECK-NEXT: str z10, [x8, #10, mul vl]
-; CHECK-NEXT: mov z5.d, p2/m, z29.d
-; CHECK-NEXT: fcmgt p5.h, p0/z, z25.h, z26.h
-; CHECK-NEXT: fcmgt p1.h, p0/z, z1.h, z26.h
-; CHECK-NEXT: mov z30.d, p3/m, #0 // =0x0
-; CHECK-NEXT: sel z2.d, p7, z29.d, z8.d
-; CHECK-NEXT: fcmuo p6.h, p0/z, z31.h, z31.h
-; CHECK-NEXT: fcmuo p2.h, p0/z, z25.h, z25.h
-; CHECK-NEXT: sel z0.d, p5, z29.d, z6.d
-; CHECK-NEXT: fcmuo p3.h, p0/z, z24.h, z24.h
-; CHECK-NEXT: fcmuo p4.h, p0/z, z7.h, z7.h
-; CHECK-NEXT: mov z2.d, p6/m, #0 // =0x0
-; CHECK-NEXT: fcmuo p0.h, p0/z, z1.h, z1.h
-; CHECK-NEXT: ldr z1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: str z30, [x8, #5, mul vl]
-; CHECK-NEXT: mov z0.d, p2/m, #0 // =0x0
-; CHECK-NEXT: str z3, [x8, #4, mul vl]
-; CHECK-NEXT: mov z5.d, p3/m, #0 // =0x0
-; CHECK-NEXT: mov z4.d, p4/m, #0 // =0x0
-; CHECK-NEXT: str z2, [x8, #6, mul vl]
-; CHECK-NEXT: mov z1.d, p1/m, z29.d
-; CHECK-NEXT: str z0, [x8, #3, mul vl]
-; CHECK-NEXT: str z5, [x8, #2, mul vl]
-; CHECK-NEXT: mov z1.d, p0/m, #0 // =0x0
-; CHECK-NEXT: str z4, [x8, #1, mul vl]
-; CHECK-NEXT: str z1, [x8]
-; CHECK-NEXT: addvl sp, sp, #1
-; CHECK-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p10, [sp, #1, mul vl] // 2-byte Reload
-; CHECK-NEXT: ldr p9, [sp, #2, mul vl] // 2-byte Reload
-; CHECK-NEXT: ldr p8, [sp, #3, mul vl] // 2-byte Reload
-; CHECK-NEXT: ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT: addvl sp, sp, #17
-; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT: ret
- %a = call <vscale x 32 x i64> @llvm.lrint.nxv32i64.nxv32f16(<vscale x 32 x half> %x)
- ret <vscale x 32 x i64> %a
-}
-
-define <vscale x 1 x i64> @lrint_v1f32(<vscale x 1 x float> %x) {
-; CHECK-LABEL: lrint_v1f32:
+define <vscale x 2 x i64> @lrint_v2_i64_f32(<vscale x 2 x float> %x) vscale_range(1, 16){
+; CHECK-LABEL: lrint_v2_i64_f32:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: frint64x z0.s, p0/z, z0.s
; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.s
; CHECK-NEXT: ret
- %a = call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f32(<vscale x 1 x float> %x)
- ret <vscale x 1 x i64> %a
+ %a = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f32(<vscale x 2 x float> %x)
+ ret <vscale x 2 x i64> %a
}
-define <vscale x 2 x i64> @lrint_v2f32(<vscale x 2 x float> %x) {
-; CHECK-LABEL: lrint_v2f32:
+define <vscale x 2 x i64> @lrint_v2_i64_f64(<vscale x 2 x double> %x) {
+; CHECK-LABEL: lrint_v2_i64_f64:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: frint64x z0.s, p0/z, z0.s
-; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.s
+; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
+; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
; CHECK-NEXT: ret
- %a = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f32(<vscale x 2 x float> %x)
+ %a = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f64(<vscale x 2 x double> %x)
ret <vscale x 2 x i64> %a
}
-define <vscale x 4 x i64> @lrint_v4f32(<vscale x 4 x float> %x) {
-; CHECK-LABEL: lrint_v4f32:
+define <vscale x 4 x i64> @lrint_v4_i64_f64(<vscale x 4 x float> %x) vscale_range(2,16){
+; CHECK-LABEL: lrint_v4_i64_f64:
; CHECK: // %bb.0:
; CHECK-NEXT: uunpklo z1.d, z0.s
; CHECK-NEXT: uunpkhi z0.d, z0.s
@@ -990,287 +62,63 @@ define <vscale x 4 x i64> @lrint_v4f32(<vscale x 4 x float> %x) {
ret <vscale x 4 x i64> %a
}
-define <vscale x 8 x i64> @lrint_v8f32(<vscale x 8 x float> %x) {
-; CHECK-LABEL: lrint_v8f32:
+define <vscale x 4 x i32> @llrint_v4_i32_f32(<vscale x 4 x float> %x) {
+; CHECK-LABEL: llrint_v4_i32_f32:
; CHECK: // %bb.0:
-; CHECK-NEXT: uunpklo z2.d, z0.s
-; CHECK-NEXT: uunpkhi z0.d, z0.s
-; CHECK-NEXT: uunpklo z3.d, z1.s
-; CHECK-NEXT: uunpkhi z1.d, z1.s
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: frint64x z2.s, p0/z, z2.s
-; CHECK-NEXT: frint64x z4.s, p0/z, z0.s
-; CHECK-NEXT: frint64x z3.s, p0/z, z3.s
-; CHECK-NEXT: frint64x z5.s, p0/z, z1.s
-; CHECK-NEXT: movprfx z0, z2
-; CHECK-NEXT: fcvtzs z0.d, p0/m, z2.s
-; CHECK-NEXT: movprfx z1, z4
-; CHECK-NEXT: fcvtzs z1.d, p0/m, z4.s
-; CHECK-NEXT: movprfx z2, z3
-; CHECK-NEXT: fcvtzs z2.d, p0/m, z3.s
-; CHECK-NEXT: movprfx z3, z5
-; CHECK-NEXT: fcvtzs z3.d, p0/m, z5.s
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: frint32x z0.s, p0/z, z0.s
+; CHECK-NEXT: fcvtzs z0.s, p0/z, z0.s
; CHECK-NEXT: ret
- %a = call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f32(<vscale x 8 x float> %x)
- ret <vscale x 8 x i64> %a
+ %a = call <vscale x 4 x i32> @llvm.llrint.nxv4i32.nxv4f32(<vscale x 4 x float> %x)
+ ret <vscale x 4 x i32> %a
}
-define <vscale x 16 x i64> @lrint_v16f32(<vscale x 16 x float> %x) {
-; CHECK-LABEL: lrint_v16f32:
+define <vscale x 2 x i32> @llrint_v2_i32_f64(<vscale x 2 x double> %x) {
+; CHECK-LABEL: llrint_v2_i32_f64:
; CHECK: // %bb.0:
-; CHECK-NEXT: uunpklo z4.d, z1.s
-; CHECK-NEXT: uunpklo z5.d, z0.s
-; CHECK-NEXT: uunpkhi z0.d, z0.s
-; CHECK-NEXT: uunpkhi z1.d, z1.s
-; CHECK-NEXT: uunpklo z6.d, z2.s
-; CHECK-NEXT: uunpkhi z7.d, z2.s
-; CHECK-NEXT: uunpklo z24.d, z3.s
-; CHECK-NEXT: uunpkhi z25.d, z3.s
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: frint64x z2.s, p0/z, z5.s
-; CHECK-NEXT: frint64x z3.s, p0/z, z0.s
-; CHECK-NEXT: frint64x z4.s, p0/z, z4.s
-; CHECK-NEXT: frint64x z5.s, p0/z, z1.s
-; CHECK-NEXT: movprfx z0, z2
-; CHECK-NEXT: fcvtzs z0.d, p0/m, z2.s
-; CHECK-NEXT: movprfx z1, z3
-; CHECK-NEXT: fcvtzs z1.d, p0/m, z3.s
-; CHECK-NEXT: movprfx z2, z4
-; CHECK-NEXT: fcvtzs z2.d, p0/m, z4.s
-; CHECK-NEXT: movprfx z3, z5
-; CHECK-NEXT: fcvtzs z3.d, p0/m, z5.s
-; CHECK-NEXT: frint64x z4.s, p0/z, z6.s
-; CHECK-NEXT: frint64x z5.s, p0/z, z7.s
-; CHECK-NEXT: frint64x z6.s, p0/z, z24.s
-; CHECK-NEXT: frint64x z7.s, p0/z, z25.s
-; CHECK-NEXT: fcvtzs z4.d, p0/m, z4.s
-; CHECK-NEXT: fcvtzs z5.d, p0/m, z5.s
-; CHECK-NEXT: fcvtzs z6.d, p0/m, z6.s
-; CHECK-NEXT: fcvtzs z7.d, p0/m, z7.s
+; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
+; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
; CHECK-NEXT: ret
- %a = call <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16f32(<vscale x 16 x float> %x)
- ret <vscale x 16 x i64> %a
+ %a = call <vscale x 2 x i32> @llvm.llrint.nxv2i32.nxv2f64(<vscale x 2 x double> %x)
+ ret <vscale x 2 x i32> %a
}
-define <vscale x 32 x i64> @lrint_v32f32(<vscale x 32 x float> %x) {
-; CHECK-LABEL: lrint_v32f32:
+define <vscale x 2 x i64> @llrint_v2_i64_f32(<vscale x 2 x float> %x) vscale_range(1, 16){
+; CHECK-LABEL: llrint_v2_i64_f32:
; CHECK: // %bb.0:
-; CHECK-NEXT: uunpkhi z25.d, z7.s
-; CHECK-NEXT: uunpkhi z27.d, z6.s
-; CHECK-NEXT: uunpklo z6.d, z6.s
-; CHECK-NEXT: uunpklo z29.d, z3.s
-; CHECK-NEXT: uunpkhi z30.d, z5.s
-; CHECK-NEXT: uunpklo z5.d, z5.s
-; CHECK-NEXT: uunpkhi z31.d, z4.s
-; CHECK-NEXT: uunpklo z4.d, z4.s
-; CHECK-NEXT: uunpkhi z3.d, z3.s
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: uunpklo z24.d, z0.s
-; CHECK-NEXT: uunpkhi z0.d, z0.s
-; CHECK-NEXT: uunpklo z7.d, z7.s
-; CHECK-NEXT: uunpklo z26.d, z1.s
-; CHECK-NEXT: uunpkhi z1.d, z1.s
-; CHECK-NEXT: frint64x z25.s, p0/z, z25.s
-; CHECK-NEXT: frint64x z6.s, p0/z, z6.s
-; CHECK-NEXT: frint64x z5.s, p0/z, z5.s
-; CHECK-NEXT: frint64x z4.s, p0/z, z4.s
-; CHECK-NEXT: frint64x z3.s, p0/z, z3.s
-; CHECK-NEXT: frint64x z27.s, p0/z, z27.s
-; CHECK-NEXT: uunpklo z28.d, z2.s
-; CHECK-NEXT: frint64x z30.s, p0/z, z30.s
; CHECK-NEXT: frint64x z0.s, p0/z, z0.s
-; CHECK-NEXT: uunpkhi z2.d, z2.s
-; CHECK-NEXT: fcvtzs z25.d, p0/m, z25.s
-; CHECK-NEXT: frint64x z7.s, p0/z, z7.s
-; CHECK-NEXT: fcvtzs z6.d, p0/m, z6.s
-; CHECK-NEXT: fcvtzs z5.d, p0/m, z5.s
-; CHECK-NEXT: frint64x z1.s, p0/z, z1.s
-; CHECK-NEXT: fcvtzs z4.d, p0/m, z4.s
-; CHECK-NEXT: fcvtzs z3.d, p0/m, z3.s
-; CHECK-NEXT: fcvtzs z7.d, p0/m, z7.s
-; CHECK-NEXT: fcvtzs z27.d, p0/m, z27.s
-; CHECK-NEXT: fcvtzs z30.d, p0/m, z30.s
-; CHECK-NEXT: fcvtzs z1.d, p0/m, z1.s
-; CHECK-NEXT: str z25, [x8, #15, mul vl]
-; CHECK-NEXT: frint64x z25.s, p0/z, z31.s
-; CHECK-NEXT: frint64x z2.s, p0/z, z2.s
-; CHECK-NEXT: str z6, [x8, #12, mul vl]
-; CHECK-NEXT: frint64x z6.s, p0/z, z29.s
-; CHECK-NEXT: fcvtzs z25.d, p0/m, z25.s
-; CHECK-NEXT: str z5, [x8, #10, mul vl]
-; CHECK-NEXT: frint64x z5.s, p0/z, z28.s
-; CHECK-NEXT: fcvtzs z6.d, p0/m, z6.s
-; CHECK-NEXT: str z4, [x8, #8, mul vl]
-; CHECK-NEXT: frint64x z4.s, p0/z, z26.s
-; CHECK-NEXT: fcvtzs z2.d, p0/m, z2.s
-; CHECK-NEXT: str z3, [x8, #7, mul vl]
-; CHECK-NEXT: frint64x z3.s, p0/z, z24.s
-; CHECK-NEXT: fcvtzs z5.d, p0/m, z5.s
-; CHECK-NEXT: fcvtzs z4.d, p0/m, z4.s
; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.s
-; CHECK-NEXT: fcvtzs z3.d, p0/m, z3.s
-; CHECK-NEXT: str z7, [x8, #14, mul vl]
-; CHECK-NEXT: str z27, [x8, #13, mul vl]
-; CHECK-NEXT: str z30, [x8, #11, mul vl]
-; CHECK-NEXT: str z25, [x8, #9, mul vl]
-; CHECK-NEXT: str z6, [x8, #6, mul vl]
-; CHECK-NEXT: str z2, [x8, #5, mul vl]
-; CHECK-NEXT: str z5, [x8, #4, mul vl]
-; CHECK-NEXT: str z1, [x8, #3, mul vl]
-; CHECK-NEXT: str z4, [x8, #2, mul vl]
-; CHECK-NEXT: str z0, [x8, #1, mul vl]
-; CHECK-NEXT: str z3, [x8]
-; CHECK-NEXT: ret
- %a = call <vscale x 32 x i64> @llvm.lrint.nxv32i64.nxv32f32(<vscale x 32 x float> %x)
- ret <vscale x 32 x i64> %a
-}
-
-define <vscale x 1 x i64> @lrint_v1f64(<vscale x 1 x double> %x) {
-; CHECK-LABEL: lrint_v1f64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
-; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
; CHECK-NEXT: ret
- %a = call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f64(<vscale x 1 x double> %x)
- ret <vscale x 1 x i64> %a
+ %a = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f32(<vscale x 2 x float> %x)
+ ret <vscale x 2 x i64> %a
}
-define <vscale x 2 x i64> @lrint_v2f64(<vscale x 2 x double> %x) {
-; CHECK-LABEL: lrint_v2f64:
+define <vscale x 2 x i64> @llrint_v2_i64_f64(<vscale x 2 x double> %x) {
+; CHECK-LABEL: llrint_v2_i64_f64:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
; CHECK-NEXT: ret
- %a = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f64(<vscale x 2 x double> %x)
+ %a = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f64(<vscale x 2 x double> %x)
ret <vscale x 2 x i64> %a
}
-define <vscale x 4 x i64> @lrint_v4f64(<vscale x 4 x double> %x) {
-; CHECK-LABEL: lrint_v4f64:
+define <vscale x 4 x i64> @llrint_v4_i64_f64(<vscale x 4 x float> %x) vscale_range(2,16){
+; CHECK-LABEL: llrint_v4_i64_f64:
; CHECK: // %bb.0:
+; CHECK-NEXT: uunpklo z1.d, z0.s
+; CHECK-NEXT: uunpkhi z0.d, z0.s
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
-; CHECK-NEXT: frint64x z1.d, p0/z, z1.d
-; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
-; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
+; CHECK-NEXT: frint64x z1.s, p0/z, z1.s
+; CHECK-NEXT: frint64x z2.s, p0/z, z0.s
+; CHECK-NEXT: movprfx z0, z1
+; CHECK-NEXT: fcvtzs z0.d, p0/m, z1.s
+; CHECK-NEXT: movprfx z1, z2
+; CHECK-NEXT: fcvtzs z1.d, p0/m, z2.s
; CHECK-NEXT: ret
- %a = call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f64(<vscale x 4 x double> %x)
+ %a = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f32(<vscale x 4 x float> %x)
ret <vscale x 4 x i64> %a
}
-
-define <vscale x 8 x i64> @lrint_v8f64(<vscale x 8 x double> %x) {
-; CHECK-LABEL: lrint_v8f64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
-; CHECK-NEXT: frint64x z1.d, p0/z, z1.d
-; CHECK-NEXT: frint64x z2.d, p0/z, z2.d
-; CHECK-NEXT: frint64x z3.d, p0/z, z3.d
-; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
-; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
-; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
-; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
-; CHECK-NEXT: ret
- %a = call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f64(<vscale x 8 x double> %x)
- ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 16 x i64> @lrint_v16f64(<vscale x 16 x double> %x) {
-; CHECK-LABEL: lrint_v16f64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
-; CHECK-NEXT: frint64x z1.d, p0/z, z1.d
-; CHECK-NEXT: frint64x z2.d, p0/z, z2.d
-; CHECK-NEXT: frint64x z3.d, p0/z, z3.d
-; CHECK-NEXT: frint64x z4.d, p0/z, z4.d
-; CHECK-NEXT: frint64x z5.d, p0/z, z5.d
-; CHECK-NEXT: frint64x z6.d, p0/z, z6.d
-; CHECK-NEXT: frint64x z7.d, p0/z, z7.d
-; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
-; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
-; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
-; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
-; CHECK-NEXT: fcvtzs z4.d, p0/z, z4.d
-; CHECK-NEXT: fcvtzs z5.d, p0/z, z5.d
-; CHECK-NEXT: fcvtzs z6.d, p0/z, z6.d
-; CHECK-NEXT: fcvtzs z7.d, p0/z, z7.d
-; CHECK-NEXT: ret
- %a = call <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16f64(<vscale x 16 x double> %x)
- ret <vscale x 16 x i64> %a
-}
-
-define <vscale x 32 x i64> @lrint_v32f64(<vscale x 32 x double> %x) {
-; CHECK-LABEL: lrint_v32f64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ldr z2, [x0, #15, mul vl]
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: ldr z5, [x0, #12, mul vl]
-; CHECK-NEXT: ldr z3, [x0, #14, mul vl]
-; CHECK-NEXT: ldr z24, [x0, #9, mul vl]
-; CHECK-NEXT: ldr z1, [x0, #7, mul vl]
-; CHECK-NEXT: ldr z0, [x0, #6, mul vl]
-; CHECK-NEXT: ldr z4, [x0, #13, mul vl]
-; CHECK-NEXT: ldr z6, [x0, #11, mul vl]
-; CHECK-NEXT: frint64x z2.d, p0/z, z2.d
-; CHECK-NEXT: ldr z7, [x0, #10, mul vl]
-; CHECK-NEXT: ldr z25, [x0, #8, mul vl]
-; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
-; CHECK-NEXT: frint64x z3.d, p0/z, z3.d
-; CHECK-NEXT: frint64x z1.d, p0/z, z1.d
-; CHECK-NEXT: ldr z26, [x0, #5, mul vl]
-; CHECK-NEXT: ldr z27, [x0, #4, mul vl]
-; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
-; CHECK-NEXT: ldr z28, [x0, #3, mul vl]
-; CHECK-NEXT: ldr z29, [x0, #2, mul vl]
-; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
-; CHECK-NEXT: ldr z30, [x0, #1, mul vl]
-; CHECK-NEXT: ldr z31, [x0]
-; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
-; CHECK-NEXT: str z2, [x8, #15, mul vl]
-; CHECK-NEXT: frint64x z2.d, p0/z, z5.d
-; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
-; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
-; CHECK-NEXT: frint64x z4.d, p0/z, z4.d
-; CHECK-NEXT: str z3, [x8, #14, mul vl]
-; CHECK-NEXT: str z2, [x8, #12, mul vl]
-; CHECK-NEXT: frint64x z2.d, p0/z, z24.d
-; CHECK-NEXT: frint64x z3.d, p0/z, z6.d
-; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
-; CHECK-NEXT: str z1, [x8, #7, mul vl]
-; CHECK-NEXT: frint64x z1.d, p0/z, z27.d
-; CHECK-NEXT: str z2, [x8, #9, mul vl]
-; CHECK-NEXT: frint64x z2.d, p0/z, z26.d
-; CHECK-NEXT: fcvtzs z4.d, p0/z, z4.d
-; CHECK-NEXT: str z0, [x8, #6, mul vl]
-; CHECK-NEXT: frint64x z0.d, p0/z, z28.d
-; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
-; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
-; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
-; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
-; CHECK-NEXT: str z4, [x8, #13, mul vl]
-; CHECK-NEXT: frint64x z4.d, p0/z, z7.d
-; CHECK-NEXT: str z3, [x8, #11, mul vl]
-; CHECK-NEXT: frint64x z3.d, p0/z, z25.d
-; CHECK-NEXT: fcvtzs z4.d, p0/z, z4.d
-; CHECK-NEXT: str z2, [x8, #5, mul vl]
-; CHECK-NEXT: frint64x z2.d, p0/z, z29.d
-; CHECK-NEXT: fcvtzs z3.d, p0/z, z3.d
-; CHECK-NEXT: str z1, [x8, #4, mul vl]
-; CHECK-NEXT: frint64x z1.d, p0/z, z30.d
-; CHECK-NEXT: fcvtzs z2.d, p0/z, z2.d
-; CHECK-NEXT: str z0, [x8, #3, mul vl]
-; CHECK-NEXT: frint64x z0.d, p0/z, z31.d
-; CHECK-NEXT: fcvtzs z1.d, p0/z, z1.d
-; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
-; CHECK-NEXT: str z4, [x8, #10, mul vl]
-; CHECK-NEXT: str z3, [x8, #8, mul vl]
-; CHECK-NEXT: str z2, [x8, #2, mul vl]
-; CHECK-NEXT: str z1, [x8, #1, mul vl]
-; CHECK-NEXT: str z0, [x8]
-; CHECK-NEXT: ret
- %a = call <vscale x 32 x i64> @llvm.lrint.nxv32i64.nxv16f64(<vscale x 32 x double> %x)
- ret <vscale x 32 x i64> %a
-}
>From 459536433f0a653b3dd578ea69d76793f1b50b39 Mon Sep 17 00:00:00 2001
From: Jacob Crawley <jacob.crawley at arm.com>
Date: Thu, 26 Mar 2026 16:30:05 +0000
Subject: [PATCH 3/4] Re-emit opcode with scalable vectors for fixed vector
input.
---
.../Target/AArch64/AArch64ISelLowering.cpp | 19 +++++-
llvm/test/CodeGen/AArch64/sve2p2-lrint.ll | 66 ++++++-------------
2 files changed, 36 insertions(+), 49 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index fd12a8b9ae8be..290a615a52c98 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5300,9 +5300,22 @@ SDValue AArch64TargetLowering::LowerVectorXRINT(SDValue Op,
unsigned IntBits = VT.getScalarSizeInBits();
unsigned FPBits = CastVT.getScalarSizeInBits();
- // Use FRINT32X/FRINT64X if Sve2p2 is available
- if (Subtarget->isSVEorStreamingSVEAvailable() && Subtarget->hasSVE2p2() &&
- (FPBits == 32 || FPBits == 64)) {
+ // Convert fixed-length vectors to scalable and re-emit the same opcode.
+ if (useSVEForFixedLengthVectorVT(Op.getValueType(),
+ !Subtarget->isNeonAvailable())) {
+ EVT ContainerSrcVT =
+ getContainerForFixedLengthVector(DAG, Src.getValueType());
+ EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
+ SDValue ScalableSrc = convertToScalableVector(DAG, ContainerSrcVT, Src);
+
+ SDValue ScalableRes =
+ DAG.getNode(Op.getOpcode(), DL, ContainerVT, ScalableSrc);
+ return convertFromScalableVector(DAG, VT, ScalableRes);
+ }
+
+ // Lower to FRINT32X/FRINT64X for scalable vectors if SVE2p2 is available.
+ if (VT.isScalableVector() && Subtarget->isSVEorStreamingSVEAvailable() &&
+ Subtarget->hasSVE2p2() && (FPBits == 32 || FPBits == 64)) {
assert(IntBits == 32 || IntBits == 64);
unsigned FrintOp = (IntBits == 32) ? AArch64ISD::FRINT32_MERGE_PASSTHRU
: AArch64ISD::FRINT64_MERGE_PASSTHRU;
diff --git a/llvm/test/CodeGen/AArch64/sve2p2-lrint.ll b/llvm/test/CodeGen/AArch64/sve2p2-lrint.ll
index 5456b09e9992f..eda26e076897b 100644
--- a/llvm/test/CodeGen/AArch64/sve2p2-lrint.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p2-lrint.ll
@@ -45,43 +45,19 @@ define <vscale x 2 x i64> @lrint_v2_i64_f64(<vscale x 2 x double> %x) {
ret <vscale x 2 x i64> %a
}
-define <vscale x 4 x i64> @lrint_v4_i64_f64(<vscale x 4 x float> %x) vscale_range(2,16){
-; CHECK-LABEL: lrint_v4_i64_f64:
+define void @lrint_v4_i64_f32(<4 x float> %x, ptr %dst) vscale_range(2, 16) {
+; CHECK-LABEL: lrint_v4_i64_f32:
; CHECK: // %bb.0:
-; CHECK-NEXT: uunpklo z1.d, z0.s
-; CHECK-NEXT: uunpkhi z0.d, z0.s
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: frint64x z1.s, p0/z, z1.s
-; CHECK-NEXT: frint64x z2.s, p0/z, z0.s
-; CHECK-NEXT: movprfx z0, z1
-; CHECK-NEXT: fcvtzs z0.d, p0/m, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fcvtzs z1.d, p0/m, z2.s
-; CHECK-NEXT: ret
- %a = call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f32(<vscale x 4 x float> %x)
- ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 4 x i32> @llrint_v4_i32_f32(<vscale x 4 x float> %x) {
-; CHECK-LABEL: llrint_v4_i32_f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: frint32x z0.s, p0/z, z0.s
-; CHECK-NEXT: fcvtzs z0.s, p0/z, z0.s
-; CHECK-NEXT: ret
- %a = call <vscale x 4 x i32> @llvm.llrint.nxv4i32.nxv4f32(<vscale x 4 x float> %x)
- ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 2 x i32> @llrint_v2_i32_f64(<vscale x 2 x double> %x) {
-; CHECK-LABEL: llrint_v2_i32_f64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: frint64x z0.d, p0/z, z0.d
-; CHECK-NEXT: fcvtzs z0.d, p0/z, z0.d
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: frint64x z0.s, p0/z, z0.s
+; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.s
+; CHECK-NEXT: ptrue p0.d, vl4
+; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
- %a = call <vscale x 2 x i32> @llvm.llrint.nxv2i32.nxv2f64(<vscale x 2 x double> %x)
- ret <vscale x 2 x i32> %a
+ %a = call <4 x i64> @llvm.lrint.v4i64.v4f32(<4 x float> %x)
+ store <4 x i64> %a, ptr %dst
+ ret void
}
define <vscale x 2 x i64> @llrint_v2_i64_f32(<vscale x 2 x float> %x) vscale_range(1, 16){
@@ -106,19 +82,17 @@ define <vscale x 2 x i64> @llrint_v2_i64_f64(<vscale x 2 x double> %x) {
ret <vscale x 2 x i64> %a
}
-define <vscale x 4 x i64> @llrint_v4_i64_f64(<vscale x 4 x float> %x) vscale_range(2,16){
-; CHECK-LABEL: llrint_v4_i64_f64:
+define void @llrint_v4_i64_f32(<4 x float> %x, ptr %dst) vscale_range(2, 16) {
+; CHECK-LABEL: llrint_v4_i64_f32:
; CHECK: // %bb.0:
-; CHECK-NEXT: uunpklo z1.d, z0.s
-; CHECK-NEXT: uunpkhi z0.d, z0.s
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: frint64x z1.s, p0/z, z1.s
-; CHECK-NEXT: frint64x z2.s, p0/z, z0.s
-; CHECK-NEXT: movprfx z0, z1
-; CHECK-NEXT: fcvtzs z0.d, p0/m, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fcvtzs z1.d, p0/m, z2.s
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: frint64x z0.s, p0/z, z0.s
+; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.s
+; CHECK-NEXT: ptrue p0.d, vl4
+; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
- %a = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f32(<vscale x 4 x float> %x)
- ret <vscale x 4 x i64> %a
+ %a = call <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float> %x)
+ store <4 x i64> %a, ptr %dst
+ ret void
}
>From 47b60662bb07da251e9389acbf251794e350d06b Mon Sep 17 00:00:00 2001
From: Jacob Crawley <jacob.crawley at arm.com>
Date: Thu, 2 Apr 2026 08:25:50 +0000
Subject: [PATCH 4/4] Restrict fixed vector conversion to SVE2p2
---
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 290a615a52c98..6eef5295acfc8 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5301,7 +5301,8 @@ SDValue AArch64TargetLowering::LowerVectorXRINT(SDValue Op,
unsigned FPBits = CastVT.getScalarSizeInBits();
// Convert fixed-length vectors to scalable and re-emit the same opcode.
- if (useSVEForFixedLengthVectorVT(Op.getValueType(),
+ if (Subtarget->hasSVE2p2() &&
+ useSVEForFixedLengthVectorVT(Op.getValueType(),
!Subtarget->isNeonAvailable())) {
EVT ContainerSrcVT =
getContainerForFixedLengthVector(DAG, Src.getValueType());
More information about the llvm-commits
mailing list