[llvm] ISel/AArch64/SVE: custom lower vector ISD::[L]LRINT (PR #89035)

Ramkumar Ramachandra via llvm-commits llvm-commits at lists.llvm.org
Thu May 9 07:28:16 PDT 2024


https://github.com/artagnon updated https://github.com/llvm/llvm-project/pull/89035

>From 62b681b6d8d160f5e28897fdf145577820236434 Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <r at artagnon.com>
Date: Tue, 16 Apr 2024 15:15:11 +0100
Subject: [PATCH 1/4] ISel/AArch64/SVE: custom lower vector ISD::[L]LRINT

Since 98c90a1 (ISel: introduce vector ISD::LRINT, ISD::LLRINT; custom
RISCV lowering), ISD::LRINT and ISD::LLRINT now have vector variants,
that are custom lowered on RISCV, and scalarized on all other targets.
Since 2302e4c (Reland "VectorUtils: mark xrint as trivially
vectorizable"), lrint and llrint are trivially vectorizable, so all the
vectorizers in-tree will produce vector variants when possible. Add a
custom lowering for AArch64 to custom-lower the vector variants natively
using a combination of frintx, fcvt, and fcvtzs, when SVE is present.
---
 .../Target/AArch64/AArch64ISelLowering.cpp    |  66 +-
 llvm/lib/Target/AArch64/AArch64ISelLowering.h |   1 +
 .../AArch64/sve-fixed-vector-llrint.ll        | 893 ++++++++++++++++++
 .../CodeGen/AArch64/sve-fixed-vector-lrint.ll | 893 ++++++++++++++++++
 llvm/test/CodeGen/AArch64/sve-llrint.ll       | 492 ++++++++++
 llvm/test/CodeGen/AArch64/sve-lrint.ll        | 492 ++++++++++
 llvm/test/CodeGen/AArch64/vector-llrint.ll    | 335 +++++++
 llvm/test/CodeGen/AArch64/vector-lrint.ll     | 372 +++++++-
 8 files changed, 3512 insertions(+), 32 deletions(-)
 create mode 100644 llvm/test/CodeGen/AArch64/sve-fixed-vector-llrint.ll
 create mode 100644 llvm/test/CodeGen/AArch64/sve-fixed-vector-lrint.ll
 create mode 100644 llvm/test/CodeGen/AArch64/sve-llrint.ll
 create mode 100644 llvm/test/CodeGen/AArch64/sve-lrint.ll

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 7344387ffe552..41372b5432a0e 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1526,6 +1526,8 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::FNEARBYINT, VT, Custom);
       setOperationAction(ISD::FRINT, VT, Custom);
       setOperationAction(ISD::FROUND, VT, Custom);
+      setOperationAction(ISD::LRINT, VT, Custom);
+      setOperationAction(ISD::LLRINT, VT, Custom);
       setOperationAction(ISD::FROUNDEVEN, VT, Custom);
       setOperationAction(ISD::FTRUNC, VT, Custom);
       setOperationAction(ISD::FSQRT, VT, Custom);
@@ -1940,6 +1942,8 @@ void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
   setOperationAction(ISD::FP_TO_UINT, VT, Default);
   setOperationAction(ISD::FRINT, VT, Default);
   setOperationAction(ISD::FROUND, VT, Default);
+  setOperationAction(ISD::LRINT, VT, Default);
+  setOperationAction(ISD::LLRINT, VT, Default);
   setOperationAction(ISD::FROUNDEVEN, VT, Default);
   setOperationAction(ISD::FSQRT, VT, Default);
   setOperationAction(ISD::FSUB, VT, Default);
@@ -4362,6 +4366,59 @@ SDValue AArch64TargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
   return DAG.getNode(ISD::TRUNCATE, DL, DstVT, Sat);
 }
 
+SDValue AArch64TargetLowering::LowerVectorXRINT(SDValue Op,
+                                                SelectionDAG &DAG) const {
+  EVT VT = Op.getValueType();
+  SDValue Src = Op.getOperand(0);
+  SDLoc DL(Op);
+
+  assert(VT.isVector() && "Expected vector type");
+
+  // We can't custom-lower ISD::[L]LRINT without SVE, since it requires
+  // AArch64ISD::FCVTZS_MERGE_PASSTHRU.
+  if (!Subtarget->isSVEAvailable())
+    return SDValue();
+
+  EVT ContainerVT = VT;
+  EVT SrcVT = Src.getValueType();
+  EVT CastVT =
+      ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
+
+  if (VT.isFixedLengthVector()) {
+    ContainerVT = getContainerForFixedLengthVector(DAG, VT);
+    CastVT = ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
+    Src = convertToScalableVector(DAG, CastVT, Src);
+  }
+
+  // First, round the floating-point value into a floating-point register with
+  // the current rounding mode.
+  SDValue FOp = DAG.getNode(ISD::FRINT, DL, CastVT, Src);
+
+  // If the source element type is narrower than the result element type
+  // (e.g. f32 rounded to i64), first FP_EXTEND the rounded value to a
+  // floating-point type whose elements match the required integer width.
+  size_t SrcSz = SrcVT.getScalarSizeInBits();
+  size_t ContainerSz = ContainerVT.getScalarSizeInBits();
+  if (ContainerSz > SrcSz) {
+    EVT SizedVT = MVT::getVectorVT(MVT::getFloatingPointVT(ContainerSz),
+                                   ContainerVT.getVectorElementCount());
+    FOp = DAG.getNode(ISD::FP_EXTEND, DL, SizedVT, FOp.getOperand(0));
+  }
+
+  // Finally, truncate the rounded floating point to an integer, rounding to
+  // zero.
+  SDValue Pred = getPredicateForVector(DAG, DL, ContainerVT);
+  SDValue Undef = DAG.getUNDEF(ContainerVT);
+  SDValue Truncated =
+      DAG.getNode(AArch64ISD::FCVTZS_MERGE_PASSTHRU, DL, ContainerVT,
+                  {Pred, FOp.getOperand(0), Undef}, FOp->getFlags());
+
+  if (VT.isScalableVector())
+    return Truncated;
+
+  return convertFromScalableVector(DAG, VT, Truncated);
+}
+
 SDValue AArch64TargetLowering::LowerVectorINT_TO_FP(SDValue Op,
                                                     SelectionDAG &DAG) const {
   // Warning: We maintain cost tables in AArch64TargetTransformInfo.cpp.
@@ -6685,10 +6742,13 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
     return LowerVECTOR_DEINTERLEAVE(Op, DAG);
   case ISD::VECTOR_INTERLEAVE:
     return LowerVECTOR_INTERLEAVE(Op, DAG);
-  case ISD::LROUND:
-  case ISD::LLROUND:
   case ISD::LRINT:
-  case ISD::LLRINT: {
+  case ISD::LLRINT:
+    if (Op.getValueType().isVector())
+      return LowerVectorXRINT(Op, DAG);
+    [[fallthrough]];
+  case ISD::LROUND:
+  case ISD::LLROUND: {
     assert((Op.getOperand(0).getValueType() == MVT::f16 ||
             Op.getOperand(0).getValueType() == MVT::bf16) &&
            "Expected custom lowering of rounding operations only for f16");
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index fbdc4de5617fe..b3e282a040603 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -1165,6 +1165,7 @@ class AArch64TargetLowering : public TargetLowering {
   SDValue LowerVectorFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerVectorXRINT(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-vector-llrint.ll b/llvm/test/CodeGen/AArch64/sve-fixed-vector-llrint.ll
new file mode 100644
index 0000000000000..febfa785eaeff
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-vector-llrint.ll
@@ -0,0 +1,893 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64 -mattr=+sve | FileCheck %s
+
+define <1 x i64> @llrint_v1i64_v1f16(<1 x half> %x) {
+; CHECK-LABEL: llrint_v1i64_v1f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx h0, h0
+; CHECK-NEXT:    fcvtzs x8, h0
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    ret
+  %a = call <1 x i64> @llvm.llrint.v1i64.v1f16(<1 x half> %x)
+  ret <1 x i64> %a
+}
+declare <1 x i64> @llvm.llrint.v1i64.v1f16(<1 x half>)
+
+define <2 x i64> @llrint_v1i64_v2f16(<2 x half> %x) {
+; CHECK-LABEL: llrint_v1i64_v2f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov h1, v0.h[1]
+; CHECK-NEXT:    frintx h0, h0
+; CHECK-NEXT:    frintx h1, h1
+; CHECK-NEXT:    fcvtzs x8, h0
+; CHECK-NEXT:    fcvtzs x9, h1
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    mov v0.d[1], x9
+; CHECK-NEXT:    ret
+  %a = call <2 x i64> @llvm.llrint.v2i64.v2f16(<2 x half> %x)
+  ret <2 x i64> %a
+}
+declare <2 x i64> @llvm.llrint.v2i64.v2f16(<2 x half>)
+
+define <4 x i64> @llrint_v4i64_v4f16(<4 x half> %x) {
+; CHECK-LABEL: llrint_v4i64_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov h1, v0.h[2]
+; CHECK-NEXT:    mov h2, v0.h[1]
+; CHECK-NEXT:    mov h3, v0.h[3]
+; CHECK-NEXT:    frintx h0, h0
+; CHECK-NEXT:    frintx h1, h1
+; CHECK-NEXT:    frintx h2, h2
+; CHECK-NEXT:    frintx h3, h3
+; CHECK-NEXT:    fcvtzs x8, h0
+; CHECK-NEXT:    fcvtzs x9, h1
+; CHECK-NEXT:    fcvtzs x10, h2
+; CHECK-NEXT:    fcvtzs x11, h3
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    mov v0.d[1], x10
+; CHECK-NEXT:    mov v1.d[1], x11
+; CHECK-NEXT:    ret
+  %a = call <4 x i64> @llvm.llrint.v4i64.v4f16(<4 x half> %x)
+  ret <4 x i64> %a
+}
+declare <4 x i64> @llvm.llrint.v4i64.v4f16(<4 x half>)
+
+define <8 x i64> @llrint_v8i64_v8f16(<8 x half> %x) {
+; CHECK-LABEL: llrint_v8i64_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    mov h4, v0.h[2]
+; CHECK-NEXT:    mov h3, v0.h[1]
+; CHECK-NEXT:    mov h7, v0.h[3]
+; CHECK-NEXT:    frintx h0, h0
+; CHECK-NEXT:    mov h2, v1.h[2]
+; CHECK-NEXT:    mov h5, v1.h[1]
+; CHECK-NEXT:    mov h6, v1.h[3]
+; CHECK-NEXT:    frintx h1, h1
+; CHECK-NEXT:    frintx h4, h4
+; CHECK-NEXT:    frintx h3, h3
+; CHECK-NEXT:    frintx h7, h7
+; CHECK-NEXT:    fcvtzs x9, h0
+; CHECK-NEXT:    frintx h2, h2
+; CHECK-NEXT:    frintx h5, h5
+; CHECK-NEXT:    frintx h6, h6
+; CHECK-NEXT:    fcvtzs x8, h1
+; CHECK-NEXT:    fcvtzs x12, h4
+; CHECK-NEXT:    fcvtzs x11, h3
+; CHECK-NEXT:    fcvtzs x15, h7
+; CHECK-NEXT:    fmov d0, x9
+; CHECK-NEXT:    fcvtzs x10, h2
+; CHECK-NEXT:    fcvtzs x13, h5
+; CHECK-NEXT:    fcvtzs x14, h6
+; CHECK-NEXT:    fmov d2, x8
+; CHECK-NEXT:    fmov d1, x12
+; CHECK-NEXT:    mov v0.d[1], x11
+; CHECK-NEXT:    fmov d3, x10
+; CHECK-NEXT:    mov v2.d[1], x13
+; CHECK-NEXT:    mov v1.d[1], x15
+; CHECK-NEXT:    mov v3.d[1], x14
+; CHECK-NEXT:    ret
+  %a = call <8 x i64> @llvm.llrint.v8i64.v8f16(<8 x half> %x)
+  ret <8 x i64> %a
+}
+declare <8 x i64> @llvm.llrint.v8i64.v8f16(<8 x half>)
+
+define <16 x i64> @llrint_v16i64_v16f16(<16 x half> %x) {
+; CHECK-LABEL: llrint_v16i64_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    mov h4, v0.h[1]
+; CHECK-NEXT:    frintx h5, h0
+; CHECK-NEXT:    mov h18, v0.h[2]
+; CHECK-NEXT:    mov h0, v0.h[3]
+; CHECK-NEXT:    frintx h6, h2
+; CHECK-NEXT:    mov h7, v2.h[1]
+; CHECK-NEXT:    mov h16, v2.h[2]
+; CHECK-NEXT:    mov h17, v3.h[2]
+; CHECK-NEXT:    frintx h19, h3
+; CHECK-NEXT:    frintx h4, h4
+; CHECK-NEXT:    fcvtzs x8, h5
+; CHECK-NEXT:    mov h5, v1.h[1]
+; CHECK-NEXT:    mov h2, v2.h[3]
+; CHECK-NEXT:    frintx h18, h18
+; CHECK-NEXT:    frintx h0, h0
+; CHECK-NEXT:    fcvtzs x9, h6
+; CHECK-NEXT:    frintx h6, h7
+; CHECK-NEXT:    frintx h7, h16
+; CHECK-NEXT:    mov h16, v1.h[2]
+; CHECK-NEXT:    frintx h17, h17
+; CHECK-NEXT:    fcvtzs x10, h19
+; CHECK-NEXT:    mov h19, v3.h[1]
+; CHECK-NEXT:    fcvtzs x11, h4
+; CHECK-NEXT:    mov h4, v1.h[3]
+; CHECK-NEXT:    mov h3, v3.h[3]
+; CHECK-NEXT:    frintx h1, h1
+; CHECK-NEXT:    frintx h5, h5
+; CHECK-NEXT:    fcvtzs x13, h7
+; CHECK-NEXT:    fcvtzs x12, h6
+; CHECK-NEXT:    fcvtzs x15, h18
+; CHECK-NEXT:    frintx h7, h16
+; CHECK-NEXT:    fcvtzs x14, h17
+; CHECK-NEXT:    frintx h16, h2
+; CHECK-NEXT:    frintx h17, h19
+; CHECK-NEXT:    frintx h4, h4
+; CHECK-NEXT:    fmov d2, x9
+; CHECK-NEXT:    frintx h19, h3
+; CHECK-NEXT:    fcvtzs x9, h1
+; CHECK-NEXT:    fmov d6, x10
+; CHECK-NEXT:    fmov d3, x13
+; CHECK-NEXT:    fcvtzs x13, h0
+; CHECK-NEXT:    fcvtzs x16, h5
+; CHECK-NEXT:    fcvtzs x10, h7
+; CHECK-NEXT:    fmov d7, x14
+; CHECK-NEXT:    fcvtzs x14, h16
+; CHECK-NEXT:    fcvtzs x17, h17
+; CHECK-NEXT:    fcvtzs x0, h4
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fcvtzs x18, h19
+; CHECK-NEXT:    fmov d1, x15
+; CHECK-NEXT:    fmov d4, x9
+; CHECK-NEXT:    mov v2.d[1], x12
+; CHECK-NEXT:    fmov d5, x10
+; CHECK-NEXT:    mov v0.d[1], x11
+; CHECK-NEXT:    mov v3.d[1], x14
+; CHECK-NEXT:    mov v1.d[1], x13
+; CHECK-NEXT:    mov v4.d[1], x16
+; CHECK-NEXT:    mov v6.d[1], x17
+; CHECK-NEXT:    mov v7.d[1], x18
+; CHECK-NEXT:    mov v5.d[1], x0
+; CHECK-NEXT:    ret
+  %a = call <16 x i64> @llvm.llrint.v16i64.v16f16(<16 x half> %x)
+  ret <16 x i64> %a
+}
+declare <16 x i64> @llvm.llrint.v16i64.v16f16(<16 x half>)
+
+define <32 x i64> @llrint_v32i64_v32f16(<32 x half> %x) {
+; CHECK-LABEL: llrint_v32i64_v32f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v4.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    ext v5.16b, v2.16b, v2.16b, #8
+; CHECK-NEXT:    ext v6.16b, v3.16b, v3.16b, #8
+; CHECK-NEXT:    ext v7.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    frintx h21, h1
+; CHECK-NEXT:    frintx h22, h2
+; CHECK-NEXT:    mov h26, v2.h[2]
+; CHECK-NEXT:    frintx h19, h0
+; CHECK-NEXT:    mov h27, v3.h[2]
+; CHECK-NEXT:    mov h20, v2.h[1]
+; CHECK-NEXT:    mov h18, v1.h[1]
+; CHECK-NEXT:    mov h16, v4.h[2]
+; CHECK-NEXT:    mov h17, v5.h[2]
+; CHECK-NEXT:    frintx h23, h5
+; CHECK-NEXT:    frintx h24, h6
+; CHECK-NEXT:    mov h25, v6.h[2]
+; CHECK-NEXT:    fcvtzs x9, h21
+; CHECK-NEXT:    fcvtzs x11, h22
+; CHECK-NEXT:    frintx h22, h7
+; CHECK-NEXT:    mov h21, v3.h[3]
+; CHECK-NEXT:    fcvtzs x10, h19
+; CHECK-NEXT:    frintx h27, h27
+; CHECK-NEXT:    frintx h20, h20
+; CHECK-NEXT:    frintx h16, h16
+; CHECK-NEXT:    frintx h17, h17
+; CHECK-NEXT:    fcvtzs x12, h23
+; CHECK-NEXT:    fcvtzs x13, h24
+; CHECK-NEXT:    frintx h23, h25
+; CHECK-NEXT:    frintx h25, h26
+; CHECK-NEXT:    mov h26, v3.h[1]
+; CHECK-NEXT:    mov h24, v2.h[3]
+; CHECK-NEXT:    fmov d19, x9
+; CHECK-NEXT:    fcvtzs x9, h22
+; CHECK-NEXT:    frintx h22, h3
+; CHECK-NEXT:    frintx h21, h21
+; CHECK-NEXT:    fcvtzs x14, h16
+; CHECK-NEXT:    fcvtzs x15, h17
+; CHECK-NEXT:    fmov d2, x12
+; CHECK-NEXT:    fmov d16, x13
+; CHECK-NEXT:    fcvtzs x12, h23
+; CHECK-NEXT:    fcvtzs x13, h25
+; CHECK-NEXT:    mov h23, v1.h[2]
+; CHECK-NEXT:    frintx h25, h26
+; CHECK-NEXT:    frintx h24, h24
+; CHECK-NEXT:    mov h1, v1.h[3]
+; CHECK-NEXT:    fmov d26, x11
+; CHECK-NEXT:    fcvtzs x11, h21
+; CHECK-NEXT:    fmov d3, x14
+; CHECK-NEXT:    fmov d17, x15
+; CHECK-NEXT:    fcvtzs x14, h22
+; CHECK-NEXT:    fcvtzs x15, h27
+; CHECK-NEXT:    mov h22, v0.h[2]
+; CHECK-NEXT:    frintx h18, h18
+; CHECK-NEXT:    frintx h21, h23
+; CHECK-NEXT:    fmov d23, x13
+; CHECK-NEXT:    fcvtzs x13, h25
+; CHECK-NEXT:    frintx h1, h1
+; CHECK-NEXT:    fmov d25, x14
+; CHECK-NEXT:    fcvtzs x14, h24
+; CHECK-NEXT:    fmov d24, x15
+; CHECK-NEXT:    frintx h22, h22
+; CHECK-NEXT:    fcvtzs x15, h18
+; CHECK-NEXT:    mov h18, v7.h[1]
+; CHECK-NEXT:    mov v25.d[1], x13
+; CHECK-NEXT:    fcvtzs x13, h21
+; CHECK-NEXT:    mov h21, v7.h[2]
+; CHECK-NEXT:    mov v24.d[1], x11
+; CHECK-NEXT:    fcvtzs x11, h20
+; CHECK-NEXT:    mov h20, v0.h[1]
+; CHECK-NEXT:    mov h0, v0.h[3]
+; CHECK-NEXT:    mov v23.d[1], x14
+; CHECK-NEXT:    fcvtzs x14, h1
+; CHECK-NEXT:    mov h1, v6.h[3]
+; CHECK-NEXT:    mov h6, v6.h[1]
+; CHECK-NEXT:    mov v19.d[1], x15
+; CHECK-NEXT:    mov h7, v7.h[3]
+; CHECK-NEXT:    stp q25, q24, [x8, #192]
+; CHECK-NEXT:    fmov d24, x13
+; CHECK-NEXT:    frintx h20, h20
+; CHECK-NEXT:    mov v26.d[1], x11
+; CHECK-NEXT:    fcvtzs x11, h22
+; CHECK-NEXT:    mov h22, v5.h[1]
+; CHECK-NEXT:    mov h5, v5.h[3]
+; CHECK-NEXT:    frintx h0, h0
+; CHECK-NEXT:    frintx h1, h1
+; CHECK-NEXT:    mov v24.d[1], x14
+; CHECK-NEXT:    mov h25, v4.h[3]
+; CHECK-NEXT:    frintx h6, h6
+; CHECK-NEXT:    stp q26, q23, [x8, #128]
+; CHECK-NEXT:    fmov d23, x12
+; CHECK-NEXT:    fcvtzs x12, h20
+; CHECK-NEXT:    mov h20, v4.h[1]
+; CHECK-NEXT:    frintx h5, h5
+; CHECK-NEXT:    fcvtzs x13, h0
+; CHECK-NEXT:    stp q19, q24, [x8, #64]
+; CHECK-NEXT:    frintx h22, h22
+; CHECK-NEXT:    fmov d0, x10
+; CHECK-NEXT:    fmov d19, x11
+; CHECK-NEXT:    frintx h4, h4
+; CHECK-NEXT:    fcvtzs x10, h1
+; CHECK-NEXT:    frintx h1, h21
+; CHECK-NEXT:    frintx h24, h25
+; CHECK-NEXT:    fcvtzs x11, h6
+; CHECK-NEXT:    frintx h20, h20
+; CHECK-NEXT:    frintx h6, h7
+; CHECK-NEXT:    fcvtzs x14, h5
+; CHECK-NEXT:    mov v19.d[1], x13
+; CHECK-NEXT:    frintx h5, h18
+; CHECK-NEXT:    fcvtzs x13, h22
+; CHECK-NEXT:    mov v0.d[1], x12
+; CHECK-NEXT:    fcvtzs x12, h4
+; CHECK-NEXT:    mov v23.d[1], x10
+; CHECK-NEXT:    fcvtzs x10, h1
+; CHECK-NEXT:    fcvtzs x15, h24
+; CHECK-NEXT:    mov v16.d[1], x11
+; CHECK-NEXT:    fcvtzs x11, h20
+; CHECK-NEXT:    mov v17.d[1], x14
+; CHECK-NEXT:    fcvtzs x14, h6
+; CHECK-NEXT:    mov v2.d[1], x13
+; CHECK-NEXT:    fcvtzs x13, h5
+; CHECK-NEXT:    fmov d4, x9
+; CHECK-NEXT:    stp q0, q19, [x8]
+; CHECK-NEXT:    fmov d0, x12
+; CHECK-NEXT:    stp q16, q23, [x8, #224]
+; CHECK-NEXT:    fmov d1, x10
+; CHECK-NEXT:    mov v3.d[1], x15
+; CHECK-NEXT:    stp q2, q17, [x8, #160]
+; CHECK-NEXT:    mov v0.d[1], x11
+; CHECK-NEXT:    mov v4.d[1], x13
+; CHECK-NEXT:    mov v1.d[1], x14
+; CHECK-NEXT:    stp q0, q3, [x8, #96]
+; CHECK-NEXT:    stp q4, q1, [x8, #32]
+; CHECK-NEXT:    ret
+  %a = call <32 x i64> @llvm.llrint.v32i64.v32f16(<32 x half> %x)
+  ret <32 x i64> %a
+}
+declare <32 x i64> @llvm.llrint.v32i64.v32f16(<32 x half>)
+
+define <1 x i64> @llrint_v1i64_v1f32(<1 x float> %x) {
+; CHECK-LABEL: llrint_v1i64_v1f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    frintx s0, s0
+; CHECK-NEXT:    fcvtzs x8, s0
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    ret
+  %a = call <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float> %x)
+  ret <1 x i64> %a
+}
+declare <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float>)
+
+define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) {
+; CHECK-LABEL: llrint_v2i64_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov s1, v0.s[1]
+; CHECK-NEXT:    frintx s0, s0
+; CHECK-NEXT:    frintx s1, s1
+; CHECK-NEXT:    fcvtzs x8, s0
+; CHECK-NEXT:    fcvtzs x9, s1
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    mov v0.d[1], x9
+; CHECK-NEXT:    ret
+  %a = call <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float> %x)
+  ret <2 x i64> %a
+}
+declare <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float>)
+
+define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
+; CHECK-LABEL: llrint_v4i64_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    mov s3, v0.s[1]
+; CHECK-NEXT:    frintx s0, s0
+; CHECK-NEXT:    mov s2, v1.s[1]
+; CHECK-NEXT:    frintx s1, s1
+; CHECK-NEXT:    frintx s3, s3
+; CHECK-NEXT:    fcvtzs x9, s0
+; CHECK-NEXT:    frintx s2, s2
+; CHECK-NEXT:    fcvtzs x8, s1
+; CHECK-NEXT:    fcvtzs x11, s3
+; CHECK-NEXT:    fmov d0, x9
+; CHECK-NEXT:    fcvtzs x10, s2
+; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    mov v0.d[1], x11
+; CHECK-NEXT:    mov v1.d[1], x10
+; CHECK-NEXT:    ret
+  %a = call <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float> %x)
+  ret <4 x i64> %a
+}
+declare <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float>)
+
+define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
+; CHECK-LABEL: llrint_v8i64_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    mov s4, v0.s[1]
+; CHECK-NEXT:    mov s7, v1.s[1]
+; CHECK-NEXT:    frintx s0, s0
+; CHECK-NEXT:    frintx s1, s1
+; CHECK-NEXT:    mov s5, v2.s[1]
+; CHECK-NEXT:    mov s6, v3.s[1]
+; CHECK-NEXT:    frintx s2, s2
+; CHECK-NEXT:    frintx s3, s3
+; CHECK-NEXT:    frintx s4, s4
+; CHECK-NEXT:    frintx s7, s7
+; CHECK-NEXT:    fcvtzs x9, s0
+; CHECK-NEXT:    fcvtzs x12, s1
+; CHECK-NEXT:    frintx s5, s5
+; CHECK-NEXT:    frintx s6, s6
+; CHECK-NEXT:    fcvtzs x8, s2
+; CHECK-NEXT:    fcvtzs x10, s3
+; CHECK-NEXT:    fcvtzs x11, s4
+; CHECK-NEXT:    fcvtzs x15, s7
+; CHECK-NEXT:    fmov d0, x9
+; CHECK-NEXT:    fmov d2, x12
+; CHECK-NEXT:    fcvtzs x13, s5
+; CHECK-NEXT:    fcvtzs x14, s6
+; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    fmov d3, x10
+; CHECK-NEXT:    mov v0.d[1], x11
+; CHECK-NEXT:    mov v2.d[1], x15
+; CHECK-NEXT:    mov v1.d[1], x13
+; CHECK-NEXT:    mov v3.d[1], x14
+; CHECK-NEXT:    ret
+  %a = call <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float> %x)
+  ret <8 x i64> %a
+}
+declare <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float>)
+
+define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
+; CHECK-LABEL: llrint_v16i64_v16f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v4.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v5.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    ext v6.16b, v2.16b, v2.16b, #8
+; CHECK-NEXT:    frintx s7, s0
+; CHECK-NEXT:    ext v16.16b, v3.16b, v3.16b, #8
+; CHECK-NEXT:    mov s0, v0.s[1]
+; CHECK-NEXT:    frintx s17, s4
+; CHECK-NEXT:    mov s4, v4.s[1]
+; CHECK-NEXT:    mov s18, v5.s[1]
+; CHECK-NEXT:    frintx s5, s5
+; CHECK-NEXT:    frintx s19, s6
+; CHECK-NEXT:    fcvtzs x8, s7
+; CHECK-NEXT:    frintx s7, s16
+; CHECK-NEXT:    mov s6, v6.s[1]
+; CHECK-NEXT:    mov s16, v16.s[1]
+; CHECK-NEXT:    frintx s0, s0
+; CHECK-NEXT:    frintx s4, s4
+; CHECK-NEXT:    fcvtzs x9, s17
+; CHECK-NEXT:    frintx s17, s1
+; CHECK-NEXT:    mov s1, v1.s[1]
+; CHECK-NEXT:    frintx s18, s18
+; CHECK-NEXT:    fcvtzs x10, s5
+; CHECK-NEXT:    mov s5, v2.s[1]
+; CHECK-NEXT:    fcvtzs x11, s19
+; CHECK-NEXT:    mov s19, v3.s[1]
+; CHECK-NEXT:    frintx s2, s2
+; CHECK-NEXT:    fcvtzs x12, s7
+; CHECK-NEXT:    frintx s6, s6
+; CHECK-NEXT:    fcvtzs x13, s4
+; CHECK-NEXT:    frintx s4, s3
+; CHECK-NEXT:    frintx s16, s16
+; CHECK-NEXT:    fcvtzs x14, s18
+; CHECK-NEXT:    frintx s18, s1
+; CHECK-NEXT:    fcvtzs x15, s17
+; CHECK-NEXT:    frintx s20, s5
+; CHECK-NEXT:    frintx s17, s19
+; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    fcvtzs x9, s2
+; CHECK-NEXT:    fmov d5, x11
+; CHECK-NEXT:    fmov d3, x10
+; CHECK-NEXT:    fcvtzs x11, s4
+; CHECK-NEXT:    fcvtzs x10, s0
+; CHECK-NEXT:    fmov d7, x12
+; CHECK-NEXT:    fcvtzs x12, s18
+; CHECK-NEXT:    fcvtzs x17, s6
+; CHECK-NEXT:    fcvtzs x18, s16
+; CHECK-NEXT:    fcvtzs x16, s20
+; CHECK-NEXT:    fcvtzs x0, s17
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fmov d2, x15
+; CHECK-NEXT:    fmov d4, x9
+; CHECK-NEXT:    mov v1.d[1], x13
+; CHECK-NEXT:    fmov d6, x11
+; CHECK-NEXT:    mov v3.d[1], x14
+; CHECK-NEXT:    mov v0.d[1], x10
+; CHECK-NEXT:    mov v5.d[1], x17
+; CHECK-NEXT:    mov v7.d[1], x18
+; CHECK-NEXT:    mov v2.d[1], x12
+; CHECK-NEXT:    mov v4.d[1], x16
+; CHECK-NEXT:    mov v6.d[1], x0
+; CHECK-NEXT:    ret
+  %a = call <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float> %x)
+  ret <16 x i64> %a
+}
+declare <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float>)
+
+define <32 x i64> @llrint_v32i64_v32f32(<32 x float> %x) {
+; CHECK-LABEL: llrint_v32i64_v32f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v16.16b, v2.16b, v2.16b, #8
+; CHECK-NEXT:    ext v20.16b, v5.16b, v5.16b, #8
+; CHECK-NEXT:    ext v17.16b, v3.16b, v3.16b, #8
+; CHECK-NEXT:    ext v18.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v19.16b, v4.16b, v4.16b, #8
+; CHECK-NEXT:    ext v21.16b, v6.16b, v6.16b, #8
+; CHECK-NEXT:    ext v22.16b, v7.16b, v7.16b, #8
+; CHECK-NEXT:    frintx s24, s16
+; CHECK-NEXT:    mov s28, v20.s[1]
+; CHECK-NEXT:    frintx s25, s17
+; CHECK-NEXT:    frintx s26, s18
+; CHECK-NEXT:    frintx s27, s19
+; CHECK-NEXT:    frintx s29, s20
+; CHECK-NEXT:    mov s30, v21.s[1]
+; CHECK-NEXT:    frintx s20, s21
+; CHECK-NEXT:    frintx s21, s22
+; CHECK-NEXT:    mov s23, v22.s[1]
+; CHECK-NEXT:    mov s19, v19.s[1]
+; CHECK-NEXT:    mov s17, v17.s[1]
+; CHECK-NEXT:    fcvtzs x12, s24
+; CHECK-NEXT:    frintx s24, s28
+; CHECK-NEXT:    fcvtzs x13, s25
+; CHECK-NEXT:    mov s25, v7.s[1]
+; CHECK-NEXT:    fcvtzs x9, s26
+; CHECK-NEXT:    fcvtzs x11, s27
+; CHECK-NEXT:    fcvtzs x14, s20
+; CHECK-NEXT:    fcvtzs x15, s21
+; CHECK-NEXT:    frintx s26, s1
+; CHECK-NEXT:    frintx s23, s23
+; CHECK-NEXT:    frintx s27, s7
+; CHECK-NEXT:    frintx s22, s30
+; CHECK-NEXT:    fmov d20, x12
+; CHECK-NEXT:    fcvtzs x12, s24
+; CHECK-NEXT:    mov s24, v6.s[1]
+; CHECK-NEXT:    frintx s25, s25
+; CHECK-NEXT:    frintx s6, s6
+; CHECK-NEXT:    fcvtzs x10, s29
+; CHECK-NEXT:    fmov d7, x11
+; CHECK-NEXT:    fmov d21, x13
+; CHECK-NEXT:    frintx s28, s5
+; CHECK-NEXT:    fcvtzs x11, s23
+; CHECK-NEXT:    fmov d23, x14
+; CHECK-NEXT:    fcvtzs x14, s26
+; CHECK-NEXT:    fmov d26, x15
+; CHECK-NEXT:    fcvtzs x15, s27
+; CHECK-NEXT:    frintx s24, s24
+; CHECK-NEXT:    mov s27, v5.s[1]
+; CHECK-NEXT:    fcvtzs x13, s22
+; CHECK-NEXT:    fcvtzs x17, s25
+; CHECK-NEXT:    frintx s25, s4
+; CHECK-NEXT:    fcvtzs x18, s6
+; CHECK-NEXT:    fmov d6, x10
+; CHECK-NEXT:    frintx s22, s2
+; CHECK-NEXT:    mov v26.d[1], x11
+; CHECK-NEXT:    fmov d5, x14
+; CHECK-NEXT:    fcvtzs x10, s24
+; CHECK-NEXT:    fmov d24, x15
+; CHECK-NEXT:    fcvtzs x14, s28
+; CHECK-NEXT:    frintx s27, s27
+; CHECK-NEXT:    mov v23.d[1], x13
+; CHECK-NEXT:    mov s4, v4.s[1]
+; CHECK-NEXT:    fcvtzs x13, s25
+; CHECK-NEXT:    fmov d25, x18
+; CHECK-NEXT:    mov s16, v16.s[1]
+; CHECK-NEXT:    mov v24.d[1], x17
+; CHECK-NEXT:    fcvtzs x16, s22
+; CHECK-NEXT:    frintx s22, s3
+; CHECK-NEXT:    mov s3, v3.s[1]
+; CHECK-NEXT:    frintx s19, s19
+; CHECK-NEXT:    mov s2, v2.s[1]
+; CHECK-NEXT:    mov v25.d[1], x10
+; CHECK-NEXT:    fcvtzs x10, s27
+; CHECK-NEXT:    frintx s4, s4
+; CHECK-NEXT:    mov v6.d[1], x12
+; CHECK-NEXT:    frintx s17, s17
+; CHECK-NEXT:    mov s18, v18.s[1]
+; CHECK-NEXT:    stp q24, q26, [x8, #224]
+; CHECK-NEXT:    fmov d24, x14
+; CHECK-NEXT:    fcvtzs x11, s22
+; CHECK-NEXT:    ext v22.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    mov s1, v1.s[1]
+; CHECK-NEXT:    frintx s3, s3
+; CHECK-NEXT:    stp q25, q23, [x8, #192]
+; CHECK-NEXT:    frintx s2, s2
+; CHECK-NEXT:    fcvtzs x12, s4
+; CHECK-NEXT:    mov v24.d[1], x10
+; CHECK-NEXT:    fcvtzs x10, s19
+; CHECK-NEXT:    mov s19, v0.s[1]
+; CHECK-NEXT:    frintx s16, s16
+; CHECK-NEXT:    frintx s0, s0
+; CHECK-NEXT:    fmov d4, x11
+; CHECK-NEXT:    mov s27, v22.s[1]
+; CHECK-NEXT:    frintx s22, s22
+; CHECK-NEXT:    frintx s1, s1
+; CHECK-NEXT:    fcvtzs x11, s3
+; CHECK-NEXT:    fcvtzs x14, s2
+; CHECK-NEXT:    frintx s2, s18
+; CHECK-NEXT:    stp q24, q6, [x8, #160]
+; CHECK-NEXT:    fmov d6, x13
+; CHECK-NEXT:    fcvtzs x13, s17
+; CHECK-NEXT:    frintx s17, s19
+; CHECK-NEXT:    fmov d23, x16
+; CHECK-NEXT:    mov v7.d[1], x10
+; CHECK-NEXT:    frintx s3, s27
+; CHECK-NEXT:    fcvtzs x10, s22
+; CHECK-NEXT:    fcvtzs x15, s1
+; CHECK-NEXT:    mov v6.d[1], x12
+; CHECK-NEXT:    fcvtzs x12, s16
+; CHECK-NEXT:    mov v4.d[1], x11
+; CHECK-NEXT:    mov v21.d[1], x13
+; CHECK-NEXT:    fcvtzs x13, s0
+; CHECK-NEXT:    mov v23.d[1], x14
+; CHECK-NEXT:    fcvtzs x14, s17
+; CHECK-NEXT:    fcvtzs x11, s3
+; CHECK-NEXT:    fmov d0, x10
+; CHECK-NEXT:    mov v5.d[1], x15
+; CHECK-NEXT:    stp q6, q7, [x8, #128]
+; CHECK-NEXT:    mov v20.d[1], x12
+; CHECK-NEXT:    fcvtzs x12, s2
+; CHECK-NEXT:    stp q4, q21, [x8, #96]
+; CHECK-NEXT:    fmov d1, x13
+; CHECK-NEXT:    fmov d2, x9
+; CHECK-NEXT:    mov v0.d[1], x11
+; CHECK-NEXT:    stp q23, q20, [x8, #64]
+; CHECK-NEXT:    mov v1.d[1], x14
+; CHECK-NEXT:    mov v2.d[1], x12
+; CHECK-NEXT:    stp q5, q0, [x8, #32]
+; CHECK-NEXT:    stp q1, q2, [x8]
+; CHECK-NEXT:    ret
+  %a = call <32 x i64> @llvm.llrint.v32i64.v32f32(<32 x float> %x)
+  ret <32 x i64> %a
+}
+declare <32 x i64> @llvm.llrint.v32i64.v32f32(<32 x float>)
+
+define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) {
+; CHECK-LABEL: llrint_v1i64_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    fcvtzs x8, d0
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    ret
+  %a = call <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double> %x)
+  ret <1 x i64> %a
+}
+declare <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double>)
+
+define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) {
+; CHECK-LABEL: llrint_v2i64_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov d1, v0.d[1]
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    frintx d1, d1
+; CHECK-NEXT:    fcvtzs x8, d0
+; CHECK-NEXT:    fcvtzs x9, d1
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    mov v0.d[1], x9
+; CHECK-NEXT:    ret
+  %a = call <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double> %x)
+  ret <2 x i64> %a
+}
+declare <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double>)
+
+define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
+; CHECK-LABEL: llrint_v4i64_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov d2, v0.d[1]
+; CHECK-NEXT:    mov d3, v1.d[1]
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    frintx d1, d1
+; CHECK-NEXT:    frintx d2, d2
+; CHECK-NEXT:    frintx d3, d3
+; CHECK-NEXT:    fcvtzs x8, d0
+; CHECK-NEXT:    fcvtzs x9, d1
+; CHECK-NEXT:    fcvtzs x10, d2
+; CHECK-NEXT:    fcvtzs x11, d3
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    mov v0.d[1], x10
+; CHECK-NEXT:    mov v1.d[1], x11
+; CHECK-NEXT:    ret
+  %a = call <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double> %x)
+  ret <4 x i64> %a
+}
+declare <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double>)
+
+define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
+; CHECK-LABEL: llrint_v8i64_v8f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov d4, v0.d[1]
+; CHECK-NEXT:    mov d5, v1.d[1]
+; CHECK-NEXT:    mov d6, v2.d[1]
+; CHECK-NEXT:    mov d7, v3.d[1]
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    frintx d1, d1
+; CHECK-NEXT:    frintx d2, d2
+; CHECK-NEXT:    frintx d3, d3
+; CHECK-NEXT:    frintx d4, d4
+; CHECK-NEXT:    frintx d5, d5
+; CHECK-NEXT:    frintx d6, d6
+; CHECK-NEXT:    frintx d7, d7
+; CHECK-NEXT:    fcvtzs x8, d0
+; CHECK-NEXT:    fcvtzs x9, d1
+; CHECK-NEXT:    fcvtzs x10, d2
+; CHECK-NEXT:    fcvtzs x11, d3
+; CHECK-NEXT:    fcvtzs x12, d4
+; CHECK-NEXT:    fcvtzs x13, d5
+; CHECK-NEXT:    fcvtzs x14, d6
+; CHECK-NEXT:    fcvtzs x15, d7
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    fmov d2, x10
+; CHECK-NEXT:    fmov d3, x11
+; CHECK-NEXT:    mov v0.d[1], x12
+; CHECK-NEXT:    mov v1.d[1], x13
+; CHECK-NEXT:    mov v2.d[1], x14
+; CHECK-NEXT:    mov v3.d[1], x15
+; CHECK-NEXT:    ret
+  %a = call <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double> %x)
+  ret <8 x i64> %a
+}
+declare <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double>)
+
+define <16 x i64> @llrint_v16f64(<16 x double> %x) {
+; CHECK-LABEL: llrint_v16f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov d16, v0.d[1]
+; CHECK-NEXT:    mov d17, v1.d[1]
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    frintx d1, d1
+; CHECK-NEXT:    frintx d18, d2
+; CHECK-NEXT:    mov d2, v2.d[1]
+; CHECK-NEXT:    frintx d19, d3
+; CHECK-NEXT:    mov d3, v3.d[1]
+; CHECK-NEXT:    frintx d16, d16
+; CHECK-NEXT:    frintx d17, d17
+; CHECK-NEXT:    fcvtzs x8, d0
+; CHECK-NEXT:    frintx d0, d4
+; CHECK-NEXT:    mov d4, v4.d[1]
+; CHECK-NEXT:    fcvtzs x9, d1
+; CHECK-NEXT:    frintx d1, d5
+; CHECK-NEXT:    mov d5, v5.d[1]
+; CHECK-NEXT:    fcvtzs x12, d18
+; CHECK-NEXT:    frintx d2, d2
+; CHECK-NEXT:    fcvtzs x13, d19
+; CHECK-NEXT:    frintx d18, d3
+; CHECK-NEXT:    fcvtzs x10, d16
+; CHECK-NEXT:    mov d16, v6.d[1]
+; CHECK-NEXT:    fcvtzs x11, d17
+; CHECK-NEXT:    mov d17, v7.d[1]
+; CHECK-NEXT:    frintx d6, d6
+; CHECK-NEXT:    frintx d7, d7
+; CHECK-NEXT:    frintx d4, d4
+; CHECK-NEXT:    frintx d5, d5
+; CHECK-NEXT:    fcvtzs x14, d0
+; CHECK-NEXT:    fcvtzs x15, d1
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    frintx d16, d16
+; CHECK-NEXT:    fcvtzs x9, d2
+; CHECK-NEXT:    fmov d2, x12
+; CHECK-NEXT:    frintx d17, d17
+; CHECK-NEXT:    fcvtzs x8, d6
+; CHECK-NEXT:    fcvtzs x12, d7
+; CHECK-NEXT:    fmov d3, x13
+; CHECK-NEXT:    fcvtzs x13, d18
+; CHECK-NEXT:    fcvtzs x16, d4
+; CHECK-NEXT:    fcvtzs x17, d5
+; CHECK-NEXT:    fmov d4, x14
+; CHECK-NEXT:    fmov d5, x15
+; CHECK-NEXT:    fcvtzs x18, d16
+; CHECK-NEXT:    mov v0.d[1], x10
+; CHECK-NEXT:    mov v1.d[1], x11
+; CHECK-NEXT:    fcvtzs x0, d17
+; CHECK-NEXT:    fmov d6, x8
+; CHECK-NEXT:    fmov d7, x12
+; CHECK-NEXT:    mov v2.d[1], x9
+; CHECK-NEXT:    mov v3.d[1], x13
+; CHECK-NEXT:    mov v4.d[1], x16
+; CHECK-NEXT:    mov v5.d[1], x17
+; CHECK-NEXT:    mov v6.d[1], x18
+; CHECK-NEXT:    mov v7.d[1], x0
+; CHECK-NEXT:    ret
+  %a = call <16 x i64> @llvm.llrint.v16i64.v16f64(<16 x double> %x)
+  ret <16 x i64> %a
+}
+declare <16 x i64> @llvm.llrint.v16i64.v16f64(<16 x double>)
+
+define <32 x i64> @llrint_v32f64(<32 x double> %x) {
+; CHECK-LABEL: llrint_v32f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx d20, d0
+; CHECK-NEXT:    frintx d22, d3
+; CHECK-NEXT:    frintx d21, d4
+; CHECK-NEXT:    ldp q19, q18, [sp, #64]
+; CHECK-NEXT:    frintx d23, d5
+; CHECK-NEXT:    ldp q27, q26, [sp, #96]
+; CHECK-NEXT:    mov d4, v4.d[1]
+; CHECK-NEXT:    ldp q16, q17, [sp, #32]
+; CHECK-NEXT:    mov d5, v5.d[1]
+; CHECK-NEXT:    fcvtzs x9, d20
+; CHECK-NEXT:    frintx d20, d6
+; CHECK-NEXT:    fcvtzs x11, d22
+; CHECK-NEXT:    frintx d22, d19
+; CHECK-NEXT:    fcvtzs x12, d21
+; CHECK-NEXT:    fcvtzs x10, d23
+; CHECK-NEXT:    mov d21, v26.d[1]
+; CHECK-NEXT:    frintx d23, d27
+; CHECK-NEXT:    mov d27, v27.d[1]
+; CHECK-NEXT:    frintx d24, d16
+; CHECK-NEXT:    mov d19, v19.d[1]
+; CHECK-NEXT:    frintx d25, d17
+; CHECK-NEXT:    fcvtzs x13, d20
+; CHECK-NEXT:    mov d20, v18.d[1]
+; CHECK-NEXT:    frintx d18, d18
+; CHECK-NEXT:    fcvtzs x16, d22
+; CHECK-NEXT:    frintx d22, d26
+; CHECK-NEXT:    mov d16, v16.d[1]
+; CHECK-NEXT:    frintx d21, d21
+; CHECK-NEXT:    fcvtzs x17, d23
+; CHECK-NEXT:    frintx d23, d27
+; CHECK-NEXT:    fcvtzs x14, d24
+; CHECK-NEXT:    frintx d26, d19
+; CHECK-NEXT:    fmov d19, x11
+; CHECK-NEXT:    frintx d20, d20
+; CHECK-NEXT:    mov d27, v17.d[1]
+; CHECK-NEXT:    fcvtzs x15, d25
+; CHECK-NEXT:    ldp q25, q24, [sp]
+; CHECK-NEXT:    fcvtzs x11, d22
+; CHECK-NEXT:    fmov d17, x12
+; CHECK-NEXT:    fcvtzs x12, d21
+; CHECK-NEXT:    fcvtzs x0, d23
+; CHECK-NEXT:    fmov d23, x14
+; CHECK-NEXT:    fcvtzs x14, d18
+; CHECK-NEXT:    fmov d18, x17
+; CHECK-NEXT:    fcvtzs x17, d20
+; CHECK-NEXT:    frintx d21, d7
+; CHECK-NEXT:    fcvtzs x18, d26
+; CHECK-NEXT:    fmov d20, x11
+; CHECK-NEXT:    frintx d22, d25
+; CHECK-NEXT:    frintx d26, d27
+; CHECK-NEXT:    frintx d16, d16
+; CHECK-NEXT:    mov v18.d[1], x0
+; CHECK-NEXT:    mov d25, v25.d[1]
+; CHECK-NEXT:    mov d7, v7.d[1]
+; CHECK-NEXT:    mov d6, v6.d[1]
+; CHECK-NEXT:    mov d0, v0.d[1]
+; CHECK-NEXT:    mov v20.d[1], x12
+; CHECK-NEXT:    fcvtzs x11, d21
+; CHECK-NEXT:    fmov d21, x15
+; CHECK-NEXT:    fcvtzs x12, d22
+; CHECK-NEXT:    fmov d22, x16
+; CHECK-NEXT:    fcvtzs x15, d26
+; CHECK-NEXT:    fmov d26, x14
+; CHECK-NEXT:    fcvtzs x14, d16
+; CHECK-NEXT:    frintx d25, d25
+; CHECK-NEXT:    frintx d7, d7
+; CHECK-NEXT:    mov d16, v1.d[1]
+; CHECK-NEXT:    mov d3, v3.d[1]
+; CHECK-NEXT:    stp q18, q20, [x8, #224]
+; CHECK-NEXT:    mov d18, v24.d[1]
+; CHECK-NEXT:    mov v22.d[1], x18
+; CHECK-NEXT:    mov v26.d[1], x17
+; CHECK-NEXT:    frintx d24, d24
+; CHECK-NEXT:    mov v21.d[1], x15
+; CHECK-NEXT:    mov v23.d[1], x14
+; CHECK-NEXT:    frintx d20, d2
+; CHECK-NEXT:    mov d2, v2.d[1]
+; CHECK-NEXT:    frintx d6, d6
+; CHECK-NEXT:    frintx d5, d5
+; CHECK-NEXT:    frintx d4, d4
+; CHECK-NEXT:    frintx d18, d18
+; CHECK-NEXT:    frintx d1, d1
+; CHECK-NEXT:    frintx d3, d3
+; CHECK-NEXT:    stp q22, q26, [x8, #192]
+; CHECK-NEXT:    fmov d22, x10
+; CHECK-NEXT:    fcvtzs x10, d24
+; CHECK-NEXT:    stp q23, q21, [x8, #160]
+; CHECK-NEXT:    fmov d21, x11
+; CHECK-NEXT:    fmov d24, x13
+; CHECK-NEXT:    frintx d2, d2
+; CHECK-NEXT:    fcvtzs x13, d6
+; CHECK-NEXT:    frintx d6, d16
+; CHECK-NEXT:    fcvtzs x11, d18
+; CHECK-NEXT:    fmov d18, x12
+; CHECK-NEXT:    fcvtzs x12, d25
+; CHECK-NEXT:    fmov d23, x10
+; CHECK-NEXT:    fcvtzs x10, d7
+; CHECK-NEXT:    fcvtzs x14, d5
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    fcvtzs x15, d3
+; CHECK-NEXT:    mov v24.d[1], x13
+; CHECK-NEXT:    fcvtzs x13, d2
+; CHECK-NEXT:    fmov d2, x9
+; CHECK-NEXT:    mov v23.d[1], x11
+; CHECK-NEXT:    fcvtzs x11, d4
+; CHECK-NEXT:    mov v18.d[1], x12
+; CHECK-NEXT:    fcvtzs x12, d20
+; CHECK-NEXT:    mov v21.d[1], x10
+; CHECK-NEXT:    fcvtzs x10, d1
+; CHECK-NEXT:    mov v22.d[1], x14
+; CHECK-NEXT:    fcvtzs x14, d6
+; CHECK-NEXT:    mov v19.d[1], x15
+; CHECK-NEXT:    stp q18, q23, [x8, #128]
+; CHECK-NEXT:    mov v17.d[1], x11
+; CHECK-NEXT:    fcvtzs x11, d0
+; CHECK-NEXT:    stp q24, q21, [x8, #96]
+; CHECK-NEXT:    fmov d0, x12
+; CHECK-NEXT:    fmov d1, x10
+; CHECK-NEXT:    stp q17, q22, [x8, #64]
+; CHECK-NEXT:    mov v0.d[1], x13
+; CHECK-NEXT:    mov v1.d[1], x14
+; CHECK-NEXT:    mov v2.d[1], x11
+; CHECK-NEXT:    stp q0, q19, [x8, #32]
+; CHECK-NEXT:    stp q2, q1, [x8]
+; CHECK-NEXT:    ret
+  %a = call <32 x i64> @llvm.llrint.v32i64.v32f64(<32 x double> %x)
+  ret <32 x i64> %a
+}
+declare <32 x i64> @llvm.llrint.v32i64.v32f64(<32 x double>)
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-vector-lrint.ll b/llvm/test/CodeGen/AArch64/sve-fixed-vector-lrint.ll
new file mode 100644
index 0000000000000..e9c5fd9b769b6
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-vector-lrint.ll
@@ -0,0 +1,893 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64 -mattr=+sve | FileCheck %s
+
+define <1 x i64> @lrint_v1f16(<1 x half> %x) {
+; CHECK-LABEL: lrint_v1f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx h0, h0
+; CHECK-NEXT:    fcvtzs x8, h0
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    ret
+  %a = call <1 x i64> @llvm.lrint.v1i64.v1f16(<1 x half> %x)
+  ret <1 x i64> %a
+}
+declare <1 x i64> @llvm.lrint.v1i64.v1f16(<1 x half>)
+
+define <2 x i64> @lrint_v2f16(<2 x half> %x) {
+; CHECK-LABEL: lrint_v2f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov h1, v0.h[1]
+; CHECK-NEXT:    frintx h0, h0
+; CHECK-NEXT:    frintx h1, h1
+; CHECK-NEXT:    fcvtzs x8, h0
+; CHECK-NEXT:    fcvtzs x9, h1
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    mov v0.d[1], x9
+; CHECK-NEXT:    ret
+  %a = call <2 x i64> @llvm.lrint.v2i64.v2f16(<2 x half> %x)
+  ret <2 x i64> %a
+}
+declare <2 x i64> @llvm.lrint.v2i64.v2f16(<2 x half>)
+
+define <4 x i64> @lrint_v4f16(<4 x half> %x) {
+; CHECK-LABEL: lrint_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov h1, v0.h[2]
+; CHECK-NEXT:    mov h2, v0.h[1]
+; CHECK-NEXT:    mov h3, v0.h[3]
+; CHECK-NEXT:    frintx h0, h0
+; CHECK-NEXT:    frintx h1, h1
+; CHECK-NEXT:    frintx h2, h2
+; CHECK-NEXT:    frintx h3, h3
+; CHECK-NEXT:    fcvtzs x8, h0
+; CHECK-NEXT:    fcvtzs x9, h1
+; CHECK-NEXT:    fcvtzs x10, h2
+; CHECK-NEXT:    fcvtzs x11, h3
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    mov v0.d[1], x10
+; CHECK-NEXT:    mov v1.d[1], x11
+; CHECK-NEXT:    ret
+  %a = call <4 x i64> @llvm.lrint.v4i64.v4f16(<4 x half> %x)
+  ret <4 x i64> %a
+}
+declare <4 x i64> @llvm.lrint.v4i64.v4f16(<4 x half>)
+
+define <8 x i64> @lrint_v8f16(<8 x half> %x) {
+; CHECK-LABEL: lrint_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    mov h4, v0.h[2]
+; CHECK-NEXT:    mov h3, v0.h[1]
+; CHECK-NEXT:    mov h7, v0.h[3]
+; CHECK-NEXT:    frintx h0, h0
+; CHECK-NEXT:    mov h2, v1.h[2]
+; CHECK-NEXT:    mov h5, v1.h[1]
+; CHECK-NEXT:    mov h6, v1.h[3]
+; CHECK-NEXT:    frintx h1, h1
+; CHECK-NEXT:    frintx h4, h4
+; CHECK-NEXT:    frintx h3, h3
+; CHECK-NEXT:    frintx h7, h7
+; CHECK-NEXT:    fcvtzs x9, h0
+; CHECK-NEXT:    frintx h2, h2
+; CHECK-NEXT:    frintx h5, h5
+; CHECK-NEXT:    frintx h6, h6
+; CHECK-NEXT:    fcvtzs x8, h1
+; CHECK-NEXT:    fcvtzs x12, h4
+; CHECK-NEXT:    fcvtzs x11, h3
+; CHECK-NEXT:    fcvtzs x15, h7
+; CHECK-NEXT:    fmov d0, x9
+; CHECK-NEXT:    fcvtzs x10, h2
+; CHECK-NEXT:    fcvtzs x13, h5
+; CHECK-NEXT:    fcvtzs x14, h6
+; CHECK-NEXT:    fmov d2, x8
+; CHECK-NEXT:    fmov d1, x12
+; CHECK-NEXT:    mov v0.d[1], x11
+; CHECK-NEXT:    fmov d3, x10
+; CHECK-NEXT:    mov v2.d[1], x13
+; CHECK-NEXT:    mov v1.d[1], x15
+; CHECK-NEXT:    mov v3.d[1], x14
+; CHECK-NEXT:    ret
+  %a = call <8 x i64> @llvm.lrint.v8i64.v8f16(<8 x half> %x)
+  ret <8 x i64> %a
+}
+declare <8 x i64> @llvm.lrint.v8i64.v8f16(<8 x half>)
+
+define <16 x i64> @lrint_v16i64_v16f16(<16 x half> %x) {
+; CHECK-LABEL: lrint_v16i64_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    mov h4, v0.h[1]
+; CHECK-NEXT:    frintx h5, h0
+; CHECK-NEXT:    mov h18, v0.h[2]
+; CHECK-NEXT:    mov h0, v0.h[3]
+; CHECK-NEXT:    frintx h6, h2
+; CHECK-NEXT:    mov h7, v2.h[1]
+; CHECK-NEXT:    mov h16, v2.h[2]
+; CHECK-NEXT:    mov h17, v3.h[2]
+; CHECK-NEXT:    frintx h19, h3
+; CHECK-NEXT:    frintx h4, h4
+; CHECK-NEXT:    fcvtzs x8, h5
+; CHECK-NEXT:    mov h5, v1.h[1]
+; CHECK-NEXT:    mov h2, v2.h[3]
+; CHECK-NEXT:    frintx h18, h18
+; CHECK-NEXT:    frintx h0, h0
+; CHECK-NEXT:    fcvtzs x9, h6
+; CHECK-NEXT:    frintx h6, h7
+; CHECK-NEXT:    frintx h7, h16
+; CHECK-NEXT:    mov h16, v1.h[2]
+; CHECK-NEXT:    frintx h17, h17
+; CHECK-NEXT:    fcvtzs x10, h19
+; CHECK-NEXT:    mov h19, v3.h[1]
+; CHECK-NEXT:    fcvtzs x11, h4
+; CHECK-NEXT:    mov h4, v1.h[3]
+; CHECK-NEXT:    mov h3, v3.h[3]
+; CHECK-NEXT:    frintx h1, h1
+; CHECK-NEXT:    frintx h5, h5
+; CHECK-NEXT:    fcvtzs x13, h7
+; CHECK-NEXT:    fcvtzs x12, h6
+; CHECK-NEXT:    fcvtzs x15, h18
+; CHECK-NEXT:    frintx h7, h16
+; CHECK-NEXT:    fcvtzs x14, h17
+; CHECK-NEXT:    frintx h16, h2
+; CHECK-NEXT:    frintx h17, h19
+; CHECK-NEXT:    frintx h4, h4
+; CHECK-NEXT:    fmov d2, x9
+; CHECK-NEXT:    frintx h19, h3
+; CHECK-NEXT:    fcvtzs x9, h1
+; CHECK-NEXT:    fmov d6, x10
+; CHECK-NEXT:    fmov d3, x13
+; CHECK-NEXT:    fcvtzs x13, h0
+; CHECK-NEXT:    fcvtzs x16, h5
+; CHECK-NEXT:    fcvtzs x10, h7
+; CHECK-NEXT:    fmov d7, x14
+; CHECK-NEXT:    fcvtzs x14, h16
+; CHECK-NEXT:    fcvtzs x17, h17
+; CHECK-NEXT:    fcvtzs x0, h4
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fcvtzs x18, h19
+; CHECK-NEXT:    fmov d1, x15
+; CHECK-NEXT:    fmov d4, x9
+; CHECK-NEXT:    mov v2.d[1], x12
+; CHECK-NEXT:    fmov d5, x10
+; CHECK-NEXT:    mov v0.d[1], x11
+; CHECK-NEXT:    mov v3.d[1], x14
+; CHECK-NEXT:    mov v1.d[1], x13
+; CHECK-NEXT:    mov v4.d[1], x16
+; CHECK-NEXT:    mov v6.d[1], x17
+; CHECK-NEXT:    mov v7.d[1], x18
+; CHECK-NEXT:    mov v5.d[1], x0
+; CHECK-NEXT:    ret
+  %a = call <16 x i64> @llvm.lrint.v16i64.v16f16(<16 x half> %x)
+  ret <16 x i64> %a
+}
+declare <16 x i64> @llvm.lrint.v16i64.v16f16(<16 x half>)
+
+define <32 x i64> @lrint_v32i64_v32f16(<32 x half> %x) {
+; CHECK-LABEL: lrint_v32i64_v32f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v4.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    ext v5.16b, v2.16b, v2.16b, #8
+; CHECK-NEXT:    ext v6.16b, v3.16b, v3.16b, #8
+; CHECK-NEXT:    ext v7.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    frintx h21, h1
+; CHECK-NEXT:    frintx h22, h2
+; CHECK-NEXT:    mov h26, v2.h[2]
+; CHECK-NEXT:    frintx h19, h0
+; CHECK-NEXT:    mov h27, v3.h[2]
+; CHECK-NEXT:    mov h20, v2.h[1]
+; CHECK-NEXT:    mov h18, v1.h[1]
+; CHECK-NEXT:    mov h16, v4.h[2]
+; CHECK-NEXT:    mov h17, v5.h[2]
+; CHECK-NEXT:    frintx h23, h5
+; CHECK-NEXT:    frintx h24, h6
+; CHECK-NEXT:    mov h25, v6.h[2]
+; CHECK-NEXT:    fcvtzs x9, h21
+; CHECK-NEXT:    fcvtzs x11, h22
+; CHECK-NEXT:    frintx h22, h7
+; CHECK-NEXT:    mov h21, v3.h[3]
+; CHECK-NEXT:    fcvtzs x10, h19
+; CHECK-NEXT:    frintx h27, h27
+; CHECK-NEXT:    frintx h20, h20
+; CHECK-NEXT:    frintx h16, h16
+; CHECK-NEXT:    frintx h17, h17
+; CHECK-NEXT:    fcvtzs x12, h23
+; CHECK-NEXT:    fcvtzs x13, h24
+; CHECK-NEXT:    frintx h23, h25
+; CHECK-NEXT:    frintx h25, h26
+; CHECK-NEXT:    mov h26, v3.h[1]
+; CHECK-NEXT:    mov h24, v2.h[3]
+; CHECK-NEXT:    fmov d19, x9
+; CHECK-NEXT:    fcvtzs x9, h22
+; CHECK-NEXT:    frintx h22, h3
+; CHECK-NEXT:    frintx h21, h21
+; CHECK-NEXT:    fcvtzs x14, h16
+; CHECK-NEXT:    fcvtzs x15, h17
+; CHECK-NEXT:    fmov d2, x12
+; CHECK-NEXT:    fmov d16, x13
+; CHECK-NEXT:    fcvtzs x12, h23
+; CHECK-NEXT:    fcvtzs x13, h25
+; CHECK-NEXT:    mov h23, v1.h[2]
+; CHECK-NEXT:    frintx h25, h26
+; CHECK-NEXT:    frintx h24, h24
+; CHECK-NEXT:    mov h1, v1.h[3]
+; CHECK-NEXT:    fmov d26, x11
+; CHECK-NEXT:    fcvtzs x11, h21
+; CHECK-NEXT:    fmov d3, x14
+; CHECK-NEXT:    fmov d17, x15
+; CHECK-NEXT:    fcvtzs x14, h22
+; CHECK-NEXT:    fcvtzs x15, h27
+; CHECK-NEXT:    mov h22, v0.h[2]
+; CHECK-NEXT:    frintx h18, h18
+; CHECK-NEXT:    frintx h21, h23
+; CHECK-NEXT:    fmov d23, x13
+; CHECK-NEXT:    fcvtzs x13, h25
+; CHECK-NEXT:    frintx h1, h1
+; CHECK-NEXT:    fmov d25, x14
+; CHECK-NEXT:    fcvtzs x14, h24
+; CHECK-NEXT:    fmov d24, x15
+; CHECK-NEXT:    frintx h22, h22
+; CHECK-NEXT:    fcvtzs x15, h18
+; CHECK-NEXT:    mov h18, v7.h[1]
+; CHECK-NEXT:    mov v25.d[1], x13
+; CHECK-NEXT:    fcvtzs x13, h21
+; CHECK-NEXT:    mov h21, v7.h[2]
+; CHECK-NEXT:    mov v24.d[1], x11
+; CHECK-NEXT:    fcvtzs x11, h20
+; CHECK-NEXT:    mov h20, v0.h[1]
+; CHECK-NEXT:    mov h0, v0.h[3]
+; CHECK-NEXT:    mov v23.d[1], x14
+; CHECK-NEXT:    fcvtzs x14, h1
+; CHECK-NEXT:    mov h1, v6.h[3]
+; CHECK-NEXT:    mov h6, v6.h[1]
+; CHECK-NEXT:    mov v19.d[1], x15
+; CHECK-NEXT:    mov h7, v7.h[3]
+; CHECK-NEXT:    stp q25, q24, [x8, #192]
+; CHECK-NEXT:    fmov d24, x13
+; CHECK-NEXT:    frintx h20, h20
+; CHECK-NEXT:    mov v26.d[1], x11
+; CHECK-NEXT:    fcvtzs x11, h22
+; CHECK-NEXT:    mov h22, v5.h[1]
+; CHECK-NEXT:    mov h5, v5.h[3]
+; CHECK-NEXT:    frintx h0, h0
+; CHECK-NEXT:    frintx h1, h1
+; CHECK-NEXT:    mov v24.d[1], x14
+; CHECK-NEXT:    mov h25, v4.h[3]
+; CHECK-NEXT:    frintx h6, h6
+; CHECK-NEXT:    stp q26, q23, [x8, #128]
+; CHECK-NEXT:    fmov d23, x12
+; CHECK-NEXT:    fcvtzs x12, h20
+; CHECK-NEXT:    mov h20, v4.h[1]
+; CHECK-NEXT:    frintx h5, h5
+; CHECK-NEXT:    fcvtzs x13, h0
+; CHECK-NEXT:    stp q19, q24, [x8, #64]
+; CHECK-NEXT:    frintx h22, h22
+; CHECK-NEXT:    fmov d0, x10
+; CHECK-NEXT:    fmov d19, x11
+; CHECK-NEXT:    frintx h4, h4
+; CHECK-NEXT:    fcvtzs x10, h1
+; CHECK-NEXT:    frintx h1, h21
+; CHECK-NEXT:    frintx h24, h25
+; CHECK-NEXT:    fcvtzs x11, h6
+; CHECK-NEXT:    frintx h20, h20
+; CHECK-NEXT:    frintx h6, h7
+; CHECK-NEXT:    fcvtzs x14, h5
+; CHECK-NEXT:    mov v19.d[1], x13
+; CHECK-NEXT:    frintx h5, h18
+; CHECK-NEXT:    fcvtzs x13, h22
+; CHECK-NEXT:    mov v0.d[1], x12
+; CHECK-NEXT:    fcvtzs x12, h4
+; CHECK-NEXT:    mov v23.d[1], x10
+; CHECK-NEXT:    fcvtzs x10, h1
+; CHECK-NEXT:    fcvtzs x15, h24
+; CHECK-NEXT:    mov v16.d[1], x11
+; CHECK-NEXT:    fcvtzs x11, h20
+; CHECK-NEXT:    mov v17.d[1], x14
+; CHECK-NEXT:    fcvtzs x14, h6
+; CHECK-NEXT:    mov v2.d[1], x13
+; CHECK-NEXT:    fcvtzs x13, h5
+; CHECK-NEXT:    fmov d4, x9
+; CHECK-NEXT:    stp q0, q19, [x8]
+; CHECK-NEXT:    fmov d0, x12
+; CHECK-NEXT:    stp q16, q23, [x8, #224]
+; CHECK-NEXT:    fmov d1, x10
+; CHECK-NEXT:    mov v3.d[1], x15
+; CHECK-NEXT:    stp q2, q17, [x8, #160]
+; CHECK-NEXT:    mov v0.d[1], x11
+; CHECK-NEXT:    mov v4.d[1], x13
+; CHECK-NEXT:    mov v1.d[1], x14
+; CHECK-NEXT:    stp q0, q3, [x8, #96]
+; CHECK-NEXT:    stp q4, q1, [x8, #32]
+; CHECK-NEXT:    ret
+  %a = call <32 x i64> @llvm.lrint.v32i64.v32f16(<32 x half> %x)
+  ret <32 x i64> %a
+}
+declare <32 x i64> @llvm.lrint.v32i64.v32f16(<32 x half>)
+
+define <1 x i64> @lrint_v1f32(<1 x float> %x) {
+; CHECK-LABEL: lrint_v1f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    frintx s0, s0
+; CHECK-NEXT:    fcvtzs x8, s0
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    ret
+  %a = call <1 x i64> @llvm.lrint.v1i64.v1f32(<1 x float> %x)
+  ret <1 x i64> %a
+}
+declare <1 x i64> @llvm.lrint.v1i64.v1f32(<1 x float>)
+
+define <2 x i64> @lrint_v2f32(<2 x float> %x) {
+; CHECK-LABEL: lrint_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov s1, v0.s[1]
+; CHECK-NEXT:    frintx s0, s0
+; CHECK-NEXT:    frintx s1, s1
+; CHECK-NEXT:    fcvtzs x8, s0
+; CHECK-NEXT:    fcvtzs x9, s1
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    mov v0.d[1], x9
+; CHECK-NEXT:    ret
+  %a = call <2 x i64> @llvm.lrint.v2i64.v2f32(<2 x float> %x)
+  ret <2 x i64> %a
+}
+declare <2 x i64> @llvm.lrint.v2i64.v2f32(<2 x float>)
+
+define <4 x i64> @lrint_v4f32(<4 x float> %x) {
+; CHECK-LABEL: lrint_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    mov s3, v0.s[1]
+; CHECK-NEXT:    frintx s0, s0
+; CHECK-NEXT:    mov s2, v1.s[1]
+; CHECK-NEXT:    frintx s1, s1
+; CHECK-NEXT:    frintx s3, s3
+; CHECK-NEXT:    fcvtzs x9, s0
+; CHECK-NEXT:    frintx s2, s2
+; CHECK-NEXT:    fcvtzs x8, s1
+; CHECK-NEXT:    fcvtzs x11, s3
+; CHECK-NEXT:    fmov d0, x9
+; CHECK-NEXT:    fcvtzs x10, s2
+; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    mov v0.d[1], x11
+; CHECK-NEXT:    mov v1.d[1], x10
+; CHECK-NEXT:    ret
+  %a = call <4 x i64> @llvm.lrint.v4i64.v4f32(<4 x float> %x)
+  ret <4 x i64> %a
+}
+declare <4 x i64> @llvm.lrint.v4i64.v4f32(<4 x float>)
+
+define <8 x i64> @lrint_v8f32(<8 x float> %x) {
+; CHECK-LABEL: lrint_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    mov s4, v0.s[1]
+; CHECK-NEXT:    mov s7, v1.s[1]
+; CHECK-NEXT:    frintx s0, s0
+; CHECK-NEXT:    frintx s1, s1
+; CHECK-NEXT:    mov s5, v2.s[1]
+; CHECK-NEXT:    mov s6, v3.s[1]
+; CHECK-NEXT:    frintx s2, s2
+; CHECK-NEXT:    frintx s3, s3
+; CHECK-NEXT:    frintx s4, s4
+; CHECK-NEXT:    frintx s7, s7
+; CHECK-NEXT:    fcvtzs x9, s0
+; CHECK-NEXT:    fcvtzs x12, s1
+; CHECK-NEXT:    frintx s5, s5
+; CHECK-NEXT:    frintx s6, s6
+; CHECK-NEXT:    fcvtzs x8, s2
+; CHECK-NEXT:    fcvtzs x10, s3
+; CHECK-NEXT:    fcvtzs x11, s4
+; CHECK-NEXT:    fcvtzs x15, s7
+; CHECK-NEXT:    fmov d0, x9
+; CHECK-NEXT:    fmov d2, x12
+; CHECK-NEXT:    fcvtzs x13, s5
+; CHECK-NEXT:    fcvtzs x14, s6
+; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    fmov d3, x10
+; CHECK-NEXT:    mov v0.d[1], x11
+; CHECK-NEXT:    mov v2.d[1], x15
+; CHECK-NEXT:    mov v1.d[1], x13
+; CHECK-NEXT:    mov v3.d[1], x14
+; CHECK-NEXT:    ret
+  %a = call <8 x i64> @llvm.lrint.v8i64.v8f32(<8 x float> %x)
+  ret <8 x i64> %a
+}
+declare <8 x i64> @llvm.lrint.v8i64.v8f32(<8 x float>)
+
+define <16 x i64> @lrint_v16i64_v16f32(<16 x float> %x) {
+; CHECK-LABEL: lrint_v16i64_v16f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v4.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v5.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    ext v6.16b, v2.16b, v2.16b, #8
+; CHECK-NEXT:    frintx s7, s0
+; CHECK-NEXT:    ext v16.16b, v3.16b, v3.16b, #8
+; CHECK-NEXT:    mov s0, v0.s[1]
+; CHECK-NEXT:    frintx s17, s4
+; CHECK-NEXT:    mov s4, v4.s[1]
+; CHECK-NEXT:    mov s18, v5.s[1]
+; CHECK-NEXT:    frintx s5, s5
+; CHECK-NEXT:    frintx s19, s6
+; CHECK-NEXT:    fcvtzs x8, s7
+; CHECK-NEXT:    frintx s7, s16
+; CHECK-NEXT:    mov s6, v6.s[1]
+; CHECK-NEXT:    mov s16, v16.s[1]
+; CHECK-NEXT:    frintx s0, s0
+; CHECK-NEXT:    frintx s4, s4
+; CHECK-NEXT:    fcvtzs x9, s17
+; CHECK-NEXT:    frintx s17, s1
+; CHECK-NEXT:    mov s1, v1.s[1]
+; CHECK-NEXT:    frintx s18, s18
+; CHECK-NEXT:    fcvtzs x10, s5
+; CHECK-NEXT:    mov s5, v2.s[1]
+; CHECK-NEXT:    fcvtzs x11, s19
+; CHECK-NEXT:    mov s19, v3.s[1]
+; CHECK-NEXT:    frintx s2, s2
+; CHECK-NEXT:    fcvtzs x12, s7
+; CHECK-NEXT:    frintx s6, s6
+; CHECK-NEXT:    fcvtzs x13, s4
+; CHECK-NEXT:    frintx s4, s3
+; CHECK-NEXT:    frintx s16, s16
+; CHECK-NEXT:    fcvtzs x14, s18
+; CHECK-NEXT:    frintx s18, s1
+; CHECK-NEXT:    fcvtzs x15, s17
+; CHECK-NEXT:    frintx s20, s5
+; CHECK-NEXT:    frintx s17, s19
+; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    fcvtzs x9, s2
+; CHECK-NEXT:    fmov d5, x11
+; CHECK-NEXT:    fmov d3, x10
+; CHECK-NEXT:    fcvtzs x11, s4
+; CHECK-NEXT:    fcvtzs x10, s0
+; CHECK-NEXT:    fmov d7, x12
+; CHECK-NEXT:    fcvtzs x12, s18
+; CHECK-NEXT:    fcvtzs x17, s6
+; CHECK-NEXT:    fcvtzs x18, s16
+; CHECK-NEXT:    fcvtzs x16, s20
+; CHECK-NEXT:    fcvtzs x0, s17
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fmov d2, x15
+; CHECK-NEXT:    fmov d4, x9
+; CHECK-NEXT:    mov v1.d[1], x13
+; CHECK-NEXT:    fmov d6, x11
+; CHECK-NEXT:    mov v3.d[1], x14
+; CHECK-NEXT:    mov v0.d[1], x10
+; CHECK-NEXT:    mov v5.d[1], x17
+; CHECK-NEXT:    mov v7.d[1], x18
+; CHECK-NEXT:    mov v2.d[1], x12
+; CHECK-NEXT:    mov v4.d[1], x16
+; CHECK-NEXT:    mov v6.d[1], x0
+; CHECK-NEXT:    ret
+  %a = call <16 x i64> @llvm.lrint.v16i64.v16f32(<16 x float> %x)
+  ret <16 x i64> %a
+}
+declare <16 x i64> @llvm.lrint.v16i64.v16f32(<16 x float>)
+
+define <32 x i64> @lrint_v32i64_v32f32(<32 x float> %x) {
+; CHECK-LABEL: lrint_v32i64_v32f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v16.16b, v2.16b, v2.16b, #8
+; CHECK-NEXT:    ext v20.16b, v5.16b, v5.16b, #8
+; CHECK-NEXT:    ext v17.16b, v3.16b, v3.16b, #8
+; CHECK-NEXT:    ext v18.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v19.16b, v4.16b, v4.16b, #8
+; CHECK-NEXT:    ext v21.16b, v6.16b, v6.16b, #8
+; CHECK-NEXT:    ext v22.16b, v7.16b, v7.16b, #8
+; CHECK-NEXT:    frintx s24, s16
+; CHECK-NEXT:    mov s28, v20.s[1]
+; CHECK-NEXT:    frintx s25, s17
+; CHECK-NEXT:    frintx s26, s18
+; CHECK-NEXT:    frintx s27, s19
+; CHECK-NEXT:    frintx s29, s20
+; CHECK-NEXT:    mov s30, v21.s[1]
+; CHECK-NEXT:    frintx s20, s21
+; CHECK-NEXT:    frintx s21, s22
+; CHECK-NEXT:    mov s23, v22.s[1]
+; CHECK-NEXT:    mov s19, v19.s[1]
+; CHECK-NEXT:    mov s17, v17.s[1]
+; CHECK-NEXT:    fcvtzs x12, s24
+; CHECK-NEXT:    frintx s24, s28
+; CHECK-NEXT:    fcvtzs x13, s25
+; CHECK-NEXT:    mov s25, v7.s[1]
+; CHECK-NEXT:    fcvtzs x9, s26
+; CHECK-NEXT:    fcvtzs x11, s27
+; CHECK-NEXT:    fcvtzs x14, s20
+; CHECK-NEXT:    fcvtzs x15, s21
+; CHECK-NEXT:    frintx s26, s1
+; CHECK-NEXT:    frintx s23, s23
+; CHECK-NEXT:    frintx s27, s7
+; CHECK-NEXT:    frintx s22, s30
+; CHECK-NEXT:    fmov d20, x12
+; CHECK-NEXT:    fcvtzs x12, s24
+; CHECK-NEXT:    mov s24, v6.s[1]
+; CHECK-NEXT:    frintx s25, s25
+; CHECK-NEXT:    frintx s6, s6
+; CHECK-NEXT:    fcvtzs x10, s29
+; CHECK-NEXT:    fmov d7, x11
+; CHECK-NEXT:    fmov d21, x13
+; CHECK-NEXT:    frintx s28, s5
+; CHECK-NEXT:    fcvtzs x11, s23
+; CHECK-NEXT:    fmov d23, x14
+; CHECK-NEXT:    fcvtzs x14, s26
+; CHECK-NEXT:    fmov d26, x15
+; CHECK-NEXT:    fcvtzs x15, s27
+; CHECK-NEXT:    frintx s24, s24
+; CHECK-NEXT:    mov s27, v5.s[1]
+; CHECK-NEXT:    fcvtzs x13, s22
+; CHECK-NEXT:    fcvtzs x17, s25
+; CHECK-NEXT:    frintx s25, s4
+; CHECK-NEXT:    fcvtzs x18, s6
+; CHECK-NEXT:    fmov d6, x10
+; CHECK-NEXT:    frintx s22, s2
+; CHECK-NEXT:    mov v26.d[1], x11
+; CHECK-NEXT:    fmov d5, x14
+; CHECK-NEXT:    fcvtzs x10, s24
+; CHECK-NEXT:    fmov d24, x15
+; CHECK-NEXT:    fcvtzs x14, s28
+; CHECK-NEXT:    frintx s27, s27
+; CHECK-NEXT:    mov v23.d[1], x13
+; CHECK-NEXT:    mov s4, v4.s[1]
+; CHECK-NEXT:    fcvtzs x13, s25
+; CHECK-NEXT:    fmov d25, x18
+; CHECK-NEXT:    mov s16, v16.s[1]
+; CHECK-NEXT:    mov v24.d[1], x17
+; CHECK-NEXT:    fcvtzs x16, s22
+; CHECK-NEXT:    frintx s22, s3
+; CHECK-NEXT:    mov s3, v3.s[1]
+; CHECK-NEXT:    frintx s19, s19
+; CHECK-NEXT:    mov s2, v2.s[1]
+; CHECK-NEXT:    mov v25.d[1], x10
+; CHECK-NEXT:    fcvtzs x10, s27
+; CHECK-NEXT:    frintx s4, s4
+; CHECK-NEXT:    mov v6.d[1], x12
+; CHECK-NEXT:    frintx s17, s17
+; CHECK-NEXT:    mov s18, v18.s[1]
+; CHECK-NEXT:    stp q24, q26, [x8, #224]
+; CHECK-NEXT:    fmov d24, x14
+; CHECK-NEXT:    fcvtzs x11, s22
+; CHECK-NEXT:    ext v22.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    mov s1, v1.s[1]
+; CHECK-NEXT:    frintx s3, s3
+; CHECK-NEXT:    stp q25, q23, [x8, #192]
+; CHECK-NEXT:    frintx s2, s2
+; CHECK-NEXT:    fcvtzs x12, s4
+; CHECK-NEXT:    mov v24.d[1], x10
+; CHECK-NEXT:    fcvtzs x10, s19
+; CHECK-NEXT:    mov s19, v0.s[1]
+; CHECK-NEXT:    frintx s16, s16
+; CHECK-NEXT:    frintx s0, s0
+; CHECK-NEXT:    fmov d4, x11
+; CHECK-NEXT:    mov s27, v22.s[1]
+; CHECK-NEXT:    frintx s22, s22
+; CHECK-NEXT:    frintx s1, s1
+; CHECK-NEXT:    fcvtzs x11, s3
+; CHECK-NEXT:    fcvtzs x14, s2
+; CHECK-NEXT:    frintx s2, s18
+; CHECK-NEXT:    stp q24, q6, [x8, #160]
+; CHECK-NEXT:    fmov d6, x13
+; CHECK-NEXT:    fcvtzs x13, s17
+; CHECK-NEXT:    frintx s17, s19
+; CHECK-NEXT:    fmov d23, x16
+; CHECK-NEXT:    mov v7.d[1], x10
+; CHECK-NEXT:    frintx s3, s27
+; CHECK-NEXT:    fcvtzs x10, s22
+; CHECK-NEXT:    fcvtzs x15, s1
+; CHECK-NEXT:    mov v6.d[1], x12
+; CHECK-NEXT:    fcvtzs x12, s16
+; CHECK-NEXT:    mov v4.d[1], x11
+; CHECK-NEXT:    mov v21.d[1], x13
+; CHECK-NEXT:    fcvtzs x13, s0
+; CHECK-NEXT:    mov v23.d[1], x14
+; CHECK-NEXT:    fcvtzs x14, s17
+; CHECK-NEXT:    fcvtzs x11, s3
+; CHECK-NEXT:    fmov d0, x10
+; CHECK-NEXT:    mov v5.d[1], x15
+; CHECK-NEXT:    stp q6, q7, [x8, #128]
+; CHECK-NEXT:    mov v20.d[1], x12
+; CHECK-NEXT:    fcvtzs x12, s2
+; CHECK-NEXT:    stp q4, q21, [x8, #96]
+; CHECK-NEXT:    fmov d1, x13
+; CHECK-NEXT:    fmov d2, x9
+; CHECK-NEXT:    mov v0.d[1], x11
+; CHECK-NEXT:    stp q23, q20, [x8, #64]
+; CHECK-NEXT:    mov v1.d[1], x14
+; CHECK-NEXT:    mov v2.d[1], x12
+; CHECK-NEXT:    stp q5, q0, [x8, #32]
+; CHECK-NEXT:    stp q1, q2, [x8]
+; CHECK-NEXT:    ret
+  %a = call <32 x i64> @llvm.lrint.v32i64.v32f32(<32 x float> %x)
+  ret <32 x i64> %a
+}
+declare <32 x i64> @llvm.lrint.v32i64.v32f32(<32 x float>)
+
+define <1 x i64> @lrint_v1f64(<1 x double> %x) {
+; CHECK-LABEL: lrint_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    fcvtzs x8, d0
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    ret
+  %a = call <1 x i64> @llvm.lrint.v1i64.v1f64(<1 x double> %x)
+  ret <1 x i64> %a
+}
+declare <1 x i64> @llvm.lrint.v1i64.v1f64(<1 x double>)
+
+define <2 x i64> @lrint_v2f64(<2 x double> %x) {
+; CHECK-LABEL: lrint_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov d1, v0.d[1]
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    frintx d1, d1
+; CHECK-NEXT:    fcvtzs x8, d0
+; CHECK-NEXT:    fcvtzs x9, d1
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    mov v0.d[1], x9
+; CHECK-NEXT:    ret
+  %a = call <2 x i64> @llvm.lrint.v2i64.v2f64(<2 x double> %x)
+  ret <2 x i64> %a
+}
+declare <2 x i64> @llvm.lrint.v2i64.v2f64(<2 x double>)
+
+define <4 x i64> @lrint_v4f64(<4 x double> %x) {
+; CHECK-LABEL: lrint_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov d2, v0.d[1]
+; CHECK-NEXT:    mov d3, v1.d[1]
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    frintx d1, d1
+; CHECK-NEXT:    frintx d2, d2
+; CHECK-NEXT:    frintx d3, d3
+; CHECK-NEXT:    fcvtzs x8, d0
+; CHECK-NEXT:    fcvtzs x9, d1
+; CHECK-NEXT:    fcvtzs x10, d2
+; CHECK-NEXT:    fcvtzs x11, d3
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    mov v0.d[1], x10
+; CHECK-NEXT:    mov v1.d[1], x11
+; CHECK-NEXT:    ret
+  %a = call <4 x i64> @llvm.lrint.v4i64.v4f64(<4 x double> %x)
+  ret <4 x i64> %a
+}
+declare <4 x i64> @llvm.lrint.v4i64.v4f64(<4 x double>)
+
+define <8 x i64> @lrint_v8f64(<8 x double> %x) {
+; CHECK-LABEL: lrint_v8f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov d4, v0.d[1]
+; CHECK-NEXT:    mov d5, v1.d[1]
+; CHECK-NEXT:    mov d6, v2.d[1]
+; CHECK-NEXT:    mov d7, v3.d[1]
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    frintx d1, d1
+; CHECK-NEXT:    frintx d2, d2
+; CHECK-NEXT:    frintx d3, d3
+; CHECK-NEXT:    frintx d4, d4
+; CHECK-NEXT:    frintx d5, d5
+; CHECK-NEXT:    frintx d6, d6
+; CHECK-NEXT:    frintx d7, d7
+; CHECK-NEXT:    fcvtzs x8, d0
+; CHECK-NEXT:    fcvtzs x9, d1
+; CHECK-NEXT:    fcvtzs x10, d2
+; CHECK-NEXT:    fcvtzs x11, d3
+; CHECK-NEXT:    fcvtzs x12, d4
+; CHECK-NEXT:    fcvtzs x13, d5
+; CHECK-NEXT:    fcvtzs x14, d6
+; CHECK-NEXT:    fcvtzs x15, d7
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    fmov d2, x10
+; CHECK-NEXT:    fmov d3, x11
+; CHECK-NEXT:    mov v0.d[1], x12
+; CHECK-NEXT:    mov v1.d[1], x13
+; CHECK-NEXT:    mov v2.d[1], x14
+; CHECK-NEXT:    mov v3.d[1], x15
+; CHECK-NEXT:    ret
+  %a = call <8 x i64> @llvm.lrint.v8i64.v8f64(<8 x double> %x)
+  ret <8 x i64> %a
+}
+declare <8 x i64> @llvm.lrint.v8i64.v8f64(<8 x double>)
+
+define <16 x i64> @lrint_v16f64(<16 x double> %x) {
+; CHECK-LABEL: lrint_v16f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov d16, v0.d[1]
+; CHECK-NEXT:    mov d17, v1.d[1]
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    frintx d1, d1
+; CHECK-NEXT:    frintx d18, d2
+; CHECK-NEXT:    mov d2, v2.d[1]
+; CHECK-NEXT:    frintx d19, d3
+; CHECK-NEXT:    mov d3, v3.d[1]
+; CHECK-NEXT:    frintx d16, d16
+; CHECK-NEXT:    frintx d17, d17
+; CHECK-NEXT:    fcvtzs x8, d0
+; CHECK-NEXT:    frintx d0, d4
+; CHECK-NEXT:    mov d4, v4.d[1]
+; CHECK-NEXT:    fcvtzs x9, d1
+; CHECK-NEXT:    frintx d1, d5
+; CHECK-NEXT:    mov d5, v5.d[1]
+; CHECK-NEXT:    fcvtzs x12, d18
+; CHECK-NEXT:    frintx d2, d2
+; CHECK-NEXT:    fcvtzs x13, d19
+; CHECK-NEXT:    frintx d18, d3
+; CHECK-NEXT:    fcvtzs x10, d16
+; CHECK-NEXT:    mov d16, v6.d[1]
+; CHECK-NEXT:    fcvtzs x11, d17
+; CHECK-NEXT:    mov d17, v7.d[1]
+; CHECK-NEXT:    frintx d6, d6
+; CHECK-NEXT:    frintx d7, d7
+; CHECK-NEXT:    frintx d4, d4
+; CHECK-NEXT:    frintx d5, d5
+; CHECK-NEXT:    fcvtzs x14, d0
+; CHECK-NEXT:    fcvtzs x15, d1
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    frintx d16, d16
+; CHECK-NEXT:    fcvtzs x9, d2
+; CHECK-NEXT:    fmov d2, x12
+; CHECK-NEXT:    frintx d17, d17
+; CHECK-NEXT:    fcvtzs x8, d6
+; CHECK-NEXT:    fcvtzs x12, d7
+; CHECK-NEXT:    fmov d3, x13
+; CHECK-NEXT:    fcvtzs x13, d18
+; CHECK-NEXT:    fcvtzs x16, d4
+; CHECK-NEXT:    fcvtzs x17, d5
+; CHECK-NEXT:    fmov d4, x14
+; CHECK-NEXT:    fmov d5, x15
+; CHECK-NEXT:    fcvtzs x18, d16
+; CHECK-NEXT:    mov v0.d[1], x10
+; CHECK-NEXT:    mov v1.d[1], x11
+; CHECK-NEXT:    fcvtzs x0, d17
+; CHECK-NEXT:    fmov d6, x8
+; CHECK-NEXT:    fmov d7, x12
+; CHECK-NEXT:    mov v2.d[1], x9
+; CHECK-NEXT:    mov v3.d[1], x13
+; CHECK-NEXT:    mov v4.d[1], x16
+; CHECK-NEXT:    mov v5.d[1], x17
+; CHECK-NEXT:    mov v6.d[1], x18
+; CHECK-NEXT:    mov v7.d[1], x0
+; CHECK-NEXT:    ret
+  %a = call <16 x i64> @llvm.lrint.v16i64.v16f64(<16 x double> %x)
+  ret <16 x i64> %a
+}
+declare <16 x i64> @llvm.lrint.v16i64.v16f64(<16 x double>)
+
+define <32 x i64> @lrint_v32f64(<32 x double> %x) {
+; CHECK-LABEL: lrint_v32f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx d20, d0
+; CHECK-NEXT:    frintx d22, d3
+; CHECK-NEXT:    frintx d21, d4
+; CHECK-NEXT:    ldp q19, q18, [sp, #64]
+; CHECK-NEXT:    frintx d23, d5
+; CHECK-NEXT:    ldp q27, q26, [sp, #96]
+; CHECK-NEXT:    mov d4, v4.d[1]
+; CHECK-NEXT:    ldp q16, q17, [sp, #32]
+; CHECK-NEXT:    mov d5, v5.d[1]
+; CHECK-NEXT:    fcvtzs x9, d20
+; CHECK-NEXT:    frintx d20, d6
+; CHECK-NEXT:    fcvtzs x11, d22
+; CHECK-NEXT:    frintx d22, d19
+; CHECK-NEXT:    fcvtzs x12, d21
+; CHECK-NEXT:    fcvtzs x10, d23
+; CHECK-NEXT:    mov d21, v26.d[1]
+; CHECK-NEXT:    frintx d23, d27
+; CHECK-NEXT:    mov d27, v27.d[1]
+; CHECK-NEXT:    frintx d24, d16
+; CHECK-NEXT:    mov d19, v19.d[1]
+; CHECK-NEXT:    frintx d25, d17
+; CHECK-NEXT:    fcvtzs x13, d20
+; CHECK-NEXT:    mov d20, v18.d[1]
+; CHECK-NEXT:    frintx d18, d18
+; CHECK-NEXT:    fcvtzs x16, d22
+; CHECK-NEXT:    frintx d22, d26
+; CHECK-NEXT:    mov d16, v16.d[1]
+; CHECK-NEXT:    frintx d21, d21
+; CHECK-NEXT:    fcvtzs x17, d23
+; CHECK-NEXT:    frintx d23, d27
+; CHECK-NEXT:    fcvtzs x14, d24
+; CHECK-NEXT:    frintx d26, d19
+; CHECK-NEXT:    fmov d19, x11
+; CHECK-NEXT:    frintx d20, d20
+; CHECK-NEXT:    mov d27, v17.d[1]
+; CHECK-NEXT:    fcvtzs x15, d25
+; CHECK-NEXT:    ldp q25, q24, [sp]
+; CHECK-NEXT:    fcvtzs x11, d22
+; CHECK-NEXT:    fmov d17, x12
+; CHECK-NEXT:    fcvtzs x12, d21
+; CHECK-NEXT:    fcvtzs x0, d23
+; CHECK-NEXT:    fmov d23, x14
+; CHECK-NEXT:    fcvtzs x14, d18
+; CHECK-NEXT:    fmov d18, x17
+; CHECK-NEXT:    fcvtzs x17, d20
+; CHECK-NEXT:    frintx d21, d7
+; CHECK-NEXT:    fcvtzs x18, d26
+; CHECK-NEXT:    fmov d20, x11
+; CHECK-NEXT:    frintx d22, d25
+; CHECK-NEXT:    frintx d26, d27
+; CHECK-NEXT:    frintx d16, d16
+; CHECK-NEXT:    mov v18.d[1], x0
+; CHECK-NEXT:    mov d25, v25.d[1]
+; CHECK-NEXT:    mov d7, v7.d[1]
+; CHECK-NEXT:    mov d6, v6.d[1]
+; CHECK-NEXT:    mov d0, v0.d[1]
+; CHECK-NEXT:    mov v20.d[1], x12
+; CHECK-NEXT:    fcvtzs x11, d21
+; CHECK-NEXT:    fmov d21, x15
+; CHECK-NEXT:    fcvtzs x12, d22
+; CHECK-NEXT:    fmov d22, x16
+; CHECK-NEXT:    fcvtzs x15, d26
+; CHECK-NEXT:    fmov d26, x14
+; CHECK-NEXT:    fcvtzs x14, d16
+; CHECK-NEXT:    frintx d25, d25
+; CHECK-NEXT:    frintx d7, d7
+; CHECK-NEXT:    mov d16, v1.d[1]
+; CHECK-NEXT:    mov d3, v3.d[1]
+; CHECK-NEXT:    stp q18, q20, [x8, #224]
+; CHECK-NEXT:    mov d18, v24.d[1]
+; CHECK-NEXT:    mov v22.d[1], x18
+; CHECK-NEXT:    mov v26.d[1], x17
+; CHECK-NEXT:    frintx d24, d24
+; CHECK-NEXT:    mov v21.d[1], x15
+; CHECK-NEXT:    mov v23.d[1], x14
+; CHECK-NEXT:    frintx d20, d2
+; CHECK-NEXT:    mov d2, v2.d[1]
+; CHECK-NEXT:    frintx d6, d6
+; CHECK-NEXT:    frintx d5, d5
+; CHECK-NEXT:    frintx d4, d4
+; CHECK-NEXT:    frintx d18, d18
+; CHECK-NEXT:    frintx d1, d1
+; CHECK-NEXT:    frintx d3, d3
+; CHECK-NEXT:    stp q22, q26, [x8, #192]
+; CHECK-NEXT:    fmov d22, x10
+; CHECK-NEXT:    fcvtzs x10, d24
+; CHECK-NEXT:    stp q23, q21, [x8, #160]
+; CHECK-NEXT:    fmov d21, x11
+; CHECK-NEXT:    fmov d24, x13
+; CHECK-NEXT:    frintx d2, d2
+; CHECK-NEXT:    fcvtzs x13, d6
+; CHECK-NEXT:    frintx d6, d16
+; CHECK-NEXT:    fcvtzs x11, d18
+; CHECK-NEXT:    fmov d18, x12
+; CHECK-NEXT:    fcvtzs x12, d25
+; CHECK-NEXT:    fmov d23, x10
+; CHECK-NEXT:    fcvtzs x10, d7
+; CHECK-NEXT:    fcvtzs x14, d5
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    fcvtzs x15, d3
+; CHECK-NEXT:    mov v24.d[1], x13
+; CHECK-NEXT:    fcvtzs x13, d2
+; CHECK-NEXT:    fmov d2, x9
+; CHECK-NEXT:    mov v23.d[1], x11
+; CHECK-NEXT:    fcvtzs x11, d4
+; CHECK-NEXT:    mov v18.d[1], x12
+; CHECK-NEXT:    fcvtzs x12, d20
+; CHECK-NEXT:    mov v21.d[1], x10
+; CHECK-NEXT:    fcvtzs x10, d1
+; CHECK-NEXT:    mov v22.d[1], x14
+; CHECK-NEXT:    fcvtzs x14, d6
+; CHECK-NEXT:    mov v19.d[1], x15
+; CHECK-NEXT:    stp q18, q23, [x8, #128]
+; CHECK-NEXT:    mov v17.d[1], x11
+; CHECK-NEXT:    fcvtzs x11, d0
+; CHECK-NEXT:    stp q24, q21, [x8, #96]
+; CHECK-NEXT:    fmov d0, x12
+; CHECK-NEXT:    fmov d1, x10
+; CHECK-NEXT:    stp q17, q22, [x8, #64]
+; CHECK-NEXT:    mov v0.d[1], x13
+; CHECK-NEXT:    mov v1.d[1], x14
+; CHECK-NEXT:    mov v2.d[1], x11
+; CHECK-NEXT:    stp q0, q19, [x8, #32]
+; CHECK-NEXT:    stp q2, q1, [x8]
+; CHECK-NEXT:    ret
+  %a = call <32 x i64> @llvm.lrint.v32i64.v32f64(<32 x double> %x)
+  ret <32 x i64> %a
+}
+declare <32 x i64> @llvm.lrint.v32i64.v32f64(<32 x double>)
diff --git a/llvm/test/CodeGen/AArch64/sve-llrint.ll b/llvm/test/CodeGen/AArch64/sve-llrint.ll
new file mode 100644
index 0000000000000..11d45b3a43521
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-llrint.ll
@@ -0,0 +1,492 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64 -mattr=+sve | FileCheck %s
+
+define <vscale x 1 x i64> @llrint_v1i64_v1f16(<vscale x 1 x half> %x) {
+; CHECK-LABEL: llrint_v1i64_v1f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.h
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f16(<vscale x 1 x half> %x)
+  ret <vscale x 1 x i64> %a
+}
+declare <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f16(<vscale x 1 x half>)
+
+define <vscale x 2 x i64> @llrint_v2i64_v2f16(<vscale x 2 x half> %x) {
+; CHECK-LABEL: llrint_v2i64_v2f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.h
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f16(<vscale x 2 x half> %x)
+  ret <vscale x 2 x i64> %a
+}
+declare <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f16(<vscale x 2 x half>)
+
+define <vscale x 4 x i64> @llrint_v4i64_v4f16(<vscale x 4 x half> %x) {
+; CHECK-LABEL: llrint_v4i64_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uunpklo z1.d, z0.s
+; CHECK-NEXT:    uunpkhi z2.d, z0.s
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    movprfx z0, z1
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z1.h
+; CHECK-NEXT:    movprfx z1, z2
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z2.h
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f16(<vscale x 4 x half> %x)
+  ret <vscale x 4 x i64> %a
+}
+declare <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f16(<vscale x 4 x half>)
+
+define <vscale x 8 x i64> @llrint_v8i64_v8f16(<vscale x 8 x half> %x) {
+; CHECK-LABEL: llrint_v8i64_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uunpklo z1.s, z0.h
+; CHECK-NEXT:    uunpkhi z0.s, z0.h
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    uunpklo z2.d, z1.s
+; CHECK-NEXT:    uunpkhi z1.d, z1.s
+; CHECK-NEXT:    uunpklo z3.d, z0.s
+; CHECK-NEXT:    uunpkhi z4.d, z0.s
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.h
+; CHECK-NEXT:    movprfx z0, z2
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z2.h
+; CHECK-NEXT:    movprfx z2, z3
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z3.h
+; CHECK-NEXT:    movprfx z3, z4
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z4.h
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f16(<vscale x 8 x half> %x)
+  ret <vscale x 8 x i64> %a
+}
+declare <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f16(<vscale x 8 x half>)
+
+define <vscale x 16 x i64> @llrint_v16i64_v16f16(<vscale x 16 x half> %x) {
+; CHECK-LABEL: llrint_v16i64_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uunpklo z2.s, z0.h
+; CHECK-NEXT:    uunpkhi z0.s, z0.h
+; CHECK-NEXT:    uunpklo z3.s, z1.h
+; CHECK-NEXT:    uunpkhi z1.s, z1.h
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    uunpklo z4.d, z2.s
+; CHECK-NEXT:    uunpkhi z2.d, z2.s
+; CHECK-NEXT:    uunpklo z5.d, z0.s
+; CHECK-NEXT:    uunpkhi z6.d, z0.s
+; CHECK-NEXT:    uunpklo z7.d, z3.s
+; CHECK-NEXT:    uunpkhi z24.d, z3.s
+; CHECK-NEXT:    uunpklo z25.d, z1.s
+; CHECK-NEXT:    uunpkhi z26.d, z1.s
+; CHECK-NEXT:    movprfx z0, z4
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z4.h
+; CHECK-NEXT:    movprfx z1, z2
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z2.h
+; CHECK-NEXT:    movprfx z2, z5
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z5.h
+; CHECK-NEXT:    movprfx z3, z6
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z6.h
+; CHECK-NEXT:    movprfx z4, z7
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z7.h
+; CHECK-NEXT:    movprfx z5, z24
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z24.h
+; CHECK-NEXT:    movprfx z6, z25
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z25.h
+; CHECK-NEXT:    movprfx z7, z26
+; CHECK-NEXT:    fcvtzs z7.d, p0/m, z26.h
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f16(<vscale x 16 x half> %x)
+  ret <vscale x 16 x i64> %a
+}
+declare <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f16(<vscale x 16 x half>)
+
+define <vscale x 32 x i64> @llrint_v32i64_v32f16(<vscale x 32 x half> %x) {
+; CHECK-LABEL: llrint_v32i64_v32f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uunpkhi z4.s, z3.h
+; CHECK-NEXT:    uunpklo z3.s, z3.h
+; CHECK-NEXT:    rdvl x9, #15
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    uunpkhi z7.s, z2.h
+; CHECK-NEXT:    uunpklo z2.s, z2.h
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    uunpklo z24.s, z0.h
+; CHECK-NEXT:    uunpkhi z0.s, z0.h
+; CHECK-NEXT:    uunpkhi z5.d, z4.s
+; CHECK-NEXT:    uunpklo z4.d, z4.s
+; CHECK-NEXT:    uunpkhi z6.d, z3.s
+; CHECK-NEXT:    uunpklo z3.d, z3.s
+; CHECK-NEXT:    uunpkhi z25.d, z2.s
+; CHECK-NEXT:    uunpklo z2.d, z2.s
+; CHECK-NEXT:    uunpklo z26.d, z0.s
+; CHECK-NEXT:    uunpkhi z0.d, z0.s
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z5.h
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z4.h
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.h
+; CHECK-NEXT:    fcvtzs z25.d, p0/m, z25.h
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.h
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.h
+; CHECK-NEXT:    st1b { z5.b }, p1, [x8, x9]
+; CHECK-NEXT:    rdvl x9, #14
+; CHECK-NEXT:    movprfx z5, z6
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z6.h
+; CHECK-NEXT:    uunpkhi z6.d, z7.s
+; CHECK-NEXT:    st1b { z4.b }, p1, [x8, x9]
+; CHECK-NEXT:    uunpkhi z4.s, z1.h
+; CHECK-NEXT:    uunpklo z7.d, z7.s
+; CHECK-NEXT:    rdvl x9, #13
+; CHECK-NEXT:    uunpklo z1.s, z1.h
+; CHECK-NEXT:    st1b { z5.b }, p1, [x8, x9]
+; CHECK-NEXT:    rdvl x9, #12
+; CHECK-NEXT:    movprfx z5, z6
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z6.h
+; CHECK-NEXT:    st1b { z3.b }, p1, [x8, x9]
+; CHECK-NEXT:    uunpkhi z3.d, z4.s
+; CHECK-NEXT:    uunpklo z4.d, z4.s
+; CHECK-NEXT:    fcvtzs z7.d, p0/m, z7.h
+; CHECK-NEXT:    rdvl x9, #11
+; CHECK-NEXT:    uunpkhi z6.d, z24.s
+; CHECK-NEXT:    uunpkhi z27.d, z1.s
+; CHECK-NEXT:    uunpklo z1.d, z1.s
+; CHECK-NEXT:    st1b { z5.b }, p1, [x8, x9]
+; CHECK-NEXT:    rdvl x9, #10
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.h
+; CHECK-NEXT:    st1b { z7.b }, p1, [x8, x9]
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z4.h
+; CHECK-NEXT:    uunpklo z7.d, z24.s
+; CHECK-NEXT:    rdvl x9, #9
+; CHECK-NEXT:    movprfx z5, z27
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z27.h
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.h
+; CHECK-NEXT:    st1b { z25.b }, p1, [x8, x9]
+; CHECK-NEXT:    rdvl x9, #8
+; CHECK-NEXT:    st1b { z2.b }, p1, [x8, x9]
+; CHECK-NEXT:    movprfx z2, z26
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z26.h
+; CHECK-NEXT:    st1d { z3.d }, p0, [x8, #7, mul vl]
+; CHECK-NEXT:    movprfx z3, z6
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z6.h
+; CHECK-NEXT:    st1d { z4.d }, p0, [x8, #6, mul vl]
+; CHECK-NEXT:    movprfx z4, z7
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z7.h
+; CHECK-NEXT:    st1d { z5.d }, p0, [x8, #5, mul vl]
+; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #4, mul vl]
+; CHECK-NEXT:    st1d { z0.d }, p0, [x8, #3, mul vl]
+; CHECK-NEXT:    st1d { z2.d }, p0, [x8, #2, mul vl]
+; CHECK-NEXT:    st1d { z3.d }, p0, [x8, #1, mul vl]
+; CHECK-NEXT:    st1d { z4.d }, p0, [x8]
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv32f16(<vscale x 32 x half> %x)
+  ret <vscale x 32 x i64> %a
+}
+declare <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv32f16(<vscale x 32 x half>)
+
+define <vscale x 1 x i64> @llrint_v1i64_v1f32(<vscale x 1 x float> %x) {
+; CHECK-LABEL: llrint_v1i64_v1f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.s
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f32(<vscale x 1 x float> %x)
+  ret <vscale x 1 x i64> %a
+}
+declare <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f32(<vscale x 1 x float>)
+
+define <vscale x 2 x i64> @llrint_v2i64_v2f32(<vscale x 2 x float> %x) {
+; CHECK-LABEL: llrint_v2i64_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.s
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f32(<vscale x 2 x float> %x)
+  ret <vscale x 2 x i64> %a
+}
+declare <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f32(<vscale x 2 x float>)
+
+define <vscale x 4 x i64> @llrint_v4i64_v4f32(<vscale x 4 x float> %x) {
+; CHECK-LABEL: llrint_v4i64_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uunpklo z1.d, z0.s
+; CHECK-NEXT:    uunpkhi z2.d, z0.s
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    movprfx z0, z1
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z1.s
+; CHECK-NEXT:    movprfx z1, z2
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z2.s
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f32(<vscale x 4 x float> %x)
+  ret <vscale x 4 x i64> %a
+}
+declare <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f32(<vscale x 4 x float>)
+
+define <vscale x 8 x i64> @llrint_v8i64_v8f32(<vscale x 8 x float> %x) {
+; CHECK-LABEL: llrint_v8i64_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uunpklo z2.d, z0.s
+; CHECK-NEXT:    uunpkhi z3.d, z0.s
+; CHECK-NEXT:    uunpklo z4.d, z1.s
+; CHECK-NEXT:    uunpkhi z5.d, z1.s
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    movprfx z0, z2
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z2.s
+; CHECK-NEXT:    movprfx z1, z3
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z3.s
+; CHECK-NEXT:    movprfx z2, z4
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z4.s
+; CHECK-NEXT:    movprfx z3, z5
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z5.s
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f32(<vscale x 8 x float> %x)
+  ret <vscale x 8 x i64> %a
+}
+declare <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f32(<vscale x 8 x float>)
+
+define <vscale x 16 x i64> @llrint_v16i64_v16f32(<vscale x 16 x float> %x) {
+; CHECK-LABEL: llrint_v16i64_v16f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uunpklo z4.d, z0.s
+; CHECK-NEXT:    uunpkhi z5.d, z0.s
+; CHECK-NEXT:    uunpklo z6.d, z1.s
+; CHECK-NEXT:    uunpkhi z7.d, z1.s
+; CHECK-NEXT:    uunpklo z24.d, z2.s
+; CHECK-NEXT:    uunpkhi z25.d, z2.s
+; CHECK-NEXT:    uunpklo z26.d, z3.s
+; CHECK-NEXT:    uunpkhi z27.d, z3.s
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    movprfx z0, z4
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z4.s
+; CHECK-NEXT:    movprfx z1, z5
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z5.s
+; CHECK-NEXT:    movprfx z2, z6
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z6.s
+; CHECK-NEXT:    movprfx z3, z7
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z7.s
+; CHECK-NEXT:    movprfx z4, z24
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z24.s
+; CHECK-NEXT:    movprfx z5, z25
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z25.s
+; CHECK-NEXT:    movprfx z6, z26
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z26.s
+; CHECK-NEXT:    movprfx z7, z27
+; CHECK-NEXT:    fcvtzs z7.d, p0/m, z27.s
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f32(<vscale x 16 x float> %x)
+  ret <vscale x 16 x i64> %a
+}
+declare <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f32(<vscale x 16 x float>)
+
+define <vscale x 32 x i64> @llrint_v32i64_v32f32(<vscale x 32 x float> %x) {
+; CHECK-LABEL: llrint_v32i64_v32f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uunpkhi z24.d, z7.s
+; CHECK-NEXT:    uunpklo z7.d, z7.s
+; CHECK-NEXT:    rdvl x9, #15
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    uunpkhi z27.d, z6.s
+; CHECK-NEXT:    uunpklo z6.d, z6.s
+; CHECK-NEXT:    uunpkhi z30.d, z5.s
+; CHECK-NEXT:    uunpklo z5.d, z5.s
+; CHECK-NEXT:    uunpkhi z31.d, z4.s
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    uunpklo z29.d, z3.s
+; CHECK-NEXT:    uunpkhi z3.d, z3.s
+; CHECK-NEXT:    fcvtzs z24.d, p0/m, z24.s
+; CHECK-NEXT:    fcvtzs z7.d, p0/m, z7.s
+; CHECK-NEXT:    uunpklo z4.d, z4.s
+; CHECK-NEXT:    fcvtzs z27.d, p0/m, z27.s
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z6.s
+; CHECK-NEXT:    uunpkhi z25.d, z0.s
+; CHECK-NEXT:    fcvtzs z30.d, p0/m, z30.s
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z5.s
+; CHECK-NEXT:    uunpklo z26.d, z1.s
+; CHECK-NEXT:    uunpkhi z1.d, z1.s
+; CHECK-NEXT:    uunpklo z28.d, z2.s
+; CHECK-NEXT:    uunpkhi z2.d, z2.s
+; CHECK-NEXT:    st1b { z24.b }, p1, [x8, x9]
+; CHECK-NEXT:    rdvl x9, #14
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z4.s
+; CHECK-NEXT:    st1b { z7.b }, p1, [x8, x9]
+; CHECK-NEXT:    rdvl x9, #13
+; CHECK-NEXT:    movprfx z7, z31
+; CHECK-NEXT:    fcvtzs z7.d, p0/m, z31.s
+; CHECK-NEXT:    st1b { z27.b }, p1, [x8, x9]
+; CHECK-NEXT:    rdvl x9, #12
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.s
+; CHECK-NEXT:    st1b { z6.b }, p1, [x8, x9]
+; CHECK-NEXT:    rdvl x9, #11
+; CHECK-NEXT:    movprfx z6, z29
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z29.s
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    st1b { z30.b }, p1, [x8, x9]
+; CHECK-NEXT:    rdvl x9, #10
+; CHECK-NEXT:    st1b { z5.b }, p1, [x8, x9]
+; CHECK-NEXT:    rdvl x9, #9
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.s
+; CHECK-NEXT:    st1b { z7.b }, p1, [x8, x9]
+; CHECK-NEXT:    rdvl x9, #8
+; CHECK-NEXT:    movprfx z5, z28
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z28.s
+; CHECK-NEXT:    st1b { z4.b }, p1, [x8, x9]
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.s
+; CHECK-NEXT:    movprfx z4, z25
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z25.s
+; CHECK-NEXT:    st1d { z3.d }, p0, [x8, #7, mul vl]
+; CHECK-NEXT:    movprfx z3, z26
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z26.s
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.s
+; CHECK-NEXT:    st1d { z6.d }, p0, [x8, #6, mul vl]
+; CHECK-NEXT:    st1d { z2.d }, p0, [x8, #5, mul vl]
+; CHECK-NEXT:    st1d { z5.d }, p0, [x8, #4, mul vl]
+; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #3, mul vl]
+; CHECK-NEXT:    st1d { z3.d }, p0, [x8, #2, mul vl]
+; CHECK-NEXT:    st1d { z4.d }, p0, [x8, #1, mul vl]
+; CHECK-NEXT:    st1d { z0.d }, p0, [x8]
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv32f32(<vscale x 32 x float> %x)
+  ret <vscale x 32 x i64> %a
+}
+declare <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv32f32(<vscale x 32 x float>)
+
+define <vscale x 1 x i64> @llrint_v1i64_v1f64(<vscale x 1 x double> %x) {
+; CHECK-LABEL: llrint_v1i64_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f64(<vscale x 1 x double> %x)
+  ret <vscale x 1 x i64> %a
+}
+declare <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f64(<vscale x 1 x double>)
+
+define <vscale x 2 x i64> @llrint_v2i64_v2f64(<vscale x 2 x double> %x) {
+; CHECK-LABEL: llrint_v2i64_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f64(<vscale x 2 x double> %x)
+  ret <vscale x 2 x i64> %a
+}
+declare <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f64(<vscale x 2 x double>)
+
+define <vscale x 4 x i64> @llrint_v4i64_v4f64(<vscale x 4 x double> %x) {
+; CHECK-LABEL: llrint_v4i64_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.d
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f64(<vscale x 4 x double> %x)
+  ret <vscale x 4 x i64> %a
+}
+declare <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f64(<vscale x 4 x double>)
+
+define <vscale x 8 x i64> @llrint_v8i64_v8f64(<vscale x 8 x double> %x) {
+; CHECK-LABEL: llrint_v8i64_v8f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.d
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.d
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.d
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f64(<vscale x 8 x double> %x)
+  ret <vscale x 8 x i64> %a
+}
+declare <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f64(<vscale x 8 x double>)
+
+define <vscale x 16 x i64> @llrint_v16f64(<vscale x 16 x double> %x) {
+; CHECK-LABEL: llrint_v16f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.d
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.d
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.d
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z4.d
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z5.d
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z6.d
+; CHECK-NEXT:    fcvtzs z7.d, p0/m, z7.d
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f64(<vscale x 16 x double> %x)
+  ret <vscale x 16 x i64> %a
+}
+declare <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f64(<vscale x 16 x double>)
+
+define <vscale x 32 x i64> @llrint_v32f64(<vscale x 32 x double> %x) {
+; CHECK-LABEL: llrint_v32f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    rdvl x9, #15
+; CHECK-NEXT:    rdvl x10, #14
+; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    rdvl x11, #13
+; CHECK-NEXT:    rdvl x12, #12
+; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, x9]
+; CHECK-NEXT:    rdvl x13, #11
+; CHECK-NEXT:    rdvl x14, #10
+; CHECK-NEXT:    ld1b { z1.b }, p0/z, [x0, x10]
+; CHECK-NEXT:    ld1b { z2.b }, p0/z, [x0, x11]
+; CHECK-NEXT:    ld1b { z3.b }, p0/z, [x0, x12]
+; CHECK-NEXT:    ld1b { z4.b }, p0/z, [x0, x13]
+; CHECK-NEXT:    ld1b { z5.b }, p0/z, [x0, x14]
+; CHECK-NEXT:    rdvl x15, #9
+; CHECK-NEXT:    fcvtzs z0.d, p1/m, z0.d
+; CHECK-NEXT:    rdvl x16, #8
+; CHECK-NEXT:    ld1b { z6.b }, p0/z, [x0, x15]
+; CHECK-NEXT:    ld1b { z7.b }, p0/z, [x0, x16]
+; CHECK-NEXT:    ld1d { z24.d }, p1/z, [x0, #7, mul vl]
+; CHECK-NEXT:    ld1d { z25.d }, p1/z, [x0, #6, mul vl]
+; CHECK-NEXT:    ld1d { z26.d }, p1/z, [x0, #5, mul vl]
+; CHECK-NEXT:    fcvtzs z1.d, p1/m, z1.d
+; CHECK-NEXT:    ld1d { z27.d }, p1/z, [x0, #4, mul vl]
+; CHECK-NEXT:    ld1d { z28.d }, p1/z, [x0, #3, mul vl]
+; CHECK-NEXT:    fcvtzs z2.d, p1/m, z2.d
+; CHECK-NEXT:    ld1d { z29.d }, p1/z, [x0, #2, mul vl]
+; CHECK-NEXT:    ld1d { z30.d }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT:    fcvtzs z3.d, p1/m, z3.d
+; CHECK-NEXT:    ld1d { z31.d }, p1/z, [x0]
+; CHECK-NEXT:    fcvtzs z4.d, p1/m, z4.d
+; CHECK-NEXT:    st1b { z0.b }, p0, [x8, x9]
+; CHECK-NEXT:    movprfx z0, z5
+; CHECK-NEXT:    fcvtzs z0.d, p1/m, z5.d
+; CHECK-NEXT:    st1b { z1.b }, p0, [x8, x10]
+; CHECK-NEXT:    movprfx z1, z6
+; CHECK-NEXT:    fcvtzs z1.d, p1/m, z6.d
+; CHECK-NEXT:    st1b { z2.b }, p0, [x8, x11]
+; CHECK-NEXT:    movprfx z2, z7
+; CHECK-NEXT:    fcvtzs z2.d, p1/m, z7.d
+; CHECK-NEXT:    st1b { z3.b }, p0, [x8, x12]
+; CHECK-NEXT:    movprfx z3, z24
+; CHECK-NEXT:    fcvtzs z3.d, p1/m, z24.d
+; CHECK-NEXT:    st1b { z4.b }, p0, [x8, x13]
+; CHECK-NEXT:    movprfx z4, z25
+; CHECK-NEXT:    fcvtzs z4.d, p1/m, z25.d
+; CHECK-NEXT:    st1b { z0.b }, p0, [x8, x14]
+; CHECK-NEXT:    movprfx z0, z26
+; CHECK-NEXT:    fcvtzs z0.d, p1/m, z26.d
+; CHECK-NEXT:    st1b { z1.b }, p0, [x8, x15]
+; CHECK-NEXT:    movprfx z1, z27
+; CHECK-NEXT:    fcvtzs z1.d, p1/m, z27.d
+; CHECK-NEXT:    st1b { z2.b }, p0, [x8, x16]
+; CHECK-NEXT:    movprfx z2, z28
+; CHECK-NEXT:    fcvtzs z2.d, p1/m, z28.d
+; CHECK-NEXT:    st1d { z3.d }, p1, [x8, #7, mul vl]
+; CHECK-NEXT:    movprfx z3, z29
+; CHECK-NEXT:    fcvtzs z3.d, p1/m, z29.d
+; CHECK-NEXT:    st1d { z4.d }, p1, [x8, #6, mul vl]
+; CHECK-NEXT:    movprfx z4, z30
+; CHECK-NEXT:    fcvtzs z4.d, p1/m, z30.d
+; CHECK-NEXT:    st1d { z0.d }, p1, [x8, #5, mul vl]
+; CHECK-NEXT:    movprfx z0, z31
+; CHECK-NEXT:    fcvtzs z0.d, p1/m, z31.d
+; CHECK-NEXT:    st1d { z1.d }, p1, [x8, #4, mul vl]
+; CHECK-NEXT:    st1d { z2.d }, p1, [x8, #3, mul vl]
+; CHECK-NEXT:    st1d { z3.d }, p1, [x8, #2, mul vl]
+; CHECK-NEXT:    st1d { z4.d }, p1, [x8, #1, mul vl]
+; CHECK-NEXT:    st1d { z0.d }, p1, [x8]
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv32f64(<vscale x 32 x double> %x)
+  ret <vscale x 32 x i64> %a
+}
+declare <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv32f64(<vscale x 32 x double>)
diff --git a/llvm/test/CodeGen/AArch64/sve-lrint.ll b/llvm/test/CodeGen/AArch64/sve-lrint.ll
new file mode 100644
index 0000000000000..1e7bf2e280ce8
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-lrint.ll
@@ -0,0 +1,492 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64 -mattr=+sve | FileCheck %s
+
+define <vscale x 1 x i64> @lrint_v1f16(<vscale x 1 x half> %x) {
+; CHECK-LABEL: lrint_v1f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.h
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f16(<vscale x 1 x half> %x)
+  ret <vscale x 1 x i64> %a
+}
+declare <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f16(<vscale x 1 x half>)
+
+define <vscale x 2 x i64> @lrint_v2f16(<vscale x 2 x half> %x) {
+; CHECK-LABEL: lrint_v2f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.h
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f16(<vscale x 2 x half> %x)
+  ret <vscale x 2 x i64> %a
+}
+declare <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f16(<vscale x 2 x half>)
+
+define <vscale x 4 x i64> @lrint_v4f16(<vscale x 4 x half> %x) {
+; CHECK-LABEL: lrint_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uunpklo z1.d, z0.s
+; CHECK-NEXT:    uunpkhi z2.d, z0.s
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    movprfx z0, z1
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z1.h
+; CHECK-NEXT:    movprfx z1, z2
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z2.h
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f16(<vscale x 4 x half> %x)
+  ret <vscale x 4 x i64> %a
+}
+declare <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f16(<vscale x 4 x half>)
+
+define <vscale x 8 x i64> @lrint_v8f16(<vscale x 8 x half> %x) {
+; CHECK-LABEL: lrint_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uunpklo z1.s, z0.h
+; CHECK-NEXT:    uunpkhi z0.s, z0.h
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    uunpklo z2.d, z1.s
+; CHECK-NEXT:    uunpkhi z1.d, z1.s
+; CHECK-NEXT:    uunpklo z3.d, z0.s
+; CHECK-NEXT:    uunpkhi z4.d, z0.s
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.h
+; CHECK-NEXT:    movprfx z0, z2
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z2.h
+; CHECK-NEXT:    movprfx z2, z3
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z3.h
+; CHECK-NEXT:    movprfx z3, z4
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z4.h
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f16(<vscale x 8 x half> %x)
+  ret <vscale x 8 x i64> %a
+}
+declare <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f16(<vscale x 8 x half>)
+
+define <vscale x 16 x i64> @lrint_v16i64_v16f16(<vscale x 16 x half> %x) {
+; CHECK-LABEL: lrint_v16i64_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uunpklo z2.s, z0.h
+; CHECK-NEXT:    uunpkhi z0.s, z0.h
+; CHECK-NEXT:    uunpklo z3.s, z1.h
+; CHECK-NEXT:    uunpkhi z1.s, z1.h
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    uunpklo z4.d, z2.s
+; CHECK-NEXT:    uunpkhi z2.d, z2.s
+; CHECK-NEXT:    uunpklo z5.d, z0.s
+; CHECK-NEXT:    uunpkhi z6.d, z0.s
+; CHECK-NEXT:    uunpklo z7.d, z3.s
+; CHECK-NEXT:    uunpkhi z24.d, z3.s
+; CHECK-NEXT:    uunpklo z25.d, z1.s
+; CHECK-NEXT:    uunpkhi z26.d, z1.s
+; CHECK-NEXT:    movprfx z0, z4
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z4.h
+; CHECK-NEXT:    movprfx z1, z2
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z2.h
+; CHECK-NEXT:    movprfx z2, z5
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z5.h
+; CHECK-NEXT:    movprfx z3, z6
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z6.h
+; CHECK-NEXT:    movprfx z4, z7
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z7.h
+; CHECK-NEXT:    movprfx z5, z24
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z24.h
+; CHECK-NEXT:    movprfx z6, z25
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z25.h
+; CHECK-NEXT:    movprfx z7, z26
+; CHECK-NEXT:    fcvtzs z7.d, p0/m, z26.h
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16f16(<vscale x 16 x half> %x)
+  ret <vscale x 16 x i64> %a
+}
+declare <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16f16(<vscale x 16 x half>)
+
+define <vscale x 32 x i64> @lrint_v32i64_v32f16(<vscale x 32 x half> %x) {
+; CHECK-LABEL: lrint_v32i64_v32f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uunpkhi z4.s, z3.h
+; CHECK-NEXT:    uunpklo z3.s, z3.h
+; CHECK-NEXT:    rdvl x9, #15
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    uunpkhi z7.s, z2.h
+; CHECK-NEXT:    uunpklo z2.s, z2.h
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    uunpklo z24.s, z0.h
+; CHECK-NEXT:    uunpkhi z0.s, z0.h
+; CHECK-NEXT:    uunpkhi z5.d, z4.s
+; CHECK-NEXT:    uunpklo z4.d, z4.s
+; CHECK-NEXT:    uunpkhi z6.d, z3.s
+; CHECK-NEXT:    uunpklo z3.d, z3.s
+; CHECK-NEXT:    uunpkhi z25.d, z2.s
+; CHECK-NEXT:    uunpklo z2.d, z2.s
+; CHECK-NEXT:    uunpklo z26.d, z0.s
+; CHECK-NEXT:    uunpkhi z0.d, z0.s
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z5.h
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z4.h
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.h
+; CHECK-NEXT:    fcvtzs z25.d, p0/m, z25.h
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.h
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.h
+; CHECK-NEXT:    st1b { z5.b }, p1, [x8, x9]
+; CHECK-NEXT:    rdvl x9, #14
+; CHECK-NEXT:    movprfx z5, z6
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z6.h
+; CHECK-NEXT:    uunpkhi z6.d, z7.s
+; CHECK-NEXT:    st1b { z4.b }, p1, [x8, x9]
+; CHECK-NEXT:    uunpkhi z4.s, z1.h
+; CHECK-NEXT:    uunpklo z7.d, z7.s
+; CHECK-NEXT:    rdvl x9, #13
+; CHECK-NEXT:    uunpklo z1.s, z1.h
+; CHECK-NEXT:    st1b { z5.b }, p1, [x8, x9]
+; CHECK-NEXT:    rdvl x9, #12
+; CHECK-NEXT:    movprfx z5, z6
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z6.h
+; CHECK-NEXT:    st1b { z3.b }, p1, [x8, x9]
+; CHECK-NEXT:    uunpkhi z3.d, z4.s
+; CHECK-NEXT:    uunpklo z4.d, z4.s
+; CHECK-NEXT:    fcvtzs z7.d, p0/m, z7.h
+; CHECK-NEXT:    rdvl x9, #11
+; CHECK-NEXT:    uunpkhi z6.d, z24.s
+; CHECK-NEXT:    uunpkhi z27.d, z1.s
+; CHECK-NEXT:    uunpklo z1.d, z1.s
+; CHECK-NEXT:    st1b { z5.b }, p1, [x8, x9]
+; CHECK-NEXT:    rdvl x9, #10
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.h
+; CHECK-NEXT:    st1b { z7.b }, p1, [x8, x9]
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z4.h
+; CHECK-NEXT:    uunpklo z7.d, z24.s
+; CHECK-NEXT:    rdvl x9, #9
+; CHECK-NEXT:    movprfx z5, z27
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z27.h
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.h
+; CHECK-NEXT:    st1b { z25.b }, p1, [x8, x9]
+; CHECK-NEXT:    rdvl x9, #8
+; CHECK-NEXT:    st1b { z2.b }, p1, [x8, x9]
+; CHECK-NEXT:    movprfx z2, z26
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z26.h
+; CHECK-NEXT:    st1d { z3.d }, p0, [x8, #7, mul vl]
+; CHECK-NEXT:    movprfx z3, z6
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z6.h
+; CHECK-NEXT:    st1d { z4.d }, p0, [x8, #6, mul vl]
+; CHECK-NEXT:    movprfx z4, z7
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z7.h
+; CHECK-NEXT:    st1d { z5.d }, p0, [x8, #5, mul vl]
+; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #4, mul vl]
+; CHECK-NEXT:    st1d { z0.d }, p0, [x8, #3, mul vl]
+; CHECK-NEXT:    st1d { z2.d }, p0, [x8, #2, mul vl]
+; CHECK-NEXT:    st1d { z3.d }, p0, [x8, #1, mul vl]
+; CHECK-NEXT:    st1d { z4.d }, p0, [x8]
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x i64> @llvm.lrint.nxv32i64.nxv32f16(<vscale x 32 x half> %x)
+  ret <vscale x 32 x i64> %a
+}
+declare <vscale x 32 x i64> @llvm.lrint.nxv32i64.nxv32f16(<vscale x 32 x half>)
+
+define <vscale x 1 x i64> @lrint_v1f32(<vscale x 1 x float> %x) {
+; CHECK-LABEL: lrint_v1f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.s
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f32(<vscale x 1 x float> %x)
+  ret <vscale x 1 x i64> %a
+}
+declare <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f32(<vscale x 1 x float>)
+
+define <vscale x 2 x i64> @lrint_v2f32(<vscale x 2 x float> %x) {
+; CHECK-LABEL: lrint_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.s
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f32(<vscale x 2 x float> %x)
+  ret <vscale x 2 x i64> %a
+}
+declare <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f32(<vscale x 2 x float>)
+
+define <vscale x 4 x i64> @lrint_v4f32(<vscale x 4 x float> %x) {
+; CHECK-LABEL: lrint_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uunpklo z1.d, z0.s
+; CHECK-NEXT:    uunpkhi z2.d, z0.s
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    movprfx z0, z1
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z1.s
+; CHECK-NEXT:    movprfx z1, z2
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z2.s
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f32(<vscale x 4 x float> %x)
+  ret <vscale x 4 x i64> %a
+}
+declare <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f32(<vscale x 4 x float>)
+
+define <vscale x 8 x i64> @lrint_v8f32(<vscale x 8 x float> %x) {
+; CHECK-LABEL: lrint_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uunpklo z2.d, z0.s
+; CHECK-NEXT:    uunpkhi z3.d, z0.s
+; CHECK-NEXT:    uunpklo z4.d, z1.s
+; CHECK-NEXT:    uunpkhi z5.d, z1.s
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    movprfx z0, z2
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z2.s
+; CHECK-NEXT:    movprfx z1, z3
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z3.s
+; CHECK-NEXT:    movprfx z2, z4
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z4.s
+; CHECK-NEXT:    movprfx z3, z5
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z5.s
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f32(<vscale x 8 x float> %x)
+  ret <vscale x 8 x i64> %a
+}
+declare <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f32(<vscale x 8 x float>)
+
+define <vscale x 16 x i64> @lrint_v16i64_v16f32(<vscale x 16 x float> %x) {
+; CHECK-LABEL: lrint_v16i64_v16f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uunpklo z4.d, z0.s
+; CHECK-NEXT:    uunpkhi z5.d, z0.s
+; CHECK-NEXT:    uunpklo z6.d, z1.s
+; CHECK-NEXT:    uunpkhi z7.d, z1.s
+; CHECK-NEXT:    uunpklo z24.d, z2.s
+; CHECK-NEXT:    uunpkhi z25.d, z2.s
+; CHECK-NEXT:    uunpklo z26.d, z3.s
+; CHECK-NEXT:    uunpkhi z27.d, z3.s
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    movprfx z0, z4
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z4.s
+; CHECK-NEXT:    movprfx z1, z5
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z5.s
+; CHECK-NEXT:    movprfx z2, z6
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z6.s
+; CHECK-NEXT:    movprfx z3, z7
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z7.s
+; CHECK-NEXT:    movprfx z4, z24
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z24.s
+; CHECK-NEXT:    movprfx z5, z25
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z25.s
+; CHECK-NEXT:    movprfx z6, z26
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z26.s
+; CHECK-NEXT:    movprfx z7, z27
+; CHECK-NEXT:    fcvtzs z7.d, p0/m, z27.s
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16f32(<vscale x 16 x float> %x)
+  ret <vscale x 16 x i64> %a
+}
+declare <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16f32(<vscale x 16 x float>)
+
+define <vscale x 32 x i64> @lrint_v32i64_v32f32(<vscale x 32 x float> %x) {
+; CHECK-LABEL: lrint_v32i64_v32f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uunpkhi z24.d, z7.s
+; CHECK-NEXT:    uunpklo z7.d, z7.s
+; CHECK-NEXT:    rdvl x9, #15
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    uunpkhi z27.d, z6.s
+; CHECK-NEXT:    uunpklo z6.d, z6.s
+; CHECK-NEXT:    uunpkhi z30.d, z5.s
+; CHECK-NEXT:    uunpklo z5.d, z5.s
+; CHECK-NEXT:    uunpkhi z31.d, z4.s
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    uunpklo z29.d, z3.s
+; CHECK-NEXT:    uunpkhi z3.d, z3.s
+; CHECK-NEXT:    fcvtzs z24.d, p0/m, z24.s
+; CHECK-NEXT:    fcvtzs z7.d, p0/m, z7.s
+; CHECK-NEXT:    uunpklo z4.d, z4.s
+; CHECK-NEXT:    fcvtzs z27.d, p0/m, z27.s
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z6.s
+; CHECK-NEXT:    uunpkhi z25.d, z0.s
+; CHECK-NEXT:    fcvtzs z30.d, p0/m, z30.s
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z5.s
+; CHECK-NEXT:    uunpklo z26.d, z1.s
+; CHECK-NEXT:    uunpkhi z1.d, z1.s
+; CHECK-NEXT:    uunpklo z28.d, z2.s
+; CHECK-NEXT:    uunpkhi z2.d, z2.s
+; CHECK-NEXT:    st1b { z24.b }, p1, [x8, x9]
+; CHECK-NEXT:    rdvl x9, #14
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z4.s
+; CHECK-NEXT:    st1b { z7.b }, p1, [x8, x9]
+; CHECK-NEXT:    rdvl x9, #13
+; CHECK-NEXT:    movprfx z7, z31
+; CHECK-NEXT:    fcvtzs z7.d, p0/m, z31.s
+; CHECK-NEXT:    st1b { z27.b }, p1, [x8, x9]
+; CHECK-NEXT:    rdvl x9, #12
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.s
+; CHECK-NEXT:    st1b { z6.b }, p1, [x8, x9]
+; CHECK-NEXT:    rdvl x9, #11
+; CHECK-NEXT:    movprfx z6, z29
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z29.s
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    st1b { z30.b }, p1, [x8, x9]
+; CHECK-NEXT:    rdvl x9, #10
+; CHECK-NEXT:    st1b { z5.b }, p1, [x8, x9]
+; CHECK-NEXT:    rdvl x9, #9
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.s
+; CHECK-NEXT:    st1b { z7.b }, p1, [x8, x9]
+; CHECK-NEXT:    rdvl x9, #8
+; CHECK-NEXT:    movprfx z5, z28
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z28.s
+; CHECK-NEXT:    st1b { z4.b }, p1, [x8, x9]
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.s
+; CHECK-NEXT:    movprfx z4, z25
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z25.s
+; CHECK-NEXT:    st1d { z3.d }, p0, [x8, #7, mul vl]
+; CHECK-NEXT:    movprfx z3, z26
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z26.s
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.s
+; CHECK-NEXT:    st1d { z6.d }, p0, [x8, #6, mul vl]
+; CHECK-NEXT:    st1d { z2.d }, p0, [x8, #5, mul vl]
+; CHECK-NEXT:    st1d { z5.d }, p0, [x8, #4, mul vl]
+; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #3, mul vl]
+; CHECK-NEXT:    st1d { z3.d }, p0, [x8, #2, mul vl]
+; CHECK-NEXT:    st1d { z4.d }, p0, [x8, #1, mul vl]
+; CHECK-NEXT:    st1d { z0.d }, p0, [x8]
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x i64> @llvm.lrint.nxv32i64.nxv32f32(<vscale x 32 x float> %x)
+  ret <vscale x 32 x i64> %a
+}
+declare <vscale x 32 x i64> @llvm.lrint.nxv32i64.nxv32f32(<vscale x 32 x float>)
+
+define <vscale x 1 x i64> @lrint_v1f64(<vscale x 1 x double> %x) {
+; CHECK-LABEL: lrint_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f64(<vscale x 1 x double> %x)
+  ret <vscale x 1 x i64> %a
+}
+declare <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f64(<vscale x 1 x double>)
+
+define <vscale x 2 x i64> @lrint_v2f64(<vscale x 2 x double> %x) {
+; CHECK-LABEL: lrint_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f64(<vscale x 2 x double> %x)
+  ret <vscale x 2 x i64> %a
+}
+declare <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f64(<vscale x 2 x double>)
+
+define <vscale x 4 x i64> @lrint_v4f64(<vscale x 4 x double> %x) {
+; CHECK-LABEL: lrint_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.d
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f64(<vscale x 4 x double> %x)
+  ret <vscale x 4 x i64> %a
+}
+declare <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f64(<vscale x 4 x double>)
+
+define <vscale x 8 x i64> @lrint_v8f64(<vscale x 8 x double> %x) {
+; CHECK-LABEL: lrint_v8f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.d
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.d
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.d
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f64(<vscale x 8 x double> %x)
+  ret <vscale x 8 x i64> %a
+}
+declare <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f64(<vscale x 8 x double>)
+
+define <vscale x 16 x i64> @lrint_v16f64(<vscale x 16 x double> %x) {
+; CHECK-LABEL: lrint_v16f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.d
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.d
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.d
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z4.d
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z5.d
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z6.d
+; CHECK-NEXT:    fcvtzs z7.d, p0/m, z7.d
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16f64(<vscale x 16 x double> %x)
+  ret <vscale x 16 x i64> %a
+}
+declare <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16f64(<vscale x 16 x double>)
+
+define <vscale x 32 x i64> @lrint_v32f64(<vscale x 32 x double> %x) {
+; CHECK-LABEL: lrint_v32f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    rdvl x9, #15
+; CHECK-NEXT:    rdvl x10, #14
+; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    rdvl x11, #13
+; CHECK-NEXT:    rdvl x12, #12
+; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, x9]
+; CHECK-NEXT:    rdvl x13, #11
+; CHECK-NEXT:    rdvl x14, #10
+; CHECK-NEXT:    ld1b { z1.b }, p0/z, [x0, x10]
+; CHECK-NEXT:    ld1b { z2.b }, p0/z, [x0, x11]
+; CHECK-NEXT:    ld1b { z3.b }, p0/z, [x0, x12]
+; CHECK-NEXT:    ld1b { z4.b }, p0/z, [x0, x13]
+; CHECK-NEXT:    ld1b { z5.b }, p0/z, [x0, x14]
+; CHECK-NEXT:    rdvl x15, #9
+; CHECK-NEXT:    fcvtzs z0.d, p1/m, z0.d
+; CHECK-NEXT:    rdvl x16, #8
+; CHECK-NEXT:    ld1b { z6.b }, p0/z, [x0, x15]
+; CHECK-NEXT:    ld1b { z7.b }, p0/z, [x0, x16]
+; CHECK-NEXT:    ld1d { z24.d }, p1/z, [x0, #7, mul vl]
+; CHECK-NEXT:    ld1d { z25.d }, p1/z, [x0, #6, mul vl]
+; CHECK-NEXT:    ld1d { z26.d }, p1/z, [x0, #5, mul vl]
+; CHECK-NEXT:    fcvtzs z1.d, p1/m, z1.d
+; CHECK-NEXT:    ld1d { z27.d }, p1/z, [x0, #4, mul vl]
+; CHECK-NEXT:    ld1d { z28.d }, p1/z, [x0, #3, mul vl]
+; CHECK-NEXT:    fcvtzs z2.d, p1/m, z2.d
+; CHECK-NEXT:    ld1d { z29.d }, p1/z, [x0, #2, mul vl]
+; CHECK-NEXT:    ld1d { z30.d }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT:    fcvtzs z3.d, p1/m, z3.d
+; CHECK-NEXT:    ld1d { z31.d }, p1/z, [x0]
+; CHECK-NEXT:    fcvtzs z4.d, p1/m, z4.d
+; CHECK-NEXT:    st1b { z0.b }, p0, [x8, x9]
+; CHECK-NEXT:    movprfx z0, z5
+; CHECK-NEXT:    fcvtzs z0.d, p1/m, z5.d
+; CHECK-NEXT:    st1b { z1.b }, p0, [x8, x10]
+; CHECK-NEXT:    movprfx z1, z6
+; CHECK-NEXT:    fcvtzs z1.d, p1/m, z6.d
+; CHECK-NEXT:    st1b { z2.b }, p0, [x8, x11]
+; CHECK-NEXT:    movprfx z2, z7
+; CHECK-NEXT:    fcvtzs z2.d, p1/m, z7.d
+; CHECK-NEXT:    st1b { z3.b }, p0, [x8, x12]
+; CHECK-NEXT:    movprfx z3, z24
+; CHECK-NEXT:    fcvtzs z3.d, p1/m, z24.d
+; CHECK-NEXT:    st1b { z4.b }, p0, [x8, x13]
+; CHECK-NEXT:    movprfx z4, z25
+; CHECK-NEXT:    fcvtzs z4.d, p1/m, z25.d
+; CHECK-NEXT:    st1b { z0.b }, p0, [x8, x14]
+; CHECK-NEXT:    movprfx z0, z26
+; CHECK-NEXT:    fcvtzs z0.d, p1/m, z26.d
+; CHECK-NEXT:    st1b { z1.b }, p0, [x8, x15]
+; CHECK-NEXT:    movprfx z1, z27
+; CHECK-NEXT:    fcvtzs z1.d, p1/m, z27.d
+; CHECK-NEXT:    st1b { z2.b }, p0, [x8, x16]
+; CHECK-NEXT:    movprfx z2, z28
+; CHECK-NEXT:    fcvtzs z2.d, p1/m, z28.d
+; CHECK-NEXT:    st1d { z3.d }, p1, [x8, #7, mul vl]
+; CHECK-NEXT:    movprfx z3, z29
+; CHECK-NEXT:    fcvtzs z3.d, p1/m, z29.d
+; CHECK-NEXT:    st1d { z4.d }, p1, [x8, #6, mul vl]
+; CHECK-NEXT:    movprfx z4, z30
+; CHECK-NEXT:    fcvtzs z4.d, p1/m, z30.d
+; CHECK-NEXT:    st1d { z0.d }, p1, [x8, #5, mul vl]
+; CHECK-NEXT:    movprfx z0, z31
+; CHECK-NEXT:    fcvtzs z0.d, p1/m, z31.d
+; CHECK-NEXT:    st1d { z1.d }, p1, [x8, #4, mul vl]
+; CHECK-NEXT:    st1d { z2.d }, p1, [x8, #3, mul vl]
+; CHECK-NEXT:    st1d { z3.d }, p1, [x8, #2, mul vl]
+; CHECK-NEXT:    st1d { z4.d }, p1, [x8, #1, mul vl]
+; CHECK-NEXT:    st1d { z0.d }, p1, [x8]
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x i64> @llvm.lrint.nxv32i64.nxv32f64(<vscale x 32 x double> %x)
+  ret <vscale x 32 x i64> %a
+}
+declare <vscale x 32 x i64> @llvm.lrint.nxv32i64.nxv32f64(<vscale x 32 x double>)
diff --git a/llvm/test/CodeGen/AArch64/vector-llrint.ll b/llvm/test/CodeGen/AArch64/vector-llrint.ll
index beb2b6a134600..d4d3fbb0e96b5 100644
--- a/llvm/test/CodeGen/AArch64/vector-llrint.ll
+++ b/llvm/test/CodeGen/AArch64/vector-llrint.ll
@@ -532,6 +532,143 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
 }
 declare <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float>)
 
+define <32 x i64> @llrint_v32i64_v32f32(<32 x float> %x) {
+; CHECK-LABEL: llrint_v32i64_v32f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v16.16b, v2.16b, v2.16b, #8
+; CHECK-NEXT:    ext v20.16b, v5.16b, v5.16b, #8
+; CHECK-NEXT:    ext v17.16b, v3.16b, v3.16b, #8
+; CHECK-NEXT:    ext v18.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v19.16b, v4.16b, v4.16b, #8
+; CHECK-NEXT:    ext v21.16b, v6.16b, v6.16b, #8
+; CHECK-NEXT:    ext v22.16b, v7.16b, v7.16b, #8
+; CHECK-NEXT:    frintx s24, s16
+; CHECK-NEXT:    mov s28, v20.s[1]
+; CHECK-NEXT:    frintx s25, s17
+; CHECK-NEXT:    frintx s26, s18
+; CHECK-NEXT:    frintx s27, s19
+; CHECK-NEXT:    frintx s29, s20
+; CHECK-NEXT:    mov s30, v21.s[1]
+; CHECK-NEXT:    frintx s20, s21
+; CHECK-NEXT:    frintx s21, s22
+; CHECK-NEXT:    mov s23, v22.s[1]
+; CHECK-NEXT:    mov s19, v19.s[1]
+; CHECK-NEXT:    mov s17, v17.s[1]
+; CHECK-NEXT:    fcvtzs x12, s24
+; CHECK-NEXT:    frintx s24, s28
+; CHECK-NEXT:    fcvtzs x13, s25
+; CHECK-NEXT:    mov s25, v7.s[1]
+; CHECK-NEXT:    fcvtzs x9, s26
+; CHECK-NEXT:    fcvtzs x11, s27
+; CHECK-NEXT:    fcvtzs x14, s20
+; CHECK-NEXT:    fcvtzs x15, s21
+; CHECK-NEXT:    frintx s26, s1
+; CHECK-NEXT:    frintx s23, s23
+; CHECK-NEXT:    frintx s27, s7
+; CHECK-NEXT:    frintx s22, s30
+; CHECK-NEXT:    fmov d20, x12
+; CHECK-NEXT:    fcvtzs x12, s24
+; CHECK-NEXT:    mov s24, v6.s[1]
+; CHECK-NEXT:    frintx s25, s25
+; CHECK-NEXT:    frintx s6, s6
+; CHECK-NEXT:    fcvtzs x10, s29
+; CHECK-NEXT:    fmov d7, x11
+; CHECK-NEXT:    fmov d21, x13
+; CHECK-NEXT:    frintx s28, s5
+; CHECK-NEXT:    fcvtzs x11, s23
+; CHECK-NEXT:    fmov d23, x14
+; CHECK-NEXT:    fcvtzs x14, s26
+; CHECK-NEXT:    fmov d26, x15
+; CHECK-NEXT:    fcvtzs x15, s27
+; CHECK-NEXT:    frintx s24, s24
+; CHECK-NEXT:    mov s27, v5.s[1]
+; CHECK-NEXT:    fcvtzs x13, s22
+; CHECK-NEXT:    fcvtzs x17, s25
+; CHECK-NEXT:    frintx s25, s4
+; CHECK-NEXT:    fcvtzs x18, s6
+; CHECK-NEXT:    fmov d6, x10
+; CHECK-NEXT:    frintx s22, s2
+; CHECK-NEXT:    mov v26.d[1], x11
+; CHECK-NEXT:    fmov d5, x14
+; CHECK-NEXT:    fcvtzs x10, s24
+; CHECK-NEXT:    fmov d24, x15
+; CHECK-NEXT:    fcvtzs x14, s28
+; CHECK-NEXT:    frintx s27, s27
+; CHECK-NEXT:    mov v23.d[1], x13
+; CHECK-NEXT:    mov s4, v4.s[1]
+; CHECK-NEXT:    fcvtzs x13, s25
+; CHECK-NEXT:    fmov d25, x18
+; CHECK-NEXT:    mov s16, v16.s[1]
+; CHECK-NEXT:    mov v24.d[1], x17
+; CHECK-NEXT:    fcvtzs x16, s22
+; CHECK-NEXT:    frintx s22, s3
+; CHECK-NEXT:    mov s3, v3.s[1]
+; CHECK-NEXT:    frintx s19, s19
+; CHECK-NEXT:    mov s2, v2.s[1]
+; CHECK-NEXT:    mov v25.d[1], x10
+; CHECK-NEXT:    fcvtzs x10, s27
+; CHECK-NEXT:    frintx s4, s4
+; CHECK-NEXT:    mov v6.d[1], x12
+; CHECK-NEXT:    frintx s17, s17
+; CHECK-NEXT:    mov s18, v18.s[1]
+; CHECK-NEXT:    stp q24, q26, [x8, #224]
+; CHECK-NEXT:    fmov d24, x14
+; CHECK-NEXT:    fcvtzs x11, s22
+; CHECK-NEXT:    ext v22.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    mov s1, v1.s[1]
+; CHECK-NEXT:    frintx s3, s3
+; CHECK-NEXT:    stp q25, q23, [x8, #192]
+; CHECK-NEXT:    frintx s2, s2
+; CHECK-NEXT:    fcvtzs x12, s4
+; CHECK-NEXT:    mov v24.d[1], x10
+; CHECK-NEXT:    fcvtzs x10, s19
+; CHECK-NEXT:    mov s19, v0.s[1]
+; CHECK-NEXT:    frintx s16, s16
+; CHECK-NEXT:    frintx s0, s0
+; CHECK-NEXT:    fmov d4, x11
+; CHECK-NEXT:    mov s27, v22.s[1]
+; CHECK-NEXT:    frintx s22, s22
+; CHECK-NEXT:    frintx s1, s1
+; CHECK-NEXT:    fcvtzs x11, s3
+; CHECK-NEXT:    fcvtzs x14, s2
+; CHECK-NEXT:    frintx s2, s18
+; CHECK-NEXT:    stp q24, q6, [x8, #160]
+; CHECK-NEXT:    fmov d6, x13
+; CHECK-NEXT:    fcvtzs x13, s17
+; CHECK-NEXT:    frintx s17, s19
+; CHECK-NEXT:    fmov d23, x16
+; CHECK-NEXT:    mov v7.d[1], x10
+; CHECK-NEXT:    frintx s3, s27
+; CHECK-NEXT:    fcvtzs x10, s22
+; CHECK-NEXT:    fcvtzs x15, s1
+; CHECK-NEXT:    mov v6.d[1], x12
+; CHECK-NEXT:    fcvtzs x12, s16
+; CHECK-NEXT:    mov v4.d[1], x11
+; CHECK-NEXT:    mov v21.d[1], x13
+; CHECK-NEXT:    fcvtzs x13, s0
+; CHECK-NEXT:    mov v23.d[1], x14
+; CHECK-NEXT:    fcvtzs x14, s17
+; CHECK-NEXT:    fcvtzs x11, s3
+; CHECK-NEXT:    fmov d0, x10
+; CHECK-NEXT:    mov v5.d[1], x15
+; CHECK-NEXT:    stp q6, q7, [x8, #128]
+; CHECK-NEXT:    mov v20.d[1], x12
+; CHECK-NEXT:    fcvtzs x12, s2
+; CHECK-NEXT:    stp q4, q21, [x8, #96]
+; CHECK-NEXT:    fmov d1, x13
+; CHECK-NEXT:    fmov d2, x9
+; CHECK-NEXT:    mov v0.d[1], x11
+; CHECK-NEXT:    stp q23, q20, [x8, #64]
+; CHECK-NEXT:    mov v1.d[1], x14
+; CHECK-NEXT:    mov v2.d[1], x12
+; CHECK-NEXT:    stp q5, q0, [x8, #32]
+; CHECK-NEXT:    stp q1, q2, [x8]
+; CHECK-NEXT:    ret
+  %a = call <32 x i64> @llvm.llrint.v32i64.v32f32(<32 x float> %x)
+  ret <32 x i64> %a
+}
+declare <32 x i64> @llvm.llrint.v32i64.v32f32(<32 x float>)
+
 define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) {
 ; CHECK-LABEL: llrint_v1i64_v1f64:
 ; CHECK:       // %bb.0:
@@ -619,3 +756,201 @@ define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
   ret <8 x i64> %a
 }
 declare <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double>)
+
+define <16 x i64> @llrint_v16f64(<16 x double> %x) {
+; CHECK-LABEL: llrint_v16f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov d16, v0.d[1]
+; CHECK-NEXT:    mov d17, v1.d[1]
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    frintx d1, d1
+; CHECK-NEXT:    frintx d18, d2
+; CHECK-NEXT:    mov d2, v2.d[1]
+; CHECK-NEXT:    frintx d19, d3
+; CHECK-NEXT:    mov d3, v3.d[1]
+; CHECK-NEXT:    frintx d16, d16
+; CHECK-NEXT:    frintx d17, d17
+; CHECK-NEXT:    fcvtzs x8, d0
+; CHECK-NEXT:    frintx d0, d4
+; CHECK-NEXT:    mov d4, v4.d[1]
+; CHECK-NEXT:    fcvtzs x9, d1
+; CHECK-NEXT:    frintx d1, d5
+; CHECK-NEXT:    mov d5, v5.d[1]
+; CHECK-NEXT:    fcvtzs x12, d18
+; CHECK-NEXT:    frintx d2, d2
+; CHECK-NEXT:    fcvtzs x13, d19
+; CHECK-NEXT:    frintx d18, d3
+; CHECK-NEXT:    fcvtzs x10, d16
+; CHECK-NEXT:    mov d16, v6.d[1]
+; CHECK-NEXT:    fcvtzs x11, d17
+; CHECK-NEXT:    mov d17, v7.d[1]
+; CHECK-NEXT:    frintx d6, d6
+; CHECK-NEXT:    frintx d7, d7
+; CHECK-NEXT:    frintx d4, d4
+; CHECK-NEXT:    frintx d5, d5
+; CHECK-NEXT:    fcvtzs x14, d0
+; CHECK-NEXT:    fcvtzs x15, d1
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    frintx d16, d16
+; CHECK-NEXT:    fcvtzs x9, d2
+; CHECK-NEXT:    fmov d2, x12
+; CHECK-NEXT:    frintx d17, d17
+; CHECK-NEXT:    fcvtzs x8, d6
+; CHECK-NEXT:    fcvtzs x12, d7
+; CHECK-NEXT:    fmov d3, x13
+; CHECK-NEXT:    fcvtzs x13, d18
+; CHECK-NEXT:    fcvtzs x16, d4
+; CHECK-NEXT:    fcvtzs x17, d5
+; CHECK-NEXT:    fmov d4, x14
+; CHECK-NEXT:    fmov d5, x15
+; CHECK-NEXT:    fcvtzs x18, d16
+; CHECK-NEXT:    mov v0.d[1], x10
+; CHECK-NEXT:    mov v1.d[1], x11
+; CHECK-NEXT:    fcvtzs x0, d17
+; CHECK-NEXT:    fmov d6, x8
+; CHECK-NEXT:    fmov d7, x12
+; CHECK-NEXT:    mov v2.d[1], x9
+; CHECK-NEXT:    mov v3.d[1], x13
+; CHECK-NEXT:    mov v4.d[1], x16
+; CHECK-NEXT:    mov v5.d[1], x17
+; CHECK-NEXT:    mov v6.d[1], x18
+; CHECK-NEXT:    mov v7.d[1], x0
+; CHECK-NEXT:    ret
+  %a = call <16 x i64> @llvm.llrint.v16i64.v16f64(<16 x double> %x)
+  ret <16 x i64> %a
+}
+declare <16 x i64> @llvm.llrint.v16i64.v16f64(<16 x double>)
+
+define <32 x i64> @llrint_v32f64(<32 x double> %x) {
+; CHECK-LABEL: llrint_v32f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx d20, d0
+; CHECK-NEXT:    frintx d22, d3
+; CHECK-NEXT:    frintx d21, d4
+; CHECK-NEXT:    ldp q19, q18, [sp, #64]
+; CHECK-NEXT:    frintx d23, d5
+; CHECK-NEXT:    ldp q27, q26, [sp, #96]
+; CHECK-NEXT:    mov d4, v4.d[1]
+; CHECK-NEXT:    ldp q16, q17, [sp, #32]
+; CHECK-NEXT:    mov d5, v5.d[1]
+; CHECK-NEXT:    fcvtzs x9, d20
+; CHECK-NEXT:    frintx d20, d6
+; CHECK-NEXT:    fcvtzs x11, d22
+; CHECK-NEXT:    frintx d22, d19
+; CHECK-NEXT:    fcvtzs x12, d21
+; CHECK-NEXT:    fcvtzs x10, d23
+; CHECK-NEXT:    mov d21, v26.d[1]
+; CHECK-NEXT:    frintx d23, d27
+; CHECK-NEXT:    mov d27, v27.d[1]
+; CHECK-NEXT:    frintx d24, d16
+; CHECK-NEXT:    mov d19, v19.d[1]
+; CHECK-NEXT:    frintx d25, d17
+; CHECK-NEXT:    fcvtzs x13, d20
+; CHECK-NEXT:    mov d20, v18.d[1]
+; CHECK-NEXT:    frintx d18, d18
+; CHECK-NEXT:    fcvtzs x16, d22
+; CHECK-NEXT:    frintx d22, d26
+; CHECK-NEXT:    mov d16, v16.d[1]
+; CHECK-NEXT:    frintx d21, d21
+; CHECK-NEXT:    fcvtzs x17, d23
+; CHECK-NEXT:    frintx d23, d27
+; CHECK-NEXT:    fcvtzs x14, d24
+; CHECK-NEXT:    frintx d26, d19
+; CHECK-NEXT:    fmov d19, x11
+; CHECK-NEXT:    frintx d20, d20
+; CHECK-NEXT:    mov d27, v17.d[1]
+; CHECK-NEXT:    fcvtzs x15, d25
+; CHECK-NEXT:    ldp q25, q24, [sp]
+; CHECK-NEXT:    fcvtzs x11, d22
+; CHECK-NEXT:    fmov d17, x12
+; CHECK-NEXT:    fcvtzs x12, d21
+; CHECK-NEXT:    fcvtzs x0, d23
+; CHECK-NEXT:    fmov d23, x14
+; CHECK-NEXT:    fcvtzs x14, d18
+; CHECK-NEXT:    fmov d18, x17
+; CHECK-NEXT:    fcvtzs x17, d20
+; CHECK-NEXT:    frintx d21, d7
+; CHECK-NEXT:    fcvtzs x18, d26
+; CHECK-NEXT:    fmov d20, x11
+; CHECK-NEXT:    frintx d22, d25
+; CHECK-NEXT:    frintx d26, d27
+; CHECK-NEXT:    frintx d16, d16
+; CHECK-NEXT:    mov v18.d[1], x0
+; CHECK-NEXT:    mov d25, v25.d[1]
+; CHECK-NEXT:    mov d7, v7.d[1]
+; CHECK-NEXT:    mov d6, v6.d[1]
+; CHECK-NEXT:    mov d0, v0.d[1]
+; CHECK-NEXT:    mov v20.d[1], x12
+; CHECK-NEXT:    fcvtzs x11, d21
+; CHECK-NEXT:    fmov d21, x15
+; CHECK-NEXT:    fcvtzs x12, d22
+; CHECK-NEXT:    fmov d22, x16
+; CHECK-NEXT:    fcvtzs x15, d26
+; CHECK-NEXT:    fmov d26, x14
+; CHECK-NEXT:    fcvtzs x14, d16
+; CHECK-NEXT:    frintx d25, d25
+; CHECK-NEXT:    frintx d7, d7
+; CHECK-NEXT:    mov d16, v1.d[1]
+; CHECK-NEXT:    mov d3, v3.d[1]
+; CHECK-NEXT:    stp q18, q20, [x8, #224]
+; CHECK-NEXT:    mov d18, v24.d[1]
+; CHECK-NEXT:    mov v22.d[1], x18
+; CHECK-NEXT:    mov v26.d[1], x17
+; CHECK-NEXT:    frintx d24, d24
+; CHECK-NEXT:    mov v21.d[1], x15
+; CHECK-NEXT:    mov v23.d[1], x14
+; CHECK-NEXT:    frintx d20, d2
+; CHECK-NEXT:    mov d2, v2.d[1]
+; CHECK-NEXT:    frintx d6, d6
+; CHECK-NEXT:    frintx d5, d5
+; CHECK-NEXT:    frintx d4, d4
+; CHECK-NEXT:    frintx d18, d18
+; CHECK-NEXT:    frintx d1, d1
+; CHECK-NEXT:    frintx d3, d3
+; CHECK-NEXT:    stp q22, q26, [x8, #192]
+; CHECK-NEXT:    fmov d22, x10
+; CHECK-NEXT:    fcvtzs x10, d24
+; CHECK-NEXT:    stp q23, q21, [x8, #160]
+; CHECK-NEXT:    fmov d21, x11
+; CHECK-NEXT:    fmov d24, x13
+; CHECK-NEXT:    frintx d2, d2
+; CHECK-NEXT:    fcvtzs x13, d6
+; CHECK-NEXT:    frintx d6, d16
+; CHECK-NEXT:    fcvtzs x11, d18
+; CHECK-NEXT:    fmov d18, x12
+; CHECK-NEXT:    fcvtzs x12, d25
+; CHECK-NEXT:    fmov d23, x10
+; CHECK-NEXT:    fcvtzs x10, d7
+; CHECK-NEXT:    fcvtzs x14, d5
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    fcvtzs x15, d3
+; CHECK-NEXT:    mov v24.d[1], x13
+; CHECK-NEXT:    fcvtzs x13, d2
+; CHECK-NEXT:    fmov d2, x9
+; CHECK-NEXT:    mov v23.d[1], x11
+; CHECK-NEXT:    fcvtzs x11, d4
+; CHECK-NEXT:    mov v18.d[1], x12
+; CHECK-NEXT:    fcvtzs x12, d20
+; CHECK-NEXT:    mov v21.d[1], x10
+; CHECK-NEXT:    fcvtzs x10, d1
+; CHECK-NEXT:    mov v22.d[1], x14
+; CHECK-NEXT:    fcvtzs x14, d6
+; CHECK-NEXT:    mov v19.d[1], x15
+; CHECK-NEXT:    stp q18, q23, [x8, #128]
+; CHECK-NEXT:    mov v17.d[1], x11
+; CHECK-NEXT:    fcvtzs x11, d0
+; CHECK-NEXT:    stp q24, q21, [x8, #96]
+; CHECK-NEXT:    fmov d0, x12
+; CHECK-NEXT:    fmov d1, x10
+; CHECK-NEXT:    stp q17, q22, [x8, #64]
+; CHECK-NEXT:    mov v0.d[1], x13
+; CHECK-NEXT:    mov v1.d[1], x14
+; CHECK-NEXT:    mov v2.d[1], x11
+; CHECK-NEXT:    stp q0, q19, [x8, #32]
+; CHECK-NEXT:    stp q2, q1, [x8]
+; CHECK-NEXT:    ret
+  %a = call <32 x i64> @llvm.llrint.v32i64.v32f64(<32 x double> %x)
+  ret <32 x i64> %a
+}
+declare <32 x i64> @llvm.llrint.v32i64.v32f64(<32 x double>)
diff --git a/llvm/test/CodeGen/AArch64/vector-lrint.ll b/llvm/test/CodeGen/AArch64/vector-lrint.ll
index db85b23428216..a58be8dcb7455 100644
--- a/llvm/test/CodeGen/AArch64/vector-lrint.ll
+++ b/llvm/test/CodeGen/AArch64/vector-lrint.ll
@@ -1,19 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc < %s -mtriple=aarch64 -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
-
-; CHECK-GI:       warning: Instruction selection used fallback path for lrint_v2f16
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_v4f16
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_v8f16
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_v16i64_v16f16
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_v32i64_v32f16
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_v2f32
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_v4f32
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_v8f32
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_v16i64_v16f32
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_v2f64
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_v4f64
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_v8f64
+; RUN: llc < %s -mtriple=aarch64 -mattr=+neon | FileCheck %s
 
 define <1 x i64> @lrint_v1f16(<1 x half> %x) {
 ; CHECK-LABEL: lrint_v1f16:
@@ -385,20 +371,13 @@ define <32 x i64> @lrint_v32i64_v32f16(<32 x half> %x) {
 declare <32 x i64> @llvm.lrint.v32i64.v32f16(<32 x half>)
 
 define <1 x i64> @lrint_v1f32(<1 x float> %x) {
-; CHECK-SD-LABEL: lrint_v1f32:
-; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT:    frintx s0, s0
-; CHECK-SD-NEXT:    fcvtzs x8, s0
-; CHECK-SD-NEXT:    fmov d0, x8
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: lrint_v1f32:
-; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    frintx s0, s0
-; CHECK-GI-NEXT:    fcvtzs x8, s0
-; CHECK-GI-NEXT:    fmov d0, x8
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: lrint_v1f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    frintx s0, s0
+; CHECK-NEXT:    fcvtzs x8, s0
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    ret
   %a = call <1 x i64> @llvm.lrint.v1i64.v1f32(<1 x float> %x)
   ret <1 x i64> %a
 }
@@ -553,6 +532,143 @@ define <16 x i64> @lrint_v16i64_v16f32(<16 x float> %x) {
 }
 declare <16 x i64> @llvm.lrint.v16i64.v16f32(<16 x float>)
 
+define <32 x i64> @lrint_v32i64_v32f32(<32 x float> %x) {
+; CHECK-LABEL: lrint_v32i64_v32f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v16.16b, v2.16b, v2.16b, #8
+; CHECK-NEXT:    ext v20.16b, v5.16b, v5.16b, #8
+; CHECK-NEXT:    ext v17.16b, v3.16b, v3.16b, #8
+; CHECK-NEXT:    ext v18.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v19.16b, v4.16b, v4.16b, #8
+; CHECK-NEXT:    ext v21.16b, v6.16b, v6.16b, #8
+; CHECK-NEXT:    ext v22.16b, v7.16b, v7.16b, #8
+; CHECK-NEXT:    frintx s24, s16
+; CHECK-NEXT:    mov s28, v20.s[1]
+; CHECK-NEXT:    frintx s25, s17
+; CHECK-NEXT:    frintx s26, s18
+; CHECK-NEXT:    frintx s27, s19
+; CHECK-NEXT:    frintx s29, s20
+; CHECK-NEXT:    mov s30, v21.s[1]
+; CHECK-NEXT:    frintx s20, s21
+; CHECK-NEXT:    frintx s21, s22
+; CHECK-NEXT:    mov s23, v22.s[1]
+; CHECK-NEXT:    mov s19, v19.s[1]
+; CHECK-NEXT:    mov s17, v17.s[1]
+; CHECK-NEXT:    fcvtzs x12, s24
+; CHECK-NEXT:    frintx s24, s28
+; CHECK-NEXT:    fcvtzs x13, s25
+; CHECK-NEXT:    mov s25, v7.s[1]
+; CHECK-NEXT:    fcvtzs x9, s26
+; CHECK-NEXT:    fcvtzs x11, s27
+; CHECK-NEXT:    fcvtzs x14, s20
+; CHECK-NEXT:    fcvtzs x15, s21
+; CHECK-NEXT:    frintx s26, s1
+; CHECK-NEXT:    frintx s23, s23
+; CHECK-NEXT:    frintx s27, s7
+; CHECK-NEXT:    frintx s22, s30
+; CHECK-NEXT:    fmov d20, x12
+; CHECK-NEXT:    fcvtzs x12, s24
+; CHECK-NEXT:    mov s24, v6.s[1]
+; CHECK-NEXT:    frintx s25, s25
+; CHECK-NEXT:    frintx s6, s6
+; CHECK-NEXT:    fcvtzs x10, s29
+; CHECK-NEXT:    fmov d7, x11
+; CHECK-NEXT:    fmov d21, x13
+; CHECK-NEXT:    frintx s28, s5
+; CHECK-NEXT:    fcvtzs x11, s23
+; CHECK-NEXT:    fmov d23, x14
+; CHECK-NEXT:    fcvtzs x14, s26
+; CHECK-NEXT:    fmov d26, x15
+; CHECK-NEXT:    fcvtzs x15, s27
+; CHECK-NEXT:    frintx s24, s24
+; CHECK-NEXT:    mov s27, v5.s[1]
+; CHECK-NEXT:    fcvtzs x13, s22
+; CHECK-NEXT:    fcvtzs x17, s25
+; CHECK-NEXT:    frintx s25, s4
+; CHECK-NEXT:    fcvtzs x18, s6
+; CHECK-NEXT:    fmov d6, x10
+; CHECK-NEXT:    frintx s22, s2
+; CHECK-NEXT:    mov v26.d[1], x11
+; CHECK-NEXT:    fmov d5, x14
+; CHECK-NEXT:    fcvtzs x10, s24
+; CHECK-NEXT:    fmov d24, x15
+; CHECK-NEXT:    fcvtzs x14, s28
+; CHECK-NEXT:    frintx s27, s27
+; CHECK-NEXT:    mov v23.d[1], x13
+; CHECK-NEXT:    mov s4, v4.s[1]
+; CHECK-NEXT:    fcvtzs x13, s25
+; CHECK-NEXT:    fmov d25, x18
+; CHECK-NEXT:    mov s16, v16.s[1]
+; CHECK-NEXT:    mov v24.d[1], x17
+; CHECK-NEXT:    fcvtzs x16, s22
+; CHECK-NEXT:    frintx s22, s3
+; CHECK-NEXT:    mov s3, v3.s[1]
+; CHECK-NEXT:    frintx s19, s19
+; CHECK-NEXT:    mov s2, v2.s[1]
+; CHECK-NEXT:    mov v25.d[1], x10
+; CHECK-NEXT:    fcvtzs x10, s27
+; CHECK-NEXT:    frintx s4, s4
+; CHECK-NEXT:    mov v6.d[1], x12
+; CHECK-NEXT:    frintx s17, s17
+; CHECK-NEXT:    mov s18, v18.s[1]
+; CHECK-NEXT:    stp q24, q26, [x8, #224]
+; CHECK-NEXT:    fmov d24, x14
+; CHECK-NEXT:    fcvtzs x11, s22
+; CHECK-NEXT:    ext v22.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    mov s1, v1.s[1]
+; CHECK-NEXT:    frintx s3, s3
+; CHECK-NEXT:    stp q25, q23, [x8, #192]
+; CHECK-NEXT:    frintx s2, s2
+; CHECK-NEXT:    fcvtzs x12, s4
+; CHECK-NEXT:    mov v24.d[1], x10
+; CHECK-NEXT:    fcvtzs x10, s19
+; CHECK-NEXT:    mov s19, v0.s[1]
+; CHECK-NEXT:    frintx s16, s16
+; CHECK-NEXT:    frintx s0, s0
+; CHECK-NEXT:    fmov d4, x11
+; CHECK-NEXT:    mov s27, v22.s[1]
+; CHECK-NEXT:    frintx s22, s22
+; CHECK-NEXT:    frintx s1, s1
+; CHECK-NEXT:    fcvtzs x11, s3
+; CHECK-NEXT:    fcvtzs x14, s2
+; CHECK-NEXT:    frintx s2, s18
+; CHECK-NEXT:    stp q24, q6, [x8, #160]
+; CHECK-NEXT:    fmov d6, x13
+; CHECK-NEXT:    fcvtzs x13, s17
+; CHECK-NEXT:    frintx s17, s19
+; CHECK-NEXT:    fmov d23, x16
+; CHECK-NEXT:    mov v7.d[1], x10
+; CHECK-NEXT:    frintx s3, s27
+; CHECK-NEXT:    fcvtzs x10, s22
+; CHECK-NEXT:    fcvtzs x15, s1
+; CHECK-NEXT:    mov v6.d[1], x12
+; CHECK-NEXT:    fcvtzs x12, s16
+; CHECK-NEXT:    mov v4.d[1], x11
+; CHECK-NEXT:    mov v21.d[1], x13
+; CHECK-NEXT:    fcvtzs x13, s0
+; CHECK-NEXT:    mov v23.d[1], x14
+; CHECK-NEXT:    fcvtzs x14, s17
+; CHECK-NEXT:    fcvtzs x11, s3
+; CHECK-NEXT:    fmov d0, x10
+; CHECK-NEXT:    mov v5.d[1], x15
+; CHECK-NEXT:    stp q6, q7, [x8, #128]
+; CHECK-NEXT:    mov v20.d[1], x12
+; CHECK-NEXT:    fcvtzs x12, s2
+; CHECK-NEXT:    stp q4, q21, [x8, #96]
+; CHECK-NEXT:    fmov d1, x13
+; CHECK-NEXT:    fmov d2, x9
+; CHECK-NEXT:    mov v0.d[1], x11
+; CHECK-NEXT:    stp q23, q20, [x8, #64]
+; CHECK-NEXT:    mov v1.d[1], x14
+; CHECK-NEXT:    mov v2.d[1], x12
+; CHECK-NEXT:    stp q5, q0, [x8, #32]
+; CHECK-NEXT:    stp q1, q2, [x8]
+; CHECK-NEXT:    ret
+  %a = call <32 x i64> @llvm.lrint.v32i64.v32f32(<32 x float> %x)
+  ret <32 x i64> %a
+}
+declare <32 x i64> @llvm.lrint.v32i64.v32f32(<32 x float>)
+
 define <1 x i64> @lrint_v1f64(<1 x double> %x) {
 ; CHECK-LABEL: lrint_v1f64:
 ; CHECK:       // %bb.0:
@@ -640,3 +756,201 @@ define <8 x i64> @lrint_v8f64(<8 x double> %x) {
   ret <8 x i64> %a
 }
 declare <8 x i64> @llvm.lrint.v8i64.v8f64(<8 x double>)
+
+define <16 x i64> @lrint_v16f64(<16 x double> %x) {
+; CHECK-LABEL: lrint_v16f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov d16, v0.d[1]
+; CHECK-NEXT:    mov d17, v1.d[1]
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    frintx d1, d1
+; CHECK-NEXT:    frintx d18, d2
+; CHECK-NEXT:    mov d2, v2.d[1]
+; CHECK-NEXT:    frintx d19, d3
+; CHECK-NEXT:    mov d3, v3.d[1]
+; CHECK-NEXT:    frintx d16, d16
+; CHECK-NEXT:    frintx d17, d17
+; CHECK-NEXT:    fcvtzs x8, d0
+; CHECK-NEXT:    frintx d0, d4
+; CHECK-NEXT:    mov d4, v4.d[1]
+; CHECK-NEXT:    fcvtzs x9, d1
+; CHECK-NEXT:    frintx d1, d5
+; CHECK-NEXT:    mov d5, v5.d[1]
+; CHECK-NEXT:    fcvtzs x12, d18
+; CHECK-NEXT:    frintx d2, d2
+; CHECK-NEXT:    fcvtzs x13, d19
+; CHECK-NEXT:    frintx d18, d3
+; CHECK-NEXT:    fcvtzs x10, d16
+; CHECK-NEXT:    mov d16, v6.d[1]
+; CHECK-NEXT:    fcvtzs x11, d17
+; CHECK-NEXT:    mov d17, v7.d[1]
+; CHECK-NEXT:    frintx d6, d6
+; CHECK-NEXT:    frintx d7, d7
+; CHECK-NEXT:    frintx d4, d4
+; CHECK-NEXT:    frintx d5, d5
+; CHECK-NEXT:    fcvtzs x14, d0
+; CHECK-NEXT:    fcvtzs x15, d1
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    frintx d16, d16
+; CHECK-NEXT:    fcvtzs x9, d2
+; CHECK-NEXT:    fmov d2, x12
+; CHECK-NEXT:    frintx d17, d17
+; CHECK-NEXT:    fcvtzs x8, d6
+; CHECK-NEXT:    fcvtzs x12, d7
+; CHECK-NEXT:    fmov d3, x13
+; CHECK-NEXT:    fcvtzs x13, d18
+; CHECK-NEXT:    fcvtzs x16, d4
+; CHECK-NEXT:    fcvtzs x17, d5
+; CHECK-NEXT:    fmov d4, x14
+; CHECK-NEXT:    fmov d5, x15
+; CHECK-NEXT:    fcvtzs x18, d16
+; CHECK-NEXT:    mov v0.d[1], x10
+; CHECK-NEXT:    mov v1.d[1], x11
+; CHECK-NEXT:    fcvtzs x0, d17
+; CHECK-NEXT:    fmov d6, x8
+; CHECK-NEXT:    fmov d7, x12
+; CHECK-NEXT:    mov v2.d[1], x9
+; CHECK-NEXT:    mov v3.d[1], x13
+; CHECK-NEXT:    mov v4.d[1], x16
+; CHECK-NEXT:    mov v5.d[1], x17
+; CHECK-NEXT:    mov v6.d[1], x18
+; CHECK-NEXT:    mov v7.d[1], x0
+; CHECK-NEXT:    ret
+  %a = call <16 x i64> @llvm.lrint.v16i64.v16f64(<16 x double> %x)
+  ret <16 x i64> %a
+}
+declare <16 x i64> @llvm.lrint.v16i64.v16f64(<16 x double>)
+
+define <32 x i64> @lrint_v32f64(<32 x double> %x) {
+; CHECK-LABEL: lrint_v32f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx d20, d0
+; CHECK-NEXT:    frintx d22, d3
+; CHECK-NEXT:    frintx d21, d4
+; CHECK-NEXT:    ldp q19, q18, [sp, #64]
+; CHECK-NEXT:    frintx d23, d5
+; CHECK-NEXT:    ldp q27, q26, [sp, #96]
+; CHECK-NEXT:    mov d4, v4.d[1]
+; CHECK-NEXT:    ldp q16, q17, [sp, #32]
+; CHECK-NEXT:    mov d5, v5.d[1]
+; CHECK-NEXT:    fcvtzs x9, d20
+; CHECK-NEXT:    frintx d20, d6
+; CHECK-NEXT:    fcvtzs x11, d22
+; CHECK-NEXT:    frintx d22, d19
+; CHECK-NEXT:    fcvtzs x12, d21
+; CHECK-NEXT:    fcvtzs x10, d23
+; CHECK-NEXT:    mov d21, v26.d[1]
+; CHECK-NEXT:    frintx d23, d27
+; CHECK-NEXT:    mov d27, v27.d[1]
+; CHECK-NEXT:    frintx d24, d16
+; CHECK-NEXT:    mov d19, v19.d[1]
+; CHECK-NEXT:    frintx d25, d17
+; CHECK-NEXT:    fcvtzs x13, d20
+; CHECK-NEXT:    mov d20, v18.d[1]
+; CHECK-NEXT:    frintx d18, d18
+; CHECK-NEXT:    fcvtzs x16, d22
+; CHECK-NEXT:    frintx d22, d26
+; CHECK-NEXT:    mov d16, v16.d[1]
+; CHECK-NEXT:    frintx d21, d21
+; CHECK-NEXT:    fcvtzs x17, d23
+; CHECK-NEXT:    frintx d23, d27
+; CHECK-NEXT:    fcvtzs x14, d24
+; CHECK-NEXT:    frintx d26, d19
+; CHECK-NEXT:    fmov d19, x11
+; CHECK-NEXT:    frintx d20, d20
+; CHECK-NEXT:    mov d27, v17.d[1]
+; CHECK-NEXT:    fcvtzs x15, d25
+; CHECK-NEXT:    ldp q25, q24, [sp]
+; CHECK-NEXT:    fcvtzs x11, d22
+; CHECK-NEXT:    fmov d17, x12
+; CHECK-NEXT:    fcvtzs x12, d21
+; CHECK-NEXT:    fcvtzs x0, d23
+; CHECK-NEXT:    fmov d23, x14
+; CHECK-NEXT:    fcvtzs x14, d18
+; CHECK-NEXT:    fmov d18, x17
+; CHECK-NEXT:    fcvtzs x17, d20
+; CHECK-NEXT:    frintx d21, d7
+; CHECK-NEXT:    fcvtzs x18, d26
+; CHECK-NEXT:    fmov d20, x11
+; CHECK-NEXT:    frintx d22, d25
+; CHECK-NEXT:    frintx d26, d27
+; CHECK-NEXT:    frintx d16, d16
+; CHECK-NEXT:    mov v18.d[1], x0
+; CHECK-NEXT:    mov d25, v25.d[1]
+; CHECK-NEXT:    mov d7, v7.d[1]
+; CHECK-NEXT:    mov d6, v6.d[1]
+; CHECK-NEXT:    mov d0, v0.d[1]
+; CHECK-NEXT:    mov v20.d[1], x12
+; CHECK-NEXT:    fcvtzs x11, d21
+; CHECK-NEXT:    fmov d21, x15
+; CHECK-NEXT:    fcvtzs x12, d22
+; CHECK-NEXT:    fmov d22, x16
+; CHECK-NEXT:    fcvtzs x15, d26
+; CHECK-NEXT:    fmov d26, x14
+; CHECK-NEXT:    fcvtzs x14, d16
+; CHECK-NEXT:    frintx d25, d25
+; CHECK-NEXT:    frintx d7, d7
+; CHECK-NEXT:    mov d16, v1.d[1]
+; CHECK-NEXT:    mov d3, v3.d[1]
+; CHECK-NEXT:    stp q18, q20, [x8, #224]
+; CHECK-NEXT:    mov d18, v24.d[1]
+; CHECK-NEXT:    mov v22.d[1], x18
+; CHECK-NEXT:    mov v26.d[1], x17
+; CHECK-NEXT:    frintx d24, d24
+; CHECK-NEXT:    mov v21.d[1], x15
+; CHECK-NEXT:    mov v23.d[1], x14
+; CHECK-NEXT:    frintx d20, d2
+; CHECK-NEXT:    mov d2, v2.d[1]
+; CHECK-NEXT:    frintx d6, d6
+; CHECK-NEXT:    frintx d5, d5
+; CHECK-NEXT:    frintx d4, d4
+; CHECK-NEXT:    frintx d18, d18
+; CHECK-NEXT:    frintx d1, d1
+; CHECK-NEXT:    frintx d3, d3
+; CHECK-NEXT:    stp q22, q26, [x8, #192]
+; CHECK-NEXT:    fmov d22, x10
+; CHECK-NEXT:    fcvtzs x10, d24
+; CHECK-NEXT:    stp q23, q21, [x8, #160]
+; CHECK-NEXT:    fmov d21, x11
+; CHECK-NEXT:    fmov d24, x13
+; CHECK-NEXT:    frintx d2, d2
+; CHECK-NEXT:    fcvtzs x13, d6
+; CHECK-NEXT:    frintx d6, d16
+; CHECK-NEXT:    fcvtzs x11, d18
+; CHECK-NEXT:    fmov d18, x12
+; CHECK-NEXT:    fcvtzs x12, d25
+; CHECK-NEXT:    fmov d23, x10
+; CHECK-NEXT:    fcvtzs x10, d7
+; CHECK-NEXT:    fcvtzs x14, d5
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    fcvtzs x15, d3
+; CHECK-NEXT:    mov v24.d[1], x13
+; CHECK-NEXT:    fcvtzs x13, d2
+; CHECK-NEXT:    fmov d2, x9
+; CHECK-NEXT:    mov v23.d[1], x11
+; CHECK-NEXT:    fcvtzs x11, d4
+; CHECK-NEXT:    mov v18.d[1], x12
+; CHECK-NEXT:    fcvtzs x12, d20
+; CHECK-NEXT:    mov v21.d[1], x10
+; CHECK-NEXT:    fcvtzs x10, d1
+; CHECK-NEXT:    mov v22.d[1], x14
+; CHECK-NEXT:    fcvtzs x14, d6
+; CHECK-NEXT:    mov v19.d[1], x15
+; CHECK-NEXT:    stp q18, q23, [x8, #128]
+; CHECK-NEXT:    mov v17.d[1], x11
+; CHECK-NEXT:    fcvtzs x11, d0
+; CHECK-NEXT:    stp q24, q21, [x8, #96]
+; CHECK-NEXT:    fmov d0, x12
+; CHECK-NEXT:    fmov d1, x10
+; CHECK-NEXT:    stp q17, q22, [x8, #64]
+; CHECK-NEXT:    mov v0.d[1], x13
+; CHECK-NEXT:    mov v1.d[1], x14
+; CHECK-NEXT:    mov v2.d[1], x11
+; CHECK-NEXT:    stp q0, q19, [x8, #32]
+; CHECK-NEXT:    stp q2, q1, [x8]
+; CHECK-NEXT:    ret
+  %a = call <32 x i64> @llvm.lrint.v32i64.v32f64(<32 x double> %x)
+  ret <32 x i64> %a
+}
+declare <32 x i64> @llvm.lrint.v32i64.v32f64(<32 x double>)

>From 106f35521dc27d4ba9e2780339157d129fdee996 Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <r at artagnon.com>
Date: Wed, 8 May 2024 18:28:47 +0100
Subject: [PATCH 2/4] Use ISD::FP_TO_SINT

---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 22 +------------------
 1 file changed, 1 insertion(+), 21 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 41372b5432a0e..3d3d45840f4e0 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -4374,11 +4374,6 @@ SDValue AArch64TargetLowering::LowerVectorXRINT(SDValue Op,
 
   assert(VT.isVector() && "Expected vector type");
 
-  // We can't custom-lower ISD::[L]LRINT without SVE, since it requires
-  // AArch64ISD::FCVTZS_MERGE_PASSTHRU.
-  if (!Subtarget->isSVEAvailable())
-    return SDValue();
-
   EVT ContainerVT = VT;
   EVT SrcVT = Src.getValueType();
   EVT CastVT =
@@ -4394,24 +4389,9 @@ SDValue AArch64TargetLowering::LowerVectorXRINT(SDValue Op,
   // the current rounding mode.
   SDValue FOp = DAG.getNode(ISD::FRINT, DL, CastVT, Src);
 
-  // In the case of vector filled with f32, ftrunc will convert it to an i32,
-  // but a vector filled with i32 isn't legal. So, FP_EXTEND the f32 into the
-  // required size.
-  size_t SrcSz = SrcVT.getScalarSizeInBits();
-  size_t ContainerSz = ContainerVT.getScalarSizeInBits();
-  if (ContainerSz > SrcSz) {
-    EVT SizedVT = MVT::getVectorVT(MVT::getFloatingPointVT(ContainerSz),
-                                   ContainerVT.getVectorElementCount());
-    FOp = DAG.getNode(ISD::FP_EXTEND, DL, SizedVT, FOp.getOperand(0));
-  }
-
   // Finally, truncate the rounded floating point to an integer, rounding to
   // zero.
-  SDValue Pred = getPredicateForVector(DAG, DL, ContainerVT);
-  SDValue Undef = DAG.getUNDEF(ContainerVT);
-  SDValue Truncated =
-      DAG.getNode(AArch64ISD::FCVTZS_MERGE_PASSTHRU, DL, ContainerVT,
-                  {Pred, FOp.getOperand(0), Undef}, FOp->getFlags());
+  SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, ContainerVT, FOp.getOperand(0));
 
   if (VT.isScalableVector())
     return Truncated;

>From ab50733b0216f6a433a0d3a1db0cc30cc8201efc Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <r at artagnon.com>
Date: Wed, 8 May 2024 18:34:23 +0100
Subject: [PATCH 3/4] clang-format

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 3d3d45840f4e0..8e530846881e6 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -4391,7 +4391,8 @@ SDValue AArch64TargetLowering::LowerVectorXRINT(SDValue Op,
 
   // Finally, truncate the rounded floating point to an integer, rounding to
   // zero.
-  SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, ContainerVT, FOp.getOperand(0));
+  SDValue Truncated =
+      DAG.getNode(ISD::FP_TO_SINT, DL, ContainerVT, FOp.getOperand(0));
 
   if (VT.isScalableVector())
     return Truncated;

>From 9a92adf8fc3e64ff5bca9525e300b4ca85c544af Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <r at artagnon.com>
Date: Thu, 9 May 2024 12:40:31 +0100
Subject: [PATCH 4/4] ISel/AArch64: fix all issues

---
 .../Target/AArch64/AArch64ISelLowering.cpp    |   46 +-
 .../AArch64/sve-fixed-vector-llrint.ll        | 1077 +++++---------
 .../CodeGen/AArch64/sve-fixed-vector-lrint.ll | 1077 +++++---------
 llvm/test/CodeGen/AArch64/sve-llrint.ll       |  434 +++---
 llvm/test/CodeGen/AArch64/sve-lrint.ll        |  434 +++---
 llvm/test/CodeGen/AArch64/vector-llrint.ll    | 1211 +++++++---------
 llvm/test/CodeGen/AArch64/vector-lrint.ll     | 1243 +++++++----------
 7 files changed, 2267 insertions(+), 3255 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 8e530846881e6..dcfe07dc330d6 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1304,6 +1304,12 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
           setOperationAction(Op, Ty, Legal);
     }
 
+    // LRINT and LLRINT.
+    for (auto VT : MVT::fp_fixedlen_vector_valuetypes()) {
+      setOperationAction(ISD::LRINT, VT, Custom);
+      setOperationAction(ISD::LLRINT, VT, Custom);
+    }
+
     setTruncStoreAction(MVT::v4i16, MVT::v4i8, Custom);
 
     setOperationAction(ISD::BITCAST, MVT::i2, Custom);
@@ -1419,6 +1425,12 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
         setOperationAction(ISD::OR, VT, Custom);
     }
 
+    // LRINT and LLRINT.
+    for (auto VT : MVT::fp_scalable_vector_valuetypes()) {
+      setOperationAction(ISD::LRINT, VT, Custom);
+      setOperationAction(ISD::LLRINT, VT, Custom);
+    }
+
     // Illegal unpacked integer vector types.
     for (auto VT : {MVT::nxv8i8, MVT::nxv4i16, MVT::nxv2i32}) {
       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
@@ -1526,8 +1538,6 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::FNEARBYINT, VT, Custom);
       setOperationAction(ISD::FRINT, VT, Custom);
       setOperationAction(ISD::FROUND, VT, Custom);
-      setOperationAction(ISD::LRINT, VT, Custom);
-      setOperationAction(ISD::LLRINT, VT, Custom);
       setOperationAction(ISD::FROUNDEVEN, VT, Custom);
       setOperationAction(ISD::FTRUNC, VT, Custom);
       setOperationAction(ISD::FSQRT, VT, Custom);
@@ -1667,6 +1677,11 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
         setOperationAction(ISD::MULHU, VT, Custom);
       }
 
+      // LRINT and LLRINT.
+      for (auto VT : MVT::fp_fixedlen_vector_valuetypes()) {
+        setOperationAction(ISD::LRINT, VT, Custom);
+        setOperationAction(ISD::LLRINT, VT, Custom);
+      }
 
       // Use SVE for vectors with more than 2 elements.
       for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v4f32})
@@ -1942,8 +1957,6 @@ void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
   setOperationAction(ISD::FP_TO_UINT, VT, Default);
   setOperationAction(ISD::FRINT, VT, Default);
   setOperationAction(ISD::FROUND, VT, Default);
-  setOperationAction(ISD::LRINT, VT, Default);
-  setOperationAction(ISD::LLRINT, VT, Default);
   setOperationAction(ISD::FROUNDEVEN, VT, Default);
   setOperationAction(ISD::FSQRT, VT, Default);
   setOperationAction(ISD::FSUB, VT, Default);
@@ -4374,30 +4387,15 @@ SDValue AArch64TargetLowering::LowerVectorXRINT(SDValue Op,
 
   assert(VT.isVector() && "Expected vector type");
 
-  EVT ContainerVT = VT;
-  EVT SrcVT = Src.getValueType();
   EVT CastVT =
-      ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
+      VT.changeVectorElementType(Src.getValueType().getVectorElementType());
 
-  if (VT.isFixedLengthVector()) {
-    ContainerVT = getContainerForFixedLengthVector(DAG, VT);
-    CastVT = ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
-    Src = convertToScalableVector(DAG, CastVT, Src);
-  }
-
-  // First, round the floating-point value into a floating-point register with
-  // the current rounding mode.
+  // Round the floating-point value into a floating-point register with the
+  // current rounding mode.
   SDValue FOp = DAG.getNode(ISD::FRINT, DL, CastVT, Src);
 
-  // Finally, truncate the rounded floating point to an integer, rounding to
-  // zero.
-  SDValue Truncated =
-      DAG.getNode(ISD::FP_TO_SINT, DL, ContainerVT, FOp.getOperand(0));
-
-  if (VT.isScalableVector())
-    return Truncated;
-
-  return convertFromScalableVector(DAG, VT, Truncated);
+  // Truncate the rounded floating point to an integer, rounding to zero.
+  return DAG.getNode(ISD::FP_TO_SINT, DL, VT, FOp);
 }
 
 SDValue AArch64TargetLowering::LowerVectorINT_TO_FP(SDValue Op,
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-vector-llrint.ll b/llvm/test/CodeGen/AArch64/sve-fixed-vector-llrint.ll
index febfa785eaeff..89ef30e38849f 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-vector-llrint.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-vector-llrint.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64 -mattr=+sve | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -mattr=+sve -aarch64-sve-vector-bits-min=256 | FileCheck %s
 
 define <1 x i64> @llrint_v1i64_v1f16(<1 x half> %x) {
 ; CHECK-LABEL: llrint_v1i64_v1f16:
@@ -16,14 +16,12 @@ declare <1 x i64> @llvm.llrint.v1i64.v1f16(<1 x half>)
 define <2 x i64> @llrint_v1i64_v2f16(<2 x half> %x) {
 ; CHECK-LABEL: llrint_v1i64_v2f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    mov h1, v0.h[1]
-; CHECK-NEXT:    frintx h0, h0
-; CHECK-NEXT:    frintx h1, h1
-; CHECK-NEXT:    fcvtzs x8, h0
-; CHECK-NEXT:    fcvtzs x9, h1
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    mov v0.d[1], x9
+; CHECK-NEXT:    frintx v0.4h, v0.4h
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %a = call <2 x i64> @llvm.llrint.v2i64.v2f16(<2 x half> %x)
   ret <2 x i64> %a
@@ -33,22 +31,15 @@ declare <2 x i64> @llvm.llrint.v2i64.v2f16(<2 x half>)
 define <4 x i64> @llrint_v4i64_v4f16(<4 x half> %x) {
 ; CHECK-LABEL: llrint_v4i64_v4f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    mov h1, v0.h[2]
-; CHECK-NEXT:    mov h2, v0.h[1]
-; CHECK-NEXT:    mov h3, v0.h[3]
-; CHECK-NEXT:    frintx h0, h0
-; CHECK-NEXT:    frintx h1, h1
-; CHECK-NEXT:    frintx h2, h2
-; CHECK-NEXT:    frintx h3, h3
-; CHECK-NEXT:    fcvtzs x8, h0
-; CHECK-NEXT:    fcvtzs x9, h1
-; CHECK-NEXT:    fcvtzs x10, h2
-; CHECK-NEXT:    fcvtzs x11, h3
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d1, x9
-; CHECK-NEXT:    mov v0.d[1], x10
-; CHECK-NEXT:    mov v1.d[1], x11
+; CHECK-NEXT:    frintx v0.4h, v0.4h
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.h
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #16
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
 ; CHECK-NEXT:    ret
   %a = call <4 x i64> @llvm.llrint.v4i64.v4f16(<4 x half> %x)
   ret <4 x i64> %a
@@ -59,36 +50,24 @@ define <8 x i64> @llrint_v8i64_v8f16(<8 x half> %x) {
 ; CHECK-LABEL: llrint_v8i64_v8f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    mov h4, v0.h[2]
-; CHECK-NEXT:    mov h3, v0.h[1]
-; CHECK-NEXT:    mov h7, v0.h[3]
-; CHECK-NEXT:    frintx h0, h0
-; CHECK-NEXT:    mov h2, v1.h[2]
-; CHECK-NEXT:    mov h5, v1.h[1]
-; CHECK-NEXT:    mov h6, v1.h[3]
-; CHECK-NEXT:    frintx h1, h1
-; CHECK-NEXT:    frintx h4, h4
-; CHECK-NEXT:    frintx h3, h3
-; CHECK-NEXT:    frintx h7, h7
-; CHECK-NEXT:    fcvtzs x9, h0
-; CHECK-NEXT:    frintx h2, h2
-; CHECK-NEXT:    frintx h5, h5
-; CHECK-NEXT:    frintx h6, h6
-; CHECK-NEXT:    fcvtzs x8, h1
-; CHECK-NEXT:    fcvtzs x12, h4
-; CHECK-NEXT:    fcvtzs x11, h3
-; CHECK-NEXT:    fcvtzs x15, h7
-; CHECK-NEXT:    fmov d0, x9
-; CHECK-NEXT:    fcvtzs x10, h2
-; CHECK-NEXT:    fcvtzs x13, h5
-; CHECK-NEXT:    fcvtzs x14, h6
-; CHECK-NEXT:    fmov d2, x8
-; CHECK-NEXT:    fmov d1, x12
-; CHECK-NEXT:    mov v0.d[1], x11
-; CHECK-NEXT:    fmov d3, x10
-; CHECK-NEXT:    mov v2.d[1], x13
-; CHECK-NEXT:    mov v1.d[1], x15
-; CHECK-NEXT:    mov v3.d[1], x14
+; CHECK-NEXT:    frintx v0.4h, v0.4h
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    frintx v1.4h, v1.4h
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z1.s, z1.h
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    uunpklo z1.d, z1.s
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.h
+; CHECK-NEXT:    movprfx z2, z1
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z1.h
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #16
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    mov z3.d, z2.d
+; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
+; CHECK-NEXT:    ext z3.b, z3.b, z2.b, #16
+; CHECK-NEXT:    // kill: def $q2 killed $q2 killed $z2
+; CHECK-NEXT:    // kill: def $q3 killed $q3 killed $z3
 ; CHECK-NEXT:    ret
   %a = call <8 x i64> @llvm.llrint.v8i64.v8f16(<8 x half> %x)
   ret <8 x i64> %a
@@ -100,66 +79,41 @@ define <16 x i64> @llrint_v16i64_v16f16(<16 x half> %x) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
 ; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    mov h4, v0.h[1]
-; CHECK-NEXT:    frintx h5, h0
-; CHECK-NEXT:    mov h18, v0.h[2]
-; CHECK-NEXT:    mov h0, v0.h[3]
-; CHECK-NEXT:    frintx h6, h2
-; CHECK-NEXT:    mov h7, v2.h[1]
-; CHECK-NEXT:    mov h16, v2.h[2]
-; CHECK-NEXT:    mov h17, v3.h[2]
-; CHECK-NEXT:    frintx h19, h3
-; CHECK-NEXT:    frintx h4, h4
-; CHECK-NEXT:    fcvtzs x8, h5
-; CHECK-NEXT:    mov h5, v1.h[1]
-; CHECK-NEXT:    mov h2, v2.h[3]
-; CHECK-NEXT:    frintx h18, h18
-; CHECK-NEXT:    frintx h0, h0
-; CHECK-NEXT:    fcvtzs x9, h6
-; CHECK-NEXT:    frintx h6, h7
-; CHECK-NEXT:    frintx h7, h16
-; CHECK-NEXT:    mov h16, v1.h[2]
-; CHECK-NEXT:    frintx h17, h17
-; CHECK-NEXT:    fcvtzs x10, h19
-; CHECK-NEXT:    mov h19, v3.h[1]
-; CHECK-NEXT:    fcvtzs x11, h4
-; CHECK-NEXT:    mov h4, v1.h[3]
-; CHECK-NEXT:    mov h3, v3.h[3]
-; CHECK-NEXT:    frintx h1, h1
-; CHECK-NEXT:    frintx h5, h5
-; CHECK-NEXT:    fcvtzs x13, h7
-; CHECK-NEXT:    fcvtzs x12, h6
-; CHECK-NEXT:    fcvtzs x15, h18
-; CHECK-NEXT:    frintx h7, h16
-; CHECK-NEXT:    fcvtzs x14, h17
-; CHECK-NEXT:    frintx h16, h2
-; CHECK-NEXT:    frintx h17, h19
-; CHECK-NEXT:    frintx h4, h4
-; CHECK-NEXT:    fmov d2, x9
-; CHECK-NEXT:    frintx h19, h3
-; CHECK-NEXT:    fcvtzs x9, h1
-; CHECK-NEXT:    fmov d6, x10
-; CHECK-NEXT:    fmov d3, x13
-; CHECK-NEXT:    fcvtzs x13, h0
-; CHECK-NEXT:    fcvtzs x16, h5
-; CHECK-NEXT:    fcvtzs x10, h7
-; CHECK-NEXT:    fmov d7, x14
-; CHECK-NEXT:    fcvtzs x14, h16
-; CHECK-NEXT:    fcvtzs x17, h17
-; CHECK-NEXT:    fcvtzs x0, h4
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fcvtzs x18, h19
-; CHECK-NEXT:    fmov d1, x15
-; CHECK-NEXT:    fmov d4, x9
-; CHECK-NEXT:    mov v2.d[1], x12
-; CHECK-NEXT:    fmov d5, x10
-; CHECK-NEXT:    mov v0.d[1], x11
-; CHECK-NEXT:    mov v3.d[1], x14
-; CHECK-NEXT:    mov v1.d[1], x13
-; CHECK-NEXT:    mov v4.d[1], x16
-; CHECK-NEXT:    mov v6.d[1], x17
-; CHECK-NEXT:    mov v7.d[1], x18
-; CHECK-NEXT:    mov v5.d[1], x0
+; CHECK-NEXT:    frintx v0.4h, v0.4h
+; CHECK-NEXT:    frintx v1.4h, v1.4h
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    frintx v2.4h, v2.4h
+; CHECK-NEXT:    frintx v3.4h, v3.4h
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z1.s, z1.h
+; CHECK-NEXT:    uunpklo z2.s, z2.h
+; CHECK-NEXT:    uunpklo z3.s, z3.h
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    uunpklo z1.d, z1.s
+; CHECK-NEXT:    uunpklo z2.d, z2.s
+; CHECK-NEXT:    uunpklo z3.d, z3.s
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.h
+; CHECK-NEXT:    movprfx z4, z1
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z1.h
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.h
+; CHECK-NEXT:    movprfx z6, z3
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z3.h
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    mov z5.d, z4.d
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #16
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    mov z3.d, z2.d
+; CHECK-NEXT:    mov z7.d, z6.d
+; CHECK-NEXT:    ext z5.b, z5.b, z4.b, #16
+; CHECK-NEXT:    // kill: def $q4 killed $q4 killed $z4
+; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
+; CHECK-NEXT:    // kill: def $q5 killed $q5 killed $z5
+; CHECK-NEXT:    ext z3.b, z3.b, z2.b, #16
+; CHECK-NEXT:    ext z7.b, z7.b, z6.b, #16
+; CHECK-NEXT:    // kill: def $q2 killed $q2 killed $z2
+; CHECK-NEXT:    // kill: def $q6 killed $q6 killed $z6
+; CHECK-NEXT:    // kill: def $q3 killed $q3 killed $z3
+; CHECK-NEXT:    // kill: def $q7 killed $q7 killed $z7
 ; CHECK-NEXT:    ret
   %a = call <16 x i64> @llvm.llrint.v16i64.v16f16(<16 x half> %x)
   ret <16 x i64> %a
@@ -169,138 +123,61 @@ declare <16 x i64> @llvm.llrint.v16i64.v16f16(<16 x half>)
 define <32 x i64> @llrint_v32i64_v32f16(<32 x half> %x) {
 ; CHECK-LABEL: llrint_v32i64_v32f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v4.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    ext v4.16b, v3.16b, v3.16b, #8
 ; CHECK-NEXT:    ext v5.16b, v2.16b, v2.16b, #8
-; CHECK-NEXT:    ext v6.16b, v3.16b, v3.16b, #8
+; CHECK-NEXT:    mov x9, #24 // =0x18
+; CHECK-NEXT:    frintx v3.4h, v3.4h
+; CHECK-NEXT:    ext v6.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    frintx v2.4h, v2.4h
 ; CHECK-NEXT:    ext v7.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    frintx h21, h1
-; CHECK-NEXT:    frintx h22, h2
-; CHECK-NEXT:    mov h26, v2.h[2]
-; CHECK-NEXT:    frintx h19, h0
-; CHECK-NEXT:    mov h27, v3.h[2]
-; CHECK-NEXT:    mov h20, v2.h[1]
-; CHECK-NEXT:    mov h18, v1.h[1]
-; CHECK-NEXT:    mov h16, v4.h[2]
-; CHECK-NEXT:    mov h17, v5.h[2]
-; CHECK-NEXT:    frintx h23, h5
-; CHECK-NEXT:    frintx h24, h6
-; CHECK-NEXT:    mov h25, v6.h[2]
-; CHECK-NEXT:    fcvtzs x9, h21
-; CHECK-NEXT:    fcvtzs x11, h22
-; CHECK-NEXT:    frintx h22, h7
-; CHECK-NEXT:    mov h21, v3.h[3]
-; CHECK-NEXT:    fcvtzs x10, h19
-; CHECK-NEXT:    frintx h27, h27
-; CHECK-NEXT:    frintx h20, h20
-; CHECK-NEXT:    frintx h16, h16
-; CHECK-NEXT:    frintx h17, h17
-; CHECK-NEXT:    fcvtzs x12, h23
-; CHECK-NEXT:    fcvtzs x13, h24
-; CHECK-NEXT:    frintx h23, h25
-; CHECK-NEXT:    frintx h25, h26
-; CHECK-NEXT:    mov h26, v3.h[1]
-; CHECK-NEXT:    mov h24, v2.h[3]
-; CHECK-NEXT:    fmov d19, x9
-; CHECK-NEXT:    fcvtzs x9, h22
-; CHECK-NEXT:    frintx h22, h3
-; CHECK-NEXT:    frintx h21, h21
-; CHECK-NEXT:    fcvtzs x14, h16
-; CHECK-NEXT:    fcvtzs x15, h17
-; CHECK-NEXT:    fmov d2, x12
-; CHECK-NEXT:    fmov d16, x13
-; CHECK-NEXT:    fcvtzs x12, h23
-; CHECK-NEXT:    fcvtzs x13, h25
-; CHECK-NEXT:    mov h23, v1.h[2]
-; CHECK-NEXT:    frintx h25, h26
-; CHECK-NEXT:    frintx h24, h24
-; CHECK-NEXT:    mov h1, v1.h[3]
-; CHECK-NEXT:    fmov d26, x11
-; CHECK-NEXT:    fcvtzs x11, h21
-; CHECK-NEXT:    fmov d3, x14
-; CHECK-NEXT:    fmov d17, x15
-; CHECK-NEXT:    fcvtzs x14, h22
-; CHECK-NEXT:    fcvtzs x15, h27
-; CHECK-NEXT:    mov h22, v0.h[2]
-; CHECK-NEXT:    frintx h18, h18
-; CHECK-NEXT:    frintx h21, h23
-; CHECK-NEXT:    fmov d23, x13
-; CHECK-NEXT:    fcvtzs x13, h25
-; CHECK-NEXT:    frintx h1, h1
-; CHECK-NEXT:    fmov d25, x14
-; CHECK-NEXT:    fcvtzs x14, h24
-; CHECK-NEXT:    fmov d24, x15
-; CHECK-NEXT:    frintx h22, h22
-; CHECK-NEXT:    fcvtzs x15, h18
-; CHECK-NEXT:    mov h18, v7.h[1]
-; CHECK-NEXT:    mov v25.d[1], x13
-; CHECK-NEXT:    fcvtzs x13, h21
-; CHECK-NEXT:    mov h21, v7.h[2]
-; CHECK-NEXT:    mov v24.d[1], x11
-; CHECK-NEXT:    fcvtzs x11, h20
-; CHECK-NEXT:    mov h20, v0.h[1]
-; CHECK-NEXT:    mov h0, v0.h[3]
-; CHECK-NEXT:    mov v23.d[1], x14
-; CHECK-NEXT:    fcvtzs x14, h1
-; CHECK-NEXT:    mov h1, v6.h[3]
-; CHECK-NEXT:    mov h6, v6.h[1]
-; CHECK-NEXT:    mov v19.d[1], x15
-; CHECK-NEXT:    mov h7, v7.h[3]
-; CHECK-NEXT:    stp q25, q24, [x8, #192]
-; CHECK-NEXT:    fmov d24, x13
-; CHECK-NEXT:    frintx h20, h20
-; CHECK-NEXT:    mov v26.d[1], x11
-; CHECK-NEXT:    fcvtzs x11, h22
-; CHECK-NEXT:    mov h22, v5.h[1]
-; CHECK-NEXT:    mov h5, v5.h[3]
-; CHECK-NEXT:    frintx h0, h0
-; CHECK-NEXT:    frintx h1, h1
-; CHECK-NEXT:    mov v24.d[1], x14
-; CHECK-NEXT:    mov h25, v4.h[3]
-; CHECK-NEXT:    frintx h6, h6
-; CHECK-NEXT:    stp q26, q23, [x8, #128]
-; CHECK-NEXT:    fmov d23, x12
-; CHECK-NEXT:    fcvtzs x12, h20
-; CHECK-NEXT:    mov h20, v4.h[1]
-; CHECK-NEXT:    frintx h5, h5
-; CHECK-NEXT:    fcvtzs x13, h0
-; CHECK-NEXT:    stp q19, q24, [x8, #64]
-; CHECK-NEXT:    frintx h22, h22
-; CHECK-NEXT:    fmov d0, x10
-; CHECK-NEXT:    fmov d19, x11
-; CHECK-NEXT:    frintx h4, h4
-; CHECK-NEXT:    fcvtzs x10, h1
-; CHECK-NEXT:    frintx h1, h21
-; CHECK-NEXT:    frintx h24, h25
-; CHECK-NEXT:    fcvtzs x11, h6
-; CHECK-NEXT:    frintx h20, h20
-; CHECK-NEXT:    frintx h6, h7
-; CHECK-NEXT:    fcvtzs x14, h5
-; CHECK-NEXT:    mov v19.d[1], x13
-; CHECK-NEXT:    frintx h5, h18
-; CHECK-NEXT:    fcvtzs x13, h22
-; CHECK-NEXT:    mov v0.d[1], x12
-; CHECK-NEXT:    fcvtzs x12, h4
-; CHECK-NEXT:    mov v23.d[1], x10
-; CHECK-NEXT:    fcvtzs x10, h1
-; CHECK-NEXT:    fcvtzs x15, h24
-; CHECK-NEXT:    mov v16.d[1], x11
-; CHECK-NEXT:    fcvtzs x11, h20
-; CHECK-NEXT:    mov v17.d[1], x14
-; CHECK-NEXT:    fcvtzs x14, h6
-; CHECK-NEXT:    mov v2.d[1], x13
-; CHECK-NEXT:    fcvtzs x13, h5
-; CHECK-NEXT:    fmov d4, x9
-; CHECK-NEXT:    stp q0, q19, [x8]
-; CHECK-NEXT:    fmov d0, x12
-; CHECK-NEXT:    stp q16, q23, [x8, #224]
-; CHECK-NEXT:    fmov d1, x10
-; CHECK-NEXT:    mov v3.d[1], x15
-; CHECK-NEXT:    stp q2, q17, [x8, #160]
-; CHECK-NEXT:    mov v0.d[1], x11
-; CHECK-NEXT:    mov v4.d[1], x13
-; CHECK-NEXT:    mov v1.d[1], x14
-; CHECK-NEXT:    stp q0, q3, [x8, #96]
-; CHECK-NEXT:    stp q4, q1, [x8, #32]
+; CHECK-NEXT:    frintx v1.4h, v1.4h
+; CHECK-NEXT:    frintx v0.4h, v0.4h
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    frintx v4.4h, v4.4h
+; CHECK-NEXT:    frintx v5.4h, v5.4h
+; CHECK-NEXT:    uunpklo z3.s, z3.h
+; CHECK-NEXT:    frintx v6.4h, v6.4h
+; CHECK-NEXT:    uunpklo z2.s, z2.h
+; CHECK-NEXT:    frintx v7.4h, v7.4h
+; CHECK-NEXT:    uunpklo z1.s, z1.h
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z4.s, z4.h
+; CHECK-NEXT:    uunpklo z5.s, z5.h
+; CHECK-NEXT:    uunpklo z3.d, z3.s
+; CHECK-NEXT:    uunpklo z6.s, z6.h
+; CHECK-NEXT:    uunpklo z2.d, z2.s
+; CHECK-NEXT:    uunpklo z7.s, z7.h
+; CHECK-NEXT:    uunpklo z1.d, z1.s
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    uunpklo z4.d, z4.s
+; CHECK-NEXT:    uunpklo z5.d, z5.s
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.h
+; CHECK-NEXT:    uunpklo z6.d, z6.s
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.h
+; CHECK-NEXT:    uunpklo z7.d, z7.s
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.h
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.h
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z4.h
+; CHECK-NEXT:    st1d { z3.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #16 // =0x10
+; CHECK-NEXT:    movprfx z3, z5
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z5.h
+; CHECK-NEXT:    st1d { z2.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #8 // =0x8
+; CHECK-NEXT:    movprfx z2, z6
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z6.h
+; CHECK-NEXT:    st1d { z1.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #28 // =0x1c
+; CHECK-NEXT:    movprfx z1, z7
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z7.h
+; CHECK-NEXT:    st1d { z4.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #20 // =0x14
+; CHECK-NEXT:    st1d { z3.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #12 // =0xc
+; CHECK-NEXT:    st1d { z2.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #4 // =0x4
+; CHECK-NEXT:    st1d { z1.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    st1d { z0.d }, p0, [x8]
 ; CHECK-NEXT:    ret
   %a = call <32 x i64> @llvm.llrint.v32i64.v32f16(<32 x half> %x)
   ret <32 x i64> %a
@@ -310,10 +187,10 @@ declare <32 x i64> @llvm.llrint.v32i64.v32f16(<32 x half>)
 define <1 x i64> @llrint_v1i64_v1f32(<1 x float> %x) {
 ; CHECK-LABEL: llrint_v1i64_v1f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    fcvtzs x8, s0
-; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    frintx v0.2s, v0.2s
+; CHECK-NEXT:    fcvtl v0.2d, v0.2s
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
 ; CHECK-NEXT:    ret
   %a = call <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float> %x)
   ret <1 x i64> %a
@@ -323,14 +200,9 @@ declare <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float>)
 define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) {
 ; CHECK-LABEL: llrint_v2i64_v2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    mov s1, v0.s[1]
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    fcvtzs x8, s0
-; CHECK-NEXT:    fcvtzs x9, s1
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    mov v0.d[1], x9
+; CHECK-NEXT:    frintx v0.2s, v0.2s
+; CHECK-NEXT:    fcvtl v0.2d, v0.2s
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
 ; CHECK-NEXT:    ret
   %a = call <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float> %x)
   ret <2 x i64> %a
@@ -340,21 +212,14 @@ declare <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float>)
 define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
 ; CHECK-LABEL: llrint_v4i64_v4f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    mov s3, v0.s[1]
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    mov s2, v1.s[1]
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    frintx s3, s3
-; CHECK-NEXT:    fcvtzs x9, s0
-; CHECK-NEXT:    frintx s2, s2
-; CHECK-NEXT:    fcvtzs x8, s1
-; CHECK-NEXT:    fcvtzs x11, s3
-; CHECK-NEXT:    fmov d0, x9
-; CHECK-NEXT:    fcvtzs x10, s2
-; CHECK-NEXT:    fmov d1, x8
-; CHECK-NEXT:    mov v0.d[1], x11
-; CHECK-NEXT:    mov v1.d[1], x10
+; CHECK-NEXT:    frintx v0.4s, v0.4s
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.s
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #16
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
 ; CHECK-NEXT:    ret
   %a = call <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float> %x)
   ret <4 x i64> %a
@@ -364,36 +229,22 @@ declare <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float>)
 define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
 ; CHECK-LABEL: llrint_v8i64_v8f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    mov s4, v0.s[1]
-; CHECK-NEXT:    mov s7, v1.s[1]
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    mov s5, v2.s[1]
-; CHECK-NEXT:    mov s6, v3.s[1]
-; CHECK-NEXT:    frintx s2, s2
-; CHECK-NEXT:    frintx s3, s3
-; CHECK-NEXT:    frintx s4, s4
-; CHECK-NEXT:    frintx s7, s7
-; CHECK-NEXT:    fcvtzs x9, s0
-; CHECK-NEXT:    fcvtzs x12, s1
-; CHECK-NEXT:    frintx s5, s5
-; CHECK-NEXT:    frintx s6, s6
-; CHECK-NEXT:    fcvtzs x8, s2
-; CHECK-NEXT:    fcvtzs x10, s3
-; CHECK-NEXT:    fcvtzs x11, s4
-; CHECK-NEXT:    fcvtzs x15, s7
-; CHECK-NEXT:    fmov d0, x9
-; CHECK-NEXT:    fmov d2, x12
-; CHECK-NEXT:    fcvtzs x13, s5
-; CHECK-NEXT:    fcvtzs x14, s6
-; CHECK-NEXT:    fmov d1, x8
-; CHECK-NEXT:    fmov d3, x10
-; CHECK-NEXT:    mov v0.d[1], x11
-; CHECK-NEXT:    mov v2.d[1], x15
-; CHECK-NEXT:    mov v1.d[1], x13
-; CHECK-NEXT:    mov v3.d[1], x14
+; CHECK-NEXT:    frintx v0.4s, v0.4s
+; CHECK-NEXT:    frintx v1.4s, v1.4s
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    uunpklo z1.d, z1.s
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.s
+; CHECK-NEXT:    movprfx z2, z1
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z1.s
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    mov z3.d, z2.d
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #16
+; CHECK-NEXT:    ext z3.b, z3.b, z2.b, #16
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    // kill: def $q2 killed $q2 killed $z2
+; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
+; CHECK-NEXT:    // kill: def $q3 killed $q3 killed $z3
 ; CHECK-NEXT:    ret
   %a = call <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float> %x)
   ret <8 x i64> %a
@@ -403,66 +254,37 @@ declare <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float>)
 define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
 ; CHECK-LABEL: llrint_v16i64_v16f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v4.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    ext v5.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ext v6.16b, v2.16b, v2.16b, #8
-; CHECK-NEXT:    frintx s7, s0
-; CHECK-NEXT:    ext v16.16b, v3.16b, v3.16b, #8
-; CHECK-NEXT:    mov s0, v0.s[1]
-; CHECK-NEXT:    frintx s17, s4
-; CHECK-NEXT:    mov s4, v4.s[1]
-; CHECK-NEXT:    mov s18, v5.s[1]
-; CHECK-NEXT:    frintx s5, s5
-; CHECK-NEXT:    frintx s19, s6
-; CHECK-NEXT:    fcvtzs x8, s7
-; CHECK-NEXT:    frintx s7, s16
-; CHECK-NEXT:    mov s6, v6.s[1]
-; CHECK-NEXT:    mov s16, v16.s[1]
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    frintx s4, s4
-; CHECK-NEXT:    fcvtzs x9, s17
-; CHECK-NEXT:    frintx s17, s1
-; CHECK-NEXT:    mov s1, v1.s[1]
-; CHECK-NEXT:    frintx s18, s18
-; CHECK-NEXT:    fcvtzs x10, s5
-; CHECK-NEXT:    mov s5, v2.s[1]
-; CHECK-NEXT:    fcvtzs x11, s19
-; CHECK-NEXT:    mov s19, v3.s[1]
-; CHECK-NEXT:    frintx s2, s2
-; CHECK-NEXT:    fcvtzs x12, s7
-; CHECK-NEXT:    frintx s6, s6
-; CHECK-NEXT:    fcvtzs x13, s4
-; CHECK-NEXT:    frintx s4, s3
-; CHECK-NEXT:    frintx s16, s16
-; CHECK-NEXT:    fcvtzs x14, s18
-; CHECK-NEXT:    frintx s18, s1
-; CHECK-NEXT:    fcvtzs x15, s17
-; CHECK-NEXT:    frintx s20, s5
-; CHECK-NEXT:    frintx s17, s19
-; CHECK-NEXT:    fmov d1, x9
-; CHECK-NEXT:    fcvtzs x9, s2
-; CHECK-NEXT:    fmov d5, x11
-; CHECK-NEXT:    fmov d3, x10
-; CHECK-NEXT:    fcvtzs x11, s4
-; CHECK-NEXT:    fcvtzs x10, s0
-; CHECK-NEXT:    fmov d7, x12
-; CHECK-NEXT:    fcvtzs x12, s18
-; CHECK-NEXT:    fcvtzs x17, s6
-; CHECK-NEXT:    fcvtzs x18, s16
-; CHECK-NEXT:    fcvtzs x16, s20
-; CHECK-NEXT:    fcvtzs x0, s17
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d2, x15
-; CHECK-NEXT:    fmov d4, x9
-; CHECK-NEXT:    mov v1.d[1], x13
-; CHECK-NEXT:    fmov d6, x11
-; CHECK-NEXT:    mov v3.d[1], x14
-; CHECK-NEXT:    mov v0.d[1], x10
-; CHECK-NEXT:    mov v5.d[1], x17
-; CHECK-NEXT:    mov v7.d[1], x18
-; CHECK-NEXT:    mov v2.d[1], x12
-; CHECK-NEXT:    mov v4.d[1], x16
-; CHECK-NEXT:    mov v6.d[1], x0
+; CHECK-NEXT:    frintx v1.4s, v1.4s
+; CHECK-NEXT:    frintx v0.4s, v0.4s
+; CHECK-NEXT:    frintx v2.4s, v2.4s
+; CHECK-NEXT:    frintx v3.4s, v3.4s
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    uunpklo z1.d, z1.s
+; CHECK-NEXT:    uunpklo z4.d, z2.s
+; CHECK-NEXT:    uunpklo z3.d, z3.s
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.s
+; CHECK-NEXT:    movprfx z2, z1
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z1.s
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z4.s
+; CHECK-NEXT:    movprfx z6, z3
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z3.s
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    mov z3.d, z2.d
+; CHECK-NEXT:    mov z5.d, z4.d
+; CHECK-NEXT:    mov z7.d, z6.d
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #16
+; CHECK-NEXT:    ext z3.b, z3.b, z2.b, #16
+; CHECK-NEXT:    ext z5.b, z5.b, z4.b, #16
+; CHECK-NEXT:    ext z7.b, z7.b, z6.b, #16
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    // kill: def $q2 killed $q2 killed $z2
+; CHECK-NEXT:    // kill: def $q4 killed $q4 killed $z4
+; CHECK-NEXT:    // kill: def $q6 killed $q6 killed $z6
+; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
+; CHECK-NEXT:    // kill: def $q3 killed $q3 killed $z3
+; CHECK-NEXT:    // kill: def $q5 killed $q5 killed $z5
+; CHECK-NEXT:    // kill: def $q7 killed $q7 killed $z7
 ; CHECK-NEXT:    ret
   %a = call <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float> %x)
   ret <16 x i64> %a
@@ -472,134 +294,46 @@ declare <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float>)
 define <32 x i64> @llrint_v32i64_v32f32(<32 x float> %x) {
 ; CHECK-LABEL: llrint_v32i64_v32f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v16.16b, v2.16b, v2.16b, #8
-; CHECK-NEXT:    ext v20.16b, v5.16b, v5.16b, #8
-; CHECK-NEXT:    ext v17.16b, v3.16b, v3.16b, #8
-; CHECK-NEXT:    ext v18.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    ext v19.16b, v4.16b, v4.16b, #8
-; CHECK-NEXT:    ext v21.16b, v6.16b, v6.16b, #8
-; CHECK-NEXT:    ext v22.16b, v7.16b, v7.16b, #8
-; CHECK-NEXT:    frintx s24, s16
-; CHECK-NEXT:    mov s28, v20.s[1]
-; CHECK-NEXT:    frintx s25, s17
-; CHECK-NEXT:    frintx s26, s18
-; CHECK-NEXT:    frintx s27, s19
-; CHECK-NEXT:    frintx s29, s20
-; CHECK-NEXT:    mov s30, v21.s[1]
-; CHECK-NEXT:    frintx s20, s21
-; CHECK-NEXT:    frintx s21, s22
-; CHECK-NEXT:    mov s23, v22.s[1]
-; CHECK-NEXT:    mov s19, v19.s[1]
-; CHECK-NEXT:    mov s17, v17.s[1]
-; CHECK-NEXT:    fcvtzs x12, s24
-; CHECK-NEXT:    frintx s24, s28
-; CHECK-NEXT:    fcvtzs x13, s25
-; CHECK-NEXT:    mov s25, v7.s[1]
-; CHECK-NEXT:    fcvtzs x9, s26
-; CHECK-NEXT:    fcvtzs x11, s27
-; CHECK-NEXT:    fcvtzs x14, s20
-; CHECK-NEXT:    fcvtzs x15, s21
-; CHECK-NEXT:    frintx s26, s1
-; CHECK-NEXT:    frintx s23, s23
-; CHECK-NEXT:    frintx s27, s7
-; CHECK-NEXT:    frintx s22, s30
-; CHECK-NEXT:    fmov d20, x12
-; CHECK-NEXT:    fcvtzs x12, s24
-; CHECK-NEXT:    mov s24, v6.s[1]
-; CHECK-NEXT:    frintx s25, s25
-; CHECK-NEXT:    frintx s6, s6
-; CHECK-NEXT:    fcvtzs x10, s29
-; CHECK-NEXT:    fmov d7, x11
-; CHECK-NEXT:    fmov d21, x13
-; CHECK-NEXT:    frintx s28, s5
-; CHECK-NEXT:    fcvtzs x11, s23
-; CHECK-NEXT:    fmov d23, x14
-; CHECK-NEXT:    fcvtzs x14, s26
-; CHECK-NEXT:    fmov d26, x15
-; CHECK-NEXT:    fcvtzs x15, s27
-; CHECK-NEXT:    frintx s24, s24
-; CHECK-NEXT:    mov s27, v5.s[1]
-; CHECK-NEXT:    fcvtzs x13, s22
-; CHECK-NEXT:    fcvtzs x17, s25
-; CHECK-NEXT:    frintx s25, s4
-; CHECK-NEXT:    fcvtzs x18, s6
-; CHECK-NEXT:    fmov d6, x10
-; CHECK-NEXT:    frintx s22, s2
-; CHECK-NEXT:    mov v26.d[1], x11
-; CHECK-NEXT:    fmov d5, x14
-; CHECK-NEXT:    fcvtzs x10, s24
-; CHECK-NEXT:    fmov d24, x15
-; CHECK-NEXT:    fcvtzs x14, s28
-; CHECK-NEXT:    frintx s27, s27
-; CHECK-NEXT:    mov v23.d[1], x13
-; CHECK-NEXT:    mov s4, v4.s[1]
-; CHECK-NEXT:    fcvtzs x13, s25
-; CHECK-NEXT:    fmov d25, x18
-; CHECK-NEXT:    mov s16, v16.s[1]
-; CHECK-NEXT:    mov v24.d[1], x17
-; CHECK-NEXT:    fcvtzs x16, s22
-; CHECK-NEXT:    frintx s22, s3
-; CHECK-NEXT:    mov s3, v3.s[1]
-; CHECK-NEXT:    frintx s19, s19
-; CHECK-NEXT:    mov s2, v2.s[1]
-; CHECK-NEXT:    mov v25.d[1], x10
-; CHECK-NEXT:    fcvtzs x10, s27
-; CHECK-NEXT:    frintx s4, s4
-; CHECK-NEXT:    mov v6.d[1], x12
-; CHECK-NEXT:    frintx s17, s17
-; CHECK-NEXT:    mov s18, v18.s[1]
-; CHECK-NEXT:    stp q24, q26, [x8, #224]
-; CHECK-NEXT:    fmov d24, x14
-; CHECK-NEXT:    fcvtzs x11, s22
-; CHECK-NEXT:    ext v22.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    mov s1, v1.s[1]
-; CHECK-NEXT:    frintx s3, s3
-; CHECK-NEXT:    stp q25, q23, [x8, #192]
-; CHECK-NEXT:    frintx s2, s2
-; CHECK-NEXT:    fcvtzs x12, s4
-; CHECK-NEXT:    mov v24.d[1], x10
-; CHECK-NEXT:    fcvtzs x10, s19
-; CHECK-NEXT:    mov s19, v0.s[1]
-; CHECK-NEXT:    frintx s16, s16
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    fmov d4, x11
-; CHECK-NEXT:    mov s27, v22.s[1]
-; CHECK-NEXT:    frintx s22, s22
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    fcvtzs x11, s3
-; CHECK-NEXT:    fcvtzs x14, s2
-; CHECK-NEXT:    frintx s2, s18
-; CHECK-NEXT:    stp q24, q6, [x8, #160]
-; CHECK-NEXT:    fmov d6, x13
-; CHECK-NEXT:    fcvtzs x13, s17
-; CHECK-NEXT:    frintx s17, s19
-; CHECK-NEXT:    fmov d23, x16
-; CHECK-NEXT:    mov v7.d[1], x10
-; CHECK-NEXT:    frintx s3, s27
-; CHECK-NEXT:    fcvtzs x10, s22
-; CHECK-NEXT:    fcvtzs x15, s1
-; CHECK-NEXT:    mov v6.d[1], x12
-; CHECK-NEXT:    fcvtzs x12, s16
-; CHECK-NEXT:    mov v4.d[1], x11
-; CHECK-NEXT:    mov v21.d[1], x13
-; CHECK-NEXT:    fcvtzs x13, s0
-; CHECK-NEXT:    mov v23.d[1], x14
-; CHECK-NEXT:    fcvtzs x14, s17
-; CHECK-NEXT:    fcvtzs x11, s3
-; CHECK-NEXT:    fmov d0, x10
-; CHECK-NEXT:    mov v5.d[1], x15
-; CHECK-NEXT:    stp q6, q7, [x8, #128]
-; CHECK-NEXT:    mov v20.d[1], x12
-; CHECK-NEXT:    fcvtzs x12, s2
-; CHECK-NEXT:    stp q4, q21, [x8, #96]
-; CHECK-NEXT:    fmov d1, x13
-; CHECK-NEXT:    fmov d2, x9
-; CHECK-NEXT:    mov v0.d[1], x11
-; CHECK-NEXT:    stp q23, q20, [x8, #64]
-; CHECK-NEXT:    mov v1.d[1], x14
-; CHECK-NEXT:    mov v2.d[1], x12
-; CHECK-NEXT:    stp q5, q0, [x8, #32]
-; CHECK-NEXT:    stp q1, q2, [x8]
+; CHECK-NEXT:    frintx v7.4s, v7.4s
+; CHECK-NEXT:    frintx v6.4s, v6.4s
+; CHECK-NEXT:    mov x9, #28 // =0x1c
+; CHECK-NEXT:    frintx v5.4s, v5.4s
+; CHECK-NEXT:    frintx v4.4s, v4.4s
+; CHECK-NEXT:    frintx v3.4s, v3.4s
+; CHECK-NEXT:    frintx v2.4s, v2.4s
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    frintx v1.4s, v1.4s
+; CHECK-NEXT:    frintx v0.4s, v0.4s
+; CHECK-NEXT:    uunpklo z7.d, z7.s
+; CHECK-NEXT:    uunpklo z6.d, z6.s
+; CHECK-NEXT:    uunpklo z5.d, z5.s
+; CHECK-NEXT:    uunpklo z4.d, z4.s
+; CHECK-NEXT:    uunpklo z3.d, z3.s
+; CHECK-NEXT:    uunpklo z2.d, z2.s
+; CHECK-NEXT:    uunpklo z1.d, z1.s
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    fcvtzs z7.d, p0/m, z7.s
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z6.s
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z5.s
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z4.s
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.s
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.s
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.s
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.s
+; CHECK-NEXT:    st1d { z7.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #24 // =0x18
+; CHECK-NEXT:    st1d { z6.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #20 // =0x14
+; CHECK-NEXT:    st1d { z5.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #16 // =0x10
+; CHECK-NEXT:    st1d { z4.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #12 // =0xc
+; CHECK-NEXT:    st1d { z3.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #8 // =0x8
+; CHECK-NEXT:    st1d { z2.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #4 // =0x4
+; CHECK-NEXT:    st1d { z1.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    st1d { z0.d }, p0, [x8]
 ; CHECK-NEXT:    ret
   %a = call <32 x i64> @llvm.llrint.v32i64.v32f32(<32 x float> %x)
   ret <32 x i64> %a
@@ -621,13 +355,8 @@ declare <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double>)
 define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) {
 ; CHECK-LABEL: llrint_v2i64_v2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov d1, v0.d[1]
-; CHECK-NEXT:    frintx d0, d0
-; CHECK-NEXT:    frintx d1, d1
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    fcvtzs x9, d1
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    mov v0.d[1], x9
+; CHECK-NEXT:    frintx v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
 ; CHECK-NEXT:    ret
   %a = call <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double> %x)
   ret <2 x i64> %a
@@ -637,20 +366,17 @@ declare <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double>)
 define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
 ; CHECK-LABEL: llrint_v4i64_v4f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov d2, v0.d[1]
-; CHECK-NEXT:    mov d3, v1.d[1]
-; CHECK-NEXT:    frintx d0, d0
-; CHECK-NEXT:    frintx d1, d1
-; CHECK-NEXT:    frintx d2, d2
-; CHECK-NEXT:    frintx d3, d3
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    fcvtzs x9, d1
-; CHECK-NEXT:    fcvtzs x10, d2
-; CHECK-NEXT:    fcvtzs x11, d3
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d1, x9
-; CHECK-NEXT:    mov v0.d[1], x10
-; CHECK-NEXT:    mov v1.d[1], x11
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    splice z0.d, p0, z0.d, z1.d
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #16
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
 ; CHECK-NEXT:    ret
   %a = call <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double> %x)
   ret <4 x i64> %a
@@ -660,34 +386,28 @@ declare <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double>)
 define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
 ; CHECK-LABEL: llrint_v8i64_v8f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov d4, v0.d[1]
-; CHECK-NEXT:    mov d5, v1.d[1]
-; CHECK-NEXT:    mov d6, v2.d[1]
-; CHECK-NEXT:    mov d7, v3.d[1]
-; CHECK-NEXT:    frintx d0, d0
-; CHECK-NEXT:    frintx d1, d1
-; CHECK-NEXT:    frintx d2, d2
-; CHECK-NEXT:    frintx d3, d3
-; CHECK-NEXT:    frintx d4, d4
-; CHECK-NEXT:    frintx d5, d5
-; CHECK-NEXT:    frintx d6, d6
-; CHECK-NEXT:    frintx d7, d7
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    fcvtzs x9, d1
-; CHECK-NEXT:    fcvtzs x10, d2
-; CHECK-NEXT:    fcvtzs x11, d3
-; CHECK-NEXT:    fcvtzs x12, d4
-; CHECK-NEXT:    fcvtzs x13, d5
-; CHECK-NEXT:    fcvtzs x14, d6
-; CHECK-NEXT:    fcvtzs x15, d7
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d1, x9
-; CHECK-NEXT:    fmov d2, x10
-; CHECK-NEXT:    fmov d3, x11
-; CHECK-NEXT:    mov v0.d[1], x12
-; CHECK-NEXT:    mov v1.d[1], x13
-; CHECK-NEXT:    mov v2.d[1], x14
-; CHECK-NEXT:    mov v3.d[1], x15
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    // kill: def $q3 killed $q3 def $z3
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    splice z2.d, p0, z2.d, z3.d
+; CHECK-NEXT:    splice z0.d, p0, z0.d, z1.d
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
+; CHECK-NEXT:    movprfx z1, z2
+; CHECK-NEXT:    frintx z1.d, p0/m, z2.d
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
+; CHECK-NEXT:    movprfx z2, z1
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z1.d
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    mov z3.d, z2.d
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #16
+; CHECK-NEXT:    ext z3.b, z3.b, z2.b, #16
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    // kill: def $q2 killed $q2 killed $z2
+; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
+; CHECK-NEXT:    // kill: def $q3 killed $q3 killed $z3
 ; CHECK-NEXT:    ret
   %a = call <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double> %x)
   ret <8 x i64> %a
@@ -697,62 +417,50 @@ declare <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double>)
 define <16 x i64> @llrint_v16f64(<16 x double> %x) {
 ; CHECK-LABEL: llrint_v16f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov d16, v0.d[1]
-; CHECK-NEXT:    mov d17, v1.d[1]
-; CHECK-NEXT:    frintx d0, d0
-; CHECK-NEXT:    frintx d1, d1
-; CHECK-NEXT:    frintx d18, d2
-; CHECK-NEXT:    mov d2, v2.d[1]
-; CHECK-NEXT:    frintx d19, d3
-; CHECK-NEXT:    mov d3, v3.d[1]
-; CHECK-NEXT:    frintx d16, d16
-; CHECK-NEXT:    frintx d17, d17
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    frintx d0, d4
-; CHECK-NEXT:    mov d4, v4.d[1]
-; CHECK-NEXT:    fcvtzs x9, d1
-; CHECK-NEXT:    frintx d1, d5
-; CHECK-NEXT:    mov d5, v5.d[1]
-; CHECK-NEXT:    fcvtzs x12, d18
-; CHECK-NEXT:    frintx d2, d2
-; CHECK-NEXT:    fcvtzs x13, d19
-; CHECK-NEXT:    frintx d18, d3
-; CHECK-NEXT:    fcvtzs x10, d16
-; CHECK-NEXT:    mov d16, v6.d[1]
-; CHECK-NEXT:    fcvtzs x11, d17
-; CHECK-NEXT:    mov d17, v7.d[1]
-; CHECK-NEXT:    frintx d6, d6
-; CHECK-NEXT:    frintx d7, d7
-; CHECK-NEXT:    frintx d4, d4
-; CHECK-NEXT:    frintx d5, d5
-; CHECK-NEXT:    fcvtzs x14, d0
-; CHECK-NEXT:    fcvtzs x15, d1
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d1, x9
-; CHECK-NEXT:    frintx d16, d16
-; CHECK-NEXT:    fcvtzs x9, d2
-; CHECK-NEXT:    fmov d2, x12
-; CHECK-NEXT:    frintx d17, d17
-; CHECK-NEXT:    fcvtzs x8, d6
-; CHECK-NEXT:    fcvtzs x12, d7
-; CHECK-NEXT:    fmov d3, x13
-; CHECK-NEXT:    fcvtzs x13, d18
-; CHECK-NEXT:    fcvtzs x16, d4
-; CHECK-NEXT:    fcvtzs x17, d5
-; CHECK-NEXT:    fmov d4, x14
-; CHECK-NEXT:    fmov d5, x15
-; CHECK-NEXT:    fcvtzs x18, d16
-; CHECK-NEXT:    mov v0.d[1], x10
-; CHECK-NEXT:    mov v1.d[1], x11
-; CHECK-NEXT:    fcvtzs x0, d17
-; CHECK-NEXT:    fmov d6, x8
-; CHECK-NEXT:    fmov d7, x12
-; CHECK-NEXT:    mov v2.d[1], x9
-; CHECK-NEXT:    mov v3.d[1], x13
-; CHECK-NEXT:    mov v4.d[1], x16
-; CHECK-NEXT:    mov v5.d[1], x17
-; CHECK-NEXT:    mov v6.d[1], x18
-; CHECK-NEXT:    mov v7.d[1], x0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    // kill: def $q6 killed $q6 def $z6
+; CHECK-NEXT:    // kill: def $q4 killed $q4 def $z4
+; CHECK-NEXT:    // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    // kill: def $q7 killed $q7 def $z7
+; CHECK-NEXT:    // kill: def $q5 killed $q5 def $z5
+; CHECK-NEXT:    // kill: def $q3 killed $q3 def $z3
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    splice z2.d, p0, z2.d, z3.d
+; CHECK-NEXT:    splice z0.d, p0, z0.d, z1.d
+; CHECK-NEXT:    splice z6.d, p0, z6.d, z7.d
+; CHECK-NEXT:    splice z4.d, p0, z4.d, z5.d
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
+; CHECK-NEXT:    movprfx z1, z2
+; CHECK-NEXT:    frintx z1.d, p0/m, z2.d
+; CHECK-NEXT:    movprfx z5, z6
+; CHECK-NEXT:    frintx z5.d, p0/m, z6.d
+; CHECK-NEXT:    movprfx z3, z4
+; CHECK-NEXT:    frintx z3.d, p0/m, z4.d
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
+; CHECK-NEXT:    movprfx z2, z1
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z1.d
+; CHECK-NEXT:    movprfx z6, z5
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z5.d
+; CHECK-NEXT:    movprfx z4, z3
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z3.d
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    mov z3.d, z2.d
+; CHECK-NEXT:    mov z7.d, z6.d
+; CHECK-NEXT:    mov z5.d, z4.d
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #16
+; CHECK-NEXT:    ext z3.b, z3.b, z2.b, #16
+; CHECK-NEXT:    ext z7.b, z7.b, z6.b, #16
+; CHECK-NEXT:    ext z5.b, z5.b, z4.b, #16
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    // kill: def $q2 killed $q2 killed $z2
+; CHECK-NEXT:    // kill: def $q4 killed $q4 killed $z4
+; CHECK-NEXT:    // kill: def $q6 killed $q6 killed $z6
+; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
+; CHECK-NEXT:    // kill: def $q3 killed $q3 killed $z3
+; CHECK-NEXT:    // kill: def $q7 killed $q7 killed $z7
+; CHECK-NEXT:    // kill: def $q5 killed $q5 killed $z5
 ; CHECK-NEXT:    ret
   %a = call <16 x i64> @llvm.llrint.v16i64.v16f64(<16 x double> %x)
   ret <16 x i64> %a
@@ -762,130 +470,63 @@ declare <16 x i64> @llvm.llrint.v16i64.v16f64(<16 x double>)
 define <32 x i64> @llrint_v32f64(<32 x double> %x) {
 ; CHECK-LABEL: llrint_v32f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintx d20, d0
-; CHECK-NEXT:    frintx d22, d3
-; CHECK-NEXT:    frintx d21, d4
+; CHECK-NEXT:    ldp q17, q16, [sp, #96]
+; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    ldp q19, q18, [sp, #64]
-; CHECK-NEXT:    frintx d23, d5
-; CHECK-NEXT:    ldp q27, q26, [sp, #96]
-; CHECK-NEXT:    mov d4, v4.d[1]
-; CHECK-NEXT:    ldp q16, q17, [sp, #32]
-; CHECK-NEXT:    mov d5, v5.d[1]
-; CHECK-NEXT:    fcvtzs x9, d20
-; CHECK-NEXT:    frintx d20, d6
-; CHECK-NEXT:    fcvtzs x11, d22
-; CHECK-NEXT:    frintx d22, d19
-; CHECK-NEXT:    fcvtzs x12, d21
-; CHECK-NEXT:    fcvtzs x10, d23
-; CHECK-NEXT:    mov d21, v26.d[1]
-; CHECK-NEXT:    frintx d23, d27
-; CHECK-NEXT:    mov d27, v27.d[1]
-; CHECK-NEXT:    frintx d24, d16
-; CHECK-NEXT:    mov d19, v19.d[1]
-; CHECK-NEXT:    frintx d25, d17
-; CHECK-NEXT:    fcvtzs x13, d20
-; CHECK-NEXT:    mov d20, v18.d[1]
-; CHECK-NEXT:    frintx d18, d18
-; CHECK-NEXT:    fcvtzs x16, d22
-; CHECK-NEXT:    frintx d22, d26
-; CHECK-NEXT:    mov d16, v16.d[1]
-; CHECK-NEXT:    frintx d21, d21
-; CHECK-NEXT:    fcvtzs x17, d23
-; CHECK-NEXT:    frintx d23, d27
-; CHECK-NEXT:    fcvtzs x14, d24
-; CHECK-NEXT:    frintx d26, d19
-; CHECK-NEXT:    fmov d19, x11
-; CHECK-NEXT:    frintx d20, d20
-; CHECK-NEXT:    mov d27, v17.d[1]
-; CHECK-NEXT:    fcvtzs x15, d25
-; CHECK-NEXT:    ldp q25, q24, [sp]
-; CHECK-NEXT:    fcvtzs x11, d22
-; CHECK-NEXT:    fmov d17, x12
-; CHECK-NEXT:    fcvtzs x12, d21
-; CHECK-NEXT:    fcvtzs x0, d23
-; CHECK-NEXT:    fmov d23, x14
-; CHECK-NEXT:    fcvtzs x14, d18
-; CHECK-NEXT:    fmov d18, x17
-; CHECK-NEXT:    fcvtzs x17, d20
-; CHECK-NEXT:    frintx d21, d7
-; CHECK-NEXT:    fcvtzs x18, d26
-; CHECK-NEXT:    fmov d20, x11
-; CHECK-NEXT:    frintx d22, d25
-; CHECK-NEXT:    frintx d26, d27
-; CHECK-NEXT:    frintx d16, d16
-; CHECK-NEXT:    mov v18.d[1], x0
-; CHECK-NEXT:    mov d25, v25.d[1]
-; CHECK-NEXT:    mov d7, v7.d[1]
-; CHECK-NEXT:    mov d6, v6.d[1]
-; CHECK-NEXT:    mov d0, v0.d[1]
-; CHECK-NEXT:    mov v20.d[1], x12
-; CHECK-NEXT:    fcvtzs x11, d21
-; CHECK-NEXT:    fmov d21, x15
-; CHECK-NEXT:    fcvtzs x12, d22
-; CHECK-NEXT:    fmov d22, x16
-; CHECK-NEXT:    fcvtzs x15, d26
-; CHECK-NEXT:    fmov d26, x14
-; CHECK-NEXT:    fcvtzs x14, d16
-; CHECK-NEXT:    frintx d25, d25
-; CHECK-NEXT:    frintx d7, d7
-; CHECK-NEXT:    mov d16, v1.d[1]
-; CHECK-NEXT:    mov d3, v3.d[1]
-; CHECK-NEXT:    stp q18, q20, [x8, #224]
-; CHECK-NEXT:    mov d18, v24.d[1]
-; CHECK-NEXT:    mov v22.d[1], x18
-; CHECK-NEXT:    mov v26.d[1], x17
-; CHECK-NEXT:    frintx d24, d24
-; CHECK-NEXT:    mov v21.d[1], x15
-; CHECK-NEXT:    mov v23.d[1], x14
-; CHECK-NEXT:    frintx d20, d2
-; CHECK-NEXT:    mov d2, v2.d[1]
-; CHECK-NEXT:    frintx d6, d6
-; CHECK-NEXT:    frintx d5, d5
-; CHECK-NEXT:    frintx d4, d4
-; CHECK-NEXT:    frintx d18, d18
-; CHECK-NEXT:    frintx d1, d1
-; CHECK-NEXT:    frintx d3, d3
-; CHECK-NEXT:    stp q22, q26, [x8, #192]
-; CHECK-NEXT:    fmov d22, x10
-; CHECK-NEXT:    fcvtzs x10, d24
-; CHECK-NEXT:    stp q23, q21, [x8, #160]
-; CHECK-NEXT:    fmov d21, x11
-; CHECK-NEXT:    fmov d24, x13
-; CHECK-NEXT:    frintx d2, d2
-; CHECK-NEXT:    fcvtzs x13, d6
-; CHECK-NEXT:    frintx d6, d16
-; CHECK-NEXT:    fcvtzs x11, d18
-; CHECK-NEXT:    fmov d18, x12
-; CHECK-NEXT:    fcvtzs x12, d25
-; CHECK-NEXT:    fmov d23, x10
-; CHECK-NEXT:    fcvtzs x10, d7
-; CHECK-NEXT:    fcvtzs x14, d5
-; CHECK-NEXT:    frintx d0, d0
-; CHECK-NEXT:    fcvtzs x15, d3
-; CHECK-NEXT:    mov v24.d[1], x13
-; CHECK-NEXT:    fcvtzs x13, d2
-; CHECK-NEXT:    fmov d2, x9
-; CHECK-NEXT:    mov v23.d[1], x11
-; CHECK-NEXT:    fcvtzs x11, d4
-; CHECK-NEXT:    mov v18.d[1], x12
-; CHECK-NEXT:    fcvtzs x12, d20
-; CHECK-NEXT:    mov v21.d[1], x10
-; CHECK-NEXT:    fcvtzs x10, d1
-; CHECK-NEXT:    mov v22.d[1], x14
-; CHECK-NEXT:    fcvtzs x14, d6
-; CHECK-NEXT:    mov v19.d[1], x15
-; CHECK-NEXT:    stp q18, q23, [x8, #128]
-; CHECK-NEXT:    mov v17.d[1], x11
-; CHECK-NEXT:    fcvtzs x11, d0
-; CHECK-NEXT:    stp q24, q21, [x8, #96]
-; CHECK-NEXT:    fmov d0, x12
-; CHECK-NEXT:    fmov d1, x10
-; CHECK-NEXT:    stp q17, q22, [x8, #64]
-; CHECK-NEXT:    mov v0.d[1], x13
-; CHECK-NEXT:    mov v1.d[1], x14
-; CHECK-NEXT:    mov v2.d[1], x11
-; CHECK-NEXT:    stp q0, q19, [x8, #32]
-; CHECK-NEXT:    stp q2, q1, [x8]
+; CHECK-NEXT:    ptrue p1.d, vl4
+; CHECK-NEXT:    // kill: def $q7 killed $q7 def $z7
+; CHECK-NEXT:    // kill: def $q6 killed $q6 def $z6
+; CHECK-NEXT:    // kill: def $q5 killed $q5 def $z5
+; CHECK-NEXT:    // kill: def $q4 killed $q4 def $z4
+; CHECK-NEXT:    // kill: def $q3 killed $q3 def $z3
+; CHECK-NEXT:    // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    mov x9, #28 // =0x1c
+; CHECK-NEXT:    splice z17.d, p0, z17.d, z16.d
+; CHECK-NEXT:    ldp q20, q16, [sp, #32]
+; CHECK-NEXT:    splice z19.d, p0, z19.d, z18.d
+; CHECK-NEXT:    ldp q21, q18, [sp]
+; CHECK-NEXT:    splice z6.d, p0, z6.d, z7.d
+; CHECK-NEXT:    splice z4.d, p0, z4.d, z5.d
+; CHECK-NEXT:    splice z2.d, p0, z2.d, z3.d
+; CHECK-NEXT:    splice z20.d, p0, z20.d, z16.d
+; CHECK-NEXT:    splice z0.d, p0, z0.d, z1.d
+; CHECK-NEXT:    splice z21.d, p0, z21.d, z18.d
+; CHECK-NEXT:    movprfx z7, z17
+; CHECK-NEXT:    frintx z7.d, p1/m, z17.d
+; CHECK-NEXT:    movprfx z5, z19
+; CHECK-NEXT:    frintx z5.d, p1/m, z19.d
+; CHECK-NEXT:    frintx z6.d, p1/m, z6.d
+; CHECK-NEXT:    frintx z4.d, p1/m, z4.d
+; CHECK-NEXT:    frintx z2.d, p1/m, z2.d
+; CHECK-NEXT:    movprfx z3, z20
+; CHECK-NEXT:    frintx z3.d, p1/m, z20.d
+; CHECK-NEXT:    frintx z0.d, p1/m, z0.d
+; CHECK-NEXT:    movprfx z1, z21
+; CHECK-NEXT:    frintx z1.d, p1/m, z21.d
+; CHECK-NEXT:    fcvtzs z7.d, p1/m, z7.d
+; CHECK-NEXT:    fcvtzs z5.d, p1/m, z5.d
+; CHECK-NEXT:    fcvtzs z6.d, p1/m, z6.d
+; CHECK-NEXT:    fcvtzs z4.d, p1/m, z4.d
+; CHECK-NEXT:    fcvtzs z2.d, p1/m, z2.d
+; CHECK-NEXT:    fcvtzs z3.d, p1/m, z3.d
+; CHECK-NEXT:    fcvtzs z0.d, p1/m, z0.d
+; CHECK-NEXT:    fcvtzs z1.d, p1/m, z1.d
+; CHECK-NEXT:    st1d { z7.d }, p1, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #24 // =0x18
+; CHECK-NEXT:    st1d { z5.d }, p1, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #20 // =0x14
+; CHECK-NEXT:    st1d { z3.d }, p1, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #16 // =0x10
+; CHECK-NEXT:    st1d { z1.d }, p1, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #12 // =0xc
+; CHECK-NEXT:    st1d { z6.d }, p1, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #8 // =0x8
+; CHECK-NEXT:    st1d { z4.d }, p1, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #4 // =0x4
+; CHECK-NEXT:    st1d { z2.d }, p1, [x8, x9, lsl #3]
+; CHECK-NEXT:    st1d { z0.d }, p1, [x8]
 ; CHECK-NEXT:    ret
   %a = call <32 x i64> @llvm.llrint.v32i64.v16f64(<32 x double> %x)
   ret <32 x i64> %a
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-vector-lrint.ll b/llvm/test/CodeGen/AArch64/sve-fixed-vector-lrint.ll
index e9c5fd9b769b6..558fa88eb64bd 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-vector-lrint.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-vector-lrint.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64 -mattr=+sve | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -mattr=+sve -aarch64-sve-vector-bits-min=256 | FileCheck %s
 
 define <1 x i64> @lrint_v1f16(<1 x half> %x) {
 ; CHECK-LABEL: lrint_v1f16:
@@ -16,14 +16,12 @@ declare <1 x i64> @llvm.lrint.v1i64.v1f16(<1 x half>)
 define <2 x i64> @lrint_v2f16(<2 x half> %x) {
 ; CHECK-LABEL: lrint_v2f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    mov h1, v0.h[1]
-; CHECK-NEXT:    frintx h0, h0
-; CHECK-NEXT:    frintx h1, h1
-; CHECK-NEXT:    fcvtzs x8, h0
-; CHECK-NEXT:    fcvtzs x9, h1
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    mov v0.d[1], x9
+; CHECK-NEXT:    frintx v0.4h, v0.4h
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %a = call <2 x i64> @llvm.lrint.v2i64.v2f16(<2 x half> %x)
   ret <2 x i64> %a
@@ -33,22 +31,15 @@ declare <2 x i64> @llvm.lrint.v2i64.v2f16(<2 x half>)
 define <4 x i64> @lrint_v4f16(<4 x half> %x) {
 ; CHECK-LABEL: lrint_v4f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    mov h1, v0.h[2]
-; CHECK-NEXT:    mov h2, v0.h[1]
-; CHECK-NEXT:    mov h3, v0.h[3]
-; CHECK-NEXT:    frintx h0, h0
-; CHECK-NEXT:    frintx h1, h1
-; CHECK-NEXT:    frintx h2, h2
-; CHECK-NEXT:    frintx h3, h3
-; CHECK-NEXT:    fcvtzs x8, h0
-; CHECK-NEXT:    fcvtzs x9, h1
-; CHECK-NEXT:    fcvtzs x10, h2
-; CHECK-NEXT:    fcvtzs x11, h3
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d1, x9
-; CHECK-NEXT:    mov v0.d[1], x10
-; CHECK-NEXT:    mov v1.d[1], x11
+; CHECK-NEXT:    frintx v0.4h, v0.4h
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.h
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #16
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
 ; CHECK-NEXT:    ret
   %a = call <4 x i64> @llvm.lrint.v4i64.v4f16(<4 x half> %x)
   ret <4 x i64> %a
@@ -59,36 +50,24 @@ define <8 x i64> @lrint_v8f16(<8 x half> %x) {
 ; CHECK-LABEL: lrint_v8f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    mov h4, v0.h[2]
-; CHECK-NEXT:    mov h3, v0.h[1]
-; CHECK-NEXT:    mov h7, v0.h[3]
-; CHECK-NEXT:    frintx h0, h0
-; CHECK-NEXT:    mov h2, v1.h[2]
-; CHECK-NEXT:    mov h5, v1.h[1]
-; CHECK-NEXT:    mov h6, v1.h[3]
-; CHECK-NEXT:    frintx h1, h1
-; CHECK-NEXT:    frintx h4, h4
-; CHECK-NEXT:    frintx h3, h3
-; CHECK-NEXT:    frintx h7, h7
-; CHECK-NEXT:    fcvtzs x9, h0
-; CHECK-NEXT:    frintx h2, h2
-; CHECK-NEXT:    frintx h5, h5
-; CHECK-NEXT:    frintx h6, h6
-; CHECK-NEXT:    fcvtzs x8, h1
-; CHECK-NEXT:    fcvtzs x12, h4
-; CHECK-NEXT:    fcvtzs x11, h3
-; CHECK-NEXT:    fcvtzs x15, h7
-; CHECK-NEXT:    fmov d0, x9
-; CHECK-NEXT:    fcvtzs x10, h2
-; CHECK-NEXT:    fcvtzs x13, h5
-; CHECK-NEXT:    fcvtzs x14, h6
-; CHECK-NEXT:    fmov d2, x8
-; CHECK-NEXT:    fmov d1, x12
-; CHECK-NEXT:    mov v0.d[1], x11
-; CHECK-NEXT:    fmov d3, x10
-; CHECK-NEXT:    mov v2.d[1], x13
-; CHECK-NEXT:    mov v1.d[1], x15
-; CHECK-NEXT:    mov v3.d[1], x14
+; CHECK-NEXT:    frintx v0.4h, v0.4h
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    frintx v1.4h, v1.4h
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z1.s, z1.h
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    uunpklo z1.d, z1.s
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.h
+; CHECK-NEXT:    movprfx z2, z1
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z1.h
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #16
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    mov z3.d, z2.d
+; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
+; CHECK-NEXT:    ext z3.b, z3.b, z2.b, #16
+; CHECK-NEXT:    // kill: def $q2 killed $q2 killed $z2
+; CHECK-NEXT:    // kill: def $q3 killed $q3 killed $z3
 ; CHECK-NEXT:    ret
   %a = call <8 x i64> @llvm.lrint.v8i64.v8f16(<8 x half> %x)
   ret <8 x i64> %a
@@ -100,66 +79,41 @@ define <16 x i64> @lrint_v16i64_v16f16(<16 x half> %x) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
 ; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    mov h4, v0.h[1]
-; CHECK-NEXT:    frintx h5, h0
-; CHECK-NEXT:    mov h18, v0.h[2]
-; CHECK-NEXT:    mov h0, v0.h[3]
-; CHECK-NEXT:    frintx h6, h2
-; CHECK-NEXT:    mov h7, v2.h[1]
-; CHECK-NEXT:    mov h16, v2.h[2]
-; CHECK-NEXT:    mov h17, v3.h[2]
-; CHECK-NEXT:    frintx h19, h3
-; CHECK-NEXT:    frintx h4, h4
-; CHECK-NEXT:    fcvtzs x8, h5
-; CHECK-NEXT:    mov h5, v1.h[1]
-; CHECK-NEXT:    mov h2, v2.h[3]
-; CHECK-NEXT:    frintx h18, h18
-; CHECK-NEXT:    frintx h0, h0
-; CHECK-NEXT:    fcvtzs x9, h6
-; CHECK-NEXT:    frintx h6, h7
-; CHECK-NEXT:    frintx h7, h16
-; CHECK-NEXT:    mov h16, v1.h[2]
-; CHECK-NEXT:    frintx h17, h17
-; CHECK-NEXT:    fcvtzs x10, h19
-; CHECK-NEXT:    mov h19, v3.h[1]
-; CHECK-NEXT:    fcvtzs x11, h4
-; CHECK-NEXT:    mov h4, v1.h[3]
-; CHECK-NEXT:    mov h3, v3.h[3]
-; CHECK-NEXT:    frintx h1, h1
-; CHECK-NEXT:    frintx h5, h5
-; CHECK-NEXT:    fcvtzs x13, h7
-; CHECK-NEXT:    fcvtzs x12, h6
-; CHECK-NEXT:    fcvtzs x15, h18
-; CHECK-NEXT:    frintx h7, h16
-; CHECK-NEXT:    fcvtzs x14, h17
-; CHECK-NEXT:    frintx h16, h2
-; CHECK-NEXT:    frintx h17, h19
-; CHECK-NEXT:    frintx h4, h4
-; CHECK-NEXT:    fmov d2, x9
-; CHECK-NEXT:    frintx h19, h3
-; CHECK-NEXT:    fcvtzs x9, h1
-; CHECK-NEXT:    fmov d6, x10
-; CHECK-NEXT:    fmov d3, x13
-; CHECK-NEXT:    fcvtzs x13, h0
-; CHECK-NEXT:    fcvtzs x16, h5
-; CHECK-NEXT:    fcvtzs x10, h7
-; CHECK-NEXT:    fmov d7, x14
-; CHECK-NEXT:    fcvtzs x14, h16
-; CHECK-NEXT:    fcvtzs x17, h17
-; CHECK-NEXT:    fcvtzs x0, h4
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fcvtzs x18, h19
-; CHECK-NEXT:    fmov d1, x15
-; CHECK-NEXT:    fmov d4, x9
-; CHECK-NEXT:    mov v2.d[1], x12
-; CHECK-NEXT:    fmov d5, x10
-; CHECK-NEXT:    mov v0.d[1], x11
-; CHECK-NEXT:    mov v3.d[1], x14
-; CHECK-NEXT:    mov v1.d[1], x13
-; CHECK-NEXT:    mov v4.d[1], x16
-; CHECK-NEXT:    mov v6.d[1], x17
-; CHECK-NEXT:    mov v7.d[1], x18
-; CHECK-NEXT:    mov v5.d[1], x0
+; CHECK-NEXT:    frintx v0.4h, v0.4h
+; CHECK-NEXT:    frintx v1.4h, v1.4h
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    frintx v2.4h, v2.4h
+; CHECK-NEXT:    frintx v3.4h, v3.4h
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z1.s, z1.h
+; CHECK-NEXT:    uunpklo z2.s, z2.h
+; CHECK-NEXT:    uunpklo z3.s, z3.h
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    uunpklo z1.d, z1.s
+; CHECK-NEXT:    uunpklo z2.d, z2.s
+; CHECK-NEXT:    uunpklo z3.d, z3.s
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.h
+; CHECK-NEXT:    movprfx z4, z1
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z1.h
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.h
+; CHECK-NEXT:    movprfx z6, z3
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z3.h
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    mov z5.d, z4.d
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #16
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    mov z3.d, z2.d
+; CHECK-NEXT:    mov z7.d, z6.d
+; CHECK-NEXT:    ext z5.b, z5.b, z4.b, #16
+; CHECK-NEXT:    // kill: def $q4 killed $q4 killed $z4
+; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
+; CHECK-NEXT:    // kill: def $q5 killed $q5 killed $z5
+; CHECK-NEXT:    ext z3.b, z3.b, z2.b, #16
+; CHECK-NEXT:    ext z7.b, z7.b, z6.b, #16
+; CHECK-NEXT:    // kill: def $q2 killed $q2 killed $z2
+; CHECK-NEXT:    // kill: def $q6 killed $q6 killed $z6
+; CHECK-NEXT:    // kill: def $q3 killed $q3 killed $z3
+; CHECK-NEXT:    // kill: def $q7 killed $q7 killed $z7
 ; CHECK-NEXT:    ret
   %a = call <16 x i64> @llvm.lrint.v16i64.v16f16(<16 x half> %x)
   ret <16 x i64> %a
@@ -169,138 +123,61 @@ declare <16 x i64> @llvm.lrint.v16i64.v16f16(<16 x half>)
 define <32 x i64> @lrint_v32i64_v32f16(<32 x half> %x) {
 ; CHECK-LABEL: lrint_v32i64_v32f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v4.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    ext v4.16b, v3.16b, v3.16b, #8
 ; CHECK-NEXT:    ext v5.16b, v2.16b, v2.16b, #8
-; CHECK-NEXT:    ext v6.16b, v3.16b, v3.16b, #8
+; CHECK-NEXT:    mov x9, #24 // =0x18
+; CHECK-NEXT:    frintx v3.4h, v3.4h
+; CHECK-NEXT:    ext v6.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    frintx v2.4h, v2.4h
 ; CHECK-NEXT:    ext v7.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    frintx h21, h1
-; CHECK-NEXT:    frintx h22, h2
-; CHECK-NEXT:    mov h26, v2.h[2]
-; CHECK-NEXT:    frintx h19, h0
-; CHECK-NEXT:    mov h27, v3.h[2]
-; CHECK-NEXT:    mov h20, v2.h[1]
-; CHECK-NEXT:    mov h18, v1.h[1]
-; CHECK-NEXT:    mov h16, v4.h[2]
-; CHECK-NEXT:    mov h17, v5.h[2]
-; CHECK-NEXT:    frintx h23, h5
-; CHECK-NEXT:    frintx h24, h6
-; CHECK-NEXT:    mov h25, v6.h[2]
-; CHECK-NEXT:    fcvtzs x9, h21
-; CHECK-NEXT:    fcvtzs x11, h22
-; CHECK-NEXT:    frintx h22, h7
-; CHECK-NEXT:    mov h21, v3.h[3]
-; CHECK-NEXT:    fcvtzs x10, h19
-; CHECK-NEXT:    frintx h27, h27
-; CHECK-NEXT:    frintx h20, h20
-; CHECK-NEXT:    frintx h16, h16
-; CHECK-NEXT:    frintx h17, h17
-; CHECK-NEXT:    fcvtzs x12, h23
-; CHECK-NEXT:    fcvtzs x13, h24
-; CHECK-NEXT:    frintx h23, h25
-; CHECK-NEXT:    frintx h25, h26
-; CHECK-NEXT:    mov h26, v3.h[1]
-; CHECK-NEXT:    mov h24, v2.h[3]
-; CHECK-NEXT:    fmov d19, x9
-; CHECK-NEXT:    fcvtzs x9, h22
-; CHECK-NEXT:    frintx h22, h3
-; CHECK-NEXT:    frintx h21, h21
-; CHECK-NEXT:    fcvtzs x14, h16
-; CHECK-NEXT:    fcvtzs x15, h17
-; CHECK-NEXT:    fmov d2, x12
-; CHECK-NEXT:    fmov d16, x13
-; CHECK-NEXT:    fcvtzs x12, h23
-; CHECK-NEXT:    fcvtzs x13, h25
-; CHECK-NEXT:    mov h23, v1.h[2]
-; CHECK-NEXT:    frintx h25, h26
-; CHECK-NEXT:    frintx h24, h24
-; CHECK-NEXT:    mov h1, v1.h[3]
-; CHECK-NEXT:    fmov d26, x11
-; CHECK-NEXT:    fcvtzs x11, h21
-; CHECK-NEXT:    fmov d3, x14
-; CHECK-NEXT:    fmov d17, x15
-; CHECK-NEXT:    fcvtzs x14, h22
-; CHECK-NEXT:    fcvtzs x15, h27
-; CHECK-NEXT:    mov h22, v0.h[2]
-; CHECK-NEXT:    frintx h18, h18
-; CHECK-NEXT:    frintx h21, h23
-; CHECK-NEXT:    fmov d23, x13
-; CHECK-NEXT:    fcvtzs x13, h25
-; CHECK-NEXT:    frintx h1, h1
-; CHECK-NEXT:    fmov d25, x14
-; CHECK-NEXT:    fcvtzs x14, h24
-; CHECK-NEXT:    fmov d24, x15
-; CHECK-NEXT:    frintx h22, h22
-; CHECK-NEXT:    fcvtzs x15, h18
-; CHECK-NEXT:    mov h18, v7.h[1]
-; CHECK-NEXT:    mov v25.d[1], x13
-; CHECK-NEXT:    fcvtzs x13, h21
-; CHECK-NEXT:    mov h21, v7.h[2]
-; CHECK-NEXT:    mov v24.d[1], x11
-; CHECK-NEXT:    fcvtzs x11, h20
-; CHECK-NEXT:    mov h20, v0.h[1]
-; CHECK-NEXT:    mov h0, v0.h[3]
-; CHECK-NEXT:    mov v23.d[1], x14
-; CHECK-NEXT:    fcvtzs x14, h1
-; CHECK-NEXT:    mov h1, v6.h[3]
-; CHECK-NEXT:    mov h6, v6.h[1]
-; CHECK-NEXT:    mov v19.d[1], x15
-; CHECK-NEXT:    mov h7, v7.h[3]
-; CHECK-NEXT:    stp q25, q24, [x8, #192]
-; CHECK-NEXT:    fmov d24, x13
-; CHECK-NEXT:    frintx h20, h20
-; CHECK-NEXT:    mov v26.d[1], x11
-; CHECK-NEXT:    fcvtzs x11, h22
-; CHECK-NEXT:    mov h22, v5.h[1]
-; CHECK-NEXT:    mov h5, v5.h[3]
-; CHECK-NEXT:    frintx h0, h0
-; CHECK-NEXT:    frintx h1, h1
-; CHECK-NEXT:    mov v24.d[1], x14
-; CHECK-NEXT:    mov h25, v4.h[3]
-; CHECK-NEXT:    frintx h6, h6
-; CHECK-NEXT:    stp q26, q23, [x8, #128]
-; CHECK-NEXT:    fmov d23, x12
-; CHECK-NEXT:    fcvtzs x12, h20
-; CHECK-NEXT:    mov h20, v4.h[1]
-; CHECK-NEXT:    frintx h5, h5
-; CHECK-NEXT:    fcvtzs x13, h0
-; CHECK-NEXT:    stp q19, q24, [x8, #64]
-; CHECK-NEXT:    frintx h22, h22
-; CHECK-NEXT:    fmov d0, x10
-; CHECK-NEXT:    fmov d19, x11
-; CHECK-NEXT:    frintx h4, h4
-; CHECK-NEXT:    fcvtzs x10, h1
-; CHECK-NEXT:    frintx h1, h21
-; CHECK-NEXT:    frintx h24, h25
-; CHECK-NEXT:    fcvtzs x11, h6
-; CHECK-NEXT:    frintx h20, h20
-; CHECK-NEXT:    frintx h6, h7
-; CHECK-NEXT:    fcvtzs x14, h5
-; CHECK-NEXT:    mov v19.d[1], x13
-; CHECK-NEXT:    frintx h5, h18
-; CHECK-NEXT:    fcvtzs x13, h22
-; CHECK-NEXT:    mov v0.d[1], x12
-; CHECK-NEXT:    fcvtzs x12, h4
-; CHECK-NEXT:    mov v23.d[1], x10
-; CHECK-NEXT:    fcvtzs x10, h1
-; CHECK-NEXT:    fcvtzs x15, h24
-; CHECK-NEXT:    mov v16.d[1], x11
-; CHECK-NEXT:    fcvtzs x11, h20
-; CHECK-NEXT:    mov v17.d[1], x14
-; CHECK-NEXT:    fcvtzs x14, h6
-; CHECK-NEXT:    mov v2.d[1], x13
-; CHECK-NEXT:    fcvtzs x13, h5
-; CHECK-NEXT:    fmov d4, x9
-; CHECK-NEXT:    stp q0, q19, [x8]
-; CHECK-NEXT:    fmov d0, x12
-; CHECK-NEXT:    stp q16, q23, [x8, #224]
-; CHECK-NEXT:    fmov d1, x10
-; CHECK-NEXT:    mov v3.d[1], x15
-; CHECK-NEXT:    stp q2, q17, [x8, #160]
-; CHECK-NEXT:    mov v0.d[1], x11
-; CHECK-NEXT:    mov v4.d[1], x13
-; CHECK-NEXT:    mov v1.d[1], x14
-; CHECK-NEXT:    stp q0, q3, [x8, #96]
-; CHECK-NEXT:    stp q4, q1, [x8, #32]
+; CHECK-NEXT:    frintx v1.4h, v1.4h
+; CHECK-NEXT:    frintx v0.4h, v0.4h
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    frintx v4.4h, v4.4h
+; CHECK-NEXT:    frintx v5.4h, v5.4h
+; CHECK-NEXT:    uunpklo z3.s, z3.h
+; CHECK-NEXT:    frintx v6.4h, v6.4h
+; CHECK-NEXT:    uunpklo z2.s, z2.h
+; CHECK-NEXT:    frintx v7.4h, v7.4h
+; CHECK-NEXT:    uunpklo z1.s, z1.h
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z4.s, z4.h
+; CHECK-NEXT:    uunpklo z5.s, z5.h
+; CHECK-NEXT:    uunpklo z3.d, z3.s
+; CHECK-NEXT:    uunpklo z6.s, z6.h
+; CHECK-NEXT:    uunpklo z2.d, z2.s
+; CHECK-NEXT:    uunpklo z7.s, z7.h
+; CHECK-NEXT:    uunpklo z1.d, z1.s
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    uunpklo z4.d, z4.s
+; CHECK-NEXT:    uunpklo z5.d, z5.s
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.h
+; CHECK-NEXT:    uunpklo z6.d, z6.s
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.h
+; CHECK-NEXT:    uunpklo z7.d, z7.s
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.h
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.h
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z4.h
+; CHECK-NEXT:    st1d { z3.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #16 // =0x10
+; CHECK-NEXT:    movprfx z3, z5
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z5.h
+; CHECK-NEXT:    st1d { z2.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #8 // =0x8
+; CHECK-NEXT:    movprfx z2, z6
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z6.h
+; CHECK-NEXT:    st1d { z1.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #28 // =0x1c
+; CHECK-NEXT:    movprfx z1, z7
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z7.h
+; CHECK-NEXT:    st1d { z4.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #20 // =0x14
+; CHECK-NEXT:    st1d { z3.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #12 // =0xc
+; CHECK-NEXT:    st1d { z2.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #4 // =0x4
+; CHECK-NEXT:    st1d { z1.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    st1d { z0.d }, p0, [x8]
 ; CHECK-NEXT:    ret
   %a = call <32 x i64> @llvm.lrint.v32i64.v32f16(<32 x half> %x)
   ret <32 x i64> %a
@@ -310,10 +187,10 @@ declare <32 x i64> @llvm.lrint.v32i64.v32f16(<32 x half>)
 define <1 x i64> @lrint_v1f32(<1 x float> %x) {
 ; CHECK-LABEL: lrint_v1f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    fcvtzs x8, s0
-; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    frintx v0.2s, v0.2s
+; CHECK-NEXT:    fcvtl v0.2d, v0.2s
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
 ; CHECK-NEXT:    ret
   %a = call <1 x i64> @llvm.lrint.v1i64.v1f32(<1 x float> %x)
   ret <1 x i64> %a
@@ -323,14 +200,9 @@ declare <1 x i64> @llvm.lrint.v1i64.v1f32(<1 x float>)
 define <2 x i64> @lrint_v2f32(<2 x float> %x) {
 ; CHECK-LABEL: lrint_v2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    mov s1, v0.s[1]
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    fcvtzs x8, s0
-; CHECK-NEXT:    fcvtzs x9, s1
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    mov v0.d[1], x9
+; CHECK-NEXT:    frintx v0.2s, v0.2s
+; CHECK-NEXT:    fcvtl v0.2d, v0.2s
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
 ; CHECK-NEXT:    ret
   %a = call <2 x i64> @llvm.lrint.v2i64.v2f32(<2 x float> %x)
   ret <2 x i64> %a
@@ -340,21 +212,14 @@ declare <2 x i64> @llvm.lrint.v2i64.v2f32(<2 x float>)
 define <4 x i64> @lrint_v4f32(<4 x float> %x) {
 ; CHECK-LABEL: lrint_v4f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    mov s3, v0.s[1]
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    mov s2, v1.s[1]
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    frintx s3, s3
-; CHECK-NEXT:    fcvtzs x9, s0
-; CHECK-NEXT:    frintx s2, s2
-; CHECK-NEXT:    fcvtzs x8, s1
-; CHECK-NEXT:    fcvtzs x11, s3
-; CHECK-NEXT:    fmov d0, x9
-; CHECK-NEXT:    fcvtzs x10, s2
-; CHECK-NEXT:    fmov d1, x8
-; CHECK-NEXT:    mov v0.d[1], x11
-; CHECK-NEXT:    mov v1.d[1], x10
+; CHECK-NEXT:    frintx v0.4s, v0.4s
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.s
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #16
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
 ; CHECK-NEXT:    ret
   %a = call <4 x i64> @llvm.lrint.v4i64.v4f32(<4 x float> %x)
   ret <4 x i64> %a
@@ -364,36 +229,22 @@ declare <4 x i64> @llvm.lrint.v4i64.v4f32(<4 x float>)
 define <8 x i64> @lrint_v8f32(<8 x float> %x) {
 ; CHECK-LABEL: lrint_v8f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    mov s4, v0.s[1]
-; CHECK-NEXT:    mov s7, v1.s[1]
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    mov s5, v2.s[1]
-; CHECK-NEXT:    mov s6, v3.s[1]
-; CHECK-NEXT:    frintx s2, s2
-; CHECK-NEXT:    frintx s3, s3
-; CHECK-NEXT:    frintx s4, s4
-; CHECK-NEXT:    frintx s7, s7
-; CHECK-NEXT:    fcvtzs x9, s0
-; CHECK-NEXT:    fcvtzs x12, s1
-; CHECK-NEXT:    frintx s5, s5
-; CHECK-NEXT:    frintx s6, s6
-; CHECK-NEXT:    fcvtzs x8, s2
-; CHECK-NEXT:    fcvtzs x10, s3
-; CHECK-NEXT:    fcvtzs x11, s4
-; CHECK-NEXT:    fcvtzs x15, s7
-; CHECK-NEXT:    fmov d0, x9
-; CHECK-NEXT:    fmov d2, x12
-; CHECK-NEXT:    fcvtzs x13, s5
-; CHECK-NEXT:    fcvtzs x14, s6
-; CHECK-NEXT:    fmov d1, x8
-; CHECK-NEXT:    fmov d3, x10
-; CHECK-NEXT:    mov v0.d[1], x11
-; CHECK-NEXT:    mov v2.d[1], x15
-; CHECK-NEXT:    mov v1.d[1], x13
-; CHECK-NEXT:    mov v3.d[1], x14
+; CHECK-NEXT:    frintx v0.4s, v0.4s
+; CHECK-NEXT:    frintx v1.4s, v1.4s
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    uunpklo z1.d, z1.s
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.s
+; CHECK-NEXT:    movprfx z2, z1
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z1.s
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    mov z3.d, z2.d
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #16
+; CHECK-NEXT:    ext z3.b, z3.b, z2.b, #16
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    // kill: def $q2 killed $q2 killed $z2
+; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
+; CHECK-NEXT:    // kill: def $q3 killed $q3 killed $z3
 ; CHECK-NEXT:    ret
   %a = call <8 x i64> @llvm.lrint.v8i64.v8f32(<8 x float> %x)
   ret <8 x i64> %a
@@ -403,66 +254,37 @@ declare <8 x i64> @llvm.lrint.v8i64.v8f32(<8 x float>)
 define <16 x i64> @lrint_v16i64_v16f32(<16 x float> %x) {
 ; CHECK-LABEL: lrint_v16i64_v16f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v4.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    ext v5.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ext v6.16b, v2.16b, v2.16b, #8
-; CHECK-NEXT:    frintx s7, s0
-; CHECK-NEXT:    ext v16.16b, v3.16b, v3.16b, #8
-; CHECK-NEXT:    mov s0, v0.s[1]
-; CHECK-NEXT:    frintx s17, s4
-; CHECK-NEXT:    mov s4, v4.s[1]
-; CHECK-NEXT:    mov s18, v5.s[1]
-; CHECK-NEXT:    frintx s5, s5
-; CHECK-NEXT:    frintx s19, s6
-; CHECK-NEXT:    fcvtzs x8, s7
-; CHECK-NEXT:    frintx s7, s16
-; CHECK-NEXT:    mov s6, v6.s[1]
-; CHECK-NEXT:    mov s16, v16.s[1]
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    frintx s4, s4
-; CHECK-NEXT:    fcvtzs x9, s17
-; CHECK-NEXT:    frintx s17, s1
-; CHECK-NEXT:    mov s1, v1.s[1]
-; CHECK-NEXT:    frintx s18, s18
-; CHECK-NEXT:    fcvtzs x10, s5
-; CHECK-NEXT:    mov s5, v2.s[1]
-; CHECK-NEXT:    fcvtzs x11, s19
-; CHECK-NEXT:    mov s19, v3.s[1]
-; CHECK-NEXT:    frintx s2, s2
-; CHECK-NEXT:    fcvtzs x12, s7
-; CHECK-NEXT:    frintx s6, s6
-; CHECK-NEXT:    fcvtzs x13, s4
-; CHECK-NEXT:    frintx s4, s3
-; CHECK-NEXT:    frintx s16, s16
-; CHECK-NEXT:    fcvtzs x14, s18
-; CHECK-NEXT:    frintx s18, s1
-; CHECK-NEXT:    fcvtzs x15, s17
-; CHECK-NEXT:    frintx s20, s5
-; CHECK-NEXT:    frintx s17, s19
-; CHECK-NEXT:    fmov d1, x9
-; CHECK-NEXT:    fcvtzs x9, s2
-; CHECK-NEXT:    fmov d5, x11
-; CHECK-NEXT:    fmov d3, x10
-; CHECK-NEXT:    fcvtzs x11, s4
-; CHECK-NEXT:    fcvtzs x10, s0
-; CHECK-NEXT:    fmov d7, x12
-; CHECK-NEXT:    fcvtzs x12, s18
-; CHECK-NEXT:    fcvtzs x17, s6
-; CHECK-NEXT:    fcvtzs x18, s16
-; CHECK-NEXT:    fcvtzs x16, s20
-; CHECK-NEXT:    fcvtzs x0, s17
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d2, x15
-; CHECK-NEXT:    fmov d4, x9
-; CHECK-NEXT:    mov v1.d[1], x13
-; CHECK-NEXT:    fmov d6, x11
-; CHECK-NEXT:    mov v3.d[1], x14
-; CHECK-NEXT:    mov v0.d[1], x10
-; CHECK-NEXT:    mov v5.d[1], x17
-; CHECK-NEXT:    mov v7.d[1], x18
-; CHECK-NEXT:    mov v2.d[1], x12
-; CHECK-NEXT:    mov v4.d[1], x16
-; CHECK-NEXT:    mov v6.d[1], x0
+; CHECK-NEXT:    frintx v1.4s, v1.4s
+; CHECK-NEXT:    frintx v0.4s, v0.4s
+; CHECK-NEXT:    frintx v2.4s, v2.4s
+; CHECK-NEXT:    frintx v3.4s, v3.4s
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    uunpklo z1.d, z1.s
+; CHECK-NEXT:    uunpklo z4.d, z2.s
+; CHECK-NEXT:    uunpklo z3.d, z3.s
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.s
+; CHECK-NEXT:    movprfx z2, z1
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z1.s
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z4.s
+; CHECK-NEXT:    movprfx z6, z3
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z3.s
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    mov z3.d, z2.d
+; CHECK-NEXT:    mov z5.d, z4.d
+; CHECK-NEXT:    mov z7.d, z6.d
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #16
+; CHECK-NEXT:    ext z3.b, z3.b, z2.b, #16
+; CHECK-NEXT:    ext z5.b, z5.b, z4.b, #16
+; CHECK-NEXT:    ext z7.b, z7.b, z6.b, #16
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    // kill: def $q2 killed $q2 killed $z2
+; CHECK-NEXT:    // kill: def $q4 killed $q4 killed $z4
+; CHECK-NEXT:    // kill: def $q6 killed $q6 killed $z6
+; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
+; CHECK-NEXT:    // kill: def $q3 killed $q3 killed $z3
+; CHECK-NEXT:    // kill: def $q5 killed $q5 killed $z5
+; CHECK-NEXT:    // kill: def $q7 killed $q7 killed $z7
 ; CHECK-NEXT:    ret
   %a = call <16 x i64> @llvm.lrint.v16i64.v16f32(<16 x float> %x)
   ret <16 x i64> %a
@@ -472,134 +294,46 @@ declare <16 x i64> @llvm.lrint.v16i64.v16f32(<16 x float>)
 define <32 x i64> @lrint_v32i64_v32f32(<32 x float> %x) {
 ; CHECK-LABEL: lrint_v32i64_v32f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v16.16b, v2.16b, v2.16b, #8
-; CHECK-NEXT:    ext v20.16b, v5.16b, v5.16b, #8
-; CHECK-NEXT:    ext v17.16b, v3.16b, v3.16b, #8
-; CHECK-NEXT:    ext v18.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    ext v19.16b, v4.16b, v4.16b, #8
-; CHECK-NEXT:    ext v21.16b, v6.16b, v6.16b, #8
-; CHECK-NEXT:    ext v22.16b, v7.16b, v7.16b, #8
-; CHECK-NEXT:    frintx s24, s16
-; CHECK-NEXT:    mov s28, v20.s[1]
-; CHECK-NEXT:    frintx s25, s17
-; CHECK-NEXT:    frintx s26, s18
-; CHECK-NEXT:    frintx s27, s19
-; CHECK-NEXT:    frintx s29, s20
-; CHECK-NEXT:    mov s30, v21.s[1]
-; CHECK-NEXT:    frintx s20, s21
-; CHECK-NEXT:    frintx s21, s22
-; CHECK-NEXT:    mov s23, v22.s[1]
-; CHECK-NEXT:    mov s19, v19.s[1]
-; CHECK-NEXT:    mov s17, v17.s[1]
-; CHECK-NEXT:    fcvtzs x12, s24
-; CHECK-NEXT:    frintx s24, s28
-; CHECK-NEXT:    fcvtzs x13, s25
-; CHECK-NEXT:    mov s25, v7.s[1]
-; CHECK-NEXT:    fcvtzs x9, s26
-; CHECK-NEXT:    fcvtzs x11, s27
-; CHECK-NEXT:    fcvtzs x14, s20
-; CHECK-NEXT:    fcvtzs x15, s21
-; CHECK-NEXT:    frintx s26, s1
-; CHECK-NEXT:    frintx s23, s23
-; CHECK-NEXT:    frintx s27, s7
-; CHECK-NEXT:    frintx s22, s30
-; CHECK-NEXT:    fmov d20, x12
-; CHECK-NEXT:    fcvtzs x12, s24
-; CHECK-NEXT:    mov s24, v6.s[1]
-; CHECK-NEXT:    frintx s25, s25
-; CHECK-NEXT:    frintx s6, s6
-; CHECK-NEXT:    fcvtzs x10, s29
-; CHECK-NEXT:    fmov d7, x11
-; CHECK-NEXT:    fmov d21, x13
-; CHECK-NEXT:    frintx s28, s5
-; CHECK-NEXT:    fcvtzs x11, s23
-; CHECK-NEXT:    fmov d23, x14
-; CHECK-NEXT:    fcvtzs x14, s26
-; CHECK-NEXT:    fmov d26, x15
-; CHECK-NEXT:    fcvtzs x15, s27
-; CHECK-NEXT:    frintx s24, s24
-; CHECK-NEXT:    mov s27, v5.s[1]
-; CHECK-NEXT:    fcvtzs x13, s22
-; CHECK-NEXT:    fcvtzs x17, s25
-; CHECK-NEXT:    frintx s25, s4
-; CHECK-NEXT:    fcvtzs x18, s6
-; CHECK-NEXT:    fmov d6, x10
-; CHECK-NEXT:    frintx s22, s2
-; CHECK-NEXT:    mov v26.d[1], x11
-; CHECK-NEXT:    fmov d5, x14
-; CHECK-NEXT:    fcvtzs x10, s24
-; CHECK-NEXT:    fmov d24, x15
-; CHECK-NEXT:    fcvtzs x14, s28
-; CHECK-NEXT:    frintx s27, s27
-; CHECK-NEXT:    mov v23.d[1], x13
-; CHECK-NEXT:    mov s4, v4.s[1]
-; CHECK-NEXT:    fcvtzs x13, s25
-; CHECK-NEXT:    fmov d25, x18
-; CHECK-NEXT:    mov s16, v16.s[1]
-; CHECK-NEXT:    mov v24.d[1], x17
-; CHECK-NEXT:    fcvtzs x16, s22
-; CHECK-NEXT:    frintx s22, s3
-; CHECK-NEXT:    mov s3, v3.s[1]
-; CHECK-NEXT:    frintx s19, s19
-; CHECK-NEXT:    mov s2, v2.s[1]
-; CHECK-NEXT:    mov v25.d[1], x10
-; CHECK-NEXT:    fcvtzs x10, s27
-; CHECK-NEXT:    frintx s4, s4
-; CHECK-NEXT:    mov v6.d[1], x12
-; CHECK-NEXT:    frintx s17, s17
-; CHECK-NEXT:    mov s18, v18.s[1]
-; CHECK-NEXT:    stp q24, q26, [x8, #224]
-; CHECK-NEXT:    fmov d24, x14
-; CHECK-NEXT:    fcvtzs x11, s22
-; CHECK-NEXT:    ext v22.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    mov s1, v1.s[1]
-; CHECK-NEXT:    frintx s3, s3
-; CHECK-NEXT:    stp q25, q23, [x8, #192]
-; CHECK-NEXT:    frintx s2, s2
-; CHECK-NEXT:    fcvtzs x12, s4
-; CHECK-NEXT:    mov v24.d[1], x10
-; CHECK-NEXT:    fcvtzs x10, s19
-; CHECK-NEXT:    mov s19, v0.s[1]
-; CHECK-NEXT:    frintx s16, s16
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    fmov d4, x11
-; CHECK-NEXT:    mov s27, v22.s[1]
-; CHECK-NEXT:    frintx s22, s22
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    fcvtzs x11, s3
-; CHECK-NEXT:    fcvtzs x14, s2
-; CHECK-NEXT:    frintx s2, s18
-; CHECK-NEXT:    stp q24, q6, [x8, #160]
-; CHECK-NEXT:    fmov d6, x13
-; CHECK-NEXT:    fcvtzs x13, s17
-; CHECK-NEXT:    frintx s17, s19
-; CHECK-NEXT:    fmov d23, x16
-; CHECK-NEXT:    mov v7.d[1], x10
-; CHECK-NEXT:    frintx s3, s27
-; CHECK-NEXT:    fcvtzs x10, s22
-; CHECK-NEXT:    fcvtzs x15, s1
-; CHECK-NEXT:    mov v6.d[1], x12
-; CHECK-NEXT:    fcvtzs x12, s16
-; CHECK-NEXT:    mov v4.d[1], x11
-; CHECK-NEXT:    mov v21.d[1], x13
-; CHECK-NEXT:    fcvtzs x13, s0
-; CHECK-NEXT:    mov v23.d[1], x14
-; CHECK-NEXT:    fcvtzs x14, s17
-; CHECK-NEXT:    fcvtzs x11, s3
-; CHECK-NEXT:    fmov d0, x10
-; CHECK-NEXT:    mov v5.d[1], x15
-; CHECK-NEXT:    stp q6, q7, [x8, #128]
-; CHECK-NEXT:    mov v20.d[1], x12
-; CHECK-NEXT:    fcvtzs x12, s2
-; CHECK-NEXT:    stp q4, q21, [x8, #96]
-; CHECK-NEXT:    fmov d1, x13
-; CHECK-NEXT:    fmov d2, x9
-; CHECK-NEXT:    mov v0.d[1], x11
-; CHECK-NEXT:    stp q23, q20, [x8, #64]
-; CHECK-NEXT:    mov v1.d[1], x14
-; CHECK-NEXT:    mov v2.d[1], x12
-; CHECK-NEXT:    stp q5, q0, [x8, #32]
-; CHECK-NEXT:    stp q1, q2, [x8]
+; CHECK-NEXT:    frintx v7.4s, v7.4s
+; CHECK-NEXT:    frintx v6.4s, v6.4s
+; CHECK-NEXT:    mov x9, #28 // =0x1c
+; CHECK-NEXT:    frintx v5.4s, v5.4s
+; CHECK-NEXT:    frintx v4.4s, v4.4s
+; CHECK-NEXT:    frintx v3.4s, v3.4s
+; CHECK-NEXT:    frintx v2.4s, v2.4s
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    frintx v1.4s, v1.4s
+; CHECK-NEXT:    frintx v0.4s, v0.4s
+; CHECK-NEXT:    uunpklo z7.d, z7.s
+; CHECK-NEXT:    uunpklo z6.d, z6.s
+; CHECK-NEXT:    uunpklo z5.d, z5.s
+; CHECK-NEXT:    uunpklo z4.d, z4.s
+; CHECK-NEXT:    uunpklo z3.d, z3.s
+; CHECK-NEXT:    uunpklo z2.d, z2.s
+; CHECK-NEXT:    uunpklo z1.d, z1.s
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    fcvtzs z7.d, p0/m, z7.s
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z6.s
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z5.s
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z4.s
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.s
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.s
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.s
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.s
+; CHECK-NEXT:    st1d { z7.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #24 // =0x18
+; CHECK-NEXT:    st1d { z6.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #20 // =0x14
+; CHECK-NEXT:    st1d { z5.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #16 // =0x10
+; CHECK-NEXT:    st1d { z4.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #12 // =0xc
+; CHECK-NEXT:    st1d { z3.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #8 // =0x8
+; CHECK-NEXT:    st1d { z2.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #4 // =0x4
+; CHECK-NEXT:    st1d { z1.d }, p0, [x8, x9, lsl #3]
+; CHECK-NEXT:    st1d { z0.d }, p0, [x8]
 ; CHECK-NEXT:    ret
   %a = call <32 x i64> @llvm.lrint.v32i64.v32f32(<32 x float> %x)
   ret <32 x i64> %a
@@ -621,13 +355,8 @@ declare <1 x i64> @llvm.lrint.v1i64.v1f64(<1 x double>)
 define <2 x i64> @lrint_v2f64(<2 x double> %x) {
 ; CHECK-LABEL: lrint_v2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov d1, v0.d[1]
-; CHECK-NEXT:    frintx d0, d0
-; CHECK-NEXT:    frintx d1, d1
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    fcvtzs x9, d1
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    mov v0.d[1], x9
+; CHECK-NEXT:    frintx v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
 ; CHECK-NEXT:    ret
   %a = call <2 x i64> @llvm.lrint.v2i64.v2f64(<2 x double> %x)
   ret <2 x i64> %a
@@ -637,20 +366,17 @@ declare <2 x i64> @llvm.lrint.v2i64.v2f64(<2 x double>)
 define <4 x i64> @lrint_v4f64(<4 x double> %x) {
 ; CHECK-LABEL: lrint_v4f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov d2, v0.d[1]
-; CHECK-NEXT:    mov d3, v1.d[1]
-; CHECK-NEXT:    frintx d0, d0
-; CHECK-NEXT:    frintx d1, d1
-; CHECK-NEXT:    frintx d2, d2
-; CHECK-NEXT:    frintx d3, d3
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    fcvtzs x9, d1
-; CHECK-NEXT:    fcvtzs x10, d2
-; CHECK-NEXT:    fcvtzs x11, d3
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d1, x9
-; CHECK-NEXT:    mov v0.d[1], x10
-; CHECK-NEXT:    mov v1.d[1], x11
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    splice z0.d, p0, z0.d, z1.d
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #16
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
 ; CHECK-NEXT:    ret
   %a = call <4 x i64> @llvm.lrint.v4i64.v4f64(<4 x double> %x)
   ret <4 x i64> %a
@@ -660,34 +386,28 @@ declare <4 x i64> @llvm.lrint.v4i64.v4f64(<4 x double>)
 define <8 x i64> @lrint_v8f64(<8 x double> %x) {
 ; CHECK-LABEL: lrint_v8f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov d4, v0.d[1]
-; CHECK-NEXT:    mov d5, v1.d[1]
-; CHECK-NEXT:    mov d6, v2.d[1]
-; CHECK-NEXT:    mov d7, v3.d[1]
-; CHECK-NEXT:    frintx d0, d0
-; CHECK-NEXT:    frintx d1, d1
-; CHECK-NEXT:    frintx d2, d2
-; CHECK-NEXT:    frintx d3, d3
-; CHECK-NEXT:    frintx d4, d4
-; CHECK-NEXT:    frintx d5, d5
-; CHECK-NEXT:    frintx d6, d6
-; CHECK-NEXT:    frintx d7, d7
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    fcvtzs x9, d1
-; CHECK-NEXT:    fcvtzs x10, d2
-; CHECK-NEXT:    fcvtzs x11, d3
-; CHECK-NEXT:    fcvtzs x12, d4
-; CHECK-NEXT:    fcvtzs x13, d5
-; CHECK-NEXT:    fcvtzs x14, d6
-; CHECK-NEXT:    fcvtzs x15, d7
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d1, x9
-; CHECK-NEXT:    fmov d2, x10
-; CHECK-NEXT:    fmov d3, x11
-; CHECK-NEXT:    mov v0.d[1], x12
-; CHECK-NEXT:    mov v1.d[1], x13
-; CHECK-NEXT:    mov v2.d[1], x14
-; CHECK-NEXT:    mov v3.d[1], x15
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    // kill: def $q3 killed $q3 def $z3
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    splice z2.d, p0, z2.d, z3.d
+; CHECK-NEXT:    splice z0.d, p0, z0.d, z1.d
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
+; CHECK-NEXT:    movprfx z1, z2
+; CHECK-NEXT:    frintx z1.d, p0/m, z2.d
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
+; CHECK-NEXT:    movprfx z2, z1
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z1.d
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    mov z3.d, z2.d
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #16
+; CHECK-NEXT:    ext z3.b, z3.b, z2.b, #16
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    // kill: def $q2 killed $q2 killed $z2
+; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
+; CHECK-NEXT:    // kill: def $q3 killed $q3 killed $z3
 ; CHECK-NEXT:    ret
   %a = call <8 x i64> @llvm.lrint.v8i64.v8f64(<8 x double> %x)
   ret <8 x i64> %a
@@ -697,62 +417,50 @@ declare <8 x i64> @llvm.lrint.v8i64.v8f64(<8 x double>)
 define <16 x i64> @lrint_v16f64(<16 x double> %x) {
 ; CHECK-LABEL: lrint_v16f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov d16, v0.d[1]
-; CHECK-NEXT:    mov d17, v1.d[1]
-; CHECK-NEXT:    frintx d0, d0
-; CHECK-NEXT:    frintx d1, d1
-; CHECK-NEXT:    frintx d18, d2
-; CHECK-NEXT:    mov d2, v2.d[1]
-; CHECK-NEXT:    frintx d19, d3
-; CHECK-NEXT:    mov d3, v3.d[1]
-; CHECK-NEXT:    frintx d16, d16
-; CHECK-NEXT:    frintx d17, d17
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    frintx d0, d4
-; CHECK-NEXT:    mov d4, v4.d[1]
-; CHECK-NEXT:    fcvtzs x9, d1
-; CHECK-NEXT:    frintx d1, d5
-; CHECK-NEXT:    mov d5, v5.d[1]
-; CHECK-NEXT:    fcvtzs x12, d18
-; CHECK-NEXT:    frintx d2, d2
-; CHECK-NEXT:    fcvtzs x13, d19
-; CHECK-NEXT:    frintx d18, d3
-; CHECK-NEXT:    fcvtzs x10, d16
-; CHECK-NEXT:    mov d16, v6.d[1]
-; CHECK-NEXT:    fcvtzs x11, d17
-; CHECK-NEXT:    mov d17, v7.d[1]
-; CHECK-NEXT:    frintx d6, d6
-; CHECK-NEXT:    frintx d7, d7
-; CHECK-NEXT:    frintx d4, d4
-; CHECK-NEXT:    frintx d5, d5
-; CHECK-NEXT:    fcvtzs x14, d0
-; CHECK-NEXT:    fcvtzs x15, d1
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d1, x9
-; CHECK-NEXT:    frintx d16, d16
-; CHECK-NEXT:    fcvtzs x9, d2
-; CHECK-NEXT:    fmov d2, x12
-; CHECK-NEXT:    frintx d17, d17
-; CHECK-NEXT:    fcvtzs x8, d6
-; CHECK-NEXT:    fcvtzs x12, d7
-; CHECK-NEXT:    fmov d3, x13
-; CHECK-NEXT:    fcvtzs x13, d18
-; CHECK-NEXT:    fcvtzs x16, d4
-; CHECK-NEXT:    fcvtzs x17, d5
-; CHECK-NEXT:    fmov d4, x14
-; CHECK-NEXT:    fmov d5, x15
-; CHECK-NEXT:    fcvtzs x18, d16
-; CHECK-NEXT:    mov v0.d[1], x10
-; CHECK-NEXT:    mov v1.d[1], x11
-; CHECK-NEXT:    fcvtzs x0, d17
-; CHECK-NEXT:    fmov d6, x8
-; CHECK-NEXT:    fmov d7, x12
-; CHECK-NEXT:    mov v2.d[1], x9
-; CHECK-NEXT:    mov v3.d[1], x13
-; CHECK-NEXT:    mov v4.d[1], x16
-; CHECK-NEXT:    mov v5.d[1], x17
-; CHECK-NEXT:    mov v6.d[1], x18
-; CHECK-NEXT:    mov v7.d[1], x0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    // kill: def $q6 killed $q6 def $z6
+; CHECK-NEXT:    // kill: def $q4 killed $q4 def $z4
+; CHECK-NEXT:    // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    // kill: def $q7 killed $q7 def $z7
+; CHECK-NEXT:    // kill: def $q5 killed $q5 def $z5
+; CHECK-NEXT:    // kill: def $q3 killed $q3 def $z3
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    splice z2.d, p0, z2.d, z3.d
+; CHECK-NEXT:    splice z0.d, p0, z0.d, z1.d
+; CHECK-NEXT:    splice z6.d, p0, z6.d, z7.d
+; CHECK-NEXT:    splice z4.d, p0, z4.d, z5.d
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
+; CHECK-NEXT:    movprfx z1, z2
+; CHECK-NEXT:    frintx z1.d, p0/m, z2.d
+; CHECK-NEXT:    movprfx z5, z6
+; CHECK-NEXT:    frintx z5.d, p0/m, z6.d
+; CHECK-NEXT:    movprfx z3, z4
+; CHECK-NEXT:    frintx z3.d, p0/m, z4.d
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
+; CHECK-NEXT:    movprfx z2, z1
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z1.d
+; CHECK-NEXT:    movprfx z6, z5
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z5.d
+; CHECK-NEXT:    movprfx z4, z3
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z3.d
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    mov z3.d, z2.d
+; CHECK-NEXT:    mov z7.d, z6.d
+; CHECK-NEXT:    mov z5.d, z4.d
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #16
+; CHECK-NEXT:    ext z3.b, z3.b, z2.b, #16
+; CHECK-NEXT:    ext z7.b, z7.b, z6.b, #16
+; CHECK-NEXT:    ext z5.b, z5.b, z4.b, #16
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    // kill: def $q2 killed $q2 killed $z2
+; CHECK-NEXT:    // kill: def $q4 killed $q4 killed $z4
+; CHECK-NEXT:    // kill: def $q6 killed $q6 killed $z6
+; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
+; CHECK-NEXT:    // kill: def $q3 killed $q3 killed $z3
+; CHECK-NEXT:    // kill: def $q7 killed $q7 killed $z7
+; CHECK-NEXT:    // kill: def $q5 killed $q5 killed $z5
 ; CHECK-NEXT:    ret
   %a = call <16 x i64> @llvm.lrint.v16i64.v16f64(<16 x double> %x)
   ret <16 x i64> %a
@@ -762,130 +470,63 @@ declare <16 x i64> @llvm.lrint.v16i64.v16f64(<16 x double>)
 define <32 x i64> @lrint_v32f64(<32 x double> %x) {
 ; CHECK-LABEL: lrint_v32f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintx d20, d0
-; CHECK-NEXT:    frintx d22, d3
-; CHECK-NEXT:    frintx d21, d4
+; CHECK-NEXT:    ldp q17, q16, [sp, #96]
+; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    ldp q19, q18, [sp, #64]
-; CHECK-NEXT:    frintx d23, d5
-; CHECK-NEXT:    ldp q27, q26, [sp, #96]
-; CHECK-NEXT:    mov d4, v4.d[1]
-; CHECK-NEXT:    ldp q16, q17, [sp, #32]
-; CHECK-NEXT:    mov d5, v5.d[1]
-; CHECK-NEXT:    fcvtzs x9, d20
-; CHECK-NEXT:    frintx d20, d6
-; CHECK-NEXT:    fcvtzs x11, d22
-; CHECK-NEXT:    frintx d22, d19
-; CHECK-NEXT:    fcvtzs x12, d21
-; CHECK-NEXT:    fcvtzs x10, d23
-; CHECK-NEXT:    mov d21, v26.d[1]
-; CHECK-NEXT:    frintx d23, d27
-; CHECK-NEXT:    mov d27, v27.d[1]
-; CHECK-NEXT:    frintx d24, d16
-; CHECK-NEXT:    mov d19, v19.d[1]
-; CHECK-NEXT:    frintx d25, d17
-; CHECK-NEXT:    fcvtzs x13, d20
-; CHECK-NEXT:    mov d20, v18.d[1]
-; CHECK-NEXT:    frintx d18, d18
-; CHECK-NEXT:    fcvtzs x16, d22
-; CHECK-NEXT:    frintx d22, d26
-; CHECK-NEXT:    mov d16, v16.d[1]
-; CHECK-NEXT:    frintx d21, d21
-; CHECK-NEXT:    fcvtzs x17, d23
-; CHECK-NEXT:    frintx d23, d27
-; CHECK-NEXT:    fcvtzs x14, d24
-; CHECK-NEXT:    frintx d26, d19
-; CHECK-NEXT:    fmov d19, x11
-; CHECK-NEXT:    frintx d20, d20
-; CHECK-NEXT:    mov d27, v17.d[1]
-; CHECK-NEXT:    fcvtzs x15, d25
-; CHECK-NEXT:    ldp q25, q24, [sp]
-; CHECK-NEXT:    fcvtzs x11, d22
-; CHECK-NEXT:    fmov d17, x12
-; CHECK-NEXT:    fcvtzs x12, d21
-; CHECK-NEXT:    fcvtzs x0, d23
-; CHECK-NEXT:    fmov d23, x14
-; CHECK-NEXT:    fcvtzs x14, d18
-; CHECK-NEXT:    fmov d18, x17
-; CHECK-NEXT:    fcvtzs x17, d20
-; CHECK-NEXT:    frintx d21, d7
-; CHECK-NEXT:    fcvtzs x18, d26
-; CHECK-NEXT:    fmov d20, x11
-; CHECK-NEXT:    frintx d22, d25
-; CHECK-NEXT:    frintx d26, d27
-; CHECK-NEXT:    frintx d16, d16
-; CHECK-NEXT:    mov v18.d[1], x0
-; CHECK-NEXT:    mov d25, v25.d[1]
-; CHECK-NEXT:    mov d7, v7.d[1]
-; CHECK-NEXT:    mov d6, v6.d[1]
-; CHECK-NEXT:    mov d0, v0.d[1]
-; CHECK-NEXT:    mov v20.d[1], x12
-; CHECK-NEXT:    fcvtzs x11, d21
-; CHECK-NEXT:    fmov d21, x15
-; CHECK-NEXT:    fcvtzs x12, d22
-; CHECK-NEXT:    fmov d22, x16
-; CHECK-NEXT:    fcvtzs x15, d26
-; CHECK-NEXT:    fmov d26, x14
-; CHECK-NEXT:    fcvtzs x14, d16
-; CHECK-NEXT:    frintx d25, d25
-; CHECK-NEXT:    frintx d7, d7
-; CHECK-NEXT:    mov d16, v1.d[1]
-; CHECK-NEXT:    mov d3, v3.d[1]
-; CHECK-NEXT:    stp q18, q20, [x8, #224]
-; CHECK-NEXT:    mov d18, v24.d[1]
-; CHECK-NEXT:    mov v22.d[1], x18
-; CHECK-NEXT:    mov v26.d[1], x17
-; CHECK-NEXT:    frintx d24, d24
-; CHECK-NEXT:    mov v21.d[1], x15
-; CHECK-NEXT:    mov v23.d[1], x14
-; CHECK-NEXT:    frintx d20, d2
-; CHECK-NEXT:    mov d2, v2.d[1]
-; CHECK-NEXT:    frintx d6, d6
-; CHECK-NEXT:    frintx d5, d5
-; CHECK-NEXT:    frintx d4, d4
-; CHECK-NEXT:    frintx d18, d18
-; CHECK-NEXT:    frintx d1, d1
-; CHECK-NEXT:    frintx d3, d3
-; CHECK-NEXT:    stp q22, q26, [x8, #192]
-; CHECK-NEXT:    fmov d22, x10
-; CHECK-NEXT:    fcvtzs x10, d24
-; CHECK-NEXT:    stp q23, q21, [x8, #160]
-; CHECK-NEXT:    fmov d21, x11
-; CHECK-NEXT:    fmov d24, x13
-; CHECK-NEXT:    frintx d2, d2
-; CHECK-NEXT:    fcvtzs x13, d6
-; CHECK-NEXT:    frintx d6, d16
-; CHECK-NEXT:    fcvtzs x11, d18
-; CHECK-NEXT:    fmov d18, x12
-; CHECK-NEXT:    fcvtzs x12, d25
-; CHECK-NEXT:    fmov d23, x10
-; CHECK-NEXT:    fcvtzs x10, d7
-; CHECK-NEXT:    fcvtzs x14, d5
-; CHECK-NEXT:    frintx d0, d0
-; CHECK-NEXT:    fcvtzs x15, d3
-; CHECK-NEXT:    mov v24.d[1], x13
-; CHECK-NEXT:    fcvtzs x13, d2
-; CHECK-NEXT:    fmov d2, x9
-; CHECK-NEXT:    mov v23.d[1], x11
-; CHECK-NEXT:    fcvtzs x11, d4
-; CHECK-NEXT:    mov v18.d[1], x12
-; CHECK-NEXT:    fcvtzs x12, d20
-; CHECK-NEXT:    mov v21.d[1], x10
-; CHECK-NEXT:    fcvtzs x10, d1
-; CHECK-NEXT:    mov v22.d[1], x14
-; CHECK-NEXT:    fcvtzs x14, d6
-; CHECK-NEXT:    mov v19.d[1], x15
-; CHECK-NEXT:    stp q18, q23, [x8, #128]
-; CHECK-NEXT:    mov v17.d[1], x11
-; CHECK-NEXT:    fcvtzs x11, d0
-; CHECK-NEXT:    stp q24, q21, [x8, #96]
-; CHECK-NEXT:    fmov d0, x12
-; CHECK-NEXT:    fmov d1, x10
-; CHECK-NEXT:    stp q17, q22, [x8, #64]
-; CHECK-NEXT:    mov v0.d[1], x13
-; CHECK-NEXT:    mov v1.d[1], x14
-; CHECK-NEXT:    mov v2.d[1], x11
-; CHECK-NEXT:    stp q0, q19, [x8, #32]
-; CHECK-NEXT:    stp q2, q1, [x8]
+; CHECK-NEXT:    ptrue p1.d, vl4
+; CHECK-NEXT:    // kill: def $q7 killed $q7 def $z7
+; CHECK-NEXT:    // kill: def $q6 killed $q6 def $z6
+; CHECK-NEXT:    // kill: def $q5 killed $q5 def $z5
+; CHECK-NEXT:    // kill: def $q4 killed $q4 def $z4
+; CHECK-NEXT:    // kill: def $q3 killed $q3 def $z3
+; CHECK-NEXT:    // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    mov x9, #28 // =0x1c
+; CHECK-NEXT:    splice z17.d, p0, z17.d, z16.d
+; CHECK-NEXT:    ldp q20, q16, [sp, #32]
+; CHECK-NEXT:    splice z19.d, p0, z19.d, z18.d
+; CHECK-NEXT:    ldp q21, q18, [sp]
+; CHECK-NEXT:    splice z6.d, p0, z6.d, z7.d
+; CHECK-NEXT:    splice z4.d, p0, z4.d, z5.d
+; CHECK-NEXT:    splice z2.d, p0, z2.d, z3.d
+; CHECK-NEXT:    splice z20.d, p0, z20.d, z16.d
+; CHECK-NEXT:    splice z0.d, p0, z0.d, z1.d
+; CHECK-NEXT:    splice z21.d, p0, z21.d, z18.d
+; CHECK-NEXT:    movprfx z7, z17
+; CHECK-NEXT:    frintx z7.d, p1/m, z17.d
+; CHECK-NEXT:    movprfx z5, z19
+; CHECK-NEXT:    frintx z5.d, p1/m, z19.d
+; CHECK-NEXT:    frintx z6.d, p1/m, z6.d
+; CHECK-NEXT:    frintx z4.d, p1/m, z4.d
+; CHECK-NEXT:    frintx z2.d, p1/m, z2.d
+; CHECK-NEXT:    movprfx z3, z20
+; CHECK-NEXT:    frintx z3.d, p1/m, z20.d
+; CHECK-NEXT:    frintx z0.d, p1/m, z0.d
+; CHECK-NEXT:    movprfx z1, z21
+; CHECK-NEXT:    frintx z1.d, p1/m, z21.d
+; CHECK-NEXT:    fcvtzs z7.d, p1/m, z7.d
+; CHECK-NEXT:    fcvtzs z5.d, p1/m, z5.d
+; CHECK-NEXT:    fcvtzs z6.d, p1/m, z6.d
+; CHECK-NEXT:    fcvtzs z4.d, p1/m, z4.d
+; CHECK-NEXT:    fcvtzs z2.d, p1/m, z2.d
+; CHECK-NEXT:    fcvtzs z3.d, p1/m, z3.d
+; CHECK-NEXT:    fcvtzs z0.d, p1/m, z0.d
+; CHECK-NEXT:    fcvtzs z1.d, p1/m, z1.d
+; CHECK-NEXT:    st1d { z7.d }, p1, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #24 // =0x18
+; CHECK-NEXT:    st1d { z5.d }, p1, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #20 // =0x14
+; CHECK-NEXT:    st1d { z3.d }, p1, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #16 // =0x10
+; CHECK-NEXT:    st1d { z1.d }, p1, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #12 // =0xc
+; CHECK-NEXT:    st1d { z6.d }, p1, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #8 // =0x8
+; CHECK-NEXT:    st1d { z4.d }, p1, [x8, x9, lsl #3]
+; CHECK-NEXT:    mov x9, #4 // =0x4
+; CHECK-NEXT:    st1d { z2.d }, p1, [x8, x9, lsl #3]
+; CHECK-NEXT:    st1d { z0.d }, p1, [x8]
 ; CHECK-NEXT:    ret
   %a = call <32 x i64> @llvm.lrint.v32i64.v16f64(<32 x double> %x)
   ret <32 x i64> %a
diff --git a/llvm/test/CodeGen/AArch64/sve-llrint.ll b/llvm/test/CodeGen/AArch64/sve-llrint.ll
index 11d45b3a43521..825ff55117d5c 100644
--- a/llvm/test/CodeGen/AArch64/sve-llrint.ll
+++ b/llvm/test/CodeGen/AArch64/sve-llrint.ll
@@ -5,6 +5,7 @@ define <vscale x 1 x i64> @llrint_v1i64_v1f16(<vscale x 1 x half> %x) {
 ; CHECK-LABEL: llrint_v1i64_v1f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z0.h, p0/m, z0.h
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.h
 ; CHECK-NEXT:    ret
   %a = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f16(<vscale x 1 x half> %x)
@@ -16,6 +17,7 @@ define <vscale x 2 x i64> @llrint_v1i64_v2f16(<vscale x 2 x half> %x) {
 ; CHECK-LABEL: llrint_v1i64_v2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z0.h, p0/m, z0.h
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.h
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f16(<vscale x 2 x half> %x)
@@ -27,8 +29,11 @@ define <vscale x 4 x i64> @llrint_v4i64_v4f16(<vscale x 4 x half> %x) {
 ; CHECK-LABEL: llrint_v4i64_v4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uunpklo z1.d, z0.s
-; CHECK-NEXT:    uunpkhi z2.d, z0.s
+; CHECK-NEXT:    uunpkhi z0.d, z0.s
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z1.h, p0/m, z1.h
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    frintx z2.h, p0/m, z0.h
 ; CHECK-NEXT:    movprfx z0, z1
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z1.h
 ; CHECK-NEXT:    movprfx z1, z2
@@ -48,7 +53,12 @@ define <vscale x 8 x i64> @llrint_v8i64_v8f16(<vscale x 8 x half> %x) {
 ; CHECK-NEXT:    uunpklo z2.d, z1.s
 ; CHECK-NEXT:    uunpkhi z1.d, z1.s
 ; CHECK-NEXT:    uunpklo z3.d, z0.s
-; CHECK-NEXT:    uunpkhi z4.d, z0.s
+; CHECK-NEXT:    uunpkhi z0.d, z0.s
+; CHECK-NEXT:    frintx z1.h, p0/m, z1.h
+; CHECK-NEXT:    frintx z2.h, p0/m, z2.h
+; CHECK-NEXT:    frintx z3.h, p0/m, z3.h
+; CHECK-NEXT:    movprfx z4, z0
+; CHECK-NEXT:    frintx z4.h, p0/m, z0.h
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.h
 ; CHECK-NEXT:    movprfx z0, z2
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z2.h
@@ -73,25 +83,36 @@ define <vscale x 16 x i64> @llrint_v16i64_v16f16(<vscale x 16 x half> %x) {
 ; CHECK-NEXT:    uunpklo z4.d, z2.s
 ; CHECK-NEXT:    uunpkhi z2.d, z2.s
 ; CHECK-NEXT:    uunpklo z5.d, z0.s
-; CHECK-NEXT:    uunpkhi z6.d, z0.s
-; CHECK-NEXT:    uunpklo z7.d, z3.s
-; CHECK-NEXT:    uunpkhi z24.d, z3.s
-; CHECK-NEXT:    uunpklo z25.d, z1.s
-; CHECK-NEXT:    uunpkhi z26.d, z1.s
+; CHECK-NEXT:    uunpkhi z0.d, z0.s
+; CHECK-NEXT:    uunpklo z6.d, z3.s
+; CHECK-NEXT:    uunpkhi z3.d, z3.s
+; CHECK-NEXT:    uunpklo z7.d, z1.s
+; CHECK-NEXT:    uunpkhi z1.d, z1.s
+; CHECK-NEXT:    frintx z4.h, p0/m, z4.h
+; CHECK-NEXT:    frintx z2.h, p0/m, z2.h
+; CHECK-NEXT:    frintx z5.h, p0/m, z5.h
+; CHECK-NEXT:    movprfx z24, z0
+; CHECK-NEXT:    frintx z24.h, p0/m, z0.h
+; CHECK-NEXT:    frintx z6.h, p0/m, z6.h
+; CHECK-NEXT:    movprfx z25, z3
+; CHECK-NEXT:    frintx z25.h, p0/m, z3.h
+; CHECK-NEXT:    frintx z7.h, p0/m, z7.h
+; CHECK-NEXT:    movprfx z26, z1
+; CHECK-NEXT:    frintx z26.h, p0/m, z1.h
 ; CHECK-NEXT:    movprfx z0, z4
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z4.h
 ; CHECK-NEXT:    movprfx z1, z2
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z2.h
 ; CHECK-NEXT:    movprfx z2, z5
 ; CHECK-NEXT:    fcvtzs z2.d, p0/m, z5.h
-; CHECK-NEXT:    movprfx z3, z6
-; CHECK-NEXT:    fcvtzs z3.d, p0/m, z6.h
-; CHECK-NEXT:    movprfx z4, z7
-; CHECK-NEXT:    fcvtzs z4.d, p0/m, z7.h
-; CHECK-NEXT:    movprfx z5, z24
-; CHECK-NEXT:    fcvtzs z5.d, p0/m, z24.h
-; CHECK-NEXT:    movprfx z6, z25
-; CHECK-NEXT:    fcvtzs z6.d, p0/m, z25.h
+; CHECK-NEXT:    movprfx z3, z24
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z24.h
+; CHECK-NEXT:    movprfx z4, z6
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z6.h
+; CHECK-NEXT:    movprfx z5, z25
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z25.h
+; CHECK-NEXT:    movprfx z6, z7
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z7.h
 ; CHECK-NEXT:    movprfx z7, z26
 ; CHECK-NEXT:    fcvtzs z7.d, p0/m, z26.h
 ; CHECK-NEXT:    ret
@@ -110,71 +131,86 @@ define <vscale x 32 x i64> @llrint_v32i64_v32f16(<vscale x 32 x half> %x) {
 ; CHECK-NEXT:    uunpkhi z7.s, z2.h
 ; CHECK-NEXT:    uunpklo z2.s, z2.h
 ; CHECK-NEXT:    ptrue p1.b
-; CHECK-NEXT:    uunpklo z24.s, z0.h
-; CHECK-NEXT:    uunpkhi z0.s, z0.h
 ; CHECK-NEXT:    uunpkhi z5.d, z4.s
 ; CHECK-NEXT:    uunpklo z4.d, z4.s
 ; CHECK-NEXT:    uunpkhi z6.d, z3.s
 ; CHECK-NEXT:    uunpklo z3.d, z3.s
-; CHECK-NEXT:    uunpkhi z25.d, z2.s
-; CHECK-NEXT:    uunpklo z2.d, z2.s
-; CHECK-NEXT:    uunpklo z26.d, z0.s
-; CHECK-NEXT:    uunpkhi z0.d, z0.s
+; CHECK-NEXT:    uunpkhi z24.d, z7.s
+; CHECK-NEXT:    uunpklo z7.d, z7.s
+; CHECK-NEXT:    frintx z5.h, p0/m, z5.h
+; CHECK-NEXT:    frintx z4.h, p0/m, z4.h
+; CHECK-NEXT:    frintx z6.h, p0/m, z6.h
+; CHECK-NEXT:    frintx z3.h, p0/m, z3.h
+; CHECK-NEXT:    frintx z24.h, p0/m, z24.h
+; CHECK-NEXT:    frintx z7.h, p0/m, z7.h
 ; CHECK-NEXT:    fcvtzs z5.d, p0/m, z5.h
 ; CHECK-NEXT:    fcvtzs z4.d, p0/m, z4.h
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z6.h
 ; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.h
-; CHECK-NEXT:    fcvtzs z25.d, p0/m, z25.h
-; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.h
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.h
+; CHECK-NEXT:    fcvtzs z7.d, p0/m, z7.h
 ; CHECK-NEXT:    st1b { z5.b }, p1, [x8, x9]
 ; CHECK-NEXT:    rdvl x9, #14
-; CHECK-NEXT:    movprfx z5, z6
-; CHECK-NEXT:    fcvtzs z5.d, p0/m, z6.h
-; CHECK-NEXT:    uunpkhi z6.d, z7.s
+; CHECK-NEXT:    uunpkhi z5.s, z1.h
 ; CHECK-NEXT:    st1b { z4.b }, p1, [x8, x9]
-; CHECK-NEXT:    uunpkhi z4.s, z1.h
-; CHECK-NEXT:    uunpklo z7.d, z7.s
+; CHECK-NEXT:    uunpkhi z4.d, z2.s
 ; CHECK-NEXT:    rdvl x9, #13
-; CHECK-NEXT:    uunpklo z1.s, z1.h
-; CHECK-NEXT:    st1b { z5.b }, p1, [x8, x9]
+; CHECK-NEXT:    st1b { z6.b }, p1, [x8, x9]
 ; CHECK-NEXT:    rdvl x9, #12
-; CHECK-NEXT:    movprfx z5, z6
-; CHECK-NEXT:    fcvtzs z5.d, p0/m, z6.h
+; CHECK-NEXT:    movprfx z6, z24
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z24.h
+; CHECK-NEXT:    uunpklo z1.s, z1.h
+; CHECK-NEXT:    uunpklo z2.d, z2.s
+; CHECK-NEXT:    uunpkhi z24.s, z0.h
 ; CHECK-NEXT:    st1b { z3.b }, p1, [x8, x9]
-; CHECK-NEXT:    uunpkhi z3.d, z4.s
-; CHECK-NEXT:    uunpklo z4.d, z4.s
-; CHECK-NEXT:    fcvtzs z7.d, p0/m, z7.h
+; CHECK-NEXT:    uunpklo z3.d, z5.s
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    frintx z4.h, p0/m, z4.h
 ; CHECK-NEXT:    rdvl x9, #11
-; CHECK-NEXT:    uunpkhi z6.d, z24.s
-; CHECK-NEXT:    uunpkhi z27.d, z1.s
-; CHECK-NEXT:    uunpklo z1.d, z1.s
-; CHECK-NEXT:    st1b { z5.b }, p1, [x8, x9]
+; CHECK-NEXT:    uunpkhi z25.d, z5.s
+; CHECK-NEXT:    st1b { z6.b }, p1, [x8, x9]
 ; CHECK-NEXT:    rdvl x9, #10
-; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.h
-; CHECK-NEXT:    st1b { z7.b }, p1, [x8, x9]
+; CHECK-NEXT:    uunpkhi z5.d, z1.s
+; CHECK-NEXT:    uunpklo z1.d, z1.s
+; CHECK-NEXT:    frintx z2.h, p0/m, z2.h
+; CHECK-NEXT:    uunpkhi z6.d, z24.s
+; CHECK-NEXT:    uunpklo z24.d, z24.s
+; CHECK-NEXT:    frintx z3.h, p0/m, z3.h
 ; CHECK-NEXT:    fcvtzs z4.d, p0/m, z4.h
-; CHECK-NEXT:    uunpklo z7.d, z24.s
+; CHECK-NEXT:    st1b { z7.b }, p1, [x8, x9]
+; CHECK-NEXT:    uunpkhi z7.d, z0.s
+; CHECK-NEXT:    uunpklo z0.d, z0.s
 ; CHECK-NEXT:    rdvl x9, #9
-; CHECK-NEXT:    movprfx z5, z27
-; CHECK-NEXT:    fcvtzs z5.d, p0/m, z27.h
-; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.h
-; CHECK-NEXT:    st1b { z25.b }, p1, [x8, x9]
+; CHECK-NEXT:    frintx z25.h, p0/m, z25.h
+; CHECK-NEXT:    frintx z5.h, p0/m, z5.h
+; CHECK-NEXT:    frintx z1.h, p0/m, z1.h
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.h
+; CHECK-NEXT:    frintx z6.h, p0/m, z6.h
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.h
+; CHECK-NEXT:    st1b { z4.b }, p1, [x8, x9]
+; CHECK-NEXT:    movprfx z4, z24
+; CHECK-NEXT:    frintx z4.h, p0/m, z24.h
+; CHECK-NEXT:    frintx z7.h, p0/m, z7.h
+; CHECK-NEXT:    frintx z0.h, p0/m, z0.h
 ; CHECK-NEXT:    rdvl x9, #8
+; CHECK-NEXT:    fcvtzs z25.d, p0/m, z25.h
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z5.h
 ; CHECK-NEXT:    st1b { z2.b }, p1, [x8, x9]
-; CHECK-NEXT:    movprfx z2, z26
-; CHECK-NEXT:    fcvtzs z2.d, p0/m, z26.h
-; CHECK-NEXT:    st1d { z3.d }, p0, [x8, #7, mul vl]
-; CHECK-NEXT:    movprfx z3, z6
-; CHECK-NEXT:    fcvtzs z3.d, p0/m, z6.h
-; CHECK-NEXT:    st1d { z4.d }, p0, [x8, #6, mul vl]
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.h
+; CHECK-NEXT:    movprfx z2, z6
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z6.h
+; CHECK-NEXT:    st1d { z3.d }, p0, [x8, #6, mul vl]
+; CHECK-NEXT:    movprfx z3, z4
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z4.h
 ; CHECK-NEXT:    movprfx z4, z7
 ; CHECK-NEXT:    fcvtzs z4.d, p0/m, z7.h
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.h
+; CHECK-NEXT:    st1d { z25.d }, p0, [x8, #7, mul vl]
 ; CHECK-NEXT:    st1d { z5.d }, p0, [x8, #5, mul vl]
 ; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #4, mul vl]
-; CHECK-NEXT:    st1d { z0.d }, p0, [x8, #3, mul vl]
-; CHECK-NEXT:    st1d { z2.d }, p0, [x8, #2, mul vl]
-; CHECK-NEXT:    st1d { z3.d }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    st1d { z4.d }, p0, [x8]
+; CHECK-NEXT:    st1d { z2.d }, p0, [x8, #3, mul vl]
+; CHECK-NEXT:    st1d { z3.d }, p0, [x8, #2, mul vl]
+; CHECK-NEXT:    st1d { z4.d }, p0, [x8, #1, mul vl]
+; CHECK-NEXT:    st1d { z0.d }, p0, [x8]
 ; CHECK-NEXT:    ret
   %a = call <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv32f16(<vscale x 32 x half> %x)
   ret <vscale x 32 x i64> %a
@@ -185,6 +221,7 @@ define <vscale x 1 x i64> @llrint_v1i64_v1f32(<vscale x 1 x float> %x) {
 ; CHECK-LABEL: llrint_v1i64_v1f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z0.s, p0/m, z0.s
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.s
 ; CHECK-NEXT:    ret
   %a = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f32(<vscale x 1 x float> %x)
@@ -196,6 +233,7 @@ define <vscale x 2 x i64> @llrint_v2i64_v2f32(<vscale x 2 x float> %x) {
 ; CHECK-LABEL: llrint_v2i64_v2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z0.s, p0/m, z0.s
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.s
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f32(<vscale x 2 x float> %x)
@@ -207,8 +245,11 @@ define <vscale x 4 x i64> @llrint_v4i64_v4f32(<vscale x 4 x float> %x) {
 ; CHECK-LABEL: llrint_v4i64_v4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uunpklo z1.d, z0.s
-; CHECK-NEXT:    uunpkhi z2.d, z0.s
+; CHECK-NEXT:    uunpkhi z0.d, z0.s
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z1.s, p0/m, z1.s
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    frintx z2.s, p0/m, z0.s
 ; CHECK-NEXT:    movprfx z0, z1
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z1.s
 ; CHECK-NEXT:    movprfx z1, z2
@@ -223,16 +264,22 @@ define <vscale x 8 x i64> @llrint_v8i64_v8f32(<vscale x 8 x float> %x) {
 ; CHECK-LABEL: llrint_v8i64_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uunpklo z2.d, z0.s
-; CHECK-NEXT:    uunpkhi z3.d, z0.s
-; CHECK-NEXT:    uunpklo z4.d, z1.s
-; CHECK-NEXT:    uunpkhi z5.d, z1.s
+; CHECK-NEXT:    uunpkhi z0.d, z0.s
+; CHECK-NEXT:    uunpklo z3.d, z1.s
+; CHECK-NEXT:    uunpkhi z1.d, z1.s
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z2.s, p0/m, z2.s
+; CHECK-NEXT:    movprfx z4, z0
+; CHECK-NEXT:    frintx z4.s, p0/m, z0.s
+; CHECK-NEXT:    frintx z3.s, p0/m, z3.s
+; CHECK-NEXT:    movprfx z5, z1
+; CHECK-NEXT:    frintx z5.s, p0/m, z1.s
 ; CHECK-NEXT:    movprfx z0, z2
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z2.s
-; CHECK-NEXT:    movprfx z1, z3
-; CHECK-NEXT:    fcvtzs z1.d, p0/m, z3.s
-; CHECK-NEXT:    movprfx z2, z4
-; CHECK-NEXT:    fcvtzs z2.d, p0/m, z4.s
+; CHECK-NEXT:    movprfx z1, z4
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z4.s
+; CHECK-NEXT:    movprfx z2, z3
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z3.s
 ; CHECK-NEXT:    movprfx z3, z5
 ; CHECK-NEXT:    fcvtzs z3.d, p0/m, z5.s
 ; CHECK-NEXT:    ret
@@ -245,28 +292,40 @@ define <vscale x 16 x i64> @llrint_v16i64_v16f32(<vscale x 16 x float> %x) {
 ; CHECK-LABEL: llrint_v16i64_v16f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uunpklo z4.d, z0.s
-; CHECK-NEXT:    uunpkhi z5.d, z0.s
-; CHECK-NEXT:    uunpklo z6.d, z1.s
-; CHECK-NEXT:    uunpkhi z7.d, z1.s
-; CHECK-NEXT:    uunpklo z24.d, z2.s
-; CHECK-NEXT:    uunpkhi z25.d, z2.s
-; CHECK-NEXT:    uunpklo z26.d, z3.s
-; CHECK-NEXT:    uunpkhi z27.d, z3.s
+; CHECK-NEXT:    uunpkhi z0.d, z0.s
+; CHECK-NEXT:    uunpklo z5.d, z1.s
+; CHECK-NEXT:    uunpkhi z1.d, z1.s
+; CHECK-NEXT:    uunpklo z6.d, z2.s
+; CHECK-NEXT:    uunpkhi z2.d, z2.s
+; CHECK-NEXT:    uunpklo z7.d, z3.s
+; CHECK-NEXT:    uunpkhi z3.d, z3.s
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z4.s, p0/m, z4.s
+; CHECK-NEXT:    movprfx z24, z0
+; CHECK-NEXT:    frintx z24.s, p0/m, z0.s
+; CHECK-NEXT:    frintx z5.s, p0/m, z5.s
+; CHECK-NEXT:    movprfx z25, z1
+; CHECK-NEXT:    frintx z25.s, p0/m, z1.s
+; CHECK-NEXT:    frintx z6.s, p0/m, z6.s
+; CHECK-NEXT:    movprfx z26, z2
+; CHECK-NEXT:    frintx z26.s, p0/m, z2.s
+; CHECK-NEXT:    frintx z7.s, p0/m, z7.s
+; CHECK-NEXT:    movprfx z27, z3
+; CHECK-NEXT:    frintx z27.s, p0/m, z3.s
 ; CHECK-NEXT:    movprfx z0, z4
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z4.s
-; CHECK-NEXT:    movprfx z1, z5
-; CHECK-NEXT:    fcvtzs z1.d, p0/m, z5.s
-; CHECK-NEXT:    movprfx z2, z6
-; CHECK-NEXT:    fcvtzs z2.d, p0/m, z6.s
-; CHECK-NEXT:    movprfx z3, z7
-; CHECK-NEXT:    fcvtzs z3.d, p0/m, z7.s
-; CHECK-NEXT:    movprfx z4, z24
-; CHECK-NEXT:    fcvtzs z4.d, p0/m, z24.s
-; CHECK-NEXT:    movprfx z5, z25
-; CHECK-NEXT:    fcvtzs z5.d, p0/m, z25.s
-; CHECK-NEXT:    movprfx z6, z26
-; CHECK-NEXT:    fcvtzs z6.d, p0/m, z26.s
+; CHECK-NEXT:    movprfx z1, z24
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z24.s
+; CHECK-NEXT:    movprfx z2, z5
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z5.s
+; CHECK-NEXT:    movprfx z3, z25
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z25.s
+; CHECK-NEXT:    movprfx z4, z6
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z6.s
+; CHECK-NEXT:    movprfx z5, z26
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z26.s
+; CHECK-NEXT:    movprfx z6, z7
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z7.s
 ; CHECK-NEXT:    movprfx z7, z27
 ; CHECK-NEXT:    fcvtzs z7.d, p0/m, z27.s
 ; CHECK-NEXT:    ret
@@ -279,65 +338,83 @@ define <vscale x 32 x i64> @llrint_v32i64_v32f32(<vscale x 32 x float> %x) {
 ; CHECK-LABEL: llrint_v32i64_v32f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uunpkhi z24.d, z7.s
-; CHECK-NEXT:    uunpklo z7.d, z7.s
-; CHECK-NEXT:    rdvl x9, #15
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    uunpkhi z27.d, z6.s
-; CHECK-NEXT:    uunpklo z6.d, z6.s
-; CHECK-NEXT:    uunpkhi z30.d, z5.s
-; CHECK-NEXT:    uunpklo z5.d, z5.s
-; CHECK-NEXT:    uunpkhi z31.d, z4.s
+; CHECK-NEXT:    rdvl x9, #15
+; CHECK-NEXT:    uunpklo z7.d, z7.s
+; CHECK-NEXT:    uunpkhi z25.d, z6.s
 ; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    uunpklo z6.d, z6.s
+; CHECK-NEXT:    uunpkhi z27.d, z4.s
+; CHECK-NEXT:    uunpklo z4.d, z4.s
 ; CHECK-NEXT:    uunpklo z29.d, z3.s
 ; CHECK-NEXT:    uunpkhi z3.d, z3.s
-; CHECK-NEXT:    fcvtzs z24.d, p0/m, z24.s
-; CHECK-NEXT:    fcvtzs z7.d, p0/m, z7.s
-; CHECK-NEXT:    uunpklo z4.d, z4.s
-; CHECK-NEXT:    fcvtzs z27.d, p0/m, z27.s
-; CHECK-NEXT:    fcvtzs z6.d, p0/m, z6.s
-; CHECK-NEXT:    uunpkhi z25.d, z0.s
-; CHECK-NEXT:    fcvtzs z30.d, p0/m, z30.s
-; CHECK-NEXT:    fcvtzs z5.d, p0/m, z5.s
 ; CHECK-NEXT:    uunpklo z26.d, z1.s
-; CHECK-NEXT:    uunpkhi z1.d, z1.s
+; CHECK-NEXT:    frintx z24.s, p0/m, z24.s
 ; CHECK-NEXT:    uunpklo z28.d, z2.s
 ; CHECK-NEXT:    uunpkhi z2.d, z2.s
+; CHECK-NEXT:    frintx z7.s, p0/m, z7.s
+; CHECK-NEXT:    frintx z25.s, p0/m, z25.s
+; CHECK-NEXT:    uunpkhi z1.d, z1.s
+; CHECK-NEXT:    frintx z6.s, p0/m, z6.s
+; CHECK-NEXT:    frintx z27.s, p0/m, z27.s
+; CHECK-NEXT:    frintx z4.s, p0/m, z4.s
+; CHECK-NEXT:    frintx z3.s, p0/m, z3.s
+; CHECK-NEXT:    frintx z26.s, p0/m, z26.s
+; CHECK-NEXT:    fcvtzs z24.d, p0/m, z24.s
+; CHECK-NEXT:    frintx z2.s, p0/m, z2.s
+; CHECK-NEXT:    fcvtzs z7.d, p0/m, z7.s
+; CHECK-NEXT:    fcvtzs z25.d, p0/m, z25.s
+; CHECK-NEXT:    frintx z1.s, p0/m, z1.s
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z6.s
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z4.s
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.s
 ; CHECK-NEXT:    st1b { z24.b }, p1, [x8, x9]
 ; CHECK-NEXT:    rdvl x9, #14
-; CHECK-NEXT:    fcvtzs z4.d, p0/m, z4.s
+; CHECK-NEXT:    uunpkhi z24.d, z5.s
 ; CHECK-NEXT:    st1b { z7.b }, p1, [x8, x9]
 ; CHECK-NEXT:    rdvl x9, #13
-; CHECK-NEXT:    movprfx z7, z31
-; CHECK-NEXT:    fcvtzs z7.d, p0/m, z31.s
-; CHECK-NEXT:    st1b { z27.b }, p1, [x8, x9]
+; CHECK-NEXT:    uunpklo z5.d, z5.s
+; CHECK-NEXT:    st1b { z25.b }, p1, [x8, x9]
 ; CHECK-NEXT:    rdvl x9, #12
-; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.s
+; CHECK-NEXT:    movprfx z25, z27
+; CHECK-NEXT:    fcvtzs z25.d, p0/m, z27.s
 ; CHECK-NEXT:    st1b { z6.b }, p1, [x8, x9]
 ; CHECK-NEXT:    rdvl x9, #11
 ; CHECK-NEXT:    movprfx z6, z29
-; CHECK-NEXT:    fcvtzs z6.d, p0/m, z29.s
+; CHECK-NEXT:    frintx z6.s, p0/m, z29.s
+; CHECK-NEXT:    frintx z24.s, p0/m, z24.s
+; CHECK-NEXT:    uunpkhi z7.d, z0.s
 ; CHECK-NEXT:    uunpklo z0.d, z0.s
-; CHECK-NEXT:    st1b { z30.b }, p1, [x8, x9]
+; CHECK-NEXT:    frintx z5.s, p0/m, z5.s
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.s
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.s
+; CHECK-NEXT:    fcvtzs z24.d, p0/m, z24.s
+; CHECK-NEXT:    frintx z7.s, p0/m, z7.s
+; CHECK-NEXT:    frintx z0.s, p0/m, z0.s
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z5.s
+; CHECK-NEXT:    st1b { z24.b }, p1, [x8, x9]
+; CHECK-NEXT:    movprfx z24, z28
+; CHECK-NEXT:    frintx z24.s, p0/m, z28.s
 ; CHECK-NEXT:    rdvl x9, #10
 ; CHECK-NEXT:    st1b { z5.b }, p1, [x8, x9]
 ; CHECK-NEXT:    rdvl x9, #9
-; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.s
-; CHECK-NEXT:    st1b { z7.b }, p1, [x8, x9]
+; CHECK-NEXT:    movprfx z5, z6
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z6.s
+; CHECK-NEXT:    st1b { z25.b }, p1, [x8, x9]
 ; CHECK-NEXT:    rdvl x9, #8
-; CHECK-NEXT:    movprfx z5, z28
-; CHECK-NEXT:    fcvtzs z5.d, p0/m, z28.s
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.s
 ; CHECK-NEXT:    st1b { z4.b }, p1, [x8, x9]
-; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.s
-; CHECK-NEXT:    movprfx z4, z25
-; CHECK-NEXT:    fcvtzs z4.d, p0/m, z25.s
+; CHECK-NEXT:    movprfx z4, z7
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z7.s
+; CHECK-NEXT:    movprfx z6, z24
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z24.s
 ; CHECK-NEXT:    st1d { z3.d }, p0, [x8, #7, mul vl]
 ; CHECK-NEXT:    movprfx z3, z26
 ; CHECK-NEXT:    fcvtzs z3.d, p0/m, z26.s
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.s
-; CHECK-NEXT:    st1d { z6.d }, p0, [x8, #6, mul vl]
+; CHECK-NEXT:    st1d { z5.d }, p0, [x8, #6, mul vl]
 ; CHECK-NEXT:    st1d { z2.d }, p0, [x8, #5, mul vl]
-; CHECK-NEXT:    st1d { z5.d }, p0, [x8, #4, mul vl]
 ; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #3, mul vl]
+; CHECK-NEXT:    st1d { z6.d }, p0, [x8, #4, mul vl]
 ; CHECK-NEXT:    st1d { z3.d }, p0, [x8, #2, mul vl]
 ; CHECK-NEXT:    st1d { z4.d }, p0, [x8, #1, mul vl]
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x8]
@@ -351,6 +428,7 @@ define <vscale x 1 x i64> @llrint_v1i64_v1f64(<vscale x 1 x double> %x) {
 ; CHECK-LABEL: llrint_v1i64_v1f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
 ; CHECK-NEXT:    ret
   %a = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f64(<vscale x 1 x double> %x)
@@ -362,6 +440,7 @@ define <vscale x 2 x i64> @llrint_v2i64_v2f64(<vscale x 2 x double> %x) {
 ; CHECK-LABEL: llrint_v2i64_v2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f64(<vscale x 2 x double> %x)
@@ -373,6 +452,8 @@ define <vscale x 4 x i64> @llrint_v4i64_v4f64(<vscale x 4 x double> %x) {
 ; CHECK-LABEL: llrint_v4i64_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
+; CHECK-NEXT:    frintx z1.d, p0/m, z1.d
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.d
 ; CHECK-NEXT:    ret
@@ -385,6 +466,10 @@ define <vscale x 8 x i64> @llrint_v8i64_v8f64(<vscale x 8 x double> %x) {
 ; CHECK-LABEL: llrint_v8i64_v8f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
+; CHECK-NEXT:    frintx z1.d, p0/m, z1.d
+; CHECK-NEXT:    frintx z2.d, p0/m, z2.d
+; CHECK-NEXT:    frintx z3.d, p0/m, z3.d
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.d
 ; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.d
@@ -399,6 +484,14 @@ define <vscale x 16 x i64> @llrint_v16f64(<vscale x 16 x double> %x) {
 ; CHECK-LABEL: llrint_v16f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
+; CHECK-NEXT:    frintx z1.d, p0/m, z1.d
+; CHECK-NEXT:    frintx z2.d, p0/m, z2.d
+; CHECK-NEXT:    frintx z3.d, p0/m, z3.d
+; CHECK-NEXT:    frintx z4.d, p0/m, z4.d
+; CHECK-NEXT:    frintx z5.d, p0/m, z5.d
+; CHECK-NEXT:    frintx z6.d, p0/m, z6.d
+; CHECK-NEXT:    frintx z7.d, p0/m, z7.d
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.d
 ; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.d
@@ -417,74 +510,91 @@ define <vscale x 32 x i64> @llrint_v32f64(<vscale x 32 x double> %x) {
 ; CHECK-LABEL: llrint_v32f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    rdvl x9, #15
-; CHECK-NEXT:    rdvl x10, #14
+; CHECK-NEXT:    rdvl x14, #15
+; CHECK-NEXT:    rdvl x15, #14
 ; CHECK-NEXT:    ptrue p1.d
-; CHECK-NEXT:    rdvl x11, #13
+; CHECK-NEXT:    rdvl x13, #13
 ; CHECK-NEXT:    rdvl x12, #12
-; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, x9]
-; CHECK-NEXT:    rdvl x13, #11
-; CHECK-NEXT:    rdvl x14, #10
-; CHECK-NEXT:    ld1b { z1.b }, p0/z, [x0, x10]
-; CHECK-NEXT:    ld1b { z2.b }, p0/z, [x0, x11]
+; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, x14]
+; CHECK-NEXT:    ld1b { z1.b }, p0/z, [x0, x15]
+; CHECK-NEXT:    rdvl x10, #11
+; CHECK-NEXT:    rdvl x11, #10
+; CHECK-NEXT:    rdvl x9, #9
+; CHECK-NEXT:    ld1b { z2.b }, p0/z, [x0, x13]
 ; CHECK-NEXT:    ld1b { z3.b }, p0/z, [x0, x12]
-; CHECK-NEXT:    ld1b { z4.b }, p0/z, [x0, x13]
-; CHECK-NEXT:    ld1b { z5.b }, p0/z, [x0, x14]
-; CHECK-NEXT:    rdvl x15, #9
-; CHECK-NEXT:    fcvtzs z0.d, p1/m, z0.d
+; CHECK-NEXT:    ld1b { z4.b }, p0/z, [x0, x10]
+; CHECK-NEXT:    ld1b { z5.b }, p0/z, [x0, x11]
+; CHECK-NEXT:    frintx z0.d, p1/m, z0.d
+; CHECK-NEXT:    frintx z1.d, p1/m, z1.d
+; CHECK-NEXT:    ld1b { z6.b }, p0/z, [x0, x9]
 ; CHECK-NEXT:    rdvl x16, #8
-; CHECK-NEXT:    ld1b { z6.b }, p0/z, [x0, x15]
-; CHECK-NEXT:    ld1b { z7.b }, p0/z, [x0, x16]
+; CHECK-NEXT:    frintx z2.d, p1/m, z2.d
 ; CHECK-NEXT:    ld1d { z24.d }, p1/z, [x0, #7, mul vl]
+; CHECK-NEXT:    frintx z3.d, p1/m, z3.d
+; CHECK-NEXT:    frintx z4.d, p1/m, z4.d
+; CHECK-NEXT:    frintx z5.d, p1/m, z5.d
+; CHECK-NEXT:    frintx z6.d, p1/m, z6.d
+; CHECK-NEXT:    ld1b { z7.b }, p0/z, [x0, x16]
 ; CHECK-NEXT:    ld1d { z25.d }, p1/z, [x0, #6, mul vl]
-; CHECK-NEXT:    ld1d { z26.d }, p1/z, [x0, #5, mul vl]
+; CHECK-NEXT:    fcvtzs z0.d, p1/m, z0.d
 ; CHECK-NEXT:    fcvtzs z1.d, p1/m, z1.d
+; CHECK-NEXT:    ld1d { z26.d }, p1/z, [x0, #5, mul vl]
 ; CHECK-NEXT:    ld1d { z27.d }, p1/z, [x0, #4, mul vl]
 ; CHECK-NEXT:    ld1d { z28.d }, p1/z, [x0, #3, mul vl]
-; CHECK-NEXT:    fcvtzs z2.d, p1/m, z2.d
 ; CHECK-NEXT:    ld1d { z29.d }, p1/z, [x0, #2, mul vl]
 ; CHECK-NEXT:    ld1d { z30.d }, p1/z, [x0, #1, mul vl]
-; CHECK-NEXT:    fcvtzs z3.d, p1/m, z3.d
+; CHECK-NEXT:    fcvtzs z2.d, p1/m, z2.d
 ; CHECK-NEXT:    ld1d { z31.d }, p1/z, [x0]
+; CHECK-NEXT:    frintx z7.d, p1/m, z7.d
+; CHECK-NEXT:    fcvtzs z3.d, p1/m, z3.d
 ; CHECK-NEXT:    fcvtzs z4.d, p1/m, z4.d
-; CHECK-NEXT:    st1b { z0.b }, p0, [x8, x9]
+; CHECK-NEXT:    st1b { z0.b }, p0, [x8, x14]
 ; CHECK-NEXT:    movprfx z0, z5
 ; CHECK-NEXT:    fcvtzs z0.d, p1/m, z5.d
-; CHECK-NEXT:    st1b { z1.b }, p0, [x8, x10]
+; CHECK-NEXT:    frintx z24.d, p1/m, z24.d
+; CHECK-NEXT:    st1b { z1.b }, p0, [x8, x15]
 ; CHECK-NEXT:    movprfx z1, z6
 ; CHECK-NEXT:    fcvtzs z1.d, p1/m, z6.d
-; CHECK-NEXT:    st1b { z2.b }, p0, [x8, x11]
+; CHECK-NEXT:    movprfx z5, z25
+; CHECK-NEXT:    frintx z5.d, p1/m, z25.d
+; CHECK-NEXT:    movprfx z6, z26
+; CHECK-NEXT:    frintx z6.d, p1/m, z26.d
+; CHECK-NEXT:    st1b { z2.b }, p0, [x8, x13]
 ; CHECK-NEXT:    movprfx z2, z7
 ; CHECK-NEXT:    fcvtzs z2.d, p1/m, z7.d
+; CHECK-NEXT:    movprfx z7, z27
+; CHECK-NEXT:    frintx z7.d, p1/m, z27.d
 ; CHECK-NEXT:    st1b { z3.b }, p0, [x8, x12]
-; CHECK-NEXT:    movprfx z3, z24
-; CHECK-NEXT:    fcvtzs z3.d, p1/m, z24.d
-; CHECK-NEXT:    st1b { z4.b }, p0, [x8, x13]
-; CHECK-NEXT:    movprfx z4, z25
-; CHECK-NEXT:    fcvtzs z4.d, p1/m, z25.d
-; CHECK-NEXT:    st1b { z0.b }, p0, [x8, x14]
-; CHECK-NEXT:    movprfx z0, z26
-; CHECK-NEXT:    fcvtzs z0.d, p1/m, z26.d
-; CHECK-NEXT:    st1b { z1.b }, p0, [x8, x15]
-; CHECK-NEXT:    movprfx z1, z27
-; CHECK-NEXT:    fcvtzs z1.d, p1/m, z27.d
+; CHECK-NEXT:    movprfx z3, z28
+; CHECK-NEXT:    frintx z3.d, p1/m, z28.d
+; CHECK-NEXT:    st1b { z4.b }, p0, [x8, x10]
+; CHECK-NEXT:    movprfx z4, z29
+; CHECK-NEXT:    frintx z4.d, p1/m, z29.d
+; CHECK-NEXT:    st1b { z0.b }, p0, [x8, x11]
+; CHECK-NEXT:    movprfx z0, z30
+; CHECK-NEXT:    frintx z0.d, p1/m, z30.d
+; CHECK-NEXT:    fcvtzs z24.d, p1/m, z24.d
+; CHECK-NEXT:    st1b { z1.b }, p0, [x8, x9]
+; CHECK-NEXT:    movprfx z1, z31
+; CHECK-NEXT:    frintx z1.d, p1/m, z31.d
+; CHECK-NEXT:    fcvtzs z5.d, p1/m, z5.d
+; CHECK-NEXT:    fcvtzs z6.d, p1/m, z6.d
+; CHECK-NEXT:    fcvtzs z7.d, p1/m, z7.d
 ; CHECK-NEXT:    st1b { z2.b }, p0, [x8, x16]
-; CHECK-NEXT:    movprfx z2, z28
-; CHECK-NEXT:    fcvtzs z2.d, p1/m, z28.d
-; CHECK-NEXT:    st1d { z3.d }, p1, [x8, #7, mul vl]
-; CHECK-NEXT:    movprfx z3, z29
-; CHECK-NEXT:    fcvtzs z3.d, p1/m, z29.d
-; CHECK-NEXT:    st1d { z4.d }, p1, [x8, #6, mul vl]
-; CHECK-NEXT:    movprfx z4, z30
-; CHECK-NEXT:    fcvtzs z4.d, p1/m, z30.d
-; CHECK-NEXT:    st1d { z0.d }, p1, [x8, #5, mul vl]
-; CHECK-NEXT:    movprfx z0, z31
-; CHECK-NEXT:    fcvtzs z0.d, p1/m, z31.d
-; CHECK-NEXT:    st1d { z1.d }, p1, [x8, #4, mul vl]
+; CHECK-NEXT:    movprfx z2, z3
+; CHECK-NEXT:    fcvtzs z2.d, p1/m, z3.d
+; CHECK-NEXT:    movprfx z3, z4
+; CHECK-NEXT:    fcvtzs z3.d, p1/m, z4.d
+; CHECK-NEXT:    fcvtzs z0.d, p1/m, z0.d
+; CHECK-NEXT:    st1d { z24.d }, p1, [x8, #7, mul vl]
+; CHECK-NEXT:    fcvtzs z1.d, p1/m, z1.d
+; CHECK-NEXT:    st1d { z5.d }, p1, [x8, #6, mul vl]
+; CHECK-NEXT:    st1d { z6.d }, p1, [x8, #5, mul vl]
+; CHECK-NEXT:    st1d { z7.d }, p1, [x8, #4, mul vl]
 ; CHECK-NEXT:    st1d { z2.d }, p1, [x8, #3, mul vl]
 ; CHECK-NEXT:    st1d { z3.d }, p1, [x8, #2, mul vl]
-; CHECK-NEXT:    st1d { z4.d }, p1, [x8, #1, mul vl]
-; CHECK-NEXT:    st1d { z0.d }, p1, [x8]
+; CHECK-NEXT:    st1d { z0.d }, p1, [x8, #1, mul vl]
+; CHECK-NEXT:    st1d { z1.d }, p1, [x8]
 ; CHECK-NEXT:    ret
   %a = call <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv16f64(<vscale x 32 x double> %x)
   ret <vscale x 32 x i64> %a
diff --git a/llvm/test/CodeGen/AArch64/sve-lrint.ll b/llvm/test/CodeGen/AArch64/sve-lrint.ll
index 1e7bf2e280ce8..ce58e26ff8a75 100644
--- a/llvm/test/CodeGen/AArch64/sve-lrint.ll
+++ b/llvm/test/CodeGen/AArch64/sve-lrint.ll
@@ -5,6 +5,7 @@ define <vscale x 1 x i64> @lrint_v1f16(<vscale x 1 x half> %x) {
 ; CHECK-LABEL: lrint_v1f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z0.h, p0/m, z0.h
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.h
 ; CHECK-NEXT:    ret
   %a = call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f16(<vscale x 1 x half> %x)
@@ -16,6 +17,7 @@ define <vscale x 2 x i64> @lrint_v2f16(<vscale x 2 x half> %x) {
 ; CHECK-LABEL: lrint_v2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z0.h, p0/m, z0.h
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.h
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f16(<vscale x 2 x half> %x)
@@ -27,8 +29,11 @@ define <vscale x 4 x i64> @lrint_v4f16(<vscale x 4 x half> %x) {
 ; CHECK-LABEL: lrint_v4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uunpklo z1.d, z0.s
-; CHECK-NEXT:    uunpkhi z2.d, z0.s
+; CHECK-NEXT:    uunpkhi z0.d, z0.s
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z1.h, p0/m, z1.h
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    frintx z2.h, p0/m, z0.h
 ; CHECK-NEXT:    movprfx z0, z1
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z1.h
 ; CHECK-NEXT:    movprfx z1, z2
@@ -48,7 +53,12 @@ define <vscale x 8 x i64> @lrint_v8f16(<vscale x 8 x half> %x) {
 ; CHECK-NEXT:    uunpklo z2.d, z1.s
 ; CHECK-NEXT:    uunpkhi z1.d, z1.s
 ; CHECK-NEXT:    uunpklo z3.d, z0.s
-; CHECK-NEXT:    uunpkhi z4.d, z0.s
+; CHECK-NEXT:    uunpkhi z0.d, z0.s
+; CHECK-NEXT:    frintx z1.h, p0/m, z1.h
+; CHECK-NEXT:    frintx z2.h, p0/m, z2.h
+; CHECK-NEXT:    frintx z3.h, p0/m, z3.h
+; CHECK-NEXT:    movprfx z4, z0
+; CHECK-NEXT:    frintx z4.h, p0/m, z0.h
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.h
 ; CHECK-NEXT:    movprfx z0, z2
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z2.h
@@ -73,25 +83,36 @@ define <vscale x 16 x i64> @lrint_v16i64_v16f16(<vscale x 16 x half> %x) {
 ; CHECK-NEXT:    uunpklo z4.d, z2.s
 ; CHECK-NEXT:    uunpkhi z2.d, z2.s
 ; CHECK-NEXT:    uunpklo z5.d, z0.s
-; CHECK-NEXT:    uunpkhi z6.d, z0.s
-; CHECK-NEXT:    uunpklo z7.d, z3.s
-; CHECK-NEXT:    uunpkhi z24.d, z3.s
-; CHECK-NEXT:    uunpklo z25.d, z1.s
-; CHECK-NEXT:    uunpkhi z26.d, z1.s
+; CHECK-NEXT:    uunpkhi z0.d, z0.s
+; CHECK-NEXT:    uunpklo z6.d, z3.s
+; CHECK-NEXT:    uunpkhi z3.d, z3.s
+; CHECK-NEXT:    uunpklo z7.d, z1.s
+; CHECK-NEXT:    uunpkhi z1.d, z1.s
+; CHECK-NEXT:    frintx z4.h, p0/m, z4.h
+; CHECK-NEXT:    frintx z2.h, p0/m, z2.h
+; CHECK-NEXT:    frintx z5.h, p0/m, z5.h
+; CHECK-NEXT:    movprfx z24, z0
+; CHECK-NEXT:    frintx z24.h, p0/m, z0.h
+; CHECK-NEXT:    frintx z6.h, p0/m, z6.h
+; CHECK-NEXT:    movprfx z25, z3
+; CHECK-NEXT:    frintx z25.h, p0/m, z3.h
+; CHECK-NEXT:    frintx z7.h, p0/m, z7.h
+; CHECK-NEXT:    movprfx z26, z1
+; CHECK-NEXT:    frintx z26.h, p0/m, z1.h
 ; CHECK-NEXT:    movprfx z0, z4
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z4.h
 ; CHECK-NEXT:    movprfx z1, z2
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z2.h
 ; CHECK-NEXT:    movprfx z2, z5
 ; CHECK-NEXT:    fcvtzs z2.d, p0/m, z5.h
-; CHECK-NEXT:    movprfx z3, z6
-; CHECK-NEXT:    fcvtzs z3.d, p0/m, z6.h
-; CHECK-NEXT:    movprfx z4, z7
-; CHECK-NEXT:    fcvtzs z4.d, p0/m, z7.h
-; CHECK-NEXT:    movprfx z5, z24
-; CHECK-NEXT:    fcvtzs z5.d, p0/m, z24.h
-; CHECK-NEXT:    movprfx z6, z25
-; CHECK-NEXT:    fcvtzs z6.d, p0/m, z25.h
+; CHECK-NEXT:    movprfx z3, z24
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z24.h
+; CHECK-NEXT:    movprfx z4, z6
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z6.h
+; CHECK-NEXT:    movprfx z5, z25
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z25.h
+; CHECK-NEXT:    movprfx z6, z7
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z7.h
 ; CHECK-NEXT:    movprfx z7, z26
 ; CHECK-NEXT:    fcvtzs z7.d, p0/m, z26.h
 ; CHECK-NEXT:    ret
@@ -110,71 +131,86 @@ define <vscale x 32 x i64> @lrint_v32i64_v32f16(<vscale x 32 x half> %x) {
 ; CHECK-NEXT:    uunpkhi z7.s, z2.h
 ; CHECK-NEXT:    uunpklo z2.s, z2.h
 ; CHECK-NEXT:    ptrue p1.b
-; CHECK-NEXT:    uunpklo z24.s, z0.h
-; CHECK-NEXT:    uunpkhi z0.s, z0.h
 ; CHECK-NEXT:    uunpkhi z5.d, z4.s
 ; CHECK-NEXT:    uunpklo z4.d, z4.s
 ; CHECK-NEXT:    uunpkhi z6.d, z3.s
 ; CHECK-NEXT:    uunpklo z3.d, z3.s
-; CHECK-NEXT:    uunpkhi z25.d, z2.s
-; CHECK-NEXT:    uunpklo z2.d, z2.s
-; CHECK-NEXT:    uunpklo z26.d, z0.s
-; CHECK-NEXT:    uunpkhi z0.d, z0.s
+; CHECK-NEXT:    uunpkhi z24.d, z7.s
+; CHECK-NEXT:    uunpklo z7.d, z7.s
+; CHECK-NEXT:    frintx z5.h, p0/m, z5.h
+; CHECK-NEXT:    frintx z4.h, p0/m, z4.h
+; CHECK-NEXT:    frintx z6.h, p0/m, z6.h
+; CHECK-NEXT:    frintx z3.h, p0/m, z3.h
+; CHECK-NEXT:    frintx z24.h, p0/m, z24.h
+; CHECK-NEXT:    frintx z7.h, p0/m, z7.h
 ; CHECK-NEXT:    fcvtzs z5.d, p0/m, z5.h
 ; CHECK-NEXT:    fcvtzs z4.d, p0/m, z4.h
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z6.h
 ; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.h
-; CHECK-NEXT:    fcvtzs z25.d, p0/m, z25.h
-; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.h
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.h
+; CHECK-NEXT:    fcvtzs z7.d, p0/m, z7.h
 ; CHECK-NEXT:    st1b { z5.b }, p1, [x8, x9]
 ; CHECK-NEXT:    rdvl x9, #14
-; CHECK-NEXT:    movprfx z5, z6
-; CHECK-NEXT:    fcvtzs z5.d, p0/m, z6.h
-; CHECK-NEXT:    uunpkhi z6.d, z7.s
+; CHECK-NEXT:    uunpkhi z5.s, z1.h
 ; CHECK-NEXT:    st1b { z4.b }, p1, [x8, x9]
-; CHECK-NEXT:    uunpkhi z4.s, z1.h
-; CHECK-NEXT:    uunpklo z7.d, z7.s
+; CHECK-NEXT:    uunpkhi z4.d, z2.s
 ; CHECK-NEXT:    rdvl x9, #13
-; CHECK-NEXT:    uunpklo z1.s, z1.h
-; CHECK-NEXT:    st1b { z5.b }, p1, [x8, x9]
+; CHECK-NEXT:    st1b { z6.b }, p1, [x8, x9]
 ; CHECK-NEXT:    rdvl x9, #12
-; CHECK-NEXT:    movprfx z5, z6
-; CHECK-NEXT:    fcvtzs z5.d, p0/m, z6.h
+; CHECK-NEXT:    movprfx z6, z24
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z24.h
+; CHECK-NEXT:    uunpklo z1.s, z1.h
+; CHECK-NEXT:    uunpklo z2.d, z2.s
+; CHECK-NEXT:    uunpkhi z24.s, z0.h
 ; CHECK-NEXT:    st1b { z3.b }, p1, [x8, x9]
-; CHECK-NEXT:    uunpkhi z3.d, z4.s
-; CHECK-NEXT:    uunpklo z4.d, z4.s
-; CHECK-NEXT:    fcvtzs z7.d, p0/m, z7.h
+; CHECK-NEXT:    uunpklo z3.d, z5.s
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    frintx z4.h, p0/m, z4.h
 ; CHECK-NEXT:    rdvl x9, #11
-; CHECK-NEXT:    uunpkhi z6.d, z24.s
-; CHECK-NEXT:    uunpkhi z27.d, z1.s
-; CHECK-NEXT:    uunpklo z1.d, z1.s
-; CHECK-NEXT:    st1b { z5.b }, p1, [x8, x9]
+; CHECK-NEXT:    uunpkhi z25.d, z5.s
+; CHECK-NEXT:    st1b { z6.b }, p1, [x8, x9]
 ; CHECK-NEXT:    rdvl x9, #10
-; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.h
-; CHECK-NEXT:    st1b { z7.b }, p1, [x8, x9]
+; CHECK-NEXT:    uunpkhi z5.d, z1.s
+; CHECK-NEXT:    uunpklo z1.d, z1.s
+; CHECK-NEXT:    frintx z2.h, p0/m, z2.h
+; CHECK-NEXT:    uunpkhi z6.d, z24.s
+; CHECK-NEXT:    uunpklo z24.d, z24.s
+; CHECK-NEXT:    frintx z3.h, p0/m, z3.h
 ; CHECK-NEXT:    fcvtzs z4.d, p0/m, z4.h
-; CHECK-NEXT:    uunpklo z7.d, z24.s
+; CHECK-NEXT:    st1b { z7.b }, p1, [x8, x9]
+; CHECK-NEXT:    uunpkhi z7.d, z0.s
+; CHECK-NEXT:    uunpklo z0.d, z0.s
 ; CHECK-NEXT:    rdvl x9, #9
-; CHECK-NEXT:    movprfx z5, z27
-; CHECK-NEXT:    fcvtzs z5.d, p0/m, z27.h
-; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.h
-; CHECK-NEXT:    st1b { z25.b }, p1, [x8, x9]
+; CHECK-NEXT:    frintx z25.h, p0/m, z25.h
+; CHECK-NEXT:    frintx z5.h, p0/m, z5.h
+; CHECK-NEXT:    frintx z1.h, p0/m, z1.h
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.h
+; CHECK-NEXT:    frintx z6.h, p0/m, z6.h
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.h
+; CHECK-NEXT:    st1b { z4.b }, p1, [x8, x9]
+; CHECK-NEXT:    movprfx z4, z24
+; CHECK-NEXT:    frintx z4.h, p0/m, z24.h
+; CHECK-NEXT:    frintx z7.h, p0/m, z7.h
+; CHECK-NEXT:    frintx z0.h, p0/m, z0.h
 ; CHECK-NEXT:    rdvl x9, #8
+; CHECK-NEXT:    fcvtzs z25.d, p0/m, z25.h
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z5.h
 ; CHECK-NEXT:    st1b { z2.b }, p1, [x8, x9]
-; CHECK-NEXT:    movprfx z2, z26
-; CHECK-NEXT:    fcvtzs z2.d, p0/m, z26.h
-; CHECK-NEXT:    st1d { z3.d }, p0, [x8, #7, mul vl]
-; CHECK-NEXT:    movprfx z3, z6
-; CHECK-NEXT:    fcvtzs z3.d, p0/m, z6.h
-; CHECK-NEXT:    st1d { z4.d }, p0, [x8, #6, mul vl]
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.h
+; CHECK-NEXT:    movprfx z2, z6
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z6.h
+; CHECK-NEXT:    st1d { z3.d }, p0, [x8, #6, mul vl]
+; CHECK-NEXT:    movprfx z3, z4
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z4.h
 ; CHECK-NEXT:    movprfx z4, z7
 ; CHECK-NEXT:    fcvtzs z4.d, p0/m, z7.h
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.h
+; CHECK-NEXT:    st1d { z25.d }, p0, [x8, #7, mul vl]
 ; CHECK-NEXT:    st1d { z5.d }, p0, [x8, #5, mul vl]
 ; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #4, mul vl]
-; CHECK-NEXT:    st1d { z0.d }, p0, [x8, #3, mul vl]
-; CHECK-NEXT:    st1d { z2.d }, p0, [x8, #2, mul vl]
-; CHECK-NEXT:    st1d { z3.d }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    st1d { z4.d }, p0, [x8]
+; CHECK-NEXT:    st1d { z2.d }, p0, [x8, #3, mul vl]
+; CHECK-NEXT:    st1d { z3.d }, p0, [x8, #2, mul vl]
+; CHECK-NEXT:    st1d { z4.d }, p0, [x8, #1, mul vl]
+; CHECK-NEXT:    st1d { z0.d }, p0, [x8]
 ; CHECK-NEXT:    ret
   %a = call <vscale x 32 x i64> @llvm.lrint.nxv32i64.nxv32f16(<vscale x 32 x half> %x)
   ret <vscale x 32 x i64> %a
@@ -185,6 +221,7 @@ define <vscale x 1 x i64> @lrint_v1f32(<vscale x 1 x float> %x) {
 ; CHECK-LABEL: lrint_v1f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z0.s, p0/m, z0.s
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.s
 ; CHECK-NEXT:    ret
   %a = call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f32(<vscale x 1 x float> %x)
@@ -196,6 +233,7 @@ define <vscale x 2 x i64> @lrint_v2f32(<vscale x 2 x float> %x) {
 ; CHECK-LABEL: lrint_v2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z0.s, p0/m, z0.s
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.s
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f32(<vscale x 2 x float> %x)
@@ -207,8 +245,11 @@ define <vscale x 4 x i64> @lrint_v4f32(<vscale x 4 x float> %x) {
 ; CHECK-LABEL: lrint_v4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uunpklo z1.d, z0.s
-; CHECK-NEXT:    uunpkhi z2.d, z0.s
+; CHECK-NEXT:    uunpkhi z0.d, z0.s
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z1.s, p0/m, z1.s
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    frintx z2.s, p0/m, z0.s
 ; CHECK-NEXT:    movprfx z0, z1
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z1.s
 ; CHECK-NEXT:    movprfx z1, z2
@@ -223,16 +264,22 @@ define <vscale x 8 x i64> @lrint_v8f32(<vscale x 8 x float> %x) {
 ; CHECK-LABEL: lrint_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uunpklo z2.d, z0.s
-; CHECK-NEXT:    uunpkhi z3.d, z0.s
-; CHECK-NEXT:    uunpklo z4.d, z1.s
-; CHECK-NEXT:    uunpkhi z5.d, z1.s
+; CHECK-NEXT:    uunpkhi z0.d, z0.s
+; CHECK-NEXT:    uunpklo z3.d, z1.s
+; CHECK-NEXT:    uunpkhi z1.d, z1.s
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z2.s, p0/m, z2.s
+; CHECK-NEXT:    movprfx z4, z0
+; CHECK-NEXT:    frintx z4.s, p0/m, z0.s
+; CHECK-NEXT:    frintx z3.s, p0/m, z3.s
+; CHECK-NEXT:    movprfx z5, z1
+; CHECK-NEXT:    frintx z5.s, p0/m, z1.s
 ; CHECK-NEXT:    movprfx z0, z2
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z2.s
-; CHECK-NEXT:    movprfx z1, z3
-; CHECK-NEXT:    fcvtzs z1.d, p0/m, z3.s
-; CHECK-NEXT:    movprfx z2, z4
-; CHECK-NEXT:    fcvtzs z2.d, p0/m, z4.s
+; CHECK-NEXT:    movprfx z1, z4
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z4.s
+; CHECK-NEXT:    movprfx z2, z3
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z3.s
 ; CHECK-NEXT:    movprfx z3, z5
 ; CHECK-NEXT:    fcvtzs z3.d, p0/m, z5.s
 ; CHECK-NEXT:    ret
@@ -245,28 +292,40 @@ define <vscale x 16 x i64> @lrint_v16i64_v16f32(<vscale x 16 x float> %x) {
 ; CHECK-LABEL: lrint_v16i64_v16f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uunpklo z4.d, z0.s
-; CHECK-NEXT:    uunpkhi z5.d, z0.s
-; CHECK-NEXT:    uunpklo z6.d, z1.s
-; CHECK-NEXT:    uunpkhi z7.d, z1.s
-; CHECK-NEXT:    uunpklo z24.d, z2.s
-; CHECK-NEXT:    uunpkhi z25.d, z2.s
-; CHECK-NEXT:    uunpklo z26.d, z3.s
-; CHECK-NEXT:    uunpkhi z27.d, z3.s
+; CHECK-NEXT:    uunpkhi z0.d, z0.s
+; CHECK-NEXT:    uunpklo z5.d, z1.s
+; CHECK-NEXT:    uunpkhi z1.d, z1.s
+; CHECK-NEXT:    uunpklo z6.d, z2.s
+; CHECK-NEXT:    uunpkhi z2.d, z2.s
+; CHECK-NEXT:    uunpklo z7.d, z3.s
+; CHECK-NEXT:    uunpkhi z3.d, z3.s
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z4.s, p0/m, z4.s
+; CHECK-NEXT:    movprfx z24, z0
+; CHECK-NEXT:    frintx z24.s, p0/m, z0.s
+; CHECK-NEXT:    frintx z5.s, p0/m, z5.s
+; CHECK-NEXT:    movprfx z25, z1
+; CHECK-NEXT:    frintx z25.s, p0/m, z1.s
+; CHECK-NEXT:    frintx z6.s, p0/m, z6.s
+; CHECK-NEXT:    movprfx z26, z2
+; CHECK-NEXT:    frintx z26.s, p0/m, z2.s
+; CHECK-NEXT:    frintx z7.s, p0/m, z7.s
+; CHECK-NEXT:    movprfx z27, z3
+; CHECK-NEXT:    frintx z27.s, p0/m, z3.s
 ; CHECK-NEXT:    movprfx z0, z4
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z4.s
-; CHECK-NEXT:    movprfx z1, z5
-; CHECK-NEXT:    fcvtzs z1.d, p0/m, z5.s
-; CHECK-NEXT:    movprfx z2, z6
-; CHECK-NEXT:    fcvtzs z2.d, p0/m, z6.s
-; CHECK-NEXT:    movprfx z3, z7
-; CHECK-NEXT:    fcvtzs z3.d, p0/m, z7.s
-; CHECK-NEXT:    movprfx z4, z24
-; CHECK-NEXT:    fcvtzs z4.d, p0/m, z24.s
-; CHECK-NEXT:    movprfx z5, z25
-; CHECK-NEXT:    fcvtzs z5.d, p0/m, z25.s
-; CHECK-NEXT:    movprfx z6, z26
-; CHECK-NEXT:    fcvtzs z6.d, p0/m, z26.s
+; CHECK-NEXT:    movprfx z1, z24
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z24.s
+; CHECK-NEXT:    movprfx z2, z5
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z5.s
+; CHECK-NEXT:    movprfx z3, z25
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z25.s
+; CHECK-NEXT:    movprfx z4, z6
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z6.s
+; CHECK-NEXT:    movprfx z5, z26
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z26.s
+; CHECK-NEXT:    movprfx z6, z7
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z7.s
 ; CHECK-NEXT:    movprfx z7, z27
 ; CHECK-NEXT:    fcvtzs z7.d, p0/m, z27.s
 ; CHECK-NEXT:    ret
@@ -279,65 +338,83 @@ define <vscale x 32 x i64> @lrint_v32i64_v32f32(<vscale x 32 x float> %x) {
 ; CHECK-LABEL: lrint_v32i64_v32f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uunpkhi z24.d, z7.s
-; CHECK-NEXT:    uunpklo z7.d, z7.s
-; CHECK-NEXT:    rdvl x9, #15
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    uunpkhi z27.d, z6.s
-; CHECK-NEXT:    uunpklo z6.d, z6.s
-; CHECK-NEXT:    uunpkhi z30.d, z5.s
-; CHECK-NEXT:    uunpklo z5.d, z5.s
-; CHECK-NEXT:    uunpkhi z31.d, z4.s
+; CHECK-NEXT:    rdvl x9, #15
+; CHECK-NEXT:    uunpklo z7.d, z7.s
+; CHECK-NEXT:    uunpkhi z25.d, z6.s
 ; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    uunpklo z6.d, z6.s
+; CHECK-NEXT:    uunpkhi z27.d, z4.s
+; CHECK-NEXT:    uunpklo z4.d, z4.s
 ; CHECK-NEXT:    uunpklo z29.d, z3.s
 ; CHECK-NEXT:    uunpkhi z3.d, z3.s
-; CHECK-NEXT:    fcvtzs z24.d, p0/m, z24.s
-; CHECK-NEXT:    fcvtzs z7.d, p0/m, z7.s
-; CHECK-NEXT:    uunpklo z4.d, z4.s
-; CHECK-NEXT:    fcvtzs z27.d, p0/m, z27.s
-; CHECK-NEXT:    fcvtzs z6.d, p0/m, z6.s
-; CHECK-NEXT:    uunpkhi z25.d, z0.s
-; CHECK-NEXT:    fcvtzs z30.d, p0/m, z30.s
-; CHECK-NEXT:    fcvtzs z5.d, p0/m, z5.s
 ; CHECK-NEXT:    uunpklo z26.d, z1.s
-; CHECK-NEXT:    uunpkhi z1.d, z1.s
+; CHECK-NEXT:    frintx z24.s, p0/m, z24.s
 ; CHECK-NEXT:    uunpklo z28.d, z2.s
 ; CHECK-NEXT:    uunpkhi z2.d, z2.s
+; CHECK-NEXT:    frintx z7.s, p0/m, z7.s
+; CHECK-NEXT:    frintx z25.s, p0/m, z25.s
+; CHECK-NEXT:    uunpkhi z1.d, z1.s
+; CHECK-NEXT:    frintx z6.s, p0/m, z6.s
+; CHECK-NEXT:    frintx z27.s, p0/m, z27.s
+; CHECK-NEXT:    frintx z4.s, p0/m, z4.s
+; CHECK-NEXT:    frintx z3.s, p0/m, z3.s
+; CHECK-NEXT:    frintx z26.s, p0/m, z26.s
+; CHECK-NEXT:    fcvtzs z24.d, p0/m, z24.s
+; CHECK-NEXT:    frintx z2.s, p0/m, z2.s
+; CHECK-NEXT:    fcvtzs z7.d, p0/m, z7.s
+; CHECK-NEXT:    fcvtzs z25.d, p0/m, z25.s
+; CHECK-NEXT:    frintx z1.s, p0/m, z1.s
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z6.s
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z4.s
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.s
 ; CHECK-NEXT:    st1b { z24.b }, p1, [x8, x9]
 ; CHECK-NEXT:    rdvl x9, #14
-; CHECK-NEXT:    fcvtzs z4.d, p0/m, z4.s
+; CHECK-NEXT:    uunpkhi z24.d, z5.s
 ; CHECK-NEXT:    st1b { z7.b }, p1, [x8, x9]
 ; CHECK-NEXT:    rdvl x9, #13
-; CHECK-NEXT:    movprfx z7, z31
-; CHECK-NEXT:    fcvtzs z7.d, p0/m, z31.s
-; CHECK-NEXT:    st1b { z27.b }, p1, [x8, x9]
+; CHECK-NEXT:    uunpklo z5.d, z5.s
+; CHECK-NEXT:    st1b { z25.b }, p1, [x8, x9]
 ; CHECK-NEXT:    rdvl x9, #12
-; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.s
+; CHECK-NEXT:    movprfx z25, z27
+; CHECK-NEXT:    fcvtzs z25.d, p0/m, z27.s
 ; CHECK-NEXT:    st1b { z6.b }, p1, [x8, x9]
 ; CHECK-NEXT:    rdvl x9, #11
 ; CHECK-NEXT:    movprfx z6, z29
-; CHECK-NEXT:    fcvtzs z6.d, p0/m, z29.s
+; CHECK-NEXT:    frintx z6.s, p0/m, z29.s
+; CHECK-NEXT:    frintx z24.s, p0/m, z24.s
+; CHECK-NEXT:    uunpkhi z7.d, z0.s
 ; CHECK-NEXT:    uunpklo z0.d, z0.s
-; CHECK-NEXT:    st1b { z30.b }, p1, [x8, x9]
+; CHECK-NEXT:    frintx z5.s, p0/m, z5.s
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.s
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.s
+; CHECK-NEXT:    fcvtzs z24.d, p0/m, z24.s
+; CHECK-NEXT:    frintx z7.s, p0/m, z7.s
+; CHECK-NEXT:    frintx z0.s, p0/m, z0.s
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z5.s
+; CHECK-NEXT:    st1b { z24.b }, p1, [x8, x9]
+; CHECK-NEXT:    movprfx z24, z28
+; CHECK-NEXT:    frintx z24.s, p0/m, z28.s
 ; CHECK-NEXT:    rdvl x9, #10
 ; CHECK-NEXT:    st1b { z5.b }, p1, [x8, x9]
 ; CHECK-NEXT:    rdvl x9, #9
-; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.s
-; CHECK-NEXT:    st1b { z7.b }, p1, [x8, x9]
+; CHECK-NEXT:    movprfx z5, z6
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z6.s
+; CHECK-NEXT:    st1b { z25.b }, p1, [x8, x9]
 ; CHECK-NEXT:    rdvl x9, #8
-; CHECK-NEXT:    movprfx z5, z28
-; CHECK-NEXT:    fcvtzs z5.d, p0/m, z28.s
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.s
 ; CHECK-NEXT:    st1b { z4.b }, p1, [x8, x9]
-; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.s
-; CHECK-NEXT:    movprfx z4, z25
-; CHECK-NEXT:    fcvtzs z4.d, p0/m, z25.s
+; CHECK-NEXT:    movprfx z4, z7
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z7.s
+; CHECK-NEXT:    movprfx z6, z24
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z24.s
 ; CHECK-NEXT:    st1d { z3.d }, p0, [x8, #7, mul vl]
 ; CHECK-NEXT:    movprfx z3, z26
 ; CHECK-NEXT:    fcvtzs z3.d, p0/m, z26.s
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.s
-; CHECK-NEXT:    st1d { z6.d }, p0, [x8, #6, mul vl]
+; CHECK-NEXT:    st1d { z5.d }, p0, [x8, #6, mul vl]
 ; CHECK-NEXT:    st1d { z2.d }, p0, [x8, #5, mul vl]
-; CHECK-NEXT:    st1d { z5.d }, p0, [x8, #4, mul vl]
 ; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #3, mul vl]
+; CHECK-NEXT:    st1d { z6.d }, p0, [x8, #4, mul vl]
 ; CHECK-NEXT:    st1d { z3.d }, p0, [x8, #2, mul vl]
 ; CHECK-NEXT:    st1d { z4.d }, p0, [x8, #1, mul vl]
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x8]
@@ -351,6 +428,7 @@ define <vscale x 1 x i64> @lrint_v1f64(<vscale x 1 x double> %x) {
 ; CHECK-LABEL: lrint_v1f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
 ; CHECK-NEXT:    ret
   %a = call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f64(<vscale x 1 x double> %x)
@@ -362,6 +440,7 @@ define <vscale x 2 x i64> @lrint_v2f64(<vscale x 2 x double> %x) {
 ; CHECK-LABEL: lrint_v2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f64(<vscale x 2 x double> %x)
@@ -373,6 +452,8 @@ define <vscale x 4 x i64> @lrint_v4f64(<vscale x 4 x double> %x) {
 ; CHECK-LABEL: lrint_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
+; CHECK-NEXT:    frintx z1.d, p0/m, z1.d
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.d
 ; CHECK-NEXT:    ret
@@ -385,6 +466,10 @@ define <vscale x 8 x i64> @lrint_v8f64(<vscale x 8 x double> %x) {
 ; CHECK-LABEL: lrint_v8f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
+; CHECK-NEXT:    frintx z1.d, p0/m, z1.d
+; CHECK-NEXT:    frintx z2.d, p0/m, z2.d
+; CHECK-NEXT:    frintx z3.d, p0/m, z3.d
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.d
 ; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.d
@@ -399,6 +484,14 @@ define <vscale x 16 x i64> @lrint_v16f64(<vscale x 16 x double> %x) {
 ; CHECK-LABEL: lrint_v16f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
+; CHECK-NEXT:    frintx z1.d, p0/m, z1.d
+; CHECK-NEXT:    frintx z2.d, p0/m, z2.d
+; CHECK-NEXT:    frintx z3.d, p0/m, z3.d
+; CHECK-NEXT:    frintx z4.d, p0/m, z4.d
+; CHECK-NEXT:    frintx z5.d, p0/m, z5.d
+; CHECK-NEXT:    frintx z6.d, p0/m, z6.d
+; CHECK-NEXT:    frintx z7.d, p0/m, z7.d
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.d
 ; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.d
@@ -417,74 +510,91 @@ define <vscale x 32 x i64> @lrint_v32f64(<vscale x 32 x double> %x) {
 ; CHECK-LABEL: lrint_v32f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    rdvl x9, #15
-; CHECK-NEXT:    rdvl x10, #14
+; CHECK-NEXT:    rdvl x14, #15
+; CHECK-NEXT:    rdvl x15, #14
 ; CHECK-NEXT:    ptrue p1.d
-; CHECK-NEXT:    rdvl x11, #13
+; CHECK-NEXT:    rdvl x13, #13
 ; CHECK-NEXT:    rdvl x12, #12
-; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, x9]
-; CHECK-NEXT:    rdvl x13, #11
-; CHECK-NEXT:    rdvl x14, #10
-; CHECK-NEXT:    ld1b { z1.b }, p0/z, [x0, x10]
-; CHECK-NEXT:    ld1b { z2.b }, p0/z, [x0, x11]
+; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, x14]
+; CHECK-NEXT:    ld1b { z1.b }, p0/z, [x0, x15]
+; CHECK-NEXT:    rdvl x10, #11
+; CHECK-NEXT:    rdvl x11, #10
+; CHECK-NEXT:    rdvl x9, #9
+; CHECK-NEXT:    ld1b { z2.b }, p0/z, [x0, x13]
 ; CHECK-NEXT:    ld1b { z3.b }, p0/z, [x0, x12]
-; CHECK-NEXT:    ld1b { z4.b }, p0/z, [x0, x13]
-; CHECK-NEXT:    ld1b { z5.b }, p0/z, [x0, x14]
-; CHECK-NEXT:    rdvl x15, #9
-; CHECK-NEXT:    fcvtzs z0.d, p1/m, z0.d
+; CHECK-NEXT:    ld1b { z4.b }, p0/z, [x0, x10]
+; CHECK-NEXT:    ld1b { z5.b }, p0/z, [x0, x11]
+; CHECK-NEXT:    frintx z0.d, p1/m, z0.d
+; CHECK-NEXT:    frintx z1.d, p1/m, z1.d
+; CHECK-NEXT:    ld1b { z6.b }, p0/z, [x0, x9]
 ; CHECK-NEXT:    rdvl x16, #8
-; CHECK-NEXT:    ld1b { z6.b }, p0/z, [x0, x15]
-; CHECK-NEXT:    ld1b { z7.b }, p0/z, [x0, x16]
+; CHECK-NEXT:    frintx z2.d, p1/m, z2.d
 ; CHECK-NEXT:    ld1d { z24.d }, p1/z, [x0, #7, mul vl]
+; CHECK-NEXT:    frintx z3.d, p1/m, z3.d
+; CHECK-NEXT:    frintx z4.d, p1/m, z4.d
+; CHECK-NEXT:    frintx z5.d, p1/m, z5.d
+; CHECK-NEXT:    frintx z6.d, p1/m, z6.d
+; CHECK-NEXT:    ld1b { z7.b }, p0/z, [x0, x16]
 ; CHECK-NEXT:    ld1d { z25.d }, p1/z, [x0, #6, mul vl]
-; CHECK-NEXT:    ld1d { z26.d }, p1/z, [x0, #5, mul vl]
+; CHECK-NEXT:    fcvtzs z0.d, p1/m, z0.d
 ; CHECK-NEXT:    fcvtzs z1.d, p1/m, z1.d
+; CHECK-NEXT:    ld1d { z26.d }, p1/z, [x0, #5, mul vl]
 ; CHECK-NEXT:    ld1d { z27.d }, p1/z, [x0, #4, mul vl]
 ; CHECK-NEXT:    ld1d { z28.d }, p1/z, [x0, #3, mul vl]
-; CHECK-NEXT:    fcvtzs z2.d, p1/m, z2.d
 ; CHECK-NEXT:    ld1d { z29.d }, p1/z, [x0, #2, mul vl]
 ; CHECK-NEXT:    ld1d { z30.d }, p1/z, [x0, #1, mul vl]
-; CHECK-NEXT:    fcvtzs z3.d, p1/m, z3.d
+; CHECK-NEXT:    fcvtzs z2.d, p1/m, z2.d
 ; CHECK-NEXT:    ld1d { z31.d }, p1/z, [x0]
+; CHECK-NEXT:    frintx z7.d, p1/m, z7.d
+; CHECK-NEXT:    fcvtzs z3.d, p1/m, z3.d
 ; CHECK-NEXT:    fcvtzs z4.d, p1/m, z4.d
-; CHECK-NEXT:    st1b { z0.b }, p0, [x8, x9]
+; CHECK-NEXT:    st1b { z0.b }, p0, [x8, x14]
 ; CHECK-NEXT:    movprfx z0, z5
 ; CHECK-NEXT:    fcvtzs z0.d, p1/m, z5.d
-; CHECK-NEXT:    st1b { z1.b }, p0, [x8, x10]
+; CHECK-NEXT:    frintx z24.d, p1/m, z24.d
+; CHECK-NEXT:    st1b { z1.b }, p0, [x8, x15]
 ; CHECK-NEXT:    movprfx z1, z6
 ; CHECK-NEXT:    fcvtzs z1.d, p1/m, z6.d
-; CHECK-NEXT:    st1b { z2.b }, p0, [x8, x11]
+; CHECK-NEXT:    movprfx z5, z25
+; CHECK-NEXT:    frintx z5.d, p1/m, z25.d
+; CHECK-NEXT:    movprfx z6, z26
+; CHECK-NEXT:    frintx z6.d, p1/m, z26.d
+; CHECK-NEXT:    st1b { z2.b }, p0, [x8, x13]
 ; CHECK-NEXT:    movprfx z2, z7
 ; CHECK-NEXT:    fcvtzs z2.d, p1/m, z7.d
+; CHECK-NEXT:    movprfx z7, z27
+; CHECK-NEXT:    frintx z7.d, p1/m, z27.d
 ; CHECK-NEXT:    st1b { z3.b }, p0, [x8, x12]
-; CHECK-NEXT:    movprfx z3, z24
-; CHECK-NEXT:    fcvtzs z3.d, p1/m, z24.d
-; CHECK-NEXT:    st1b { z4.b }, p0, [x8, x13]
-; CHECK-NEXT:    movprfx z4, z25
-; CHECK-NEXT:    fcvtzs z4.d, p1/m, z25.d
-; CHECK-NEXT:    st1b { z0.b }, p0, [x8, x14]
-; CHECK-NEXT:    movprfx z0, z26
-; CHECK-NEXT:    fcvtzs z0.d, p1/m, z26.d
-; CHECK-NEXT:    st1b { z1.b }, p0, [x8, x15]
-; CHECK-NEXT:    movprfx z1, z27
-; CHECK-NEXT:    fcvtzs z1.d, p1/m, z27.d
+; CHECK-NEXT:    movprfx z3, z28
+; CHECK-NEXT:    frintx z3.d, p1/m, z28.d
+; CHECK-NEXT:    st1b { z4.b }, p0, [x8, x10]
+; CHECK-NEXT:    movprfx z4, z29
+; CHECK-NEXT:    frintx z4.d, p1/m, z29.d
+; CHECK-NEXT:    st1b { z0.b }, p0, [x8, x11]
+; CHECK-NEXT:    movprfx z0, z30
+; CHECK-NEXT:    frintx z0.d, p1/m, z30.d
+; CHECK-NEXT:    fcvtzs z24.d, p1/m, z24.d
+; CHECK-NEXT:    st1b { z1.b }, p0, [x8, x9]
+; CHECK-NEXT:    movprfx z1, z31
+; CHECK-NEXT:    frintx z1.d, p1/m, z31.d
+; CHECK-NEXT:    fcvtzs z5.d, p1/m, z5.d
+; CHECK-NEXT:    fcvtzs z6.d, p1/m, z6.d
+; CHECK-NEXT:    fcvtzs z7.d, p1/m, z7.d
 ; CHECK-NEXT:    st1b { z2.b }, p0, [x8, x16]
-; CHECK-NEXT:    movprfx z2, z28
-; CHECK-NEXT:    fcvtzs z2.d, p1/m, z28.d
-; CHECK-NEXT:    st1d { z3.d }, p1, [x8, #7, mul vl]
-; CHECK-NEXT:    movprfx z3, z29
-; CHECK-NEXT:    fcvtzs z3.d, p1/m, z29.d
-; CHECK-NEXT:    st1d { z4.d }, p1, [x8, #6, mul vl]
-; CHECK-NEXT:    movprfx z4, z30
-; CHECK-NEXT:    fcvtzs z4.d, p1/m, z30.d
-; CHECK-NEXT:    st1d { z0.d }, p1, [x8, #5, mul vl]
-; CHECK-NEXT:    movprfx z0, z31
-; CHECK-NEXT:    fcvtzs z0.d, p1/m, z31.d
-; CHECK-NEXT:    st1d { z1.d }, p1, [x8, #4, mul vl]
+; CHECK-NEXT:    movprfx z2, z3
+; CHECK-NEXT:    fcvtzs z2.d, p1/m, z3.d
+; CHECK-NEXT:    movprfx z3, z4
+; CHECK-NEXT:    fcvtzs z3.d, p1/m, z4.d
+; CHECK-NEXT:    fcvtzs z0.d, p1/m, z0.d
+; CHECK-NEXT:    st1d { z24.d }, p1, [x8, #7, mul vl]
+; CHECK-NEXT:    fcvtzs z1.d, p1/m, z1.d
+; CHECK-NEXT:    st1d { z5.d }, p1, [x8, #6, mul vl]
+; CHECK-NEXT:    st1d { z6.d }, p1, [x8, #5, mul vl]
+; CHECK-NEXT:    st1d { z7.d }, p1, [x8, #4, mul vl]
 ; CHECK-NEXT:    st1d { z2.d }, p1, [x8, #3, mul vl]
 ; CHECK-NEXT:    st1d { z3.d }, p1, [x8, #2, mul vl]
-; CHECK-NEXT:    st1d { z4.d }, p1, [x8, #1, mul vl]
-; CHECK-NEXT:    st1d { z0.d }, p1, [x8]
+; CHECK-NEXT:    st1d { z0.d }, p1, [x8, #1, mul vl]
+; CHECK-NEXT:    st1d { z1.d }, p1, [x8]
 ; CHECK-NEXT:    ret
   %a = call <vscale x 32 x i64> @llvm.lrint.nxv32i64.nxv16f64(<vscale x 32 x double> %x)
   ret <vscale x 32 x i64> %a
diff --git a/llvm/test/CodeGen/AArch64/vector-llrint.ll b/llvm/test/CodeGen/AArch64/vector-llrint.ll
index d4d3fbb0e96b5..480d0c19db3aa 100644
--- a/llvm/test/CodeGen/AArch64/vector-llrint.ll
+++ b/llvm/test/CodeGen/AArch64/vector-llrint.ll
@@ -17,12 +17,12 @@ declare <1 x i64> @llvm.llrint.v1i64.v1f16(<1 x half>)
 define <2 x i64> @llrint_v1i64_v2f16(<2 x half> %x) {
 ; CHECK-LABEL: llrint_v1i64_v2f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NEXT:    frintx v0.4s, v0.4s
+; CHECK-NEXT:    fcvtn v0.4h, v0.4s
 ; CHECK-NEXT:    mov h1, v0.h[1]
 ; CHECK-NEXT:    fcvt s0, h0
 ; CHECK-NEXT:    fcvt s1, h1
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    frintx s1, s1
 ; CHECK-NEXT:    fcvtzs x8, s0
 ; CHECK-NEXT:    fcvtzs x9, s1
 ; CHECK-NEXT:    fmov d0, x8
@@ -37,22 +37,24 @@ define <4 x i64> @llrint_v4i64_v4f16(<4 x half> %x) {
 ; CHECK-LABEL: llrint_v4i64_v4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    mov h1, v0.h[2]
+; CHECK-NEXT:    dup v1.2s, v0.s[1]
+; CHECK-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NEXT:    frintx v0.4s, v0.4s
+; CHECK-NEXT:    frintx v1.4s, v1.4s
+; CHECK-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NEXT:    fcvtn v1.4h, v1.4s
 ; CHECK-NEXT:    mov h2, v0.h[1]
-; CHECK-NEXT:    mov h3, v0.h[3]
 ; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    mov h3, v1.h[1]
 ; CHECK-NEXT:    fcvt s1, h1
 ; CHECK-NEXT:    fcvt s2, h2
-; CHECK-NEXT:    fcvt s3, h3
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    frintx s2, s2
-; CHECK-NEXT:    frintx s3, s3
 ; CHECK-NEXT:    fcvtzs x8, s0
+; CHECK-NEXT:    fcvt s3, h3
 ; CHECK-NEXT:    fcvtzs x9, s1
 ; CHECK-NEXT:    fcvtzs x10, s2
-; CHECK-NEXT:    fcvtzs x11, s3
 ; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fcvtzs x11, s3
 ; CHECK-NEXT:    fmov d1, x9
 ; CHECK-NEXT:    mov v0.d[1], x10
 ; CHECK-NEXT:    mov v1.d[1], x11
@@ -65,45 +67,48 @@ declare <4 x i64> @llvm.llrint.v4i64.v4f16(<4 x half>)
 define <8 x i64> @llrint_v8i64_v8f16(<8 x half> %x) {
 ; CHECK-LABEL: llrint_v8i64_v8f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    mov h4, v0.h[2]
-; CHECK-NEXT:    mov h3, v0.h[1]
-; CHECK-NEXT:    mov h7, v0.h[3]
+; CHECK-NEXT:    dup v1.2s, v0.s[1]
+; CHECK-NEXT:    dup v2.2s, v0.s[3]
+; CHECK-NEXT:    fcvtl v3.4s, v0.4h
+; CHECK-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NEXT:    fcvtl v2.4s, v2.4h
+; CHECK-NEXT:    frintx v3.4s, v3.4s
+; CHECK-NEXT:    frintx v0.4s, v0.4s
+; CHECK-NEXT:    frintx v1.4s, v1.4s
+; CHECK-NEXT:    frintx v2.4s, v2.4s
+; CHECK-NEXT:    fcvtn v3.4h, v3.4s
+; CHECK-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NEXT:    fcvtn v2.4h, v2.4s
+; CHECK-NEXT:    mov h4, v3.h[1]
+; CHECK-NEXT:    mov h5, v0.h[1]
 ; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    mov h2, v1.h[2]
-; CHECK-NEXT:    mov h5, v1.h[1]
-; CHECK-NEXT:    mov h6, v1.h[3]
-; CHECK-NEXT:    fcvt s1, h1
-; CHECK-NEXT:    fcvt s4, h4
 ; CHECK-NEXT:    fcvt s3, h3
-; CHECK-NEXT:    fcvt s7, h7
-; CHECK-NEXT:    frintx s0, s0
+; CHECK-NEXT:    mov h6, v1.h[1]
+; CHECK-NEXT:    mov h7, v2.h[1]
+; CHECK-NEXT:    fcvt s1, h1
 ; CHECK-NEXT:    fcvt s2, h2
+; CHECK-NEXT:    fcvt s4, h4
 ; CHECK-NEXT:    fcvt s5, h5
+; CHECK-NEXT:    fcvtzs x8, s0
+; CHECK-NEXT:    fcvtzs x9, s3
 ; CHECK-NEXT:    fcvt s6, h6
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    frintx s4, s4
-; CHECK-NEXT:    frintx s3, s3
-; CHECK-NEXT:    frintx s7, s7
-; CHECK-NEXT:    fcvtzs x9, s0
-; CHECK-NEXT:    frintx s2, s2
-; CHECK-NEXT:    frintx s5, s5
-; CHECK-NEXT:    frintx s6, s6
-; CHECK-NEXT:    fcvtzs x8, s1
-; CHECK-NEXT:    fcvtzs x12, s4
-; CHECK-NEXT:    fcvtzs x11, s3
-; CHECK-NEXT:    fcvtzs x15, s7
-; CHECK-NEXT:    fmov d0, x9
-; CHECK-NEXT:    fcvtzs x10, s2
+; CHECK-NEXT:    fcvt s7, h7
+; CHECK-NEXT:    fcvtzs x11, s1
+; CHECK-NEXT:    fcvtzs x12, s2
+; CHECK-NEXT:    fcvtzs x10, s4
 ; CHECK-NEXT:    fcvtzs x13, s5
-; CHECK-NEXT:    fcvtzs x14, s6
 ; CHECK-NEXT:    fmov d2, x8
-; CHECK-NEXT:    fmov d1, x12
-; CHECK-NEXT:    mov v0.d[1], x11
-; CHECK-NEXT:    fmov d3, x10
+; CHECK-NEXT:    fmov d0, x9
+; CHECK-NEXT:    fcvtzs x14, s6
+; CHECK-NEXT:    fcvtzs x15, s7
+; CHECK-NEXT:    fmov d1, x11
+; CHECK-NEXT:    fmov d3, x12
+; CHECK-NEXT:    mov v0.d[1], x10
 ; CHECK-NEXT:    mov v2.d[1], x13
-; CHECK-NEXT:    mov v1.d[1], x15
-; CHECK-NEXT:    mov v3.d[1], x14
+; CHECK-NEXT:    mov v1.d[1], x14
+; CHECK-NEXT:    mov v3.d[1], x15
 ; CHECK-NEXT:    ret
   %a = call <8 x i64> @llvm.llrint.v8i64.v8f16(<8 x half> %x)
   ret <8 x i64> %a
@@ -113,84 +118,90 @@ declare <8 x i64> @llvm.llrint.v8i64.v8f16(<8 x half>)
 define <16 x i64> @llrint_v16i64_v16f16(<16 x half> %x) {
 ; CHECK-LABEL: llrint_v16i64_v16f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    mov h17, v0.h[1]
-; CHECK-NEXT:    mov h19, v0.h[2]
-; CHECK-NEXT:    fcvt s18, h0
-; CHECK-NEXT:    mov h0, v0.h[3]
-; CHECK-NEXT:    mov h4, v2.h[1]
-; CHECK-NEXT:    mov h5, v2.h[2]
-; CHECK-NEXT:    fcvt s7, h3
-; CHECK-NEXT:    fcvt s6, h2
-; CHECK-NEXT:    mov h16, v3.h[2]
-; CHECK-NEXT:    mov h2, v2.h[3]
-; CHECK-NEXT:    fcvt s17, h17
-; CHECK-NEXT:    fcvt s19, h19
-; CHECK-NEXT:    frintx s18, s18
-; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    fcvtl2 v4.4s, v0.8h
+; CHECK-NEXT:    fcvtl2 v2.4s, v1.8h
+; CHECK-NEXT:    fcvtl v3.4s, v0.4h
+; CHECK-NEXT:    dup v5.2s, v0.s[1]
+; CHECK-NEXT:    dup v0.2s, v0.s[3]
+; CHECK-NEXT:    dup v6.2s, v1.s[1]
+; CHECK-NEXT:    dup v7.2s, v1.s[3]
+; CHECK-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NEXT:    frintx v4.4s, v4.4s
+; CHECK-NEXT:    frintx v2.4s, v2.4s
+; CHECK-NEXT:    frintx v3.4s, v3.4s
+; CHECK-NEXT:    fcvtl v5.4s, v5.4h
+; CHECK-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NEXT:    fcvtl v6.4s, v6.4h
+; CHECK-NEXT:    fcvtl v7.4s, v7.4h
+; CHECK-NEXT:    frintx v1.4s, v1.4s
+; CHECK-NEXT:    fcvtn v4.4h, v4.4s
+; CHECK-NEXT:    fcvtn v2.4h, v2.4s
+; CHECK-NEXT:    fcvtn v3.4h, v3.4s
+; CHECK-NEXT:    frintx v5.4s, v5.4s
+; CHECK-NEXT:    frintx v0.4s, v0.4s
+; CHECK-NEXT:    frintx v6.4s, v6.4s
+; CHECK-NEXT:    frintx v7.4s, v7.4s
+; CHECK-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NEXT:    mov h16, v4.h[1]
 ; CHECK-NEXT:    fcvt s4, h4
-; CHECK-NEXT:    fcvt s5, h5
-; CHECK-NEXT:    frintx s7, s7
-; CHECK-NEXT:    frintx s6, s6
+; CHECK-NEXT:    fcvt s17, h2
+; CHECK-NEXT:    mov h18, v3.h[1]
+; CHECK-NEXT:    fcvtn v5.4h, v5.4s
+; CHECK-NEXT:    fcvt s3, h3
+; CHECK-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NEXT:    fcvtn v6.4h, v6.4s
+; CHECK-NEXT:    fcvtn v7.4h, v7.4s
+; CHECK-NEXT:    mov h2, v2.h[1]
 ; CHECK-NEXT:    fcvt s16, h16
-; CHECK-NEXT:    fcvt s2, h2
-; CHECK-NEXT:    frintx s17, s17
-; CHECK-NEXT:    frintx s19, s19
-; CHECK-NEXT:    fcvtzs x13, s18
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    frintx s4, s4
-; CHECK-NEXT:    frintx s5, s5
-; CHECK-NEXT:    fcvtzs x9, s7
-; CHECK-NEXT:    mov h7, v1.h[2]
-; CHECK-NEXT:    fcvtzs x8, s6
-; CHECK-NEXT:    mov h6, v1.h[1]
-; CHECK-NEXT:    frintx s16, s16
-; CHECK-NEXT:    fcvtzs x14, s17
-; CHECK-NEXT:    fcvtzs x15, s19
-; CHECK-NEXT:    fcvtzs x10, s4
-; CHECK-NEXT:    mov h4, v3.h[1]
-; CHECK-NEXT:    fcvtzs x11, s5
-; CHECK-NEXT:    mov h5, v1.h[3]
-; CHECK-NEXT:    mov h3, v3.h[3]
+; CHECK-NEXT:    fcvtzs x8, s4
+; CHECK-NEXT:    fcvtzs x9, s17
+; CHECK-NEXT:    fcvt s4, h18
+; CHECK-NEXT:    fcvt s17, h5
+; CHECK-NEXT:    fcvtzs x10, s3
+; CHECK-NEXT:    mov h3, v5.h[1]
+; CHECK-NEXT:    fcvt s5, h0
+; CHECK-NEXT:    mov h0, v0.h[1]
+; CHECK-NEXT:    mov h18, v6.h[1]
+; CHECK-NEXT:    mov h19, v7.h[1]
+; CHECK-NEXT:    fcvtzs x11, s16
+; CHECK-NEXT:    mov h16, v1.h[1]
 ; CHECK-NEXT:    fcvt s1, h1
-; CHECK-NEXT:    fcvt s7, h7
-; CHECK-NEXT:    fcvt s6, h6
-; CHECK-NEXT:    fcvtzs x12, s16
-; CHECK-NEXT:    frintx s16, s2
-; CHECK-NEXT:    fmov d2, x8
-; CHECK-NEXT:    fcvt s4, h4
+; CHECK-NEXT:    fcvtzs x12, s4
+; CHECK-NEXT:    fcvt s4, h6
+; CHECK-NEXT:    fcvtzs x13, s17
+; CHECK-NEXT:    fcvtzs x14, s5
+; CHECK-NEXT:    fcvt s5, h7
 ; CHECK-NEXT:    fcvt s3, h3
-; CHECK-NEXT:    fcvt s5, h5
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    frintx s7, s7
-; CHECK-NEXT:    frintx s17, s6
+; CHECK-NEXT:    fcvt s7, h2
+; CHECK-NEXT:    fcvt s17, h0
+; CHECK-NEXT:    fcvt s18, h18
+; CHECK-NEXT:    fcvt s16, h16
+; CHECK-NEXT:    fcvt s19, h19
+; CHECK-NEXT:    fcvtzs x15, s1
+; CHECK-NEXT:    fmov d2, x8
+; CHECK-NEXT:    fcvtzs x8, s4
+; CHECK-NEXT:    fmov d0, x10
+; CHECK-NEXT:    fcvtzs x10, s5
 ; CHECK-NEXT:    fmov d6, x9
-; CHECK-NEXT:    mov v2.d[1], x10
-; CHECK-NEXT:    frintx s4, s4
-; CHECK-NEXT:    frintx s18, s3
-; CHECK-NEXT:    frintx s5, s5
-; CHECK-NEXT:    fcvtzs x8, s1
-; CHECK-NEXT:    fcvtzs x9, s7
-; CHECK-NEXT:    fmov d3, x11
-; CHECK-NEXT:    fcvtzs x11, s0
-; CHECK-NEXT:    fmov d7, x12
-; CHECK-NEXT:    fcvtzs x12, s16
-; CHECK-NEXT:    fcvtzs x16, s17
-; CHECK-NEXT:    fcvtzs x17, s4
-; CHECK-NEXT:    fmov d0, x13
-; CHECK-NEXT:    fmov d1, x15
+; CHECK-NEXT:    fcvtzs x9, s3
+; CHECK-NEXT:    fmov d1, x13
+; CHECK-NEXT:    fcvtzs x13, s17
+; CHECK-NEXT:    fcvtzs x17, s7
+; CHECK-NEXT:    fcvtzs x16, s16
 ; CHECK-NEXT:    fcvtzs x18, s18
-; CHECK-NEXT:    fcvtzs x0, s5
-; CHECK-NEXT:    fmov d4, x8
-; CHECK-NEXT:    fmov d5, x9
-; CHECK-NEXT:    mov v0.d[1], x14
-; CHECK-NEXT:    mov v1.d[1], x11
-; CHECK-NEXT:    mov v3.d[1], x12
-; CHECK-NEXT:    mov v4.d[1], x16
+; CHECK-NEXT:    fcvtzs x0, s19
+; CHECK-NEXT:    fmov d3, x14
+; CHECK-NEXT:    fmov d4, x15
+; CHECK-NEXT:    fmov d5, x8
+; CHECK-NEXT:    fmov d7, x10
+; CHECK-NEXT:    mov v0.d[1], x12
+; CHECK-NEXT:    mov v1.d[1], x9
+; CHECK-NEXT:    mov v2.d[1], x11
 ; CHECK-NEXT:    mov v6.d[1], x17
-; CHECK-NEXT:    mov v7.d[1], x18
-; CHECK-NEXT:    mov v5.d[1], x0
+; CHECK-NEXT:    mov v3.d[1], x13
+; CHECK-NEXT:    mov v4.d[1], x16
+; CHECK-NEXT:    mov v5.d[1], x18
+; CHECK-NEXT:    mov v7.d[1], x0
 ; CHECK-NEXT:    ret
   %a = call <16 x i64> @llvm.llrint.v16i64.v16f16(<16 x half> %x)
   ret <16 x i64> %a
@@ -200,170 +211,182 @@ declare <16 x i64> @llvm.llrint.v16i64.v16f16(<16 x half>)
 define <32 x i64> @llrint_v32i64_v32f16(<32 x half> %x) {
 ; CHECK-LABEL: llrint_v32i64_v32f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v4.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ext v5.16b, v2.16b, v2.16b, #8
-; CHECK-NEXT:    ext v6.16b, v3.16b, v3.16b, #8
-; CHECK-NEXT:    ext v7.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    mov h19, v0.h[1]
-; CHECK-NEXT:    fcvt s21, h0
-; CHECK-NEXT:    mov h23, v1.h[2]
-; CHECK-NEXT:    fcvt s22, h1
-; CHECK-NEXT:    fcvt s26, h2
-; CHECK-NEXT:    mov h27, v2.h[1]
-; CHECK-NEXT:    mov h28, v2.h[2]
-; CHECK-NEXT:    mov h16, v4.h[2]
-; CHECK-NEXT:    fcvt s17, h5
-; CHECK-NEXT:    mov h18, v5.h[2]
-; CHECK-NEXT:    mov h20, v6.h[2]
-; CHECK-NEXT:    fcvt s24, h7
-; CHECK-NEXT:    fcvt s25, h6
-; CHECK-NEXT:    fcvt s19, h19
-; CHECK-NEXT:    frintx s22, s22
-; CHECK-NEXT:    fcvt s16, h16
-; CHECK-NEXT:    frintx s17, s17
-; CHECK-NEXT:    fcvt s18, h18
-; CHECK-NEXT:    fcvt s20, h20
-; CHECK-NEXT:    frintx s16, s16
-; CHECK-NEXT:    fcvtzs x12, s17
-; CHECK-NEXT:    frintx s17, s18
-; CHECK-NEXT:    frintx s18, s21
-; CHECK-NEXT:    fcvt s21, h23
-; CHECK-NEXT:    frintx s23, s24
-; CHECK-NEXT:    frintx s24, s25
-; CHECK-NEXT:    frintx s25, s19
-; CHECK-NEXT:    mov h19, v7.h[1]
-; CHECK-NEXT:    fcvtzs x13, s16
-; CHECK-NEXT:    frintx s16, s20
-; CHECK-NEXT:    frintx s20, s26
-; CHECK-NEXT:    fcvtzs x9, s23
-; CHECK-NEXT:    mov h23, v3.h[2]
-; CHECK-NEXT:    fcvt s26, h27
-; CHECK-NEXT:    fcvtzs x15, s24
-; CHECK-NEXT:    fcvtzs x10, s25
-; CHECK-NEXT:    fcvt s24, h28
-; CHECK-NEXT:    mov h25, v3.h[3]
-; CHECK-NEXT:    fcvtzs x14, s17
-; CHECK-NEXT:    frintx s21, s21
-; CHECK-NEXT:    fmov d17, x12
-; CHECK-NEXT:    fcvtzs x12, s16
-; CHECK-NEXT:    fmov d16, x13
+; CHECK-NEXT:    dup v4.2s, v1.s[1]
+; CHECK-NEXT:    fcvtl v5.4s, v0.4h
+; CHECK-NEXT:    dup v6.2s, v1.s[3]
+; CHECK-NEXT:    fcvtl v7.4s, v1.4h
+; CHECK-NEXT:    dup v16.2s, v2.s[3]
+; CHECK-NEXT:    fcvtl v17.4s, v2.4h
+; CHECK-NEXT:    dup v19.2s, v2.s[1]
+; CHECK-NEXT:    dup v18.2s, v0.s[1]
+; CHECK-NEXT:    dup v21.2s, v3.s[1]
+; CHECK-NEXT:    dup v24.2s, v3.s[3]
+; CHECK-NEXT:    fcvtl2 v1.4s, v1.8h
+; CHECK-NEXT:    fcvtl2 v2.4s, v2.8h
+; CHECK-NEXT:    fcvtl v4.4s, v4.4h
+; CHECK-NEXT:    frintx v5.4s, v5.4s
+; CHECK-NEXT:    fcvtl v6.4s, v6.4h
+; CHECK-NEXT:    frintx v7.4s, v7.4s
+; CHECK-NEXT:    fcvtl v16.4s, v16.4h
+; CHECK-NEXT:    frintx v22.4s, v17.4s
+; CHECK-NEXT:    fcvtl v19.4s, v19.4h
+; CHECK-NEXT:    dup v17.2s, v0.s[3]
+; CHECK-NEXT:    fcvtl v21.4s, v21.4h
+; CHECK-NEXT:    fcvtl v24.4s, v24.4h
+; CHECK-NEXT:    frintx v1.4s, v1.4s
+; CHECK-NEXT:    frintx v2.4s, v2.4s
+; CHECK-NEXT:    frintx v20.4s, v4.4s
+; CHECK-NEXT:    fcvtn v4.4h, v5.4s
+; CHECK-NEXT:    frintx v23.4s, v6.4s
+; CHECK-NEXT:    fcvtn v5.4h, v7.4s
+; CHECK-NEXT:    frintx v25.4s, v16.4s
+; CHECK-NEXT:    fcvtn v16.4h, v22.4s
+; CHECK-NEXT:    frintx v26.4s, v19.4s
+; CHECK-NEXT:    fcvtn v6.4h, v20.4s
+; CHECK-NEXT:    fcvtl v20.4s, v3.4h
+; CHECK-NEXT:    fcvt s22, h4
+; CHECK-NEXT:    fcvtn v7.4h, v23.4s
+; CHECK-NEXT:    fcvtl2 v23.4s, v3.8h
+; CHECK-NEXT:    fcvtl v3.4s, v18.4h
+; CHECK-NEXT:    fcvtn v25.4h, v25.4s
+; CHECK-NEXT:    fcvt s27, h5
+; CHECK-NEXT:    fcvtl v18.4s, v17.4h
+; CHECK-NEXT:    frintx v17.4s, v21.4s
+; CHECK-NEXT:    fcvt s29, h16
+; CHECK-NEXT:    mov h16, v16.h[1]
+; CHECK-NEXT:    frintx v20.4s, v20.4s
+; CHECK-NEXT:    fcvtzs x9, s22
+; CHECK-NEXT:    fcvt s28, h6
+; CHECK-NEXT:    fcvt s22, h7
+; CHECK-NEXT:    frintx v19.4s, v3.4s
+; CHECK-NEXT:    fcvtn v3.4h, v26.4s
+; CHECK-NEXT:    mov h21, v25.h[1]
+; CHECK-NEXT:    frintx v23.4s, v23.4s
+; CHECK-NEXT:    fcvtzs x10, s27
+; CHECK-NEXT:    fcvtl2 v26.4s, v0.8h
+; CHECK-NEXT:    fcvt s25, h25
+; CHECK-NEXT:    fcvtn v17.4h, v17.4s
+; CHECK-NEXT:    fcvtn v20.4h, v20.4s
+; CHECK-NEXT:    fcvtzs x12, s28
+; CHECK-NEXT:    fcvtzs x14, s29
 ; CHECK-NEXT:    fcvtzs x13, s22
-; CHECK-NEXT:    fcvt s22, h3
-; CHECK-NEXT:    mov h3, v3.h[1]
-; CHECK-NEXT:    mov h27, v0.h[2]
-; CHECK-NEXT:    mov h28, v2.h[3]
+; CHECK-NEXT:    frintx v22.4s, v24.4s
+; CHECK-NEXT:    fcvt s24, h3
+; CHECK-NEXT:    fcvt s21, h21
+; CHECK-NEXT:    fcvtn v23.4h, v23.4s
+; CHECK-NEXT:    fmov d0, x10
+; CHECK-NEXT:    fcvtzs x15, s25
+; CHECK-NEXT:    mov h25, v17.h[1]
+; CHECK-NEXT:    fcvt s17, h17
+; CHECK-NEXT:    mov h27, v20.h[1]
+; CHECK-NEXT:    fcvt s20, h20
+; CHECK-NEXT:    fcvtn v28.4h, v2.4s
+; CHECK-NEXT:    fcvtn v22.4h, v22.4s
+; CHECK-NEXT:    fcvtzs x10, s24
+; CHECK-NEXT:    frintx v24.4s, v26.4s
+; CHECK-NEXT:    fcvtzs x11, s21
+; CHECK-NEXT:    mov h26, v23.h[1]
 ; CHECK-NEXT:    fcvt s23, h23
-; CHECK-NEXT:    frintx s26, s26
+; CHECK-NEXT:    fcvt s25, h25
+; CHECK-NEXT:    fmov d2, x13
+; CHECK-NEXT:    fcvtzs x13, s17
+; CHECK-NEXT:    fcvt s21, h27
 ; CHECK-NEXT:    fcvtzs x16, s20
-; CHECK-NEXT:    frintx s20, s24
-; CHECK-NEXT:    fcvt s24, h25
-; CHECK-NEXT:    fcvtzs x11, s18
-; CHECK-NEXT:    fmov d18, x14
+; CHECK-NEXT:    fcvtn v27.4h, v1.4s
+; CHECK-NEXT:    mov h20, v22.h[1]
+; CHECK-NEXT:    fcvt s22, h22
+; CHECK-NEXT:    fcvtn v24.4h, v24.4s
+; CHECK-NEXT:    fmov d1, x12
+; CHECK-NEXT:    fcvtzs x0, s23
+; CHECK-NEXT:    fmov d17, x14
+; CHECK-NEXT:    fcvtzs x18, s25
+; CHECK-NEXT:    mov h25, v28.h[1]
+; CHECK-NEXT:    fcvt s23, h28
+; CHECK-NEXT:    fcvtzs x12, s21
+; CHECK-NEXT:    fcvt s21, h26
+; CHECK-NEXT:    fcvt s26, h27
+; CHECK-NEXT:    fcvt s20, h20
+; CHECK-NEXT:    fcvtzs x17, s22
+; CHECK-NEXT:    fcvt s22, h24
+; CHECK-NEXT:    frintx v18.4s, v18.4s
+; CHECK-NEXT:    mov h3, v3.h[1]
+; CHECK-NEXT:    mov h7, v7.h[1]
+; CHECK-NEXT:    fcvt s25, h25
+; CHECK-NEXT:    fcvtn v19.4h, v19.4s
+; CHECK-NEXT:    fcvt s16, h16
 ; CHECK-NEXT:    fcvtzs x14, s21
-; CHECK-NEXT:    frintx s22, s22
-; CHECK-NEXT:    fcvt s3, h3
-; CHECK-NEXT:    fcvt s25, h27
-; CHECK-NEXT:    fcvt s27, h28
-; CHECK-NEXT:    frintx s23, s23
-; CHECK-NEXT:    mov h21, v1.h[3]
-; CHECK-NEXT:    fmov d2, x15
-; CHECK-NEXT:    fcvtzs x15, s26
-; CHECK-NEXT:    fmov d26, x13
-; CHECK-NEXT:    mov h1, v1.h[1]
-; CHECK-NEXT:    fcvtzs x13, s20
-; CHECK-NEXT:    frintx s20, s24
-; CHECK-NEXT:    fmov d24, x14
-; CHECK-NEXT:    fcvtzs x14, s22
-; CHECK-NEXT:    frintx s3, s3
-; CHECK-NEXT:    fmov d22, x16
-; CHECK-NEXT:    frintx s27, s27
-; CHECK-NEXT:    fcvtzs x16, s23
-; CHECK-NEXT:    fcvt s21, h21
-; CHECK-NEXT:    frintx s25, s25
-; CHECK-NEXT:    fcvt s1, h1
-; CHECK-NEXT:    mov h0, v0.h[3]
-; CHECK-NEXT:    mov h23, v7.h[2]
-; CHECK-NEXT:    mov v22.d[1], x15
+; CHECK-NEXT:    fmov d21, x15
+; CHECK-NEXT:    mov h5, v5.h[1]
 ; CHECK-NEXT:    fcvtzs x15, s20
-; CHECK-NEXT:    fmov d20, x13
-; CHECK-NEXT:    fcvtzs x13, s3
-; CHECK-NEXT:    fmov d3, x14
-; CHECK-NEXT:    fcvtzs x14, s27
-; CHECK-NEXT:    fmov d27, x16
-; CHECK-NEXT:    frintx s21, s21
-; CHECK-NEXT:    mov h7, v7.h[3]
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    fcvt s23, h23
-; CHECK-NEXT:    fcvt s19, h19
-; CHECK-NEXT:    mov v27.d[1], x15
-; CHECK-NEXT:    fcvtzs x15, s25
-; CHECK-NEXT:    mov h25, v6.h[3]
+; CHECK-NEXT:    fmov d20, x16
+; CHECK-NEXT:    fcvtzs x16, s22
+; CHECK-NEXT:    fmov d22, x17
+; CHECK-NEXT:    fcvtzs x17, s26
+; CHECK-NEXT:    fmov d26, x0
+; CHECK-NEXT:    fcvtn v18.4h, v18.4s
 ; CHECK-NEXT:    mov h6, v6.h[1]
-; CHECK-NEXT:    mov v3.d[1], x13
-; CHECK-NEXT:    fcvtzs x13, s21
-; CHECK-NEXT:    mov h21, v5.h[1]
-; CHECK-NEXT:    mov h5, v5.h[3]
-; CHECK-NEXT:    mov v20.d[1], x14
-; CHECK-NEXT:    fcvtzs x14, s1
-; CHECK-NEXT:    mov h1, v4.h[1]
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    fcvt s25, h25
+; CHECK-NEXT:    fcvt s3, h3
+; CHECK-NEXT:    mov v20.d[1], x12
+; CHECK-NEXT:    fcvtzs x12, s25
 ; CHECK-NEXT:    fcvt s7, h7
-; CHECK-NEXT:    stp q3, q27, [x8, #192]
+; CHECK-NEXT:    mov v26.d[1], x14
+; CHECK-NEXT:    mov v22.d[1], x15
+; CHECK-NEXT:    fcvtzs x14, s23
+; CHECK-NEXT:    fmov d23, x13
+; CHECK-NEXT:    mov v21.d[1], x11
+; CHECK-NEXT:    mov h4, v4.h[1]
+; CHECK-NEXT:    mov h25, v19.h[1]
 ; CHECK-NEXT:    fcvt s6, h6
-; CHECK-NEXT:    mov h3, v4.h[3]
-; CHECK-NEXT:    stp q22, q20, [x8, #128]
-; CHECK-NEXT:    fcvt s21, h21
+; CHECK-NEXT:    fcvtzs x11, s3
 ; CHECK-NEXT:    fcvt s5, h5
-; CHECK-NEXT:    mov v24.d[1], x13
-; CHECK-NEXT:    mov v26.d[1], x14
+; CHECK-NEXT:    fcvt s19, h19
+; CHECK-NEXT:    fcvtzs x13, s7
+; CHECK-NEXT:    stp q26, q22, [x8, #224]
+; CHECK-NEXT:    mov v23.d[1], x18
+; CHECK-NEXT:    mov h26, v27.h[1]
+; CHECK-NEXT:    fmov d22, x14
 ; CHECK-NEXT:    fcvt s4, h4
-; CHECK-NEXT:    frintx s22, s25
-; CHECK-NEXT:    fmov d20, x12
-; CHECK-NEXT:    fcvt s1, h1
-; CHECK-NEXT:    frintx s6, s6
-; CHECK-NEXT:    fcvt s3, h3
-; CHECK-NEXT:    fcvtzs x12, s0
-; CHECK-NEXT:    frintx s5, s5
-; CHECK-NEXT:    frintx s21, s21
-; CHECK-NEXT:    fmov d0, x11
-; CHECK-NEXT:    stp q26, q24, [x8, #64]
-; CHECK-NEXT:    fmov d24, x15
-; CHECK-NEXT:    frintx s4, s4
-; CHECK-NEXT:    fcvtzs x11, s22
-; CHECK-NEXT:    frintx s22, s23
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    fcvtzs x13, s6
-; CHECK-NEXT:    frintx s3, s3
-; CHECK-NEXT:    frintx s6, s7
-; CHECK-NEXT:    fcvtzs x14, s5
-; CHECK-NEXT:    mov v24.d[1], x12
-; CHECK-NEXT:    frintx s5, s19
-; CHECK-NEXT:    fcvtzs x12, s21
-; CHECK-NEXT:    mov v0.d[1], x10
-; CHECK-NEXT:    fcvtzs x10, s4
-; CHECK-NEXT:    mov v20.d[1], x11
-; CHECK-NEXT:    fcvtzs x11, s22
-; CHECK-NEXT:    mov v2.d[1], x13
-; CHECK-NEXT:    fcvtzs x15, s3
-; CHECK-NEXT:    fcvtzs x13, s1
-; CHECK-NEXT:    mov v18.d[1], x14
+; CHECK-NEXT:    fmov d3, x16
+; CHECK-NEXT:    fcvt s7, h25
 ; CHECK-NEXT:    fcvtzs x14, s6
-; CHECK-NEXT:    stp q0, q24, [x8]
-; CHECK-NEXT:    mov v17.d[1], x12
-; CHECK-NEXT:    fcvtzs x12, s5
-; CHECK-NEXT:    fmov d0, x10
-; CHECK-NEXT:    fmov d1, x11
-; CHECK-NEXT:    stp q2, q20, [x8, #224]
-; CHECK-NEXT:    fmov d2, x9
-; CHECK-NEXT:    mov v16.d[1], x15
-; CHECK-NEXT:    stp q17, q18, [x8, #160]
-; CHECK-NEXT:    mov v0.d[1], x13
+; CHECK-NEXT:    mov v2.d[1], x13
+; CHECK-NEXT:    stp q20, q23, [x8, #192]
+; CHECK-NEXT:    fcvt s23, h26
+; CHECK-NEXT:    mov v22.d[1], x12
+; CHECK-NEXT:    fmov d20, x10
+; CHECK-NEXT:    fcvtzs x10, s16
+; CHECK-NEXT:    mov h16, v24.h[1]
+; CHECK-NEXT:    mov h24, v18.h[1]
+; CHECK-NEXT:    fcvt s18, h18
 ; CHECK-NEXT:    mov v1.d[1], x14
-; CHECK-NEXT:    mov v2.d[1], x12
-; CHECK-NEXT:    stp q0, q16, [x8, #96]
-; CHECK-NEXT:    stp q2, q1, [x8, #32]
+; CHECK-NEXT:    fcvtzs x14, s7
+; CHECK-NEXT:    stp q22, q21, [x8, #160]
+; CHECK-NEXT:    fcvtzs x12, s23
+; CHECK-NEXT:    fmov d21, x17
+; CHECK-NEXT:    fcvt s16, h16
+; CHECK-NEXT:    mov v20.d[1], x11
+; CHECK-NEXT:    fcvtzs x11, s5
+; CHECK-NEXT:    fcvt s22, h24
+; CHECK-NEXT:    mov v17.d[1], x10
+; CHECK-NEXT:    fcvtzs x10, s18
+; CHECK-NEXT:    mov v21.d[1], x12
+; CHECK-NEXT:    fcvtzs x12, s19
+; CHECK-NEXT:    fcvtzs x15, s16
+; CHECK-NEXT:    mov v0.d[1], x11
+; CHECK-NEXT:    fcvtzs x11, s4
+; CHECK-NEXT:    stp q17, q20, [x8, #128]
+; CHECK-NEXT:    fcvtzs x13, s22
+; CHECK-NEXT:    fmov d4, x10
+; CHECK-NEXT:    stp q21, q2, [x8, #96]
+; CHECK-NEXT:    fmov d5, x12
+; CHECK-NEXT:    fmov d2, x9
+; CHECK-NEXT:    stp q0, q1, [x8, #64]
+; CHECK-NEXT:    mov v3.d[1], x15
+; CHECK-NEXT:    mov v4.d[1], x13
+; CHECK-NEXT:    mov v5.d[1], x14
+; CHECK-NEXT:    mov v2.d[1], x11
+; CHECK-NEXT:    stp q3, q4, [x8, #32]
+; CHECK-NEXT:    stp q2, q5, [x8]
 ; CHECK-NEXT:    ret
   %a = call <32 x i64> @llvm.llrint.v32i64.v32f16(<32 x half> %x)
   ret <32 x i64> %a
@@ -373,10 +396,10 @@ declare <32 x i64> @llvm.llrint.v32i64.v32f16(<32 x half>)
 define <1 x i64> @llrint_v1i64_v1f32(<1 x float> %x) {
 ; CHECK-LABEL: llrint_v1i64_v1f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    fcvtzs x8, s0
-; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    frintx v0.2s, v0.2s
+; CHECK-NEXT:    fcvtl v0.2d, v0.2s
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
 ; CHECK-NEXT:    ret
   %a = call <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float> %x)
   ret <1 x i64> %a
@@ -386,14 +409,9 @@ declare <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float>)
 define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) {
 ; CHECK-LABEL: llrint_v2i64_v2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    mov s1, v0.s[1]
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    fcvtzs x8, s0
-; CHECK-NEXT:    fcvtzs x9, s1
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    mov v0.d[1], x9
+; CHECK-NEXT:    frintx v0.2s, v0.2s
+; CHECK-NEXT:    fcvtl v0.2d, v0.2s
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
 ; CHECK-NEXT:    ret
   %a = call <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float> %x)
   ret <2 x i64> %a
@@ -404,20 +422,12 @@ define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
 ; CHECK-LABEL: llrint_v4i64_v4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    mov s3, v0.s[1]
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    mov s2, v1.s[1]
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    frintx s3, s3
-; CHECK-NEXT:    fcvtzs x9, s0
-; CHECK-NEXT:    frintx s2, s2
-; CHECK-NEXT:    fcvtzs x8, s1
-; CHECK-NEXT:    fcvtzs x11, s3
-; CHECK-NEXT:    fmov d0, x9
-; CHECK-NEXT:    fcvtzs x10, s2
-; CHECK-NEXT:    fmov d1, x8
-; CHECK-NEXT:    mov v0.d[1], x11
-; CHECK-NEXT:    mov v1.d[1], x10
+; CHECK-NEXT:    frintx v0.2s, v0.2s
+; CHECK-NEXT:    frintx v1.2s, v1.2s
+; CHECK-NEXT:    fcvtl v0.2d, v0.2s
+; CHECK-NEXT:    fcvtl v1.2d, v1.2s
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v1.2d, v1.2d
 ; CHECK-NEXT:    ret
   %a = call <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float> %x)
   ret <4 x i64> %a
@@ -429,34 +439,18 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
 ; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    mov s4, v0.s[1]
-; CHECK-NEXT:    mov s7, v1.s[1]
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    mov s5, v2.s[1]
-; CHECK-NEXT:    mov s6, v3.s[1]
-; CHECK-NEXT:    frintx s2, s2
-; CHECK-NEXT:    frintx s3, s3
-; CHECK-NEXT:    frintx s4, s4
-; CHECK-NEXT:    frintx s7, s7
-; CHECK-NEXT:    fcvtzs x9, s0
-; CHECK-NEXT:    fcvtzs x12, s1
-; CHECK-NEXT:    frintx s5, s5
-; CHECK-NEXT:    frintx s6, s6
-; CHECK-NEXT:    fcvtzs x8, s2
-; CHECK-NEXT:    fcvtzs x10, s3
-; CHECK-NEXT:    fcvtzs x11, s4
-; CHECK-NEXT:    fcvtzs x15, s7
-; CHECK-NEXT:    fmov d0, x9
-; CHECK-NEXT:    fmov d2, x12
-; CHECK-NEXT:    fcvtzs x13, s5
-; CHECK-NEXT:    fcvtzs x14, s6
-; CHECK-NEXT:    fmov d1, x8
-; CHECK-NEXT:    fmov d3, x10
-; CHECK-NEXT:    mov v0.d[1], x11
-; CHECK-NEXT:    mov v2.d[1], x15
-; CHECK-NEXT:    mov v1.d[1], x13
-; CHECK-NEXT:    mov v3.d[1], x14
+; CHECK-NEXT:    frintx v0.2s, v0.2s
+; CHECK-NEXT:    frintx v1.2s, v1.2s
+; CHECK-NEXT:    frintx v2.2s, v2.2s
+; CHECK-NEXT:    frintx v3.2s, v3.2s
+; CHECK-NEXT:    fcvtl v0.2d, v0.2s
+; CHECK-NEXT:    fcvtl v1.2d, v1.2s
+; CHECK-NEXT:    fcvtl v4.2d, v2.2s
+; CHECK-NEXT:    fcvtl v3.2d, v3.2s
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v2.2d, v1.2d
+; CHECK-NEXT:    fcvtzs v1.2d, v4.2d
+; CHECK-NEXT:    fcvtzs v3.2d, v3.2d
 ; CHECK-NEXT:    ret
   %a = call <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float> %x)
   ret <8 x i64> %a
@@ -466,66 +460,34 @@ declare <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float>)
 define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
 ; CHECK-LABEL: llrint_v16i64_v16f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v4.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    ext v5.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    ext v4.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    ext v5.16b, v0.16b, v0.16b, #8
 ; CHECK-NEXT:    ext v6.16b, v2.16b, v2.16b, #8
-; CHECK-NEXT:    frintx s7, s0
-; CHECK-NEXT:    ext v16.16b, v3.16b, v3.16b, #8
-; CHECK-NEXT:    mov s0, v0.s[1]
-; CHECK-NEXT:    frintx s17, s4
-; CHECK-NEXT:    mov s4, v4.s[1]
-; CHECK-NEXT:    mov s18, v5.s[1]
-; CHECK-NEXT:    frintx s5, s5
-; CHECK-NEXT:    frintx s19, s6
-; CHECK-NEXT:    fcvtzs x8, s7
-; CHECK-NEXT:    frintx s7, s16
-; CHECK-NEXT:    mov s6, v6.s[1]
-; CHECK-NEXT:    mov s16, v16.s[1]
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    frintx s4, s4
-; CHECK-NEXT:    fcvtzs x9, s17
-; CHECK-NEXT:    frintx s17, s1
-; CHECK-NEXT:    mov s1, v1.s[1]
-; CHECK-NEXT:    frintx s18, s18
-; CHECK-NEXT:    fcvtzs x10, s5
-; CHECK-NEXT:    mov s5, v2.s[1]
-; CHECK-NEXT:    fcvtzs x11, s19
-; CHECK-NEXT:    mov s19, v3.s[1]
-; CHECK-NEXT:    frintx s2, s2
-; CHECK-NEXT:    fcvtzs x12, s7
-; CHECK-NEXT:    frintx s6, s6
-; CHECK-NEXT:    fcvtzs x13, s4
-; CHECK-NEXT:    frintx s4, s3
-; CHECK-NEXT:    frintx s16, s16
-; CHECK-NEXT:    fcvtzs x14, s18
-; CHECK-NEXT:    frintx s18, s1
-; CHECK-NEXT:    fcvtzs x15, s17
-; CHECK-NEXT:    frintx s20, s5
-; CHECK-NEXT:    frintx s17, s19
-; CHECK-NEXT:    fmov d1, x9
-; CHECK-NEXT:    fcvtzs x9, s2
-; CHECK-NEXT:    fmov d5, x11
-; CHECK-NEXT:    fmov d3, x10
-; CHECK-NEXT:    fcvtzs x11, s4
-; CHECK-NEXT:    fcvtzs x10, s0
-; CHECK-NEXT:    fmov d7, x12
-; CHECK-NEXT:    fcvtzs x12, s18
-; CHECK-NEXT:    fcvtzs x17, s6
-; CHECK-NEXT:    fcvtzs x18, s16
-; CHECK-NEXT:    fcvtzs x16, s20
-; CHECK-NEXT:    fcvtzs x0, s17
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d2, x15
-; CHECK-NEXT:    fmov d4, x9
-; CHECK-NEXT:    mov v1.d[1], x13
-; CHECK-NEXT:    fmov d6, x11
-; CHECK-NEXT:    mov v3.d[1], x14
-; CHECK-NEXT:    mov v0.d[1], x10
-; CHECK-NEXT:    mov v5.d[1], x17
-; CHECK-NEXT:    mov v7.d[1], x18
-; CHECK-NEXT:    mov v2.d[1], x12
-; CHECK-NEXT:    mov v4.d[1], x16
-; CHECK-NEXT:    mov v6.d[1], x0
+; CHECK-NEXT:    ext v7.16b, v3.16b, v3.16b, #8
+; CHECK-NEXT:    frintx v0.2s, v0.2s
+; CHECK-NEXT:    frintx v1.2s, v1.2s
+; CHECK-NEXT:    frintx v2.2s, v2.2s
+; CHECK-NEXT:    frintx v3.2s, v3.2s
+; CHECK-NEXT:    frintx v5.2s, v5.2s
+; CHECK-NEXT:    frintx v4.2s, v4.2s
+; CHECK-NEXT:    frintx v6.2s, v6.2s
+; CHECK-NEXT:    frintx v7.2s, v7.2s
+; CHECK-NEXT:    fcvtl v0.2d, v0.2s
+; CHECK-NEXT:    fcvtl v1.2d, v1.2s
+; CHECK-NEXT:    fcvtl v16.2d, v2.2s
+; CHECK-NEXT:    fcvtl v18.2d, v3.2s
+; CHECK-NEXT:    fcvtl v5.2d, v5.2s
+; CHECK-NEXT:    fcvtl v17.2d, v4.2s
+; CHECK-NEXT:    fcvtl v19.2d, v6.2s
+; CHECK-NEXT:    fcvtl v7.2d, v7.2s
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v2.2d, v1.2d
+; CHECK-NEXT:    fcvtzs v4.2d, v16.2d
+; CHECK-NEXT:    fcvtzs v6.2d, v18.2d
+; CHECK-NEXT:    fcvtzs v1.2d, v5.2d
+; CHECK-NEXT:    fcvtzs v3.2d, v17.2d
+; CHECK-NEXT:    fcvtzs v5.2d, v19.2d
+; CHECK-NEXT:    fcvtzs v7.2d, v7.2d
 ; CHECK-NEXT:    ret
   %a = call <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float> %x)
   ret <16 x i64> %a
@@ -535,134 +497,70 @@ declare <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float>)
 define <32 x i64> @llrint_v32i64_v32f32(<32 x float> %x) {
 ; CHECK-LABEL: llrint_v32i64_v32f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v16.16b, v2.16b, v2.16b, #8
-; CHECK-NEXT:    ext v20.16b, v5.16b, v5.16b, #8
-; CHECK-NEXT:    ext v17.16b, v3.16b, v3.16b, #8
-; CHECK-NEXT:    ext v18.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    ext v19.16b, v4.16b, v4.16b, #8
-; CHECK-NEXT:    ext v21.16b, v6.16b, v6.16b, #8
-; CHECK-NEXT:    ext v22.16b, v7.16b, v7.16b, #8
-; CHECK-NEXT:    frintx s24, s16
-; CHECK-NEXT:    mov s28, v20.s[1]
-; CHECK-NEXT:    frintx s25, s17
-; CHECK-NEXT:    frintx s26, s18
-; CHECK-NEXT:    frintx s27, s19
-; CHECK-NEXT:    frintx s29, s20
-; CHECK-NEXT:    mov s30, v21.s[1]
-; CHECK-NEXT:    frintx s20, s21
-; CHECK-NEXT:    frintx s21, s22
-; CHECK-NEXT:    mov s23, v22.s[1]
-; CHECK-NEXT:    mov s19, v19.s[1]
-; CHECK-NEXT:    mov s17, v17.s[1]
-; CHECK-NEXT:    fcvtzs x12, s24
-; CHECK-NEXT:    frintx s24, s28
-; CHECK-NEXT:    fcvtzs x13, s25
-; CHECK-NEXT:    mov s25, v7.s[1]
-; CHECK-NEXT:    fcvtzs x9, s26
-; CHECK-NEXT:    fcvtzs x11, s27
-; CHECK-NEXT:    fcvtzs x14, s20
-; CHECK-NEXT:    fcvtzs x15, s21
-; CHECK-NEXT:    frintx s26, s1
-; CHECK-NEXT:    frintx s23, s23
-; CHECK-NEXT:    frintx s27, s7
-; CHECK-NEXT:    frintx s22, s30
-; CHECK-NEXT:    fmov d20, x12
-; CHECK-NEXT:    fcvtzs x12, s24
-; CHECK-NEXT:    mov s24, v6.s[1]
-; CHECK-NEXT:    frintx s25, s25
-; CHECK-NEXT:    frintx s6, s6
-; CHECK-NEXT:    fcvtzs x10, s29
-; CHECK-NEXT:    fmov d7, x11
-; CHECK-NEXT:    fmov d21, x13
-; CHECK-NEXT:    frintx s28, s5
-; CHECK-NEXT:    fcvtzs x11, s23
-; CHECK-NEXT:    fmov d23, x14
-; CHECK-NEXT:    fcvtzs x14, s26
-; CHECK-NEXT:    fmov d26, x15
-; CHECK-NEXT:    fcvtzs x15, s27
-; CHECK-NEXT:    frintx s24, s24
-; CHECK-NEXT:    mov s27, v5.s[1]
-; CHECK-NEXT:    fcvtzs x13, s22
-; CHECK-NEXT:    fcvtzs x17, s25
-; CHECK-NEXT:    frintx s25, s4
-; CHECK-NEXT:    fcvtzs x18, s6
-; CHECK-NEXT:    fmov d6, x10
-; CHECK-NEXT:    frintx s22, s2
-; CHECK-NEXT:    mov v26.d[1], x11
-; CHECK-NEXT:    fmov d5, x14
-; CHECK-NEXT:    fcvtzs x10, s24
-; CHECK-NEXT:    fmov d24, x15
-; CHECK-NEXT:    fcvtzs x14, s28
-; CHECK-NEXT:    frintx s27, s27
-; CHECK-NEXT:    mov v23.d[1], x13
-; CHECK-NEXT:    mov s4, v4.s[1]
-; CHECK-NEXT:    fcvtzs x13, s25
-; CHECK-NEXT:    fmov d25, x18
-; CHECK-NEXT:    mov s16, v16.s[1]
-; CHECK-NEXT:    mov v24.d[1], x17
-; CHECK-NEXT:    fcvtzs x16, s22
-; CHECK-NEXT:    frintx s22, s3
-; CHECK-NEXT:    mov s3, v3.s[1]
-; CHECK-NEXT:    frintx s19, s19
-; CHECK-NEXT:    mov s2, v2.s[1]
-; CHECK-NEXT:    mov v25.d[1], x10
-; CHECK-NEXT:    fcvtzs x10, s27
-; CHECK-NEXT:    frintx s4, s4
-; CHECK-NEXT:    mov v6.d[1], x12
-; CHECK-NEXT:    frintx s17, s17
-; CHECK-NEXT:    mov s18, v18.s[1]
-; CHECK-NEXT:    stp q24, q26, [x8, #224]
-; CHECK-NEXT:    fmov d24, x14
-; CHECK-NEXT:    fcvtzs x11, s22
-; CHECK-NEXT:    ext v22.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    mov s1, v1.s[1]
-; CHECK-NEXT:    frintx s3, s3
-; CHECK-NEXT:    stp q25, q23, [x8, #192]
-; CHECK-NEXT:    frintx s2, s2
-; CHECK-NEXT:    fcvtzs x12, s4
-; CHECK-NEXT:    mov v24.d[1], x10
-; CHECK-NEXT:    fcvtzs x10, s19
-; CHECK-NEXT:    mov s19, v0.s[1]
-; CHECK-NEXT:    frintx s16, s16
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    fmov d4, x11
-; CHECK-NEXT:    mov s27, v22.s[1]
-; CHECK-NEXT:    frintx s22, s22
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    fcvtzs x11, s3
-; CHECK-NEXT:    fcvtzs x14, s2
-; CHECK-NEXT:    frintx s2, s18
-; CHECK-NEXT:    stp q24, q6, [x8, #160]
-; CHECK-NEXT:    fmov d6, x13
-; CHECK-NEXT:    fcvtzs x13, s17
-; CHECK-NEXT:    frintx s17, s19
-; CHECK-NEXT:    fmov d23, x16
-; CHECK-NEXT:    mov v7.d[1], x10
-; CHECK-NEXT:    frintx s3, s27
-; CHECK-NEXT:    fcvtzs x10, s22
-; CHECK-NEXT:    fcvtzs x15, s1
-; CHECK-NEXT:    mov v6.d[1], x12
-; CHECK-NEXT:    fcvtzs x12, s16
-; CHECK-NEXT:    mov v4.d[1], x11
-; CHECK-NEXT:    mov v21.d[1], x13
-; CHECK-NEXT:    fcvtzs x13, s0
-; CHECK-NEXT:    mov v23.d[1], x14
-; CHECK-NEXT:    fcvtzs x14, s17
-; CHECK-NEXT:    fcvtzs x11, s3
-; CHECK-NEXT:    fmov d0, x10
-; CHECK-NEXT:    mov v5.d[1], x15
-; CHECK-NEXT:    stp q6, q7, [x8, #128]
-; CHECK-NEXT:    mov v20.d[1], x12
-; CHECK-NEXT:    fcvtzs x12, s2
-; CHECK-NEXT:    stp q4, q21, [x8, #96]
-; CHECK-NEXT:    fmov d1, x13
-; CHECK-NEXT:    fmov d2, x9
-; CHECK-NEXT:    mov v0.d[1], x11
-; CHECK-NEXT:    stp q23, q20, [x8, #64]
-; CHECK-NEXT:    mov v1.d[1], x14
-; CHECK-NEXT:    mov v2.d[1], x12
-; CHECK-NEXT:    stp q5, q0, [x8, #32]
-; CHECK-NEXT:    stp q1, q2, [x8]
+; CHECK-NEXT:    ext v16.16b, v7.16b, v7.16b, #8
+; CHECK-NEXT:    ext v17.16b, v6.16b, v6.16b, #8
+; CHECK-NEXT:    frintx v7.2s, v7.2s
+; CHECK-NEXT:    frintx v6.2s, v6.2s
+; CHECK-NEXT:    ext v18.16b, v5.16b, v5.16b, #8
+; CHECK-NEXT:    ext v21.16b, v4.16b, v4.16b, #8
+; CHECK-NEXT:    ext v22.16b, v2.16b, v2.16b, #8
+; CHECK-NEXT:    frintx v5.2s, v5.2s
+; CHECK-NEXT:    ext v23.16b, v3.16b, v3.16b, #8
+; CHECK-NEXT:    frintx v4.2s, v4.2s
+; CHECK-NEXT:    ext v19.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v20.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    frintx v16.2s, v16.2s
+; CHECK-NEXT:    frintx v17.2s, v17.2s
+; CHECK-NEXT:    fcvtl v7.2d, v7.2s
+; CHECK-NEXT:    fcvtl v6.2d, v6.2s
+; CHECK-NEXT:    frintx v18.2s, v18.2s
+; CHECK-NEXT:    frintx v21.2s, v21.2s
+; CHECK-NEXT:    frintx v2.2s, v2.2s
+; CHECK-NEXT:    frintx v3.2s, v3.2s
+; CHECK-NEXT:    fcvtl v5.2d, v5.2s
+; CHECK-NEXT:    frintx v23.2s, v23.2s
+; CHECK-NEXT:    fcvtl v4.2d, v4.2s
+; CHECK-NEXT:    frintx v1.2s, v1.2s
+; CHECK-NEXT:    fcvtl v16.2d, v16.2s
+; CHECK-NEXT:    fcvtl v17.2d, v17.2s
+; CHECK-NEXT:    fcvtzs v7.2d, v7.2d
+; CHECK-NEXT:    fcvtzs v6.2d, v6.2d
+; CHECK-NEXT:    fcvtl v18.2d, v18.2s
+; CHECK-NEXT:    fcvtl v21.2d, v21.2s
+; CHECK-NEXT:    frintx v20.2s, v20.2s
+; CHECK-NEXT:    fcvtl v3.2d, v3.2s
+; CHECK-NEXT:    fcvtzs v5.2d, v5.2d
+; CHECK-NEXT:    frintx v0.2s, v0.2s
+; CHECK-NEXT:    fcvtl v2.2d, v2.2s
+; CHECK-NEXT:    fcvtzs v4.2d, v4.2d
+; CHECK-NEXT:    fcvtzs v16.2d, v16.2d
+; CHECK-NEXT:    fcvtzs v17.2d, v17.2d
+; CHECK-NEXT:    fcvtl v1.2d, v1.2s
+; CHECK-NEXT:    fcvtzs v3.2d, v3.2d
+; CHECK-NEXT:    fcvtl v0.2d, v0.2s
+; CHECK-NEXT:    fcvtzs v2.2d, v2.2d
+; CHECK-NEXT:    stp q6, q17, [x8, #192]
+; CHECK-NEXT:    fcvtl v6.2d, v23.2s
+; CHECK-NEXT:    frintx v17.2s, v19.2s
+; CHECK-NEXT:    stp q7, q16, [x8, #224]
+; CHECK-NEXT:    frintx v7.2s, v22.2s
+; CHECK-NEXT:    fcvtzs v16.2d, v18.2d
+; CHECK-NEXT:    fcvtzs v18.2d, v21.2d
+; CHECK-NEXT:    fcvtzs v1.2d, v1.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v6.2d, v6.2d
+; CHECK-NEXT:    stp q5, q16, [x8, #160]
+; CHECK-NEXT:    fcvtl v7.2d, v7.2s
+; CHECK-NEXT:    fcvtl v5.2d, v20.2s
+; CHECK-NEXT:    stp q4, q18, [x8, #128]
+; CHECK-NEXT:    fcvtl v4.2d, v17.2s
+; CHECK-NEXT:    stp q3, q6, [x8, #96]
+; CHECK-NEXT:    fcvtzs v7.2d, v7.2d
+; CHECK-NEXT:    fcvtzs v3.2d, v5.2d
+; CHECK-NEXT:    stp q1, q3, [x8, #32]
+; CHECK-NEXT:    stp q2, q7, [x8, #64]
+; CHECK-NEXT:    fcvtzs v2.2d, v4.2d
+; CHECK-NEXT:    stp q0, q2, [x8]
 ; CHECK-NEXT:    ret
   %a = call <32 x i64> @llvm.llrint.v32i64.v32f32(<32 x float> %x)
   ret <32 x i64> %a
@@ -684,13 +582,8 @@ declare <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double>)
 define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) {
 ; CHECK-LABEL: llrint_v2i64_v2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov d1, v0.d[1]
-; CHECK-NEXT:    frintx d0, d0
-; CHECK-NEXT:    frintx d1, d1
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    fcvtzs x9, d1
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    mov v0.d[1], x9
+; CHECK-NEXT:    frintx v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
 ; CHECK-NEXT:    ret
   %a = call <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double> %x)
   ret <2 x i64> %a
@@ -700,20 +593,10 @@ declare <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double>)
 define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
 ; CHECK-LABEL: llrint_v4i64_v4f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov d2, v0.d[1]
-; CHECK-NEXT:    mov d3, v1.d[1]
-; CHECK-NEXT:    frintx d0, d0
-; CHECK-NEXT:    frintx d1, d1
-; CHECK-NEXT:    frintx d2, d2
-; CHECK-NEXT:    frintx d3, d3
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    fcvtzs x9, d1
-; CHECK-NEXT:    fcvtzs x10, d2
-; CHECK-NEXT:    fcvtzs x11, d3
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d1, x9
-; CHECK-NEXT:    mov v0.d[1], x10
-; CHECK-NEXT:    mov v1.d[1], x11
+; CHECK-NEXT:    frintx v0.2d, v0.2d
+; CHECK-NEXT:    frintx v1.2d, v1.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v1.2d, v1.2d
 ; CHECK-NEXT:    ret
   %a = call <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double> %x)
   ret <4 x i64> %a
@@ -723,34 +606,14 @@ declare <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double>)
 define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
 ; CHECK-LABEL: llrint_v8i64_v8f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov d4, v0.d[1]
-; CHECK-NEXT:    mov d5, v1.d[1]
-; CHECK-NEXT:    mov d6, v2.d[1]
-; CHECK-NEXT:    mov d7, v3.d[1]
-; CHECK-NEXT:    frintx d0, d0
-; CHECK-NEXT:    frintx d1, d1
-; CHECK-NEXT:    frintx d2, d2
-; CHECK-NEXT:    frintx d3, d3
-; CHECK-NEXT:    frintx d4, d4
-; CHECK-NEXT:    frintx d5, d5
-; CHECK-NEXT:    frintx d6, d6
-; CHECK-NEXT:    frintx d7, d7
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    fcvtzs x9, d1
-; CHECK-NEXT:    fcvtzs x10, d2
-; CHECK-NEXT:    fcvtzs x11, d3
-; CHECK-NEXT:    fcvtzs x12, d4
-; CHECK-NEXT:    fcvtzs x13, d5
-; CHECK-NEXT:    fcvtzs x14, d6
-; CHECK-NEXT:    fcvtzs x15, d7
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d1, x9
-; CHECK-NEXT:    fmov d2, x10
-; CHECK-NEXT:    fmov d3, x11
-; CHECK-NEXT:    mov v0.d[1], x12
-; CHECK-NEXT:    mov v1.d[1], x13
-; CHECK-NEXT:    mov v2.d[1], x14
-; CHECK-NEXT:    mov v3.d[1], x15
+; CHECK-NEXT:    frintx v0.2d, v0.2d
+; CHECK-NEXT:    frintx v1.2d, v1.2d
+; CHECK-NEXT:    frintx v2.2d, v2.2d
+; CHECK-NEXT:    frintx v3.2d, v3.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v1.2d, v1.2d
+; CHECK-NEXT:    fcvtzs v2.2d, v2.2d
+; CHECK-NEXT:    fcvtzs v3.2d, v3.2d
 ; CHECK-NEXT:    ret
   %a = call <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double> %x)
   ret <8 x i64> %a
@@ -760,62 +623,22 @@ declare <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double>)
 define <16 x i64> @llrint_v16f64(<16 x double> %x) {
 ; CHECK-LABEL: llrint_v16f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov d16, v0.d[1]
-; CHECK-NEXT:    mov d17, v1.d[1]
-; CHECK-NEXT:    frintx d0, d0
-; CHECK-NEXT:    frintx d1, d1
-; CHECK-NEXT:    frintx d18, d2
-; CHECK-NEXT:    mov d2, v2.d[1]
-; CHECK-NEXT:    frintx d19, d3
-; CHECK-NEXT:    mov d3, v3.d[1]
-; CHECK-NEXT:    frintx d16, d16
-; CHECK-NEXT:    frintx d17, d17
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    frintx d0, d4
-; CHECK-NEXT:    mov d4, v4.d[1]
-; CHECK-NEXT:    fcvtzs x9, d1
-; CHECK-NEXT:    frintx d1, d5
-; CHECK-NEXT:    mov d5, v5.d[1]
-; CHECK-NEXT:    fcvtzs x12, d18
-; CHECK-NEXT:    frintx d2, d2
-; CHECK-NEXT:    fcvtzs x13, d19
-; CHECK-NEXT:    frintx d18, d3
-; CHECK-NEXT:    fcvtzs x10, d16
-; CHECK-NEXT:    mov d16, v6.d[1]
-; CHECK-NEXT:    fcvtzs x11, d17
-; CHECK-NEXT:    mov d17, v7.d[1]
-; CHECK-NEXT:    frintx d6, d6
-; CHECK-NEXT:    frintx d7, d7
-; CHECK-NEXT:    frintx d4, d4
-; CHECK-NEXT:    frintx d5, d5
-; CHECK-NEXT:    fcvtzs x14, d0
-; CHECK-NEXT:    fcvtzs x15, d1
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d1, x9
-; CHECK-NEXT:    frintx d16, d16
-; CHECK-NEXT:    fcvtzs x9, d2
-; CHECK-NEXT:    fmov d2, x12
-; CHECK-NEXT:    frintx d17, d17
-; CHECK-NEXT:    fcvtzs x8, d6
-; CHECK-NEXT:    fcvtzs x12, d7
-; CHECK-NEXT:    fmov d3, x13
-; CHECK-NEXT:    fcvtzs x13, d18
-; CHECK-NEXT:    fcvtzs x16, d4
-; CHECK-NEXT:    fcvtzs x17, d5
-; CHECK-NEXT:    fmov d4, x14
-; CHECK-NEXT:    fmov d5, x15
-; CHECK-NEXT:    fcvtzs x18, d16
-; CHECK-NEXT:    mov v0.d[1], x10
-; CHECK-NEXT:    mov v1.d[1], x11
-; CHECK-NEXT:    fcvtzs x0, d17
-; CHECK-NEXT:    fmov d6, x8
-; CHECK-NEXT:    fmov d7, x12
-; CHECK-NEXT:    mov v2.d[1], x9
-; CHECK-NEXT:    mov v3.d[1], x13
-; CHECK-NEXT:    mov v4.d[1], x16
-; CHECK-NEXT:    mov v5.d[1], x17
-; CHECK-NEXT:    mov v6.d[1], x18
-; CHECK-NEXT:    mov v7.d[1], x0
+; CHECK-NEXT:    frintx v0.2d, v0.2d
+; CHECK-NEXT:    frintx v1.2d, v1.2d
+; CHECK-NEXT:    frintx v2.2d, v2.2d
+; CHECK-NEXT:    frintx v3.2d, v3.2d
+; CHECK-NEXT:    frintx v4.2d, v4.2d
+; CHECK-NEXT:    frintx v5.2d, v5.2d
+; CHECK-NEXT:    frintx v6.2d, v6.2d
+; CHECK-NEXT:    frintx v7.2d, v7.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v1.2d, v1.2d
+; CHECK-NEXT:    fcvtzs v2.2d, v2.2d
+; CHECK-NEXT:    fcvtzs v3.2d, v3.2d
+; CHECK-NEXT:    fcvtzs v4.2d, v4.2d
+; CHECK-NEXT:    fcvtzs v5.2d, v5.2d
+; CHECK-NEXT:    fcvtzs v6.2d, v6.2d
+; CHECK-NEXT:    fcvtzs v7.2d, v7.2d
 ; CHECK-NEXT:    ret
   %a = call <16 x i64> @llvm.llrint.v16i64.v16f64(<16 x double> %x)
   ret <16 x i64> %a
@@ -825,130 +648,50 @@ declare <16 x i64> @llvm.llrint.v16i64.v16f64(<16 x double>)
 define <32 x i64> @llrint_v32f64(<32 x double> %x) {
 ; CHECK-LABEL: llrint_v32f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintx d20, d0
-; CHECK-NEXT:    frintx d22, d3
-; CHECK-NEXT:    frintx d21, d4
+; CHECK-NEXT:    ldp q17, q16, [sp, #96]
+; CHECK-NEXT:    frintx v7.2d, v7.2d
 ; CHECK-NEXT:    ldp q19, q18, [sp, #64]
-; CHECK-NEXT:    frintx d23, d5
-; CHECK-NEXT:    ldp q27, q26, [sp, #96]
-; CHECK-NEXT:    mov d4, v4.d[1]
-; CHECK-NEXT:    ldp q16, q17, [sp, #32]
-; CHECK-NEXT:    mov d5, v5.d[1]
-; CHECK-NEXT:    fcvtzs x9, d20
-; CHECK-NEXT:    frintx d20, d6
-; CHECK-NEXT:    fcvtzs x11, d22
-; CHECK-NEXT:    frintx d22, d19
-; CHECK-NEXT:    fcvtzs x12, d21
-; CHECK-NEXT:    fcvtzs x10, d23
-; CHECK-NEXT:    mov d21, v26.d[1]
-; CHECK-NEXT:    frintx d23, d27
-; CHECK-NEXT:    mov d27, v27.d[1]
-; CHECK-NEXT:    frintx d24, d16
-; CHECK-NEXT:    mov d19, v19.d[1]
-; CHECK-NEXT:    frintx d25, d17
-; CHECK-NEXT:    fcvtzs x13, d20
-; CHECK-NEXT:    mov d20, v18.d[1]
-; CHECK-NEXT:    frintx d18, d18
-; CHECK-NEXT:    fcvtzs x16, d22
-; CHECK-NEXT:    frintx d22, d26
-; CHECK-NEXT:    mov d16, v16.d[1]
-; CHECK-NEXT:    frintx d21, d21
-; CHECK-NEXT:    fcvtzs x17, d23
-; CHECK-NEXT:    frintx d23, d27
-; CHECK-NEXT:    fcvtzs x14, d24
-; CHECK-NEXT:    frintx d26, d19
-; CHECK-NEXT:    fmov d19, x11
-; CHECK-NEXT:    frintx d20, d20
-; CHECK-NEXT:    mov d27, v17.d[1]
-; CHECK-NEXT:    fcvtzs x15, d25
-; CHECK-NEXT:    ldp q25, q24, [sp]
-; CHECK-NEXT:    fcvtzs x11, d22
-; CHECK-NEXT:    fmov d17, x12
-; CHECK-NEXT:    fcvtzs x12, d21
-; CHECK-NEXT:    fcvtzs x0, d23
-; CHECK-NEXT:    fmov d23, x14
-; CHECK-NEXT:    fcvtzs x14, d18
-; CHECK-NEXT:    fmov d18, x17
-; CHECK-NEXT:    fcvtzs x17, d20
-; CHECK-NEXT:    frintx d21, d7
-; CHECK-NEXT:    fcvtzs x18, d26
-; CHECK-NEXT:    fmov d20, x11
-; CHECK-NEXT:    frintx d22, d25
-; CHECK-NEXT:    frintx d26, d27
-; CHECK-NEXT:    frintx d16, d16
-; CHECK-NEXT:    mov v18.d[1], x0
-; CHECK-NEXT:    mov d25, v25.d[1]
-; CHECK-NEXT:    mov d7, v7.d[1]
-; CHECK-NEXT:    mov d6, v6.d[1]
-; CHECK-NEXT:    mov d0, v0.d[1]
-; CHECK-NEXT:    mov v20.d[1], x12
-; CHECK-NEXT:    fcvtzs x11, d21
-; CHECK-NEXT:    fmov d21, x15
-; CHECK-NEXT:    fcvtzs x12, d22
-; CHECK-NEXT:    fmov d22, x16
-; CHECK-NEXT:    fcvtzs x15, d26
-; CHECK-NEXT:    fmov d26, x14
-; CHECK-NEXT:    fcvtzs x14, d16
-; CHECK-NEXT:    frintx d25, d25
-; CHECK-NEXT:    frintx d7, d7
-; CHECK-NEXT:    mov d16, v1.d[1]
-; CHECK-NEXT:    mov d3, v3.d[1]
-; CHECK-NEXT:    stp q18, q20, [x8, #224]
-; CHECK-NEXT:    mov d18, v24.d[1]
-; CHECK-NEXT:    mov v22.d[1], x18
-; CHECK-NEXT:    mov v26.d[1], x17
-; CHECK-NEXT:    frintx d24, d24
-; CHECK-NEXT:    mov v21.d[1], x15
-; CHECK-NEXT:    mov v23.d[1], x14
-; CHECK-NEXT:    frintx d20, d2
-; CHECK-NEXT:    mov d2, v2.d[1]
-; CHECK-NEXT:    frintx d6, d6
-; CHECK-NEXT:    frintx d5, d5
-; CHECK-NEXT:    frintx d4, d4
-; CHECK-NEXT:    frintx d18, d18
-; CHECK-NEXT:    frintx d1, d1
-; CHECK-NEXT:    frintx d3, d3
-; CHECK-NEXT:    stp q22, q26, [x8, #192]
-; CHECK-NEXT:    fmov d22, x10
-; CHECK-NEXT:    fcvtzs x10, d24
-; CHECK-NEXT:    stp q23, q21, [x8, #160]
-; CHECK-NEXT:    fmov d21, x11
-; CHECK-NEXT:    fmov d24, x13
-; CHECK-NEXT:    frintx d2, d2
-; CHECK-NEXT:    fcvtzs x13, d6
-; CHECK-NEXT:    frintx d6, d16
-; CHECK-NEXT:    fcvtzs x11, d18
-; CHECK-NEXT:    fmov d18, x12
-; CHECK-NEXT:    fcvtzs x12, d25
-; CHECK-NEXT:    fmov d23, x10
-; CHECK-NEXT:    fcvtzs x10, d7
-; CHECK-NEXT:    fcvtzs x14, d5
-; CHECK-NEXT:    frintx d0, d0
-; CHECK-NEXT:    fcvtzs x15, d3
-; CHECK-NEXT:    mov v24.d[1], x13
-; CHECK-NEXT:    fcvtzs x13, d2
-; CHECK-NEXT:    fmov d2, x9
-; CHECK-NEXT:    mov v23.d[1], x11
-; CHECK-NEXT:    fcvtzs x11, d4
-; CHECK-NEXT:    mov v18.d[1], x12
-; CHECK-NEXT:    fcvtzs x12, d20
-; CHECK-NEXT:    mov v21.d[1], x10
-; CHECK-NEXT:    fcvtzs x10, d1
-; CHECK-NEXT:    mov v22.d[1], x14
-; CHECK-NEXT:    fcvtzs x14, d6
-; CHECK-NEXT:    mov v19.d[1], x15
-; CHECK-NEXT:    stp q18, q23, [x8, #128]
-; CHECK-NEXT:    mov v17.d[1], x11
-; CHECK-NEXT:    fcvtzs x11, d0
-; CHECK-NEXT:    stp q24, q21, [x8, #96]
-; CHECK-NEXT:    fmov d0, x12
-; CHECK-NEXT:    fmov d1, x10
-; CHECK-NEXT:    stp q17, q22, [x8, #64]
-; CHECK-NEXT:    mov v0.d[1], x13
-; CHECK-NEXT:    mov v1.d[1], x14
-; CHECK-NEXT:    mov v2.d[1], x11
-; CHECK-NEXT:    stp q0, q19, [x8, #32]
-; CHECK-NEXT:    stp q2, q1, [x8]
+; CHECK-NEXT:    frintx v6.2d, v6.2d
+; CHECK-NEXT:    ldp q21, q20, [sp, #32]
+; CHECK-NEXT:    frintx v5.2d, v5.2d
+; CHECK-NEXT:    frintx v16.2d, v16.2d
+; CHECK-NEXT:    frintx v17.2d, v17.2d
+; CHECK-NEXT:    frintx v4.2d, v4.2d
+; CHECK-NEXT:    frintx v18.2d, v18.2d
+; CHECK-NEXT:    frintx v19.2d, v19.2d
+; CHECK-NEXT:    frintx v3.2d, v3.2d
+; CHECK-NEXT:    ldp q23, q22, [sp]
+; CHECK-NEXT:    frintx v20.2d, v20.2d
+; CHECK-NEXT:    frintx v21.2d, v21.2d
+; CHECK-NEXT:    frintx v2.2d, v2.2d
+; CHECK-NEXT:    frintx v1.2d, v1.2d
+; CHECK-NEXT:    fcvtzs v16.2d, v16.2d
+; CHECK-NEXT:    fcvtzs v17.2d, v17.2d
+; CHECK-NEXT:    frintx v0.2d, v0.2d
+; CHECK-NEXT:    frintx v22.2d, v22.2d
+; CHECK-NEXT:    fcvtzs v18.2d, v18.2d
+; CHECK-NEXT:    frintx v23.2d, v23.2d
+; CHECK-NEXT:    fcvtzs v19.2d, v19.2d
+; CHECK-NEXT:    fcvtzs v20.2d, v20.2d
+; CHECK-NEXT:    fcvtzs v7.2d, v7.2d
+; CHECK-NEXT:    fcvtzs v6.2d, v6.2d
+; CHECK-NEXT:    fcvtzs v5.2d, v5.2d
+; CHECK-NEXT:    fcvtzs v4.2d, v4.2d
+; CHECK-NEXT:    stp q17, q16, [x8, #224]
+; CHECK-NEXT:    fcvtzs v16.2d, v21.2d
+; CHECK-NEXT:    fcvtzs v3.2d, v3.2d
+; CHECK-NEXT:    fcvtzs v17.2d, v22.2d
+; CHECK-NEXT:    fcvtzs v2.2d, v2.2d
+; CHECK-NEXT:    fcvtzs v1.2d, v1.2d
+; CHECK-NEXT:    stp q19, q18, [x8, #192]
+; CHECK-NEXT:    fcvtzs v18.2d, v23.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    stp q4, q5, [x8, #64]
+; CHECK-NEXT:    stp q6, q7, [x8, #96]
+; CHECK-NEXT:    stp q2, q3, [x8, #32]
+; CHECK-NEXT:    stp q0, q1, [x8]
+; CHECK-NEXT:    stp q18, q17, [x8, #128]
+; CHECK-NEXT:    stp q16, q20, [x8, #160]
 ; CHECK-NEXT:    ret
   %a = call <32 x i64> @llvm.llrint.v32i64.v16f64(<32 x double> %x)
   ret <32 x i64> %a
diff --git a/llvm/test/CodeGen/AArch64/vector-lrint.ll b/llvm/test/CodeGen/AArch64/vector-lrint.ll
index a58be8dcb7455..41ba13a863d97 100644
--- a/llvm/test/CodeGen/AArch64/vector-lrint.ll
+++ b/llvm/test/CodeGen/AArch64/vector-lrint.ll
@@ -1,5 +1,24 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64 -mattr=+neon | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -mattr=+neon | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -mtriple=aarch64 -mattr=+neon -global-isel -global-isel-abort=2 2>&1 |\
+; RUN:   FileCheck %s --check-prefixes=CHECK,CHECK-GI
+
+; CHECK-GI:       warning: Instruction selection used fallback path for lrint_v2f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_v4f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_v8f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_v16i64_v16f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_v32i64_v32f16
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_v2f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_v4f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_v8f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_v16i64_v16f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_v32i64_v32f32
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_v2f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_v4f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_v8f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_v16f64
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_v32f64
 
 define <1 x i64> @lrint_v1f16(<1 x half> %x) {
 ; CHECK-LABEL: lrint_v1f16:
@@ -17,12 +36,12 @@ declare <1 x i64> @llvm.lrint.v1i64.v1f16(<1 x half>)
 define <2 x i64> @lrint_v2f16(<2 x half> %x) {
 ; CHECK-LABEL: lrint_v2f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NEXT:    frintx v0.4s, v0.4s
+; CHECK-NEXT:    fcvtn v0.4h, v0.4s
 ; CHECK-NEXT:    mov h1, v0.h[1]
 ; CHECK-NEXT:    fcvt s0, h0
 ; CHECK-NEXT:    fcvt s1, h1
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    frintx s1, s1
 ; CHECK-NEXT:    fcvtzs x8, s0
 ; CHECK-NEXT:    fcvtzs x9, s1
 ; CHECK-NEXT:    fmov d0, x8
@@ -37,22 +56,24 @@ define <4 x i64> @lrint_v4f16(<4 x half> %x) {
 ; CHECK-LABEL: lrint_v4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    mov h1, v0.h[2]
+; CHECK-NEXT:    dup v1.2s, v0.s[1]
+; CHECK-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NEXT:    frintx v0.4s, v0.4s
+; CHECK-NEXT:    frintx v1.4s, v1.4s
+; CHECK-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NEXT:    fcvtn v1.4h, v1.4s
 ; CHECK-NEXT:    mov h2, v0.h[1]
-; CHECK-NEXT:    mov h3, v0.h[3]
 ; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    mov h3, v1.h[1]
 ; CHECK-NEXT:    fcvt s1, h1
 ; CHECK-NEXT:    fcvt s2, h2
-; CHECK-NEXT:    fcvt s3, h3
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    frintx s2, s2
-; CHECK-NEXT:    frintx s3, s3
 ; CHECK-NEXT:    fcvtzs x8, s0
+; CHECK-NEXT:    fcvt s3, h3
 ; CHECK-NEXT:    fcvtzs x9, s1
 ; CHECK-NEXT:    fcvtzs x10, s2
-; CHECK-NEXT:    fcvtzs x11, s3
 ; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fcvtzs x11, s3
 ; CHECK-NEXT:    fmov d1, x9
 ; CHECK-NEXT:    mov v0.d[1], x10
 ; CHECK-NEXT:    mov v1.d[1], x11
@@ -65,45 +86,48 @@ declare <4 x i64> @llvm.lrint.v4i64.v4f16(<4 x half>)
 define <8 x i64> @lrint_v8f16(<8 x half> %x) {
 ; CHECK-LABEL: lrint_v8f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    mov h4, v0.h[2]
-; CHECK-NEXT:    mov h3, v0.h[1]
-; CHECK-NEXT:    mov h7, v0.h[3]
+; CHECK-NEXT:    dup v1.2s, v0.s[1]
+; CHECK-NEXT:    dup v2.2s, v0.s[3]
+; CHECK-NEXT:    fcvtl v3.4s, v0.4h
+; CHECK-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NEXT:    fcvtl v2.4s, v2.4h
+; CHECK-NEXT:    frintx v3.4s, v3.4s
+; CHECK-NEXT:    frintx v0.4s, v0.4s
+; CHECK-NEXT:    frintx v1.4s, v1.4s
+; CHECK-NEXT:    frintx v2.4s, v2.4s
+; CHECK-NEXT:    fcvtn v3.4h, v3.4s
+; CHECK-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NEXT:    fcvtn v2.4h, v2.4s
+; CHECK-NEXT:    mov h4, v3.h[1]
+; CHECK-NEXT:    mov h5, v0.h[1]
 ; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    mov h2, v1.h[2]
-; CHECK-NEXT:    mov h5, v1.h[1]
-; CHECK-NEXT:    mov h6, v1.h[3]
-; CHECK-NEXT:    fcvt s1, h1
-; CHECK-NEXT:    fcvt s4, h4
 ; CHECK-NEXT:    fcvt s3, h3
-; CHECK-NEXT:    fcvt s7, h7
-; CHECK-NEXT:    frintx s0, s0
+; CHECK-NEXT:    mov h6, v1.h[1]
+; CHECK-NEXT:    mov h7, v2.h[1]
+; CHECK-NEXT:    fcvt s1, h1
 ; CHECK-NEXT:    fcvt s2, h2
+; CHECK-NEXT:    fcvt s4, h4
 ; CHECK-NEXT:    fcvt s5, h5
+; CHECK-NEXT:    fcvtzs x8, s0
+; CHECK-NEXT:    fcvtzs x9, s3
 ; CHECK-NEXT:    fcvt s6, h6
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    frintx s4, s4
-; CHECK-NEXT:    frintx s3, s3
-; CHECK-NEXT:    frintx s7, s7
-; CHECK-NEXT:    fcvtzs x9, s0
-; CHECK-NEXT:    frintx s2, s2
-; CHECK-NEXT:    frintx s5, s5
-; CHECK-NEXT:    frintx s6, s6
-; CHECK-NEXT:    fcvtzs x8, s1
-; CHECK-NEXT:    fcvtzs x12, s4
-; CHECK-NEXT:    fcvtzs x11, s3
-; CHECK-NEXT:    fcvtzs x15, s7
-; CHECK-NEXT:    fmov d0, x9
-; CHECK-NEXT:    fcvtzs x10, s2
+; CHECK-NEXT:    fcvt s7, h7
+; CHECK-NEXT:    fcvtzs x11, s1
+; CHECK-NEXT:    fcvtzs x12, s2
+; CHECK-NEXT:    fcvtzs x10, s4
 ; CHECK-NEXT:    fcvtzs x13, s5
-; CHECK-NEXT:    fcvtzs x14, s6
 ; CHECK-NEXT:    fmov d2, x8
-; CHECK-NEXT:    fmov d1, x12
-; CHECK-NEXT:    mov v0.d[1], x11
-; CHECK-NEXT:    fmov d3, x10
+; CHECK-NEXT:    fmov d0, x9
+; CHECK-NEXT:    fcvtzs x14, s6
+; CHECK-NEXT:    fcvtzs x15, s7
+; CHECK-NEXT:    fmov d1, x11
+; CHECK-NEXT:    fmov d3, x12
+; CHECK-NEXT:    mov v0.d[1], x10
 ; CHECK-NEXT:    mov v2.d[1], x13
-; CHECK-NEXT:    mov v1.d[1], x15
-; CHECK-NEXT:    mov v3.d[1], x14
+; CHECK-NEXT:    mov v1.d[1], x14
+; CHECK-NEXT:    mov v3.d[1], x15
 ; CHECK-NEXT:    ret
   %a = call <8 x i64> @llvm.lrint.v8i64.v8f16(<8 x half> %x)
   ret <8 x i64> %a
@@ -113,84 +137,90 @@ declare <8 x i64> @llvm.lrint.v8i64.v8f16(<8 x half>)
 define <16 x i64> @lrint_v16i64_v16f16(<16 x half> %x) {
 ; CHECK-LABEL: lrint_v16i64_v16f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    mov h17, v0.h[1]
-; CHECK-NEXT:    mov h19, v0.h[2]
-; CHECK-NEXT:    fcvt s18, h0
-; CHECK-NEXT:    mov h0, v0.h[3]
-; CHECK-NEXT:    mov h4, v2.h[1]
-; CHECK-NEXT:    mov h5, v2.h[2]
-; CHECK-NEXT:    fcvt s7, h3
-; CHECK-NEXT:    fcvt s6, h2
-; CHECK-NEXT:    mov h16, v3.h[2]
-; CHECK-NEXT:    mov h2, v2.h[3]
-; CHECK-NEXT:    fcvt s17, h17
-; CHECK-NEXT:    fcvt s19, h19
-; CHECK-NEXT:    frintx s18, s18
-; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    fcvtl2 v4.4s, v0.8h
+; CHECK-NEXT:    fcvtl2 v2.4s, v1.8h
+; CHECK-NEXT:    fcvtl v3.4s, v0.4h
+; CHECK-NEXT:    dup v5.2s, v0.s[1]
+; CHECK-NEXT:    dup v0.2s, v0.s[3]
+; CHECK-NEXT:    dup v6.2s, v1.s[1]
+; CHECK-NEXT:    dup v7.2s, v1.s[3]
+; CHECK-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NEXT:    frintx v4.4s, v4.4s
+; CHECK-NEXT:    frintx v2.4s, v2.4s
+; CHECK-NEXT:    frintx v3.4s, v3.4s
+; CHECK-NEXT:    fcvtl v5.4s, v5.4h
+; CHECK-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NEXT:    fcvtl v6.4s, v6.4h
+; CHECK-NEXT:    fcvtl v7.4s, v7.4h
+; CHECK-NEXT:    frintx v1.4s, v1.4s
+; CHECK-NEXT:    fcvtn v4.4h, v4.4s
+; CHECK-NEXT:    fcvtn v2.4h, v2.4s
+; CHECK-NEXT:    fcvtn v3.4h, v3.4s
+; CHECK-NEXT:    frintx v5.4s, v5.4s
+; CHECK-NEXT:    frintx v0.4s, v0.4s
+; CHECK-NEXT:    frintx v6.4s, v6.4s
+; CHECK-NEXT:    frintx v7.4s, v7.4s
+; CHECK-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NEXT:    mov h16, v4.h[1]
 ; CHECK-NEXT:    fcvt s4, h4
-; CHECK-NEXT:    fcvt s5, h5
-; CHECK-NEXT:    frintx s7, s7
-; CHECK-NEXT:    frintx s6, s6
+; CHECK-NEXT:    fcvt s17, h2
+; CHECK-NEXT:    mov h18, v3.h[1]
+; CHECK-NEXT:    fcvtn v5.4h, v5.4s
+; CHECK-NEXT:    fcvt s3, h3
+; CHECK-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NEXT:    fcvtn v6.4h, v6.4s
+; CHECK-NEXT:    fcvtn v7.4h, v7.4s
+; CHECK-NEXT:    mov h2, v2.h[1]
 ; CHECK-NEXT:    fcvt s16, h16
-; CHECK-NEXT:    fcvt s2, h2
-; CHECK-NEXT:    frintx s17, s17
-; CHECK-NEXT:    frintx s19, s19
-; CHECK-NEXT:    fcvtzs x13, s18
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    frintx s4, s4
-; CHECK-NEXT:    frintx s5, s5
-; CHECK-NEXT:    fcvtzs x9, s7
-; CHECK-NEXT:    mov h7, v1.h[2]
-; CHECK-NEXT:    fcvtzs x8, s6
-; CHECK-NEXT:    mov h6, v1.h[1]
-; CHECK-NEXT:    frintx s16, s16
-; CHECK-NEXT:    fcvtzs x14, s17
-; CHECK-NEXT:    fcvtzs x15, s19
-; CHECK-NEXT:    fcvtzs x10, s4
-; CHECK-NEXT:    mov h4, v3.h[1]
-; CHECK-NEXT:    fcvtzs x11, s5
-; CHECK-NEXT:    mov h5, v1.h[3]
-; CHECK-NEXT:    mov h3, v3.h[3]
+; CHECK-NEXT:    fcvtzs x8, s4
+; CHECK-NEXT:    fcvtzs x9, s17
+; CHECK-NEXT:    fcvt s4, h18
+; CHECK-NEXT:    fcvt s17, h5
+; CHECK-NEXT:    fcvtzs x10, s3
+; CHECK-NEXT:    mov h3, v5.h[1]
+; CHECK-NEXT:    fcvt s5, h0
+; CHECK-NEXT:    mov h0, v0.h[1]
+; CHECK-NEXT:    mov h18, v6.h[1]
+; CHECK-NEXT:    mov h19, v7.h[1]
+; CHECK-NEXT:    fcvtzs x11, s16
+; CHECK-NEXT:    mov h16, v1.h[1]
 ; CHECK-NEXT:    fcvt s1, h1
-; CHECK-NEXT:    fcvt s7, h7
-; CHECK-NEXT:    fcvt s6, h6
-; CHECK-NEXT:    fcvtzs x12, s16
-; CHECK-NEXT:    frintx s16, s2
-; CHECK-NEXT:    fmov d2, x8
-; CHECK-NEXT:    fcvt s4, h4
+; CHECK-NEXT:    fcvtzs x12, s4
+; CHECK-NEXT:    fcvt s4, h6
+; CHECK-NEXT:    fcvtzs x13, s17
+; CHECK-NEXT:    fcvtzs x14, s5
+; CHECK-NEXT:    fcvt s5, h7
 ; CHECK-NEXT:    fcvt s3, h3
-; CHECK-NEXT:    fcvt s5, h5
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    frintx s7, s7
-; CHECK-NEXT:    frintx s17, s6
+; CHECK-NEXT:    fcvt s7, h2
+; CHECK-NEXT:    fcvt s17, h0
+; CHECK-NEXT:    fcvt s18, h18
+; CHECK-NEXT:    fcvt s16, h16
+; CHECK-NEXT:    fcvt s19, h19
+; CHECK-NEXT:    fcvtzs x15, s1
+; CHECK-NEXT:    fmov d2, x8
+; CHECK-NEXT:    fcvtzs x8, s4
+; CHECK-NEXT:    fmov d0, x10
+; CHECK-NEXT:    fcvtzs x10, s5
 ; CHECK-NEXT:    fmov d6, x9
-; CHECK-NEXT:    mov v2.d[1], x10
-; CHECK-NEXT:    frintx s4, s4
-; CHECK-NEXT:    frintx s18, s3
-; CHECK-NEXT:    frintx s5, s5
-; CHECK-NEXT:    fcvtzs x8, s1
-; CHECK-NEXT:    fcvtzs x9, s7
-; CHECK-NEXT:    fmov d3, x11
-; CHECK-NEXT:    fcvtzs x11, s0
-; CHECK-NEXT:    fmov d7, x12
-; CHECK-NEXT:    fcvtzs x12, s16
-; CHECK-NEXT:    fcvtzs x16, s17
-; CHECK-NEXT:    fcvtzs x17, s4
-; CHECK-NEXT:    fmov d0, x13
-; CHECK-NEXT:    fmov d1, x15
+; CHECK-NEXT:    fcvtzs x9, s3
+; CHECK-NEXT:    fmov d1, x13
+; CHECK-NEXT:    fcvtzs x13, s17
+; CHECK-NEXT:    fcvtzs x17, s7
+; CHECK-NEXT:    fcvtzs x16, s16
 ; CHECK-NEXT:    fcvtzs x18, s18
-; CHECK-NEXT:    fcvtzs x0, s5
-; CHECK-NEXT:    fmov d4, x8
-; CHECK-NEXT:    fmov d5, x9
-; CHECK-NEXT:    mov v0.d[1], x14
-; CHECK-NEXT:    mov v1.d[1], x11
-; CHECK-NEXT:    mov v3.d[1], x12
-; CHECK-NEXT:    mov v4.d[1], x16
+; CHECK-NEXT:    fcvtzs x0, s19
+; CHECK-NEXT:    fmov d3, x14
+; CHECK-NEXT:    fmov d4, x15
+; CHECK-NEXT:    fmov d5, x8
+; CHECK-NEXT:    fmov d7, x10
+; CHECK-NEXT:    mov v0.d[1], x12
+; CHECK-NEXT:    mov v1.d[1], x9
+; CHECK-NEXT:    mov v2.d[1], x11
 ; CHECK-NEXT:    mov v6.d[1], x17
-; CHECK-NEXT:    mov v7.d[1], x18
-; CHECK-NEXT:    mov v5.d[1], x0
+; CHECK-NEXT:    mov v3.d[1], x13
+; CHECK-NEXT:    mov v4.d[1], x16
+; CHECK-NEXT:    mov v5.d[1], x18
+; CHECK-NEXT:    mov v7.d[1], x0
 ; CHECK-NEXT:    ret
   %a = call <16 x i64> @llvm.lrint.v16i64.v16f16(<16 x half> %x)
   ret <16 x i64> %a
@@ -200,170 +230,182 @@ declare <16 x i64> @llvm.lrint.v16i64.v16f16(<16 x half>)
 define <32 x i64> @lrint_v32i64_v32f16(<32 x half> %x) {
 ; CHECK-LABEL: lrint_v32i64_v32f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v4.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ext v5.16b, v2.16b, v2.16b, #8
-; CHECK-NEXT:    ext v6.16b, v3.16b, v3.16b, #8
-; CHECK-NEXT:    ext v7.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    mov h19, v0.h[1]
-; CHECK-NEXT:    fcvt s21, h0
-; CHECK-NEXT:    mov h23, v1.h[2]
-; CHECK-NEXT:    fcvt s22, h1
-; CHECK-NEXT:    fcvt s26, h2
-; CHECK-NEXT:    mov h27, v2.h[1]
-; CHECK-NEXT:    mov h28, v2.h[2]
-; CHECK-NEXT:    mov h16, v4.h[2]
-; CHECK-NEXT:    fcvt s17, h5
-; CHECK-NEXT:    mov h18, v5.h[2]
-; CHECK-NEXT:    mov h20, v6.h[2]
-; CHECK-NEXT:    fcvt s24, h7
-; CHECK-NEXT:    fcvt s25, h6
-; CHECK-NEXT:    fcvt s19, h19
-; CHECK-NEXT:    frintx s22, s22
-; CHECK-NEXT:    fcvt s16, h16
-; CHECK-NEXT:    frintx s17, s17
-; CHECK-NEXT:    fcvt s18, h18
-; CHECK-NEXT:    fcvt s20, h20
-; CHECK-NEXT:    frintx s16, s16
-; CHECK-NEXT:    fcvtzs x12, s17
-; CHECK-NEXT:    frintx s17, s18
-; CHECK-NEXT:    frintx s18, s21
-; CHECK-NEXT:    fcvt s21, h23
-; CHECK-NEXT:    frintx s23, s24
-; CHECK-NEXT:    frintx s24, s25
-; CHECK-NEXT:    frintx s25, s19
-; CHECK-NEXT:    mov h19, v7.h[1]
-; CHECK-NEXT:    fcvtzs x13, s16
-; CHECK-NEXT:    frintx s16, s20
-; CHECK-NEXT:    frintx s20, s26
-; CHECK-NEXT:    fcvtzs x9, s23
-; CHECK-NEXT:    mov h23, v3.h[2]
-; CHECK-NEXT:    fcvt s26, h27
-; CHECK-NEXT:    fcvtzs x15, s24
-; CHECK-NEXT:    fcvtzs x10, s25
-; CHECK-NEXT:    fcvt s24, h28
-; CHECK-NEXT:    mov h25, v3.h[3]
-; CHECK-NEXT:    fcvtzs x14, s17
-; CHECK-NEXT:    frintx s21, s21
-; CHECK-NEXT:    fmov d17, x12
-; CHECK-NEXT:    fcvtzs x12, s16
-; CHECK-NEXT:    fmov d16, x13
+; CHECK-NEXT:    dup v4.2s, v1.s[1]
+; CHECK-NEXT:    fcvtl v5.4s, v0.4h
+; CHECK-NEXT:    dup v6.2s, v1.s[3]
+; CHECK-NEXT:    fcvtl v7.4s, v1.4h
+; CHECK-NEXT:    dup v16.2s, v2.s[3]
+; CHECK-NEXT:    fcvtl v17.4s, v2.4h
+; CHECK-NEXT:    dup v19.2s, v2.s[1]
+; CHECK-NEXT:    dup v18.2s, v0.s[1]
+; CHECK-NEXT:    dup v21.2s, v3.s[1]
+; CHECK-NEXT:    dup v24.2s, v3.s[3]
+; CHECK-NEXT:    fcvtl2 v1.4s, v1.8h
+; CHECK-NEXT:    fcvtl2 v2.4s, v2.8h
+; CHECK-NEXT:    fcvtl v4.4s, v4.4h
+; CHECK-NEXT:    frintx v5.4s, v5.4s
+; CHECK-NEXT:    fcvtl v6.4s, v6.4h
+; CHECK-NEXT:    frintx v7.4s, v7.4s
+; CHECK-NEXT:    fcvtl v16.4s, v16.4h
+; CHECK-NEXT:    frintx v22.4s, v17.4s
+; CHECK-NEXT:    fcvtl v19.4s, v19.4h
+; CHECK-NEXT:    dup v17.2s, v0.s[3]
+; CHECK-NEXT:    fcvtl v21.4s, v21.4h
+; CHECK-NEXT:    fcvtl v24.4s, v24.4h
+; CHECK-NEXT:    frintx v1.4s, v1.4s
+; CHECK-NEXT:    frintx v2.4s, v2.4s
+; CHECK-NEXT:    frintx v20.4s, v4.4s
+; CHECK-NEXT:    fcvtn v4.4h, v5.4s
+; CHECK-NEXT:    frintx v23.4s, v6.4s
+; CHECK-NEXT:    fcvtn v5.4h, v7.4s
+; CHECK-NEXT:    frintx v25.4s, v16.4s
+; CHECK-NEXT:    fcvtn v16.4h, v22.4s
+; CHECK-NEXT:    frintx v26.4s, v19.4s
+; CHECK-NEXT:    fcvtn v6.4h, v20.4s
+; CHECK-NEXT:    fcvtl v20.4s, v3.4h
+; CHECK-NEXT:    fcvt s22, h4
+; CHECK-NEXT:    fcvtn v7.4h, v23.4s
+; CHECK-NEXT:    fcvtl2 v23.4s, v3.8h
+; CHECK-NEXT:    fcvtl v3.4s, v18.4h
+; CHECK-NEXT:    fcvtn v25.4h, v25.4s
+; CHECK-NEXT:    fcvt s27, h5
+; CHECK-NEXT:    fcvtl v18.4s, v17.4h
+; CHECK-NEXT:    frintx v17.4s, v21.4s
+; CHECK-NEXT:    fcvt s29, h16
+; CHECK-NEXT:    mov h16, v16.h[1]
+; CHECK-NEXT:    frintx v20.4s, v20.4s
+; CHECK-NEXT:    fcvtzs x9, s22
+; CHECK-NEXT:    fcvt s28, h6
+; CHECK-NEXT:    fcvt s22, h7
+; CHECK-NEXT:    frintx v19.4s, v3.4s
+; CHECK-NEXT:    fcvtn v3.4h, v26.4s
+; CHECK-NEXT:    mov h21, v25.h[1]
+; CHECK-NEXT:    frintx v23.4s, v23.4s
+; CHECK-NEXT:    fcvtzs x10, s27
+; CHECK-NEXT:    fcvtl2 v26.4s, v0.8h
+; CHECK-NEXT:    fcvt s25, h25
+; CHECK-NEXT:    fcvtn v17.4h, v17.4s
+; CHECK-NEXT:    fcvtn v20.4h, v20.4s
+; CHECK-NEXT:    fcvtzs x12, s28
+; CHECK-NEXT:    fcvtzs x14, s29
 ; CHECK-NEXT:    fcvtzs x13, s22
-; CHECK-NEXT:    fcvt s22, h3
-; CHECK-NEXT:    mov h3, v3.h[1]
-; CHECK-NEXT:    mov h27, v0.h[2]
-; CHECK-NEXT:    mov h28, v2.h[3]
+; CHECK-NEXT:    frintx v22.4s, v24.4s
+; CHECK-NEXT:    fcvt s24, h3
+; CHECK-NEXT:    fcvt s21, h21
+; CHECK-NEXT:    fcvtn v23.4h, v23.4s
+; CHECK-NEXT:    fmov d0, x10
+; CHECK-NEXT:    fcvtzs x15, s25
+; CHECK-NEXT:    mov h25, v17.h[1]
+; CHECK-NEXT:    fcvt s17, h17
+; CHECK-NEXT:    mov h27, v20.h[1]
+; CHECK-NEXT:    fcvt s20, h20
+; CHECK-NEXT:    fcvtn v28.4h, v2.4s
+; CHECK-NEXT:    fcvtn v22.4h, v22.4s
+; CHECK-NEXT:    fcvtzs x10, s24
+; CHECK-NEXT:    frintx v24.4s, v26.4s
+; CHECK-NEXT:    fcvtzs x11, s21
+; CHECK-NEXT:    mov h26, v23.h[1]
 ; CHECK-NEXT:    fcvt s23, h23
-; CHECK-NEXT:    frintx s26, s26
+; CHECK-NEXT:    fcvt s25, h25
+; CHECK-NEXT:    fmov d2, x13
+; CHECK-NEXT:    fcvtzs x13, s17
+; CHECK-NEXT:    fcvt s21, h27
 ; CHECK-NEXT:    fcvtzs x16, s20
-; CHECK-NEXT:    frintx s20, s24
-; CHECK-NEXT:    fcvt s24, h25
-; CHECK-NEXT:    fcvtzs x11, s18
-; CHECK-NEXT:    fmov d18, x14
+; CHECK-NEXT:    fcvtn v27.4h, v1.4s
+; CHECK-NEXT:    mov h20, v22.h[1]
+; CHECK-NEXT:    fcvt s22, h22
+; CHECK-NEXT:    fcvtn v24.4h, v24.4s
+; CHECK-NEXT:    fmov d1, x12
+; CHECK-NEXT:    fcvtzs x0, s23
+; CHECK-NEXT:    fmov d17, x14
+; CHECK-NEXT:    fcvtzs x18, s25
+; CHECK-NEXT:    mov h25, v28.h[1]
+; CHECK-NEXT:    fcvt s23, h28
+; CHECK-NEXT:    fcvtzs x12, s21
+; CHECK-NEXT:    fcvt s21, h26
+; CHECK-NEXT:    fcvt s26, h27
+; CHECK-NEXT:    fcvt s20, h20
+; CHECK-NEXT:    fcvtzs x17, s22
+; CHECK-NEXT:    fcvt s22, h24
+; CHECK-NEXT:    frintx v18.4s, v18.4s
+; CHECK-NEXT:    mov h3, v3.h[1]
+; CHECK-NEXT:    mov h7, v7.h[1]
+; CHECK-NEXT:    fcvt s25, h25
+; CHECK-NEXT:    fcvtn v19.4h, v19.4s
+; CHECK-NEXT:    fcvt s16, h16
 ; CHECK-NEXT:    fcvtzs x14, s21
-; CHECK-NEXT:    frintx s22, s22
-; CHECK-NEXT:    fcvt s3, h3
-; CHECK-NEXT:    fcvt s25, h27
-; CHECK-NEXT:    fcvt s27, h28
-; CHECK-NEXT:    frintx s23, s23
-; CHECK-NEXT:    mov h21, v1.h[3]
-; CHECK-NEXT:    fmov d2, x15
-; CHECK-NEXT:    fcvtzs x15, s26
-; CHECK-NEXT:    fmov d26, x13
-; CHECK-NEXT:    mov h1, v1.h[1]
-; CHECK-NEXT:    fcvtzs x13, s20
-; CHECK-NEXT:    frintx s20, s24
-; CHECK-NEXT:    fmov d24, x14
-; CHECK-NEXT:    fcvtzs x14, s22
-; CHECK-NEXT:    frintx s3, s3
-; CHECK-NEXT:    fmov d22, x16
-; CHECK-NEXT:    frintx s27, s27
-; CHECK-NEXT:    fcvtzs x16, s23
-; CHECK-NEXT:    fcvt s21, h21
-; CHECK-NEXT:    frintx s25, s25
-; CHECK-NEXT:    fcvt s1, h1
-; CHECK-NEXT:    mov h0, v0.h[3]
-; CHECK-NEXT:    mov h23, v7.h[2]
-; CHECK-NEXT:    mov v22.d[1], x15
+; CHECK-NEXT:    fmov d21, x15
+; CHECK-NEXT:    mov h5, v5.h[1]
 ; CHECK-NEXT:    fcvtzs x15, s20
-; CHECK-NEXT:    fmov d20, x13
-; CHECK-NEXT:    fcvtzs x13, s3
-; CHECK-NEXT:    fmov d3, x14
-; CHECK-NEXT:    fcvtzs x14, s27
-; CHECK-NEXT:    fmov d27, x16
-; CHECK-NEXT:    frintx s21, s21
-; CHECK-NEXT:    mov h7, v7.h[3]
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    fcvt s23, h23
-; CHECK-NEXT:    fcvt s19, h19
-; CHECK-NEXT:    mov v27.d[1], x15
-; CHECK-NEXT:    fcvtzs x15, s25
-; CHECK-NEXT:    mov h25, v6.h[3]
+; CHECK-NEXT:    fmov d20, x16
+; CHECK-NEXT:    fcvtzs x16, s22
+; CHECK-NEXT:    fmov d22, x17
+; CHECK-NEXT:    fcvtzs x17, s26
+; CHECK-NEXT:    fmov d26, x0
+; CHECK-NEXT:    fcvtn v18.4h, v18.4s
 ; CHECK-NEXT:    mov h6, v6.h[1]
-; CHECK-NEXT:    mov v3.d[1], x13
-; CHECK-NEXT:    fcvtzs x13, s21
-; CHECK-NEXT:    mov h21, v5.h[1]
-; CHECK-NEXT:    mov h5, v5.h[3]
-; CHECK-NEXT:    mov v20.d[1], x14
-; CHECK-NEXT:    fcvtzs x14, s1
-; CHECK-NEXT:    mov h1, v4.h[1]
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    fcvt s25, h25
+; CHECK-NEXT:    fcvt s3, h3
+; CHECK-NEXT:    mov v20.d[1], x12
+; CHECK-NEXT:    fcvtzs x12, s25
 ; CHECK-NEXT:    fcvt s7, h7
-; CHECK-NEXT:    stp q3, q27, [x8, #192]
+; CHECK-NEXT:    mov v26.d[1], x14
+; CHECK-NEXT:    mov v22.d[1], x15
+; CHECK-NEXT:    fcvtzs x14, s23
+; CHECK-NEXT:    fmov d23, x13
+; CHECK-NEXT:    mov v21.d[1], x11
+; CHECK-NEXT:    mov h4, v4.h[1]
+; CHECK-NEXT:    mov h25, v19.h[1]
 ; CHECK-NEXT:    fcvt s6, h6
-; CHECK-NEXT:    mov h3, v4.h[3]
-; CHECK-NEXT:    stp q22, q20, [x8, #128]
-; CHECK-NEXT:    fcvt s21, h21
+; CHECK-NEXT:    fcvtzs x11, s3
 ; CHECK-NEXT:    fcvt s5, h5
-; CHECK-NEXT:    mov v24.d[1], x13
-; CHECK-NEXT:    mov v26.d[1], x14
+; CHECK-NEXT:    fcvt s19, h19
+; CHECK-NEXT:    fcvtzs x13, s7
+; CHECK-NEXT:    stp q26, q22, [x8, #224]
+; CHECK-NEXT:    mov v23.d[1], x18
+; CHECK-NEXT:    mov h26, v27.h[1]
+; CHECK-NEXT:    fmov d22, x14
 ; CHECK-NEXT:    fcvt s4, h4
-; CHECK-NEXT:    frintx s22, s25
-; CHECK-NEXT:    fmov d20, x12
-; CHECK-NEXT:    fcvt s1, h1
-; CHECK-NEXT:    frintx s6, s6
-; CHECK-NEXT:    fcvt s3, h3
-; CHECK-NEXT:    fcvtzs x12, s0
-; CHECK-NEXT:    frintx s5, s5
-; CHECK-NEXT:    frintx s21, s21
-; CHECK-NEXT:    fmov d0, x11
-; CHECK-NEXT:    stp q26, q24, [x8, #64]
-; CHECK-NEXT:    fmov d24, x15
-; CHECK-NEXT:    frintx s4, s4
-; CHECK-NEXT:    fcvtzs x11, s22
-; CHECK-NEXT:    frintx s22, s23
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    fcvtzs x13, s6
-; CHECK-NEXT:    frintx s3, s3
-; CHECK-NEXT:    frintx s6, s7
-; CHECK-NEXT:    fcvtzs x14, s5
-; CHECK-NEXT:    mov v24.d[1], x12
-; CHECK-NEXT:    frintx s5, s19
-; CHECK-NEXT:    fcvtzs x12, s21
-; CHECK-NEXT:    mov v0.d[1], x10
-; CHECK-NEXT:    fcvtzs x10, s4
-; CHECK-NEXT:    mov v20.d[1], x11
-; CHECK-NEXT:    fcvtzs x11, s22
-; CHECK-NEXT:    mov v2.d[1], x13
-; CHECK-NEXT:    fcvtzs x15, s3
-; CHECK-NEXT:    fcvtzs x13, s1
-; CHECK-NEXT:    mov v18.d[1], x14
+; CHECK-NEXT:    fmov d3, x16
+; CHECK-NEXT:    fcvt s7, h25
 ; CHECK-NEXT:    fcvtzs x14, s6
-; CHECK-NEXT:    stp q0, q24, [x8]
-; CHECK-NEXT:    mov v17.d[1], x12
-; CHECK-NEXT:    fcvtzs x12, s5
-; CHECK-NEXT:    fmov d0, x10
-; CHECK-NEXT:    fmov d1, x11
-; CHECK-NEXT:    stp q2, q20, [x8, #224]
-; CHECK-NEXT:    fmov d2, x9
-; CHECK-NEXT:    mov v16.d[1], x15
-; CHECK-NEXT:    stp q17, q18, [x8, #160]
-; CHECK-NEXT:    mov v0.d[1], x13
+; CHECK-NEXT:    mov v2.d[1], x13
+; CHECK-NEXT:    stp q20, q23, [x8, #192]
+; CHECK-NEXT:    fcvt s23, h26
+; CHECK-NEXT:    mov v22.d[1], x12
+; CHECK-NEXT:    fmov d20, x10
+; CHECK-NEXT:    fcvtzs x10, s16
+; CHECK-NEXT:    mov h16, v24.h[1]
+; CHECK-NEXT:    mov h24, v18.h[1]
+; CHECK-NEXT:    fcvt s18, h18
 ; CHECK-NEXT:    mov v1.d[1], x14
-; CHECK-NEXT:    mov v2.d[1], x12
-; CHECK-NEXT:    stp q0, q16, [x8, #96]
-; CHECK-NEXT:    stp q2, q1, [x8, #32]
+; CHECK-NEXT:    fcvtzs x14, s7
+; CHECK-NEXT:    stp q22, q21, [x8, #160]
+; CHECK-NEXT:    fcvtzs x12, s23
+; CHECK-NEXT:    fmov d21, x17
+; CHECK-NEXT:    fcvt s16, h16
+; CHECK-NEXT:    mov v20.d[1], x11
+; CHECK-NEXT:    fcvtzs x11, s5
+; CHECK-NEXT:    fcvt s22, h24
+; CHECK-NEXT:    mov v17.d[1], x10
+; CHECK-NEXT:    fcvtzs x10, s18
+; CHECK-NEXT:    mov v21.d[1], x12
+; CHECK-NEXT:    fcvtzs x12, s19
+; CHECK-NEXT:    fcvtzs x15, s16
+; CHECK-NEXT:    mov v0.d[1], x11
+; CHECK-NEXT:    fcvtzs x11, s4
+; CHECK-NEXT:    stp q17, q20, [x8, #128]
+; CHECK-NEXT:    fcvtzs x13, s22
+; CHECK-NEXT:    fmov d4, x10
+; CHECK-NEXT:    stp q21, q2, [x8, #96]
+; CHECK-NEXT:    fmov d5, x12
+; CHECK-NEXT:    fmov d2, x9
+; CHECK-NEXT:    stp q0, q1, [x8, #64]
+; CHECK-NEXT:    mov v3.d[1], x15
+; CHECK-NEXT:    mov v4.d[1], x13
+; CHECK-NEXT:    mov v5.d[1], x14
+; CHECK-NEXT:    mov v2.d[1], x11
+; CHECK-NEXT:    stp q3, q4, [x8, #32]
+; CHECK-NEXT:    stp q2, q5, [x8]
 ; CHECK-NEXT:    ret
   %a = call <32 x i64> @llvm.lrint.v32i64.v32f16(<32 x half> %x)
   ret <32 x i64> %a
@@ -371,13 +413,20 @@ define <32 x i64> @lrint_v32i64_v32f16(<32 x half> %x) {
 declare <32 x i64> @llvm.lrint.v32i64.v32f16(<32 x half>)
 
 define <1 x i64> @lrint_v1f32(<1 x float> %x) {
-; CHECK-LABEL: lrint_v1f32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    fcvtzs x8, s0
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: lrint_v1f32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    frintx v0.2s, v0.2s
+; CHECK-SD-NEXT:    fcvtl v0.2d, v0.2s
+; CHECK-SD-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: lrint_v1f32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    frintx s0, s0
+; CHECK-GI-NEXT:    fcvtzs x8, s0
+; CHECK-GI-NEXT:    fmov d0, x8
+; CHECK-GI-NEXT:    ret
   %a = call <1 x i64> @llvm.lrint.v1i64.v1f32(<1 x float> %x)
   ret <1 x i64> %a
 }
@@ -386,14 +435,9 @@ declare <1 x i64> @llvm.lrint.v1i64.v1f32(<1 x float>)
 define <2 x i64> @lrint_v2f32(<2 x float> %x) {
 ; CHECK-LABEL: lrint_v2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    mov s1, v0.s[1]
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    fcvtzs x8, s0
-; CHECK-NEXT:    fcvtzs x9, s1
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    mov v0.d[1], x9
+; CHECK-NEXT:    frintx v0.2s, v0.2s
+; CHECK-NEXT:    fcvtl v0.2d, v0.2s
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
 ; CHECK-NEXT:    ret
   %a = call <2 x i64> @llvm.lrint.v2i64.v2f32(<2 x float> %x)
   ret <2 x i64> %a
@@ -404,20 +448,12 @@ define <4 x i64> @lrint_v4f32(<4 x float> %x) {
 ; CHECK-LABEL: lrint_v4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    mov s3, v0.s[1]
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    mov s2, v1.s[1]
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    frintx s3, s3
-; CHECK-NEXT:    fcvtzs x9, s0
-; CHECK-NEXT:    frintx s2, s2
-; CHECK-NEXT:    fcvtzs x8, s1
-; CHECK-NEXT:    fcvtzs x11, s3
-; CHECK-NEXT:    fmov d0, x9
-; CHECK-NEXT:    fcvtzs x10, s2
-; CHECK-NEXT:    fmov d1, x8
-; CHECK-NEXT:    mov v0.d[1], x11
-; CHECK-NEXT:    mov v1.d[1], x10
+; CHECK-NEXT:    frintx v0.2s, v0.2s
+; CHECK-NEXT:    frintx v1.2s, v1.2s
+; CHECK-NEXT:    fcvtl v0.2d, v0.2s
+; CHECK-NEXT:    fcvtl v1.2d, v1.2s
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v1.2d, v1.2d
 ; CHECK-NEXT:    ret
   %a = call <4 x i64> @llvm.lrint.v4i64.v4f32(<4 x float> %x)
   ret <4 x i64> %a
@@ -429,34 +465,18 @@ define <8 x i64> @lrint_v8f32(<8 x float> %x) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
 ; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    mov s4, v0.s[1]
-; CHECK-NEXT:    mov s7, v1.s[1]
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    mov s5, v2.s[1]
-; CHECK-NEXT:    mov s6, v3.s[1]
-; CHECK-NEXT:    frintx s2, s2
-; CHECK-NEXT:    frintx s3, s3
-; CHECK-NEXT:    frintx s4, s4
-; CHECK-NEXT:    frintx s7, s7
-; CHECK-NEXT:    fcvtzs x9, s0
-; CHECK-NEXT:    fcvtzs x12, s1
-; CHECK-NEXT:    frintx s5, s5
-; CHECK-NEXT:    frintx s6, s6
-; CHECK-NEXT:    fcvtzs x8, s2
-; CHECK-NEXT:    fcvtzs x10, s3
-; CHECK-NEXT:    fcvtzs x11, s4
-; CHECK-NEXT:    fcvtzs x15, s7
-; CHECK-NEXT:    fmov d0, x9
-; CHECK-NEXT:    fmov d2, x12
-; CHECK-NEXT:    fcvtzs x13, s5
-; CHECK-NEXT:    fcvtzs x14, s6
-; CHECK-NEXT:    fmov d1, x8
-; CHECK-NEXT:    fmov d3, x10
-; CHECK-NEXT:    mov v0.d[1], x11
-; CHECK-NEXT:    mov v2.d[1], x15
-; CHECK-NEXT:    mov v1.d[1], x13
-; CHECK-NEXT:    mov v3.d[1], x14
+; CHECK-NEXT:    frintx v0.2s, v0.2s
+; CHECK-NEXT:    frintx v1.2s, v1.2s
+; CHECK-NEXT:    frintx v2.2s, v2.2s
+; CHECK-NEXT:    frintx v3.2s, v3.2s
+; CHECK-NEXT:    fcvtl v0.2d, v0.2s
+; CHECK-NEXT:    fcvtl v1.2d, v1.2s
+; CHECK-NEXT:    fcvtl v4.2d, v2.2s
+; CHECK-NEXT:    fcvtl v3.2d, v3.2s
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v2.2d, v1.2d
+; CHECK-NEXT:    fcvtzs v1.2d, v4.2d
+; CHECK-NEXT:    fcvtzs v3.2d, v3.2d
 ; CHECK-NEXT:    ret
   %a = call <8 x i64> @llvm.lrint.v8i64.v8f32(<8 x float> %x)
   ret <8 x i64> %a
@@ -466,66 +486,34 @@ declare <8 x i64> @llvm.lrint.v8i64.v8f32(<8 x float>)
 define <16 x i64> @lrint_v16i64_v16f32(<16 x float> %x) {
 ; CHECK-LABEL: lrint_v16i64_v16f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v4.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    ext v5.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    ext v4.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    ext v5.16b, v0.16b, v0.16b, #8
 ; CHECK-NEXT:    ext v6.16b, v2.16b, v2.16b, #8
-; CHECK-NEXT:    frintx s7, s0
-; CHECK-NEXT:    ext v16.16b, v3.16b, v3.16b, #8
-; CHECK-NEXT:    mov s0, v0.s[1]
-; CHECK-NEXT:    frintx s17, s4
-; CHECK-NEXT:    mov s4, v4.s[1]
-; CHECK-NEXT:    mov s18, v5.s[1]
-; CHECK-NEXT:    frintx s5, s5
-; CHECK-NEXT:    frintx s19, s6
-; CHECK-NEXT:    fcvtzs x8, s7
-; CHECK-NEXT:    frintx s7, s16
-; CHECK-NEXT:    mov s6, v6.s[1]
-; CHECK-NEXT:    mov s16, v16.s[1]
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    frintx s4, s4
-; CHECK-NEXT:    fcvtzs x9, s17
-; CHECK-NEXT:    frintx s17, s1
-; CHECK-NEXT:    mov s1, v1.s[1]
-; CHECK-NEXT:    frintx s18, s18
-; CHECK-NEXT:    fcvtzs x10, s5
-; CHECK-NEXT:    mov s5, v2.s[1]
-; CHECK-NEXT:    fcvtzs x11, s19
-; CHECK-NEXT:    mov s19, v3.s[1]
-; CHECK-NEXT:    frintx s2, s2
-; CHECK-NEXT:    fcvtzs x12, s7
-; CHECK-NEXT:    frintx s6, s6
-; CHECK-NEXT:    fcvtzs x13, s4
-; CHECK-NEXT:    frintx s4, s3
-; CHECK-NEXT:    frintx s16, s16
-; CHECK-NEXT:    fcvtzs x14, s18
-; CHECK-NEXT:    frintx s18, s1
-; CHECK-NEXT:    fcvtzs x15, s17
-; CHECK-NEXT:    frintx s20, s5
-; CHECK-NEXT:    frintx s17, s19
-; CHECK-NEXT:    fmov d1, x9
-; CHECK-NEXT:    fcvtzs x9, s2
-; CHECK-NEXT:    fmov d5, x11
-; CHECK-NEXT:    fmov d3, x10
-; CHECK-NEXT:    fcvtzs x11, s4
-; CHECK-NEXT:    fcvtzs x10, s0
-; CHECK-NEXT:    fmov d7, x12
-; CHECK-NEXT:    fcvtzs x12, s18
-; CHECK-NEXT:    fcvtzs x17, s6
-; CHECK-NEXT:    fcvtzs x18, s16
-; CHECK-NEXT:    fcvtzs x16, s20
-; CHECK-NEXT:    fcvtzs x0, s17
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d2, x15
-; CHECK-NEXT:    fmov d4, x9
-; CHECK-NEXT:    mov v1.d[1], x13
-; CHECK-NEXT:    fmov d6, x11
-; CHECK-NEXT:    mov v3.d[1], x14
-; CHECK-NEXT:    mov v0.d[1], x10
-; CHECK-NEXT:    mov v5.d[1], x17
-; CHECK-NEXT:    mov v7.d[1], x18
-; CHECK-NEXT:    mov v2.d[1], x12
-; CHECK-NEXT:    mov v4.d[1], x16
-; CHECK-NEXT:    mov v6.d[1], x0
+; CHECK-NEXT:    ext v7.16b, v3.16b, v3.16b, #8
+; CHECK-NEXT:    frintx v0.2s, v0.2s
+; CHECK-NEXT:    frintx v1.2s, v1.2s
+; CHECK-NEXT:    frintx v2.2s, v2.2s
+; CHECK-NEXT:    frintx v3.2s, v3.2s
+; CHECK-NEXT:    frintx v5.2s, v5.2s
+; CHECK-NEXT:    frintx v4.2s, v4.2s
+; CHECK-NEXT:    frintx v6.2s, v6.2s
+; CHECK-NEXT:    frintx v7.2s, v7.2s
+; CHECK-NEXT:    fcvtl v0.2d, v0.2s
+; CHECK-NEXT:    fcvtl v1.2d, v1.2s
+; CHECK-NEXT:    fcvtl v16.2d, v2.2s
+; CHECK-NEXT:    fcvtl v18.2d, v3.2s
+; CHECK-NEXT:    fcvtl v5.2d, v5.2s
+; CHECK-NEXT:    fcvtl v17.2d, v4.2s
+; CHECK-NEXT:    fcvtl v19.2d, v6.2s
+; CHECK-NEXT:    fcvtl v7.2d, v7.2s
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v2.2d, v1.2d
+; CHECK-NEXT:    fcvtzs v4.2d, v16.2d
+; CHECK-NEXT:    fcvtzs v6.2d, v18.2d
+; CHECK-NEXT:    fcvtzs v1.2d, v5.2d
+; CHECK-NEXT:    fcvtzs v3.2d, v17.2d
+; CHECK-NEXT:    fcvtzs v5.2d, v19.2d
+; CHECK-NEXT:    fcvtzs v7.2d, v7.2d
 ; CHECK-NEXT:    ret
   %a = call <16 x i64> @llvm.lrint.v16i64.v16f32(<16 x float> %x)
   ret <16 x i64> %a
@@ -535,134 +523,70 @@ declare <16 x i64> @llvm.lrint.v16i64.v16f32(<16 x float>)
 define <32 x i64> @lrint_v32i64_v32f32(<32 x float> %x) {
 ; CHECK-LABEL: lrint_v32i64_v32f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v16.16b, v2.16b, v2.16b, #8
-; CHECK-NEXT:    ext v20.16b, v5.16b, v5.16b, #8
-; CHECK-NEXT:    ext v17.16b, v3.16b, v3.16b, #8
-; CHECK-NEXT:    ext v18.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    ext v19.16b, v4.16b, v4.16b, #8
-; CHECK-NEXT:    ext v21.16b, v6.16b, v6.16b, #8
-; CHECK-NEXT:    ext v22.16b, v7.16b, v7.16b, #8
-; CHECK-NEXT:    frintx s24, s16
-; CHECK-NEXT:    mov s28, v20.s[1]
-; CHECK-NEXT:    frintx s25, s17
-; CHECK-NEXT:    frintx s26, s18
-; CHECK-NEXT:    frintx s27, s19
-; CHECK-NEXT:    frintx s29, s20
-; CHECK-NEXT:    mov s30, v21.s[1]
-; CHECK-NEXT:    frintx s20, s21
-; CHECK-NEXT:    frintx s21, s22
-; CHECK-NEXT:    mov s23, v22.s[1]
-; CHECK-NEXT:    mov s19, v19.s[1]
-; CHECK-NEXT:    mov s17, v17.s[1]
-; CHECK-NEXT:    fcvtzs x12, s24
-; CHECK-NEXT:    frintx s24, s28
-; CHECK-NEXT:    fcvtzs x13, s25
-; CHECK-NEXT:    mov s25, v7.s[1]
-; CHECK-NEXT:    fcvtzs x9, s26
-; CHECK-NEXT:    fcvtzs x11, s27
-; CHECK-NEXT:    fcvtzs x14, s20
-; CHECK-NEXT:    fcvtzs x15, s21
-; CHECK-NEXT:    frintx s26, s1
-; CHECK-NEXT:    frintx s23, s23
-; CHECK-NEXT:    frintx s27, s7
-; CHECK-NEXT:    frintx s22, s30
-; CHECK-NEXT:    fmov d20, x12
-; CHECK-NEXT:    fcvtzs x12, s24
-; CHECK-NEXT:    mov s24, v6.s[1]
-; CHECK-NEXT:    frintx s25, s25
-; CHECK-NEXT:    frintx s6, s6
-; CHECK-NEXT:    fcvtzs x10, s29
-; CHECK-NEXT:    fmov d7, x11
-; CHECK-NEXT:    fmov d21, x13
-; CHECK-NEXT:    frintx s28, s5
-; CHECK-NEXT:    fcvtzs x11, s23
-; CHECK-NEXT:    fmov d23, x14
-; CHECK-NEXT:    fcvtzs x14, s26
-; CHECK-NEXT:    fmov d26, x15
-; CHECK-NEXT:    fcvtzs x15, s27
-; CHECK-NEXT:    frintx s24, s24
-; CHECK-NEXT:    mov s27, v5.s[1]
-; CHECK-NEXT:    fcvtzs x13, s22
-; CHECK-NEXT:    fcvtzs x17, s25
-; CHECK-NEXT:    frintx s25, s4
-; CHECK-NEXT:    fcvtzs x18, s6
-; CHECK-NEXT:    fmov d6, x10
-; CHECK-NEXT:    frintx s22, s2
-; CHECK-NEXT:    mov v26.d[1], x11
-; CHECK-NEXT:    fmov d5, x14
-; CHECK-NEXT:    fcvtzs x10, s24
-; CHECK-NEXT:    fmov d24, x15
-; CHECK-NEXT:    fcvtzs x14, s28
-; CHECK-NEXT:    frintx s27, s27
-; CHECK-NEXT:    mov v23.d[1], x13
-; CHECK-NEXT:    mov s4, v4.s[1]
-; CHECK-NEXT:    fcvtzs x13, s25
-; CHECK-NEXT:    fmov d25, x18
-; CHECK-NEXT:    mov s16, v16.s[1]
-; CHECK-NEXT:    mov v24.d[1], x17
-; CHECK-NEXT:    fcvtzs x16, s22
-; CHECK-NEXT:    frintx s22, s3
-; CHECK-NEXT:    mov s3, v3.s[1]
-; CHECK-NEXT:    frintx s19, s19
-; CHECK-NEXT:    mov s2, v2.s[1]
-; CHECK-NEXT:    mov v25.d[1], x10
-; CHECK-NEXT:    fcvtzs x10, s27
-; CHECK-NEXT:    frintx s4, s4
-; CHECK-NEXT:    mov v6.d[1], x12
-; CHECK-NEXT:    frintx s17, s17
-; CHECK-NEXT:    mov s18, v18.s[1]
-; CHECK-NEXT:    stp q24, q26, [x8, #224]
-; CHECK-NEXT:    fmov d24, x14
-; CHECK-NEXT:    fcvtzs x11, s22
-; CHECK-NEXT:    ext v22.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    mov s1, v1.s[1]
-; CHECK-NEXT:    frintx s3, s3
-; CHECK-NEXT:    stp q25, q23, [x8, #192]
-; CHECK-NEXT:    frintx s2, s2
-; CHECK-NEXT:    fcvtzs x12, s4
-; CHECK-NEXT:    mov v24.d[1], x10
-; CHECK-NEXT:    fcvtzs x10, s19
-; CHECK-NEXT:    mov s19, v0.s[1]
-; CHECK-NEXT:    frintx s16, s16
-; CHECK-NEXT:    frintx s0, s0
-; CHECK-NEXT:    fmov d4, x11
-; CHECK-NEXT:    mov s27, v22.s[1]
-; CHECK-NEXT:    frintx s22, s22
-; CHECK-NEXT:    frintx s1, s1
-; CHECK-NEXT:    fcvtzs x11, s3
-; CHECK-NEXT:    fcvtzs x14, s2
-; CHECK-NEXT:    frintx s2, s18
-; CHECK-NEXT:    stp q24, q6, [x8, #160]
-; CHECK-NEXT:    fmov d6, x13
-; CHECK-NEXT:    fcvtzs x13, s17
-; CHECK-NEXT:    frintx s17, s19
-; CHECK-NEXT:    fmov d23, x16
-; CHECK-NEXT:    mov v7.d[1], x10
-; CHECK-NEXT:    frintx s3, s27
-; CHECK-NEXT:    fcvtzs x10, s22
-; CHECK-NEXT:    fcvtzs x15, s1
-; CHECK-NEXT:    mov v6.d[1], x12
-; CHECK-NEXT:    fcvtzs x12, s16
-; CHECK-NEXT:    mov v4.d[1], x11
-; CHECK-NEXT:    mov v21.d[1], x13
-; CHECK-NEXT:    fcvtzs x13, s0
-; CHECK-NEXT:    mov v23.d[1], x14
-; CHECK-NEXT:    fcvtzs x14, s17
-; CHECK-NEXT:    fcvtzs x11, s3
-; CHECK-NEXT:    fmov d0, x10
-; CHECK-NEXT:    mov v5.d[1], x15
-; CHECK-NEXT:    stp q6, q7, [x8, #128]
-; CHECK-NEXT:    mov v20.d[1], x12
-; CHECK-NEXT:    fcvtzs x12, s2
-; CHECK-NEXT:    stp q4, q21, [x8, #96]
-; CHECK-NEXT:    fmov d1, x13
-; CHECK-NEXT:    fmov d2, x9
-; CHECK-NEXT:    mov v0.d[1], x11
-; CHECK-NEXT:    stp q23, q20, [x8, #64]
-; CHECK-NEXT:    mov v1.d[1], x14
-; CHECK-NEXT:    mov v2.d[1], x12
-; CHECK-NEXT:    stp q5, q0, [x8, #32]
-; CHECK-NEXT:    stp q1, q2, [x8]
+; CHECK-NEXT:    ext v16.16b, v7.16b, v7.16b, #8
+; CHECK-NEXT:    ext v17.16b, v6.16b, v6.16b, #8
+; CHECK-NEXT:    frintx v7.2s, v7.2s
+; CHECK-NEXT:    frintx v6.2s, v6.2s
+; CHECK-NEXT:    ext v18.16b, v5.16b, v5.16b, #8
+; CHECK-NEXT:    ext v21.16b, v4.16b, v4.16b, #8
+; CHECK-NEXT:    ext v22.16b, v2.16b, v2.16b, #8
+; CHECK-NEXT:    frintx v5.2s, v5.2s
+; CHECK-NEXT:    ext v23.16b, v3.16b, v3.16b, #8
+; CHECK-NEXT:    frintx v4.2s, v4.2s
+; CHECK-NEXT:    ext v19.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v20.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    frintx v16.2s, v16.2s
+; CHECK-NEXT:    frintx v17.2s, v17.2s
+; CHECK-NEXT:    fcvtl v7.2d, v7.2s
+; CHECK-NEXT:    fcvtl v6.2d, v6.2s
+; CHECK-NEXT:    frintx v18.2s, v18.2s
+; CHECK-NEXT:    frintx v21.2s, v21.2s
+; CHECK-NEXT:    frintx v2.2s, v2.2s
+; CHECK-NEXT:    frintx v3.2s, v3.2s
+; CHECK-NEXT:    fcvtl v5.2d, v5.2s
+; CHECK-NEXT:    frintx v23.2s, v23.2s
+; CHECK-NEXT:    fcvtl v4.2d, v4.2s
+; CHECK-NEXT:    frintx v1.2s, v1.2s
+; CHECK-NEXT:    fcvtl v16.2d, v16.2s
+; CHECK-NEXT:    fcvtl v17.2d, v17.2s
+; CHECK-NEXT:    fcvtzs v7.2d, v7.2d
+; CHECK-NEXT:    fcvtzs v6.2d, v6.2d
+; CHECK-NEXT:    fcvtl v18.2d, v18.2s
+; CHECK-NEXT:    fcvtl v21.2d, v21.2s
+; CHECK-NEXT:    frintx v20.2s, v20.2s
+; CHECK-NEXT:    fcvtl v3.2d, v3.2s
+; CHECK-NEXT:    fcvtzs v5.2d, v5.2d
+; CHECK-NEXT:    frintx v0.2s, v0.2s
+; CHECK-NEXT:    fcvtl v2.2d, v2.2s
+; CHECK-NEXT:    fcvtzs v4.2d, v4.2d
+; CHECK-NEXT:    fcvtzs v16.2d, v16.2d
+; CHECK-NEXT:    fcvtzs v17.2d, v17.2d
+; CHECK-NEXT:    fcvtl v1.2d, v1.2s
+; CHECK-NEXT:    fcvtzs v3.2d, v3.2d
+; CHECK-NEXT:    fcvtl v0.2d, v0.2s
+; CHECK-NEXT:    fcvtzs v2.2d, v2.2d
+; CHECK-NEXT:    stp q6, q17, [x8, #192]
+; CHECK-NEXT:    fcvtl v6.2d, v23.2s
+; CHECK-NEXT:    frintx v17.2s, v19.2s
+; CHECK-NEXT:    stp q7, q16, [x8, #224]
+; CHECK-NEXT:    frintx v7.2s, v22.2s
+; CHECK-NEXT:    fcvtzs v16.2d, v18.2d
+; CHECK-NEXT:    fcvtzs v18.2d, v21.2d
+; CHECK-NEXT:    fcvtzs v1.2d, v1.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v6.2d, v6.2d
+; CHECK-NEXT:    stp q5, q16, [x8, #160]
+; CHECK-NEXT:    fcvtl v7.2d, v7.2s
+; CHECK-NEXT:    fcvtl v5.2d, v20.2s
+; CHECK-NEXT:    stp q4, q18, [x8, #128]
+; CHECK-NEXT:    fcvtl v4.2d, v17.2s
+; CHECK-NEXT:    stp q3, q6, [x8, #96]
+; CHECK-NEXT:    fcvtzs v7.2d, v7.2d
+; CHECK-NEXT:    fcvtzs v3.2d, v5.2d
+; CHECK-NEXT:    stp q1, q3, [x8, #32]
+; CHECK-NEXT:    stp q2, q7, [x8, #64]
+; CHECK-NEXT:    fcvtzs v2.2d, v4.2d
+; CHECK-NEXT:    stp q0, q2, [x8]
 ; CHECK-NEXT:    ret
   %a = call <32 x i64> @llvm.lrint.v32i64.v32f32(<32 x float> %x)
   ret <32 x i64> %a
@@ -684,13 +608,8 @@ declare <1 x i64> @llvm.lrint.v1i64.v1f64(<1 x double>)
 define <2 x i64> @lrint_v2f64(<2 x double> %x) {
 ; CHECK-LABEL: lrint_v2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov d1, v0.d[1]
-; CHECK-NEXT:    frintx d0, d0
-; CHECK-NEXT:    frintx d1, d1
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    fcvtzs x9, d1
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    mov v0.d[1], x9
+; CHECK-NEXT:    frintx v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
 ; CHECK-NEXT:    ret
   %a = call <2 x i64> @llvm.lrint.v2i64.v2f64(<2 x double> %x)
   ret <2 x i64> %a
@@ -700,20 +619,10 @@ declare <2 x i64> @llvm.lrint.v2i64.v2f64(<2 x double>)
 define <4 x i64> @lrint_v4f64(<4 x double> %x) {
 ; CHECK-LABEL: lrint_v4f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov d2, v0.d[1]
-; CHECK-NEXT:    mov d3, v1.d[1]
-; CHECK-NEXT:    frintx d0, d0
-; CHECK-NEXT:    frintx d1, d1
-; CHECK-NEXT:    frintx d2, d2
-; CHECK-NEXT:    frintx d3, d3
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    fcvtzs x9, d1
-; CHECK-NEXT:    fcvtzs x10, d2
-; CHECK-NEXT:    fcvtzs x11, d3
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d1, x9
-; CHECK-NEXT:    mov v0.d[1], x10
-; CHECK-NEXT:    mov v1.d[1], x11
+; CHECK-NEXT:    frintx v0.2d, v0.2d
+; CHECK-NEXT:    frintx v1.2d, v1.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v1.2d, v1.2d
 ; CHECK-NEXT:    ret
   %a = call <4 x i64> @llvm.lrint.v4i64.v4f64(<4 x double> %x)
   ret <4 x i64> %a
@@ -723,34 +632,14 @@ declare <4 x i64> @llvm.lrint.v4i64.v4f64(<4 x double>)
 define <8 x i64> @lrint_v8f64(<8 x double> %x) {
 ; CHECK-LABEL: lrint_v8f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov d4, v0.d[1]
-; CHECK-NEXT:    mov d5, v1.d[1]
-; CHECK-NEXT:    mov d6, v2.d[1]
-; CHECK-NEXT:    mov d7, v3.d[1]
-; CHECK-NEXT:    frintx d0, d0
-; CHECK-NEXT:    frintx d1, d1
-; CHECK-NEXT:    frintx d2, d2
-; CHECK-NEXT:    frintx d3, d3
-; CHECK-NEXT:    frintx d4, d4
-; CHECK-NEXT:    frintx d5, d5
-; CHECK-NEXT:    frintx d6, d6
-; CHECK-NEXT:    frintx d7, d7
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    fcvtzs x9, d1
-; CHECK-NEXT:    fcvtzs x10, d2
-; CHECK-NEXT:    fcvtzs x11, d3
-; CHECK-NEXT:    fcvtzs x12, d4
-; CHECK-NEXT:    fcvtzs x13, d5
-; CHECK-NEXT:    fcvtzs x14, d6
-; CHECK-NEXT:    fcvtzs x15, d7
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d1, x9
-; CHECK-NEXT:    fmov d2, x10
-; CHECK-NEXT:    fmov d3, x11
-; CHECK-NEXT:    mov v0.d[1], x12
-; CHECK-NEXT:    mov v1.d[1], x13
-; CHECK-NEXT:    mov v2.d[1], x14
-; CHECK-NEXT:    mov v3.d[1], x15
+; CHECK-NEXT:    frintx v0.2d, v0.2d
+; CHECK-NEXT:    frintx v1.2d, v1.2d
+; CHECK-NEXT:    frintx v2.2d, v2.2d
+; CHECK-NEXT:    frintx v3.2d, v3.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v1.2d, v1.2d
+; CHECK-NEXT:    fcvtzs v2.2d, v2.2d
+; CHECK-NEXT:    fcvtzs v3.2d, v3.2d
 ; CHECK-NEXT:    ret
   %a = call <8 x i64> @llvm.lrint.v8i64.v8f64(<8 x double> %x)
   ret <8 x i64> %a
@@ -760,62 +649,22 @@ declare <8 x i64> @llvm.lrint.v8i64.v8f64(<8 x double>)
 define <16 x i64> @lrint_v16f64(<16 x double> %x) {
 ; CHECK-LABEL: lrint_v16f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov d16, v0.d[1]
-; CHECK-NEXT:    mov d17, v1.d[1]
-; CHECK-NEXT:    frintx d0, d0
-; CHECK-NEXT:    frintx d1, d1
-; CHECK-NEXT:    frintx d18, d2
-; CHECK-NEXT:    mov d2, v2.d[1]
-; CHECK-NEXT:    frintx d19, d3
-; CHECK-NEXT:    mov d3, v3.d[1]
-; CHECK-NEXT:    frintx d16, d16
-; CHECK-NEXT:    frintx d17, d17
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    frintx d0, d4
-; CHECK-NEXT:    mov d4, v4.d[1]
-; CHECK-NEXT:    fcvtzs x9, d1
-; CHECK-NEXT:    frintx d1, d5
-; CHECK-NEXT:    mov d5, v5.d[1]
-; CHECK-NEXT:    fcvtzs x12, d18
-; CHECK-NEXT:    frintx d2, d2
-; CHECK-NEXT:    fcvtzs x13, d19
-; CHECK-NEXT:    frintx d18, d3
-; CHECK-NEXT:    fcvtzs x10, d16
-; CHECK-NEXT:    mov d16, v6.d[1]
-; CHECK-NEXT:    fcvtzs x11, d17
-; CHECK-NEXT:    mov d17, v7.d[1]
-; CHECK-NEXT:    frintx d6, d6
-; CHECK-NEXT:    frintx d7, d7
-; CHECK-NEXT:    frintx d4, d4
-; CHECK-NEXT:    frintx d5, d5
-; CHECK-NEXT:    fcvtzs x14, d0
-; CHECK-NEXT:    fcvtzs x15, d1
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d1, x9
-; CHECK-NEXT:    frintx d16, d16
-; CHECK-NEXT:    fcvtzs x9, d2
-; CHECK-NEXT:    fmov d2, x12
-; CHECK-NEXT:    frintx d17, d17
-; CHECK-NEXT:    fcvtzs x8, d6
-; CHECK-NEXT:    fcvtzs x12, d7
-; CHECK-NEXT:    fmov d3, x13
-; CHECK-NEXT:    fcvtzs x13, d18
-; CHECK-NEXT:    fcvtzs x16, d4
-; CHECK-NEXT:    fcvtzs x17, d5
-; CHECK-NEXT:    fmov d4, x14
-; CHECK-NEXT:    fmov d5, x15
-; CHECK-NEXT:    fcvtzs x18, d16
-; CHECK-NEXT:    mov v0.d[1], x10
-; CHECK-NEXT:    mov v1.d[1], x11
-; CHECK-NEXT:    fcvtzs x0, d17
-; CHECK-NEXT:    fmov d6, x8
-; CHECK-NEXT:    fmov d7, x12
-; CHECK-NEXT:    mov v2.d[1], x9
-; CHECK-NEXT:    mov v3.d[1], x13
-; CHECK-NEXT:    mov v4.d[1], x16
-; CHECK-NEXT:    mov v5.d[1], x17
-; CHECK-NEXT:    mov v6.d[1], x18
-; CHECK-NEXT:    mov v7.d[1], x0
+; CHECK-NEXT:    frintx v0.2d, v0.2d
+; CHECK-NEXT:    frintx v1.2d, v1.2d
+; CHECK-NEXT:    frintx v2.2d, v2.2d
+; CHECK-NEXT:    frintx v3.2d, v3.2d
+; CHECK-NEXT:    frintx v4.2d, v4.2d
+; CHECK-NEXT:    frintx v5.2d, v5.2d
+; CHECK-NEXT:    frintx v6.2d, v6.2d
+; CHECK-NEXT:    frintx v7.2d, v7.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v1.2d, v1.2d
+; CHECK-NEXT:    fcvtzs v2.2d, v2.2d
+; CHECK-NEXT:    fcvtzs v3.2d, v3.2d
+; CHECK-NEXT:    fcvtzs v4.2d, v4.2d
+; CHECK-NEXT:    fcvtzs v5.2d, v5.2d
+; CHECK-NEXT:    fcvtzs v6.2d, v6.2d
+; CHECK-NEXT:    fcvtzs v7.2d, v7.2d
 ; CHECK-NEXT:    ret
   %a = call <16 x i64> @llvm.lrint.v16i64.v16f64(<16 x double> %x)
   ret <16 x i64> %a
@@ -825,130 +674,50 @@ declare <16 x i64> @llvm.lrint.v16i64.v16f64(<16 x double>)
 define <32 x i64> @lrint_v32f64(<32 x double> %x) {
 ; CHECK-LABEL: lrint_v32f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintx d20, d0
-; CHECK-NEXT:    frintx d22, d3
-; CHECK-NEXT:    frintx d21, d4
+; CHECK-NEXT:    ldp q17, q16, [sp, #96]
+; CHECK-NEXT:    frintx v7.2d, v7.2d
 ; CHECK-NEXT:    ldp q19, q18, [sp, #64]
-; CHECK-NEXT:    frintx d23, d5
-; CHECK-NEXT:    ldp q27, q26, [sp, #96]
-; CHECK-NEXT:    mov d4, v4.d[1]
-; CHECK-NEXT:    ldp q16, q17, [sp, #32]
-; CHECK-NEXT:    mov d5, v5.d[1]
-; CHECK-NEXT:    fcvtzs x9, d20
-; CHECK-NEXT:    frintx d20, d6
-; CHECK-NEXT:    fcvtzs x11, d22
-; CHECK-NEXT:    frintx d22, d19
-; CHECK-NEXT:    fcvtzs x12, d21
-; CHECK-NEXT:    fcvtzs x10, d23
-; CHECK-NEXT:    mov d21, v26.d[1]
-; CHECK-NEXT:    frintx d23, d27
-; CHECK-NEXT:    mov d27, v27.d[1]
-; CHECK-NEXT:    frintx d24, d16
-; CHECK-NEXT:    mov d19, v19.d[1]
-; CHECK-NEXT:    frintx d25, d17
-; CHECK-NEXT:    fcvtzs x13, d20
-; CHECK-NEXT:    mov d20, v18.d[1]
-; CHECK-NEXT:    frintx d18, d18
-; CHECK-NEXT:    fcvtzs x16, d22
-; CHECK-NEXT:    frintx d22, d26
-; CHECK-NEXT:    mov d16, v16.d[1]
-; CHECK-NEXT:    frintx d21, d21
-; CHECK-NEXT:    fcvtzs x17, d23
-; CHECK-NEXT:    frintx d23, d27
-; CHECK-NEXT:    fcvtzs x14, d24
-; CHECK-NEXT:    frintx d26, d19
-; CHECK-NEXT:    fmov d19, x11
-; CHECK-NEXT:    frintx d20, d20
-; CHECK-NEXT:    mov d27, v17.d[1]
-; CHECK-NEXT:    fcvtzs x15, d25
-; CHECK-NEXT:    ldp q25, q24, [sp]
-; CHECK-NEXT:    fcvtzs x11, d22
-; CHECK-NEXT:    fmov d17, x12
-; CHECK-NEXT:    fcvtzs x12, d21
-; CHECK-NEXT:    fcvtzs x0, d23
-; CHECK-NEXT:    fmov d23, x14
-; CHECK-NEXT:    fcvtzs x14, d18
-; CHECK-NEXT:    fmov d18, x17
-; CHECK-NEXT:    fcvtzs x17, d20
-; CHECK-NEXT:    frintx d21, d7
-; CHECK-NEXT:    fcvtzs x18, d26
-; CHECK-NEXT:    fmov d20, x11
-; CHECK-NEXT:    frintx d22, d25
-; CHECK-NEXT:    frintx d26, d27
-; CHECK-NEXT:    frintx d16, d16
-; CHECK-NEXT:    mov v18.d[1], x0
-; CHECK-NEXT:    mov d25, v25.d[1]
-; CHECK-NEXT:    mov d7, v7.d[1]
-; CHECK-NEXT:    mov d6, v6.d[1]
-; CHECK-NEXT:    mov d0, v0.d[1]
-; CHECK-NEXT:    mov v20.d[1], x12
-; CHECK-NEXT:    fcvtzs x11, d21
-; CHECK-NEXT:    fmov d21, x15
-; CHECK-NEXT:    fcvtzs x12, d22
-; CHECK-NEXT:    fmov d22, x16
-; CHECK-NEXT:    fcvtzs x15, d26
-; CHECK-NEXT:    fmov d26, x14
-; CHECK-NEXT:    fcvtzs x14, d16
-; CHECK-NEXT:    frintx d25, d25
-; CHECK-NEXT:    frintx d7, d7
-; CHECK-NEXT:    mov d16, v1.d[1]
-; CHECK-NEXT:    mov d3, v3.d[1]
-; CHECK-NEXT:    stp q18, q20, [x8, #224]
-; CHECK-NEXT:    mov d18, v24.d[1]
-; CHECK-NEXT:    mov v22.d[1], x18
-; CHECK-NEXT:    mov v26.d[1], x17
-; CHECK-NEXT:    frintx d24, d24
-; CHECK-NEXT:    mov v21.d[1], x15
-; CHECK-NEXT:    mov v23.d[1], x14
-; CHECK-NEXT:    frintx d20, d2
-; CHECK-NEXT:    mov d2, v2.d[1]
-; CHECK-NEXT:    frintx d6, d6
-; CHECK-NEXT:    frintx d5, d5
-; CHECK-NEXT:    frintx d4, d4
-; CHECK-NEXT:    frintx d18, d18
-; CHECK-NEXT:    frintx d1, d1
-; CHECK-NEXT:    frintx d3, d3
-; CHECK-NEXT:    stp q22, q26, [x8, #192]
-; CHECK-NEXT:    fmov d22, x10
-; CHECK-NEXT:    fcvtzs x10, d24
-; CHECK-NEXT:    stp q23, q21, [x8, #160]
-; CHECK-NEXT:    fmov d21, x11
-; CHECK-NEXT:    fmov d24, x13
-; CHECK-NEXT:    frintx d2, d2
-; CHECK-NEXT:    fcvtzs x13, d6
-; CHECK-NEXT:    frintx d6, d16
-; CHECK-NEXT:    fcvtzs x11, d18
-; CHECK-NEXT:    fmov d18, x12
-; CHECK-NEXT:    fcvtzs x12, d25
-; CHECK-NEXT:    fmov d23, x10
-; CHECK-NEXT:    fcvtzs x10, d7
-; CHECK-NEXT:    fcvtzs x14, d5
-; CHECK-NEXT:    frintx d0, d0
-; CHECK-NEXT:    fcvtzs x15, d3
-; CHECK-NEXT:    mov v24.d[1], x13
-; CHECK-NEXT:    fcvtzs x13, d2
-; CHECK-NEXT:    fmov d2, x9
-; CHECK-NEXT:    mov v23.d[1], x11
-; CHECK-NEXT:    fcvtzs x11, d4
-; CHECK-NEXT:    mov v18.d[1], x12
-; CHECK-NEXT:    fcvtzs x12, d20
-; CHECK-NEXT:    mov v21.d[1], x10
-; CHECK-NEXT:    fcvtzs x10, d1
-; CHECK-NEXT:    mov v22.d[1], x14
-; CHECK-NEXT:    fcvtzs x14, d6
-; CHECK-NEXT:    mov v19.d[1], x15
-; CHECK-NEXT:    stp q18, q23, [x8, #128]
-; CHECK-NEXT:    mov v17.d[1], x11
-; CHECK-NEXT:    fcvtzs x11, d0
-; CHECK-NEXT:    stp q24, q21, [x8, #96]
-; CHECK-NEXT:    fmov d0, x12
-; CHECK-NEXT:    fmov d1, x10
-; CHECK-NEXT:    stp q17, q22, [x8, #64]
-; CHECK-NEXT:    mov v0.d[1], x13
-; CHECK-NEXT:    mov v1.d[1], x14
-; CHECK-NEXT:    mov v2.d[1], x11
-; CHECK-NEXT:    stp q0, q19, [x8, #32]
-; CHECK-NEXT:    stp q2, q1, [x8]
+; CHECK-NEXT:    frintx v6.2d, v6.2d
+; CHECK-NEXT:    ldp q21, q20, [sp, #32]
+; CHECK-NEXT:    frintx v5.2d, v5.2d
+; CHECK-NEXT:    frintx v16.2d, v16.2d
+; CHECK-NEXT:    frintx v17.2d, v17.2d
+; CHECK-NEXT:    frintx v4.2d, v4.2d
+; CHECK-NEXT:    frintx v18.2d, v18.2d
+; CHECK-NEXT:    frintx v19.2d, v19.2d
+; CHECK-NEXT:    frintx v3.2d, v3.2d
+; CHECK-NEXT:    ldp q23, q22, [sp]
+; CHECK-NEXT:    frintx v20.2d, v20.2d
+; CHECK-NEXT:    frintx v21.2d, v21.2d
+; CHECK-NEXT:    frintx v2.2d, v2.2d
+; CHECK-NEXT:    frintx v1.2d, v1.2d
+; CHECK-NEXT:    fcvtzs v16.2d, v16.2d
+; CHECK-NEXT:    fcvtzs v17.2d, v17.2d
+; CHECK-NEXT:    frintx v0.2d, v0.2d
+; CHECK-NEXT:    frintx v22.2d, v22.2d
+; CHECK-NEXT:    fcvtzs v18.2d, v18.2d
+; CHECK-NEXT:    frintx v23.2d, v23.2d
+; CHECK-NEXT:    fcvtzs v19.2d, v19.2d
+; CHECK-NEXT:    fcvtzs v20.2d, v20.2d
+; CHECK-NEXT:    fcvtzs v7.2d, v7.2d
+; CHECK-NEXT:    fcvtzs v6.2d, v6.2d
+; CHECK-NEXT:    fcvtzs v5.2d, v5.2d
+; CHECK-NEXT:    fcvtzs v4.2d, v4.2d
+; CHECK-NEXT:    stp q17, q16, [x8, #224]
+; CHECK-NEXT:    fcvtzs v16.2d, v21.2d
+; CHECK-NEXT:    fcvtzs v3.2d, v3.2d
+; CHECK-NEXT:    fcvtzs v17.2d, v22.2d
+; CHECK-NEXT:    fcvtzs v2.2d, v2.2d
+; CHECK-NEXT:    fcvtzs v1.2d, v1.2d
+; CHECK-NEXT:    stp q19, q18, [x8, #192]
+; CHECK-NEXT:    fcvtzs v18.2d, v23.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    stp q4, q5, [x8, #64]
+; CHECK-NEXT:    stp q6, q7, [x8, #96]
+; CHECK-NEXT:    stp q2, q3, [x8, #32]
+; CHECK-NEXT:    stp q0, q1, [x8]
+; CHECK-NEXT:    stp q18, q17, [x8, #128]
+; CHECK-NEXT:    stp q16, q20, [x8, #160]
 ; CHECK-NEXT:    ret
  %a = call <32 x i64> @llvm.lrint.v32i64.v32f64(<32 x double> %x)
   ret <32 x i64> %a



More information about the llvm-commits mailing list