[llvm] Reapply "[AArch64] Wrap integer SCALAR_TO_VECTOR nodes in bitcasts (#172837)" (#183380) (PR #184403)

via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 4 04:31:23 PST 2026


https://github.com/Lukacma updated https://github.com/llvm/llvm-project/pull/184403

>From 3da579692618945caf2710a1145ceeaabd164756 Mon Sep 17 00:00:00 2001
From: Marian Lukac <Marian.Lukac at arm.com>
Date: Tue, 3 Mar 2026 18:02:22 +0000
Subject: [PATCH 1/2] Reapply "[AArch64] Wrap integer SCALAR_TO_VECTOR nodes in
 bitcasts  (#172837)" (#183380)

This reverts commit b7ce37c6703f2d82376f50f82a05b807a0ad90ad.
This reverts commit b7ce37c6703f2d82376f50f82a05b807a0ad90ad.
The [issue](https://github.com/llvm/llvm-project/pull/172837#issuecomment-3961532435)
this patch revealed was fixed by [this patch](https://github.com/llvm/llvm-project/pull/183549).
---
 .../Target/AArch64/AArch64ISelDAGToDAG.cpp    |   62 +
 .../lib/Target/AArch64/AArch64InstrAtomics.td |    4 +-
 .../lib/Target/AArch64/AArch64InstrFormats.td |   26 +-
 llvm/lib/Target/AArch64/AArch64InstrInfo.td   |   79 +-
 .../lib/Target/AArch64/AArch64SVEInstrInfo.td |    2 +-
 llvm/test/CodeGen/AArch64/aarch64-addv.ll     |    9 +-
 .../AArch64/aarch64-matrix-umull-smull.ll     |    8 +-
 llvm/test/CodeGen/AArch64/aarch64-pmull2.ll   |    4 +-
 .../aarch64-scal-to-vec-bitcast-insert.ll     |   12 +-
 .../AArch64/arm64-cvt-simd-intrinsics.ll      | 1206 ++++++++++++++++-
 ...arm64-fixed-point-scalar-cvt-dagcombine.ll |    3 +-
 llvm/test/CodeGen/AArch64/arm64-neon-copy.ll  |   57 +-
 .../CodeGen/AArch64/arm64-neon-select_cc.ll   |   24 +-
 llvm/test/CodeGen/AArch64/arm64-neon-v8.1a.ll |   56 +-
 llvm/test/CodeGen/AArch64/arm64-vcvt.ll       |   30 +-
 llvm/test/CodeGen/AArch64/arm64-vshift.ll     |    9 +-
 llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll  |   24 +-
 llvm/test/CodeGen/AArch64/bitcast-extend.ll   |    3 +-
 .../AArch64/concat-vector-add-combine.ll      |   20 +-
 llvm/test/CodeGen/AArch64/ctpop.ll            |   48 +-
 .../CodeGen/AArch64/fp-intrinsics-vector.ll   |    6 +-
 .../test/CodeGen/AArch64/fptosi-sat-vector.ll |   17 +-
 .../test/CodeGen/AArch64/fptoui-sat-vector.ll |   17 +-
 llvm/test/CodeGen/AArch64/fsh.ll              |   16 +-
 .../AArch64/ragreedy-local-interval-cost.ll   |  126 +-
 llvm/test/CodeGen/AArch64/sext.ll             |   42 +-
 .../AArch64/sve-fixed-length-fp-to-int.ll     |    6 +-
 .../AArch64/sve-fixed-vector-llrint.ll        |  357 +++--
 .../CodeGen/AArch64/sve-fixed-vector-lrint.ll | 1028 +++++++-------
 llvm/test/CodeGen/AArch64/vector-llrint.ll    |   17 +-
 llvm/test/CodeGen/AArch64/vector-lrint.ll     |  507 ++++---
 llvm/test/CodeGen/AArch64/zext.ll             |   38 +-
 32 files changed, 2515 insertions(+), 1348 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 67f4e127b0c87..a972254d4dad3 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -61,6 +61,7 @@ class AArch64DAGToDAGISel : public SelectionDAGISel {
   }
 
   void Select(SDNode *Node) override;
+  void PreprocessISelDAG() override;
 
   /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
   /// inline asm expressions.
@@ -537,6 +538,29 @@ char AArch64DAGToDAGISelLegacy::ID = 0;
 
 INITIALIZE_PASS(AArch64DAGToDAGISelLegacy, DEBUG_TYPE, PASS_NAME, false, false)
 
+/// addBitcastHints - This method adds bitcast hints to the operands of a node
+/// to help instruction selector determine which operands are in Neon registers.
+static SDValue addBitcastHints(SelectionDAG &DAG, SDNode &N) {
+  SDLoc DL(&N);
+  auto getFloatVT = [&](EVT VT) {
+    EVT ScalarVT = VT.getScalarType();
+    assert((ScalarVT == MVT::i32 || ScalarVT == MVT::i64) && "Unexpected VT");
+    return VT.changeElementType(*(DAG.getContext()),
+                                ScalarVT == MVT::i32 ? MVT::f32 : MVT::f64);
+  };
+  SmallVector<SDValue, 2> NewOps;
+  NewOps.reserve(N.getNumOperands());
+
+  for (unsigned I = 0, E = N.getNumOperands(); I < E; ++I) {
+    auto bitcasted = DAG.getBitcast(getFloatVT(N.getOperand(I).getValueType()),
+                                    N.getOperand(I));
+    NewOps.push_back(bitcasted);
+  }
+  EVT OrigVT = N.getValueType(0);
+  SDValue OpNode = DAG.getNode(N.getOpcode(), DL, getFloatVT(OrigVT), NewOps);
+  return DAG.getBitcast(OrigVT, OpNode);
+}
+
 /// isIntImmediate - This method tests to see if the node is a constant
 /// operand. If so Imm will receive the 32-bit value.
 static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
@@ -7844,3 +7868,41 @@ bool AArch64DAGToDAGISel::SelectCmpBranchExtOperand(SDValue N, SDValue &Reg,
 
   return false;
 }
+
+void AArch64DAGToDAGISel::PreprocessISelDAG() {
+  bool MadeChange = false;
+  for (SDNode &N : llvm::make_early_inc_range(CurDAG->allnodes())) {
+    if (N.use_empty())
+      continue;
+
+    SDValue Result;
+    switch (N.getOpcode()) {
+    case ISD::SCALAR_TO_VECTOR: {
+      EVT ScalarTy = N.getValueType(0).getVectorElementType();
+      if ((ScalarTy == MVT::i32 || ScalarTy == MVT::i64) &&
+          ScalarTy == N.getOperand(0).getValueType())
+        Result = addBitcastHints(*CurDAG, N);
+
+      break;
+    }
+    default:
+      break;
+    }
+
+    if (Result) {
+      LLVM_DEBUG(dbgs() << "AArch64 DAG preprocessing replacing:\nOld:    ");
+      LLVM_DEBUG(N.dump(CurDAG));
+      LLVM_DEBUG(dbgs() << "\nNew: ");
+      LLVM_DEBUG(Result.dump(CurDAG));
+      LLVM_DEBUG(dbgs() << "\n");
+
+      CurDAG->ReplaceAllUsesOfValueWith(SDValue(&N, 0), Result);
+      MadeChange = true;
+    }
+  }
+
+  if (MadeChange)
+    CurDAG->RemoveDeadNodes();
+
+  SelectionDAGISel::PreprocessISelDAG();
+}
diff --git a/llvm/lib/Target/AArch64/AArch64InstrAtomics.td b/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
index 21982300f5213..2187f21abb70f 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
@@ -577,10 +577,10 @@ let Predicates = [HasRCPC3, HasNEON] in {
   def : Pat<(vector_insert (v2f64 VecListOne128:$Rd),
                 (f64 (bitconvert (i64 (acquiring_load<atomic_load_nonext_64> GPR64sp:$Rn)))), (i64 VectorIndexD:$idx)),
             (LDAP1 VecListOne128:$Rd, VectorIndexD:$idx, GPR64sp:$Rn)>;
-  def : Pat<(v1i64 (scalar_to_vector
+  def : Pat<(v1i64 (scalar_to_vector_v1f64
                 (i64 (acquiring_load<atomic_load_nonext_64> GPR64sp:$Rn)))),
             (EXTRACT_SUBREG (LDAP1 (v2i64 (IMPLICIT_DEF)), (i64 0), GPR64sp:$Rn), dsub)>;
-  def : Pat<(v1f64 (scalar_to_vector
+  def : Pat<(v1f64 (scalar_to_vector_v1f64
                 (f64 (bitconvert (i64 (acquiring_load<atomic_load_nonext_64> GPR64sp:$Rn)))))),
             (EXTRACT_SUBREG (LDAP1 (v2f64 (IMPLICIT_DEF)), (i64 0), GPR64sp:$Rn), dsub)>;
 
diff --git a/llvm/lib/Target/AArch64/AArch64InstrFormats.td b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
index 7d4e034ca16c8..19332507efe1d 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
@@ -200,10 +200,34 @@ def dup_v4f32 :
              [(v2f32 (extract_subvector (v4f32 (AArch64duplane32 (v4f32 node:$LHS), node:$RHS)), (i64 0))),
               (v2f32 (AArch64duplane32 (v4f32 node:$LHS), node:$RHS))]>;
 
+// Match scalar_to_vector values, optionally wrapped in a bitcast through
+// f32/f64.
+def scalar_to_vector_v1f64 : PatFrags<(ops node:$src),
+                          [(scalar_to_vector node:$src),
+                           (bitconvert (v1f64 (scalar_to_vector (f64 (bitconvert node:$src)))))]>;
+def scalar_to_vector_v2f32 : PatFrags<(ops node:$src),
+                          [(scalar_to_vector node:$src),
+                           (bitconvert (v2f32 (scalar_to_vector (f32 (bitconvert node:$src)))))]>;
+def scalar_to_vector_v2f64 : PatFrags<(ops node:$src),
+                          [(scalar_to_vector node:$src),
+                           (bitconvert (v2f64 (scalar_to_vector (f64 (bitconvert node:$src)))))]>;
+def scalar_to_vector_v4f32 : PatFrags<(ops node:$src),
+                          [(scalar_to_vector node:$src),
+                           (bitconvert (v4f32 (scalar_to_vector (f32 (bitconvert node:$src)))))]>;
+
 // Match either a scalar_to_vector (from SDAG) or a vector_insert of undef (from GISel)
 def vec_ins_or_scal_vec : PatFrags<(ops node:$src),
                           [(vector_insert undef, node:$src, (i64 0)),
                            (scalar_to_vector node:$src)]>;
+def vec_ins_or_scal_vec_v2f32 : PatFrags<(ops node:$src),
+                             [(vector_insert undef, node:$src, (i64 0)),
+                              (scalar_to_vector_v2f32 node:$src)]>;
+def vec_ins_or_scal_vec_v2f64 : PatFrags<(ops node:$src),
+                              [(vector_insert undef, node:$src, (i64 0)),
+                               (scalar_to_vector_v2f64 node:$src)]>;
+def vec_ins_or_scal_vec_v4f32 : PatFrags<(ops node:$src),
+                              [(vector_insert undef, node:$src, (i64 0)),
+                               (scalar_to_vector_v4f32 node:$src)]>;
 
 //===----------------------------------------------------------------------===//
 // Asm Operand Classes.
@@ -8804,7 +8828,7 @@ multiclass SIMDScalarDUP<string asm> {
     let Inst{19-16} = 0b1000;
   }
 
-  def : Pat<(v1i64 (scalar_to_vector (i64 (vector_extract (v2i64 V128:$src),
+  def : Pat<(v1i64 (scalar_to_vector_v1f64 (i64 (vector_extract (v2i64 V128:$src),
                                                           VectorIndexD:$idx)))),
             (!cast<Instruction>(NAME # i64) V128:$src, VectorIndexD:$idx)>;
 
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index ace85b04595b8..9709ef2401965 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -4353,7 +4353,8 @@ multiclass LoadInsertVTPatterns<SDPatternOperator LoadOp, ValueType VT, ValueTyp
                                 Instruction LoadInst, Instruction UnscaledLoadInst,
                                 Instruction ROWLoadInst, Instruction ROXLoadInst,
                                 ROAddrMode ro, ComplexPattern Addr, ComplexPattern UnscaledAddr,
-                                Operand AddrImm, SubRegIndex SubReg> {
+                                Operand AddrImm, SubRegIndex SubReg,
+                                SDPatternOperator VecInsFrag> {
   // Scaled
   def : Pat <(vector_insert (VT immAllZerosV),
                 (ScalarVT (LoadOp (Addr GPR64sp:$Rn, AddrImm:$offset))), (i64 0)),
@@ -4372,74 +4373,85 @@ multiclass LoadInsertVTPatterns<SDPatternOperator LoadOp, ValueType VT, ValueTyp
              (SUBREG_TO_REG (ROXLoadInst GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend), SubReg)>;
 
   // Undef equivalents of the patterns above.
-  def : Pat <(VT (vec_ins_or_scal_vec
+  def : Pat <(VT (VecInsFrag
                 (ScalarVT (LoadOp (Addr GPR64sp:$Rn, AddrImm:$offset))))),
             (SUBREG_TO_REG (LoadInst GPR64sp:$Rn, AddrImm:$offset), SubReg)>;
-  def : Pat <(VT (vec_ins_or_scal_vec
+  def : Pat <(VT (VecInsFrag
                  (ScalarVT (LoadOp (UnscaledAddr GPR64sp:$Rn, simm9:$offset))))),
              (SUBREG_TO_REG (UnscaledLoadInst GPR64sp:$Rn, simm9:$offset), SubReg)>;
-  def : Pat <(VT (vec_ins_or_scal_vec
+  def : Pat <(VT (VecInsFrag
                  (ScalarVT (LoadOp (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))))),
              (SUBREG_TO_REG (ROWLoadInst GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend), SubReg)>;
-  def : Pat <(VT (vec_ins_or_scal_vec
+  def : Pat <(VT (VecInsFrag
                  (ScalarVT (LoadOp (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))))),
              (SUBREG_TO_REG (ROXLoadInst GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend), SubReg)>;
 }
 
 multiclass LoadInsertPatterns<SDPatternOperator LoadOp, ValueType VT, ValueType HVT, ValueType SVT,
-                              ValueType ScalarVT, Instruction LoadInst, Instruction UnscaledLoadInst,
-                              Instruction ROWLoadInst, Instruction ROXLoadInst,
-                              ROAddrMode ro, ComplexPattern Addr, ComplexPattern UnscaledAddr,
-                              Operand AddrImm, SubRegIndex SubReg> {
+                              ValueType ScalarVT, SDPatternOperator VecInsVT,
+                              SDPatternOperator VecInsHVT, Instruction LoadInst,
+                              Instruction UnscaledLoadInst, Instruction ROWLoadInst,
+                              Instruction ROXLoadInst, ROAddrMode ro, ComplexPattern Addr,
+                              ComplexPattern UnscaledAddr, Operand AddrImm, SubRegIndex SubReg
+                              > {
   defm : LoadInsertVTPatterns<LoadOp, VT, ScalarVT, LoadInst, UnscaledLoadInst, ROWLoadInst,
-                              ROXLoadInst, ro, Addr, UnscaledAddr, AddrImm, SubReg>;
+                              ROXLoadInst, ro, Addr, UnscaledAddr, AddrImm, SubReg, VecInsVT>;
   defm : LoadInsertVTPatterns<LoadOp, HVT, ScalarVT, LoadInst, UnscaledLoadInst, ROWLoadInst,
-                              ROXLoadInst, ro, Addr, UnscaledAddr, AddrImm, SubReg>;
+                              ROXLoadInst, ro, Addr, UnscaledAddr, AddrImm, SubReg, VecInsHVT>;
   defm : LoadInsertVTPatterns<LoadOp, SVT, ScalarVT, LoadInst, UnscaledLoadInst, ROWLoadInst,
-                              ROXLoadInst, ro, Addr, UnscaledAddr, AddrImm, SubReg>;
+                              ROXLoadInst, ro, Addr, UnscaledAddr, AddrImm, SubReg, vec_ins_or_scal_vec>;
 }
 
 // Accept i8 scalar argument in GlobalISel.
 defm : LoadInsertPatterns<load,       v16i8,  v8i8,   nxv16i8,  i8,
+                          vec_ins_or_scal_vec, vec_ins_or_scal_vec,
                           LDRBui, LDURBi, LDRBroW, LDRBroX,
                           ro8, am_indexed8,  am_unscaled8,  uimm12s1, bsub>;
 defm : LoadInsertPatterns<extloadi8,  v16i8,  v8i8,   nxv16i8,  i32,
+                          vec_ins_or_scal_vec, vec_ins_or_scal_vec,
                           LDRBui, LDURBi, LDRBroW, LDRBroX,
                           ro8, am_indexed8,  am_unscaled8,  uimm12s1, bsub>;
 defm : LoadInsertPatterns<extloadi16, v8i16,  v4i16,  nxv8i16,  i32,
+                          vec_ins_or_scal_vec, vec_ins_or_scal_vec,
                           LDRHui, LDURHi, LDRHroW, LDRHroX,
                           ro16, am_indexed16, am_unscaled16, uimm12s2, hsub>;
 defm : LoadInsertPatterns<load,       v4i32,  v2i32,  nxv4i32,  i32,
+                          vec_ins_or_scal_vec_v4f32, vec_ins_or_scal_vec_v2f32,
                           LDRSui, LDURSi, LDRSroW, LDRSroX,
                           ro32, am_indexed32, am_unscaled32, uimm12s4, ssub>;
 defm : LoadInsertPatterns<load,       v2i64,  isVoid, nxv2i64,  i64,
+                          vec_ins_or_scal_vec_v2f64, null_frag,
                           LDRDui, LDURDi, LDRDroW, LDRDroX,
                           ro64, am_indexed64, am_unscaled64, uimm12s8, dsub>;
 defm : LoadInsertPatterns<load,       v8f16,  v4f16,  nxv8f16,  f16,
+                          vec_ins_or_scal_vec, vec_ins_or_scal_vec,
                           LDRHui, LDURHi, LDRHroW, LDRHroX,
                           ro16, am_indexed16, am_unscaled16, uimm12s2, hsub>;
 defm : LoadInsertPatterns<load,       v8bf16, v4bf16, nxv8bf16, bf16,
+                          vec_ins_or_scal_vec, vec_ins_or_scal_vec,
                           LDRHui, LDURHi, LDRHroW, LDRHroX,
                           ro16, am_indexed16, am_unscaled16, uimm12s2, hsub>;
 defm : LoadInsertPatterns<load,       v4f32,  v2f32,  nxv4f32,  f32,
+                          vec_ins_or_scal_vec, vec_ins_or_scal_vec,
                           LDRSui, LDURSi, LDRSroW, LDRSroX,
                           ro32, am_indexed32, am_unscaled32, uimm12s4, ssub>;
 defm : LoadInsertPatterns<load,       v2f64,  isVoid, nxv2f64,  f64,
+                          vec_ins_or_scal_vec, vec_ins_or_scal_vec,
                           LDRDui, LDURDi, LDRDroW, LDRDroX,
                           ro64, am_indexed64, am_unscaled64, uimm12s8, dsub>;
 
 // Extra patterns for v1f64 scalar_to_vector(load), which need to avoid the
 // SUBREG_TO_REG used above.
-def : Pat <(v1i64 (scalar_to_vector (i64
+def : Pat <(v1i64 (scalar_to_vector_v1f64 (i64
                (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
-def : Pat <(v1i64 (scalar_to_vector (i64
+def : Pat <(v1i64 (scalar_to_vector_v1f64 (i64
                (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
-def : Pat <(v1i64 (scalar_to_vector (i64
+def : Pat <(v1i64 (scalar_to_vector_v1f64 (i64
                (load (ro64.Wpat GPR64sp:$Rn, GPR32:$Rm, ro64.Wext:$extend))))),
            (LDRDroW GPR64sp:$Rn, GPR32:$Rm, ro64.Wext:$extend)>;
-def : Pat <(v1i64 (scalar_to_vector (i64
+def : Pat <(v1i64 (scalar_to_vector_v1f64 (i64
                (load (ro64.Xpat GPR64sp:$Rn, GPR64:$Rm, ro64.Xext:$extend))))),
            (LDRDroX GPR64sp:$Rn, GPR64:$Rm, ro64.Xext:$extend)>;
 
@@ -4488,6 +4500,7 @@ multiclass ExtLoad8_16_32AllModes<ValueType OutTy, ValueType InnerTy,
 }
 
 // Instantiate bitconvert patterns for floating-point types.
+
 defm : ExtLoad8_16AllModes<f32, i32, bitconvert, zextloadi8, zextloadi16>;
 defm : ExtLoad8_16_32AllModes<f64, i64, bitconvert, zextloadi8, zextloadi16, zextloadi32>;
 
@@ -4496,10 +4509,10 @@ defm : ExtLoad8_16AllModes<v16i8, i32, scalar_to_vector, zextloadi8, zextloadi16
 defm : ExtLoad8_16AllModes<v16i8, i32, scalar_to_vector, extloadi8, extloadi16>;
 defm : ExtLoad8_16AllModes<v8i16, i32, scalar_to_vector, zextloadi8, zextloadi16>;
 defm : ExtLoad8_16AllModes<v8i16, i32, scalar_to_vector, extloadi8, extloadi16>;
-defm : ExtLoad8_16AllModes<v4i32, i32, scalar_to_vector, zextloadi8, zextloadi16>;
-defm : ExtLoad8_16AllModes<v4i32, i32, scalar_to_vector, extloadi8, extloadi16>;
-defm : ExtLoad8_16_32AllModes<v2i64, i64, scalar_to_vector, zextloadi8, zextloadi16, zextloadi32>;
-defm : ExtLoad8_16_32AllModes<v2i64, i64, scalar_to_vector, extloadi8, extloadi16, extloadi32>;
+defm : ExtLoad8_16AllModes<v4i32, i32, scalar_to_vector_v4f32, zextloadi8, zextloadi16>;
+defm : ExtLoad8_16AllModes<v4i32, i32, scalar_to_vector_v4f32, extloadi8, extloadi16>;
+defm : ExtLoad8_16_32AllModes<v2i64, i64, scalar_to_vector_v2f64, zextloadi8, zextloadi16, zextloadi32>;
+defm : ExtLoad8_16_32AllModes<v2i64, i64, scalar_to_vector_v2f64, extloadi8, extloadi16, extloadi32>;
 
 // Pre-fetch.
 defm PRFUM : PrefetchUnscaled<0b11, 0, 0b10, "prfum",
@@ -6679,7 +6692,7 @@ multiclass FPToIntegerSIMDScalarPatterns<SDPatternOperator OpN, string INST> {
             (!cast<Instruction>(INST # v1i32) FPR32:$Rn)>;
   def : Pat<(f64 (bitconvert (i64 (OpN (f64 FPR64:$Rn))))),
             (!cast<Instruction>(INST # v1i64) FPR64:$Rn)>;
-            
+
 }
 defm: FPToIntegerSIMDScalarPatterns<int_aarch64_neon_fcvtas, "FCVTAS">;
 defm: FPToIntegerSIMDScalarPatterns<int_aarch64_neon_fcvtau, "FCVTAU">;
@@ -8173,7 +8186,8 @@ def : Pat<(v2i64 (vector_insert v2i64:$src, (i64 (bitconvert (f64 FPR64:$Sn))),
 
 // Patterns for i8/i16 -> v2i32/v4i16 lane moves via insert and extract that go via i32.
 multiclass Neon_INS_elt_ext_pattern<ValueType VT128, ValueType VT64, ValueType OutVT,
-                                    Instruction INS, Instruction DUP, SubRegIndex DUPSub,
+                                    SDPatternOperator ScalToVec, Instruction INS, 
+                                    Instruction DUP, SubRegIndex DUPSub,
                                     SDNodeXForm VecIndexMult> {
   // VT64->OutVT
   def : Pat<(OutVT (vector_insert (OutVT V64:$src),
@@ -8183,7 +8197,7 @@ multiclass Neon_INS_elt_ext_pattern<ValueType VT128, ValueType VT64, ValueType O
               (INS (INSERT_SUBREG (VT128 (IMPLICIT_DEF)), V64:$src, dsub), (VecIndexMult imm:$Immd),
                    (INSERT_SUBREG (VT128 (IMPLICIT_DEF)), V64:$Rn, dsub), imm:$Immn),
               dsub)>;
-  def : Pat<(OutVT (scalar_to_vector (i32 (vector_extract (VT64 V64:$Rn), (i64 imm:$Immn))))),
+  def : Pat<(OutVT (ScalToVec (i32 (vector_extract (VT64 V64:$Rn), (i64 imm:$Immn))))),
             (EXTRACT_SUBREG
               (VT128 (SUBREG_TO_REG
                 (DUP (INSERT_SUBREG (VT128 (IMPLICIT_DEF)), V64:$Rn, dsub), imm:$Immn),
@@ -8198,7 +8212,7 @@ multiclass Neon_INS_elt_ext_pattern<ValueType VT128, ValueType VT64, ValueType O
               (INS (SUBREG_TO_REG V64:$src, dsub), (VecIndexMult imm:$Immd),
                    V128:$Rn, imm:$Immn),
               dsub)>;
-  def : Pat<(OutVT (scalar_to_vector (i32 (vector_extract (VT128 V128:$Rn), (i64 imm:$Immn))))),
+  def : Pat<(OutVT (ScalToVec (i32 (vector_extract (VT128 V128:$Rn), (i64 imm:$Immn))))),
             (EXTRACT_SUBREG
               (VT128 (SUBREG_TO_REG
                 (DUP V128:$Rn, imm:$Immn),
@@ -8206,9 +8220,9 @@ multiclass Neon_INS_elt_ext_pattern<ValueType VT128, ValueType VT64, ValueType O
               dsub)>;
 }
 
-defm : Neon_INS_elt_ext_pattern<v16i8, v8i8, v4i16, INSvi8lane, DUPi8, bsub, VecIndex_x2>;
-defm : Neon_INS_elt_ext_pattern<v16i8, v8i8, v2i32, INSvi8lane, DUPi8, bsub, VecIndex_x4>;
-defm : Neon_INS_elt_ext_pattern<v8i16, v4i16, v2i32, INSvi16lane, DUPi16, hsub, VecIndex_x2>;
+defm : Neon_INS_elt_ext_pattern<v16i8, v8i8, v4i16, scalar_to_vector, INSvi8lane, DUPi8, bsub, VecIndex_x2>;
+defm : Neon_INS_elt_ext_pattern<v16i8, v8i8, v2i32, scalar_to_vector_v2f32, INSvi8lane, DUPi8, bsub, VecIndex_x4>;
+defm : Neon_INS_elt_ext_pattern<v8i16, v4i16, v2i32, scalar_to_vector_v2f32, INSvi16lane, DUPi16, hsub, VecIndex_x2>;
 
 // bitcast of an extract
 // f32 bitcast(vector_extract(v4i32 src, 0)) -> EXTRACT_SUBREG(src)
@@ -9815,14 +9829,15 @@ def : Ld1Lane64IdxOpPat<extloadi8, VectorIndexH, v4i16, i32, LD1i8, VectorIndexH
 // scalar_to_vector + insert_subvector instead of insert_vector_elt.
 let Predicates = [HasNEON] in {
   class Ld1Lane128FirstElm<ValueType ResultTy, ValueType VecTy,
-                          SDPatternOperator ExtLoad, Instruction LD1>
-    : Pat<(ResultTy (vec_ins_or_scal_vec (i32 (ExtLoad GPR64sp:$Rn)))),
+                          SDPatternOperator ExtLoad, SDPatternOperator VecInsNode, 
+                          Instruction LD1>
+    : Pat<(ResultTy (VecInsNode (i32 (ExtLoad GPR64sp:$Rn)))),
             (ResultTy (EXTRACT_SUBREG
               (LD1 (VecTy (IMPLICIT_DEF)), 0, GPR64sp:$Rn), dsub))>;
 
-  def : Ld1Lane128FirstElm<v2i32, v8i16, extloadi16, LD1i16>;
-  def : Ld1Lane128FirstElm<v2i32, v16i8, extloadi8, LD1i8>;
-  def : Ld1Lane128FirstElm<v4i16, v16i8, extloadi8, LD1i8>;
+  def : Ld1Lane128FirstElm<v2i32, v8i16, extloadi16, vec_ins_or_scal_vec_v2f32, LD1i16>;
+  def : Ld1Lane128FirstElm<v2i32, v16i8, extloadi8, vec_ins_or_scal_vec_v2f32, LD1i8>;
+  def : Ld1Lane128FirstElm<v4i16, v16i8, extloadi8, vec_ins_or_scal_vec, LD1i8>;
 }
 class Ld1Lane64Pat<SDPatternOperator scalar_load, Operand VecIndex,
                    ValueType VTy, ValueType STy, Instruction LD1>
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index b1b1203ab512f..273249b9ff44c 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -3619,7 +3619,7 @@ let Predicates = [HasSVE_or_SME] in {
   def : Pat<(v1f64 (scalar_to_vector
                      (f64 (vector_extract nxv2f64:$vec, VectorIndexD:$index)))),
             (DUPi64 (EXTRACT_SUBREG nxv2f64:$vec, zsub), VectorIndexD:$index)>;
-  def : Pat<(v1i64 (scalar_to_vector
+  def : Pat<(v1i64 (scalar_to_vector_v1f64
                      (i64 (vector_extract nxv2i64:$vec, VectorIndexD:$index)))),
             (DUPi64 (EXTRACT_SUBREG nxv2i64:$vec, zsub), VectorIndexD:$index)>;
   } // End HasNEON
diff --git a/llvm/test/CodeGen/AArch64/aarch64-addv.ll b/llvm/test/CodeGen/AArch64/aarch64-addv.ll
index d8aeeff79b936..9b60e0bde5a90 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-addv.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-addv.ll
@@ -543,8 +543,7 @@ define i8 @addv_zero_lanes_negative_v8i8(ptr %arr)  {
 define i8 @addv_zero_lanes_v16i8(ptr %arr)  {
 ; CHECK-SD-LABEL: addv_zero_lanes_v16i8:
 ; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    ldrb w8, [x0]
-; CHECK-SD-NEXT:    fmov d0, x8
+; CHECK-SD-NEXT:    ldr b0, [x0]
 ; CHECK-SD-NEXT:    fmov d0, d0
 ; CHECK-SD-NEXT:    addv b0, v0.16b
 ; CHECK-SD-NEXT:    fmov w0, s0
@@ -568,8 +567,7 @@ define i8 @addv_zero_lanes_v16i8(ptr %arr)  {
 define i16 @addv_zero_lanes_v8i16(ptr %arr)  {
 ; CHECK-SD-LABEL: addv_zero_lanes_v8i16:
 ; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    ldrh w8, [x0]
-; CHECK-SD-NEXT:    fmov d0, x8
+; CHECK-SD-NEXT:    ldr h0, [x0]
 ; CHECK-SD-NEXT:    fmov d0, d0
 ; CHECK-SD-NEXT:    addv h0, v0.8h
 ; CHECK-SD-NEXT:    fmov w0, s0
@@ -593,8 +591,7 @@ define i16 @addv_zero_lanes_v8i16(ptr %arr)  {
 define i32 @addv_zero_lanes_v4i32(ptr %arr)  {
 ; CHECK-SD-LABEL: addv_zero_lanes_v4i32:
 ; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    ldr w8, [x0]
-; CHECK-SD-NEXT:    fmov d0, x8
+; CHECK-SD-NEXT:    ldr s0, [x0]
 ; CHECK-SD-NEXT:    fmov d0, d0
 ; CHECK-SD-NEXT:    addv s0, v0.4s
 ; CHECK-SD-NEXT:    fmov w0, s0
diff --git a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
index fa982ce27c7d0..578a96aee6d59 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
@@ -1519,10 +1519,10 @@ for.end12:                                        ; preds = %vector.body
 define void @matrix_mul_signed_and(i32 %N, ptr nocapture %C, ptr nocapture readonly %A, i32 %val) {
 ; CHECK-SD-LABEL: matrix_mul_signed_and:
 ; CHECK-SD:       // %bb.0: // %vector.header
-; CHECK-SD-NEXT:    and w9, w3, #0xffff
+; CHECK-SD-NEXT:    and w8, w3, #0xffff
 ; CHECK-SD-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT:    fmov s0, w8
 ; CHECK-SD-NEXT:    and x8, x0, #0xfffffff8
-; CHECK-SD-NEXT:    fmov s0, w9
 ; CHECK-SD-NEXT:  .LBB13_1: // %vector.body
 ; CHECK-SD-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-SD-NEXT:    add x9, x2, w0, uxtw #1
@@ -1607,10 +1607,10 @@ for.end12:                                        ; preds = %vector.body
 define void @matrix_mul_signed_and_double(i32 %N, ptr nocapture %C, ptr nocapture readonly %A, i32 %val) {
 ; CHECK-SD-LABEL: matrix_mul_signed_and_double:
 ; CHECK-SD:       // %bb.0: // %vector.header
-; CHECK-SD-NEXT:    and w9, w3, #0xffff
+; CHECK-SD-NEXT:    and w8, w3, #0xffff
 ; CHECK-SD-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT:    fmov s0, w8
 ; CHECK-SD-NEXT:    and x8, x0, #0xfffffff0
-; CHECK-SD-NEXT:    fmov s0, w9
 ; CHECK-SD-NEXT:  .LBB14_1: // %vector.body
 ; CHECK-SD-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-SD-NEXT:    add x9, x2, w0, uxtw #1
diff --git a/llvm/test/CodeGen/AArch64/aarch64-pmull2.ll b/llvm/test/CodeGen/AArch64/aarch64-pmull2.ll
index 9d7aa78ec139f..b5cee616ee9dc 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-pmull2.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-pmull2.ll
@@ -9,9 +9,9 @@ define void @test1(ptr %0, ptr %1) {
 ; CHECK-LABEL: test1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #56824 // =0xddf8
-; CHECK-NEXT:    mov w9, #61186 // =0xef02
+; CHECK-NEXT:    mov x9, #61186 // =0xef02
 ; CHECK-NEXT:    movk w8, #40522, lsl #16
-; CHECK-NEXT:    movk w9, #29710, lsl #16
+; CHECK-NEXT:    movk x9, #29710, lsl #16
 ; CHECK-NEXT:    ldp q0, q1, [x1]
 ; CHECK-NEXT:    dup v2.2d, x8
 ; CHECK-NEXT:    fmov d3, x9
diff --git a/llvm/test/CodeGen/AArch64/aarch64-scal-to-vec-bitcast-insert.ll b/llvm/test/CodeGen/AArch64/aarch64-scal-to-vec-bitcast-insert.ll
index 0b9f8b1ca5312..14eaf14507f38 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-scal-to-vec-bitcast-insert.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-scal-to-vec-bitcast-insert.ll
@@ -7,8 +7,7 @@
 define <2 x i32> @fcvtzs_v2i32_scalar_to_vector(float %a) {
 ; CHECK-LABEL: fcvtzs_v2i32_scalar_to_vector:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzs w8, s0
-; CHECK-NEXT:    fmov s0, w8
+; CHECK-NEXT:    fcvtzs s0, s0
 ; CHECK-NEXT:    ret
   %c = fptosi float %a to i32
   %v = insertelement <2 x i32> poison, i32 %c, i32 0
@@ -18,8 +17,7 @@ define <2 x i32> @fcvtzs_v2i32_scalar_to_vector(float %a) {
 define <4 x i32> @fcvtzs_v4i32_scalar_to_vector(float %a) {
 ; CHECK-LABEL: fcvtzs_v4i32_scalar_to_vector:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzs w8, s0
-; CHECK-NEXT:    fmov s0, w8
+; CHECK-NEXT:    fcvtzs s0, s0
 ; CHECK-NEXT:    ret
   %c = fptosi float %a to i32
   %v = insertelement <4 x i32> poison, i32 %c, i32 0
@@ -29,8 +27,7 @@ define <4 x i32> @fcvtzs_v4i32_scalar_to_vector(float %a) {
 define <1 x i64> @fcvtzs_v1i64_scalar_to_vector(double %a) {
 ; CHECK-LABEL: fcvtzs_v1i64_scalar_to_vector:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fcvtzs d0, d0
 ; CHECK-NEXT:    ret
   %c = fptosi double %a to i64
   %v = insertelement <1 x i64> poison, i64 %c, i32 0
@@ -40,8 +37,7 @@ define <1 x i64> @fcvtzs_v1i64_scalar_to_vector(double %a) {
 define <2 x i64> @fcvtzs_v2i64_scalar_to_vector(double %a) {
 ; CHECK-LABEL: fcvtzs_v2i64_scalar_to_vector:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fcvtzs d0, d0
 ; CHECK-NEXT:    ret
   %c = fptosi double %a to i64
   %v = insertelement <2 x i64> poison, i64 %c, i32 0
diff --git a/llvm/test/CodeGen/AArch64/arm64-cvt-simd-intrinsics.ll b/llvm/test/CodeGen/AArch64/arm64-cvt-simd-intrinsics.ll
index b1b9fcf8a8b3c..e0fe663bc625d 100644
--- a/llvm/test/CodeGen/AArch64/arm64-cvt-simd-intrinsics.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-cvt-simd-intrinsics.ll
@@ -4,7 +4,7 @@
 
 
 ;
-; Intriniscs
+; Intrinsics (bitcast)
 ;
 
 define float @fcvtas_1s1d_simd(double %A) nounwind {
@@ -607,3 +607,1207 @@ define  float @fcvtzu_1s1s_simd(float %a) {
   %d = bitcast i32 %vcvtah_s32_f32 to float
   ret float %d
 }
+
+;
+; Intrinsics (scalar_to_vector)
+;
+
+define <2 x i32> @fcvtas_v2i32_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtas_v2i32_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas s0, h0
+; CHECK-NEXT:    ret
+  %fcvtas_scalar = tail call i32 @llvm.aarch64.neon.fcvtas.i32.f16(half %a)
+  %fcvtas_vector = insertelement <2 x i32> poison, i32 %fcvtas_scalar, i32 0
+  ret <2 x i32> %fcvtas_vector
+}
+
+define <2 x i32> @fcvtas_v2i32_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtas_v2i32_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas s0, s0
+; CHECK-NEXT:    ret
+  %fcvtas_scalar = call i32 @llvm.aarch64.neon.fcvtas.i32.f32(float %A)
+  %fcvtas_vector = insertelement <2 x i32> poison, i32 %fcvtas_scalar, i32 0
+  ret <2 x i32> %fcvtas_vector
+}
+
+define <2 x i32> @fcvtas_v2i32_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtas_v2i32_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas s0, d0
+; CHECK-NEXT:    ret
+  %fcvtas_scalar = tail call i32 @llvm.aarch64.neon.fcvtas.i32.f64(double %a)
+  %fcvtas_vector = insertelement <2 x i32> poison, i32 %fcvtas_scalar, i32 0
+  ret <2 x i32> %fcvtas_vector
+}
+
+define <4 x i32> @fcvtas_v4i32_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtas_v4i32_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas s0, h0
+; CHECK-NEXT:    ret
+  %fcvtas_scalar = tail call i32 @llvm.aarch64.neon.fcvtas.i32.f16(half %a)
+  %fcvtas_vector = insertelement <4 x i32> poison, i32 %fcvtas_scalar, i32 0
+  ret <4 x i32> %fcvtas_vector
+}
+
+define <4 x i32> @fcvtas_v4i32_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtas_v4i32_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas s0, s0
+; CHECK-NEXT:    ret
+  %fcvtas_scalar = call i32 @llvm.aarch64.neon.fcvtas.i32.f32(float %A)
+  %fcvtas_vector = insertelement <4 x i32> poison, i32 %fcvtas_scalar, i32 0
+  ret <4 x i32> %fcvtas_vector
+}
+
+define <4 x i32> @fcvtas_v4i32_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtas_v4i32_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas s0, d0
+; CHECK-NEXT:    ret
+  %fcvtas_scalar = tail call i32 @llvm.aarch64.neon.fcvtas.i32.f64(double %a)
+  %fcvtas_vector = insertelement <4 x i32> poison, i32 %fcvtas_scalar, i32 0
+  ret <4 x i32> %fcvtas_vector
+}
+
+define <1 x i64> @fcvtas_v1i64_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtas_v1i64_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas d0, h0
+; CHECK-NEXT:    ret
+  %fcvtas_scalar = tail call i64 @llvm.aarch64.neon.fcvtas.i64.f16(half %a)
+  %fcvtas_vector = insertelement <1 x i64> poison, i64 %fcvtas_scalar, i32 0
+  ret <1 x i64> %fcvtas_vector
+}
+
+define <1 x i64> @fcvtas_v1i64_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtas_v1i64_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas d0, s0
+; CHECK-NEXT:    ret
+  %fcvtas_scalar = call i64 @llvm.aarch64.neon.fcvtas.i64.f32(float %A)
+  %fcvtas_vector = insertelement <1 x i64> poison, i64 %fcvtas_scalar, i32 0
+  ret <1 x i64> %fcvtas_vector
+}
+
+define <1 x i64> @fcvtas_v1i64_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtas_v1i64_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas d0, d0
+; CHECK-NEXT:    ret
+  %fcvtas_scalar = tail call i64 @llvm.aarch64.neon.fcvtas.i64.f64(double %a)
+  %fcvtas_vector = insertelement <1 x i64> poison, i64 %fcvtas_scalar, i32 0
+  ret <1 x i64> %fcvtas_vector
+}
+
+define <2 x i64> @fcvtas_v2i64_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtas_v2i64_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas d0, h0
+; CHECK-NEXT:    ret
+  %fcvtas_scalar = tail call i64 @llvm.aarch64.neon.fcvtas.i64.f16(half %a)
+  %fcvtas_vector = insertelement <2 x i64> poison, i64 %fcvtas_scalar, i32 0
+  ret <2 x i64> %fcvtas_vector
+}
+
+define <2 x i64> @fcvtas_v2i64_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtas_v2i64_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas d0, s0
+; CHECK-NEXT:    ret
+  %fcvtas_scalar = call i64 @llvm.aarch64.neon.fcvtas.i64.f32(float %A)
+  %fcvtas_vector = insertelement <2 x i64> poison, i64 %fcvtas_scalar, i32 0
+  ret <2 x i64> %fcvtas_vector
+}
+
+define <2 x i64> @fcvtas_v2i64_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtas_v2i64_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas d0, d0
+; CHECK-NEXT:    ret
+  %fcvtas_scalar = tail call i64 @llvm.aarch64.neon.fcvtas.i64.f64(double %a)
+  %fcvtas_vector = insertelement <2 x i64> poison, i64 %fcvtas_scalar, i32 0
+  ret <2 x i64> %fcvtas_vector
+}
+
+define <2 x i32> @fcvtau_v2i32_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtau_v2i32_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtau s0, h0
+; CHECK-NEXT:    ret
+  %fcvtau_scalar = tail call i32 @llvm.aarch64.neon.fcvtau.i32.f16(half %a)
+  %fcvtau_vector = insertelement <2 x i32> poison, i32 %fcvtau_scalar, i32 0
+  ret <2 x i32> %fcvtau_vector
+}
+
+define <2 x i32> @fcvtau_v2i32_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtau_v2i32_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtau s0, s0
+; CHECK-NEXT:    ret
+  %fcvtau_scalar = call i32 @llvm.aarch64.neon.fcvtau.i32.f32(float %A)
+  %fcvtau_vector = insertelement <2 x i32> poison, i32 %fcvtau_scalar, i32 0
+  ret <2 x i32> %fcvtau_vector
+}
+
+define <2 x i32> @fcvtau_v2i32_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtau_v2i32_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtau s0, d0
+; CHECK-NEXT:    ret
+  %fcvtau_scalar = tail call i32 @llvm.aarch64.neon.fcvtau.i32.f64(double %a)
+  %fcvtau_vector = insertelement <2 x i32> poison, i32 %fcvtau_scalar, i32 0
+  ret <2 x i32> %fcvtau_vector
+}
+
+define <4 x i32> @fcvtau_v4i32_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtau_v4i32_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtau s0, h0
+; CHECK-NEXT:    ret
+  %fcvtau_scalar = tail call i32 @llvm.aarch64.neon.fcvtau.i32.f16(half %a)
+  %fcvtau_vector = insertelement <4 x i32> poison, i32 %fcvtau_scalar, i32 0
+  ret <4 x i32> %fcvtau_vector
+}
+
+define <4 x i32> @fcvtau_v4i32_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtau_v4i32_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtau s0, s0
+; CHECK-NEXT:    ret
+  %fcvtau_scalar = call i32 @llvm.aarch64.neon.fcvtau.i32.f32(float %A)
+  %fcvtau_vector = insertelement <4 x i32> poison, i32 %fcvtau_scalar, i32 0
+  ret <4 x i32> %fcvtau_vector
+}
+
+define <4 x i32> @fcvtau_v4i32_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtau_v4i32_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtau s0, d0
+; CHECK-NEXT:    ret
+  %fcvtau_scalar = tail call i32 @llvm.aarch64.neon.fcvtau.i32.f64(double %a)
+  %fcvtau_vector = insertelement <4 x i32> poison, i32 %fcvtau_scalar, i32 0
+  ret <4 x i32> %fcvtau_vector
+}
+
+define <1 x i64> @fcvtau_v1i64_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtau_v1i64_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtau d0, h0
+; CHECK-NEXT:    ret
+  %fcvtau_scalar = tail call i64 @llvm.aarch64.neon.fcvtau.i64.f16(half %a)
+  %fcvtau_vector = insertelement <1 x i64> poison, i64 %fcvtau_scalar, i32 0
+  ret <1 x i64> %fcvtau_vector
+}
+
+define <1 x i64> @fcvtau_v1i64_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtau_v1i64_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtau d0, s0
+; CHECK-NEXT:    ret
+  %fcvtau_scalar = call i64 @llvm.aarch64.neon.fcvtau.i64.f32(float %A)
+  %fcvtau_vector = insertelement <1 x i64> poison, i64 %fcvtau_scalar, i32 0
+  ret <1 x i64> %fcvtau_vector
+}
+
+define <1 x i64> @fcvtau_v1i64_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtau_v1i64_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtau d0, d0
+; CHECK-NEXT:    ret
+  %fcvtau_scalar = tail call i64 @llvm.aarch64.neon.fcvtau.i64.f64(double %a)
+  %fcvtau_vector = insertelement <1 x i64> poison, i64 %fcvtau_scalar, i32 0
+  ret <1 x i64> %fcvtau_vector
+}
+
+define <2 x i64> @fcvtau_v2i64_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtau_v2i64_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtau d0, h0
+; CHECK-NEXT:    ret
+  %fcvtau_scalar = tail call i64 @llvm.aarch64.neon.fcvtau.i64.f16(half %a)
+  %fcvtau_vector = insertelement <2 x i64> poison, i64 %fcvtau_scalar, i32 0
+  ret <2 x i64> %fcvtau_vector
+}
+
+define <2 x i64> @fcvtau_v2i64_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtau_v2i64_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtau d0, s0
+; CHECK-NEXT:    ret
+  %fcvtau_scalar = call i64 @llvm.aarch64.neon.fcvtau.i64.f32(float %A)
+  %fcvtau_vector = insertelement <2 x i64> poison, i64 %fcvtau_scalar, i32 0
+  ret <2 x i64> %fcvtau_vector
+}
+
+define <2 x i64> @fcvtau_v2i64_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtau_v2i64_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtau d0, d0
+; CHECK-NEXT:    ret
+  %fcvtau_scalar = tail call i64 @llvm.aarch64.neon.fcvtau.i64.f64(double %a)
+  %fcvtau_vector = insertelement <2 x i64> poison, i64 %fcvtau_scalar, i32 0
+  ret <2 x i64> %fcvtau_vector
+}
+
+define <2 x i32> @fcvtms_v2i32_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtms_v2i32_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtms s0, h0
+; CHECK-NEXT:    ret
+  %fcvtms_scalar = tail call i32 @llvm.aarch64.neon.fcvtms.i32.f16(half %a)
+  %fcvtms_vector = insertelement <2 x i32> poison, i32 %fcvtms_scalar, i32 0
+  ret <2 x i32> %fcvtms_vector
+}
+
+define <2 x i32> @fcvtms_v2i32_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtms_v2i32_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtms s0, s0
+; CHECK-NEXT:    ret
+  %fcvtms_scalar = call i32 @llvm.aarch64.neon.fcvtms.i32.f32(float %A)
+  %fcvtms_vector = insertelement <2 x i32> poison, i32 %fcvtms_scalar, i32 0
+  ret <2 x i32> %fcvtms_vector
+}
+
+define <2 x i32> @fcvtms_v2i32_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtms_v2i32_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtms s0, d0
+; CHECK-NEXT:    ret
+  %fcvtms_scalar = tail call i32 @llvm.aarch64.neon.fcvtms.i32.f64(double %a)
+  %fcvtms_vector = insertelement <2 x i32> poison, i32 %fcvtms_scalar, i32 0
+  ret <2 x i32> %fcvtms_vector
+}
+
+define <4 x i32> @fcvtms_v4i32_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtms_v4i32_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtms s0, h0
+; CHECK-NEXT:    ret
+  %fcvtms_scalar = tail call i32 @llvm.aarch64.neon.fcvtms.i32.f16(half %a)
+  %fcvtms_vector = insertelement <4 x i32> poison, i32 %fcvtms_scalar, i32 0
+  ret <4 x i32> %fcvtms_vector
+}
+
+define <4 x i32> @fcvtms_v4i32_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtms_v4i32_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtms s0, s0
+; CHECK-NEXT:    ret
+  %fcvtms_scalar = call i32 @llvm.aarch64.neon.fcvtms.i32.f32(float %A)
+  %fcvtms_vector = insertelement <4 x i32> poison, i32 %fcvtms_scalar, i32 0
+  ret <4 x i32> %fcvtms_vector
+}
+
+define <4 x i32> @fcvtms_v4i32_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtms_v4i32_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtms s0, d0
+; CHECK-NEXT:    ret
+  %fcvtms_scalar = tail call i32 @llvm.aarch64.neon.fcvtms.i32.f64(double %a)
+  %fcvtms_vector = insertelement <4 x i32> poison, i32 %fcvtms_scalar, i32 0
+  ret <4 x i32> %fcvtms_vector
+}
+
+define <1 x i64> @fcvtms_v1i64_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtms_v1i64_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtms d0, h0
+; CHECK-NEXT:    ret
+  %fcvtms_scalar = tail call i64 @llvm.aarch64.neon.fcvtms.i64.f16(half %a)
+  %fcvtms_vector = insertelement <1 x i64> poison, i64 %fcvtms_scalar, i32 0
+  ret <1 x i64> %fcvtms_vector
+}
+
+define <1 x i64> @fcvtms_v1i64_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtms_v1i64_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtms d0, s0
+; CHECK-NEXT:    ret
+  %fcvtms_scalar = call i64 @llvm.aarch64.neon.fcvtms.i64.f32(float %A)
+  %fcvtms_vector = insertelement <1 x i64> poison, i64 %fcvtms_scalar, i32 0
+  ret <1 x i64> %fcvtms_vector
+}
+
+define <1 x i64> @fcvtms_v1i64_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtms_v1i64_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtms d0, d0
+; CHECK-NEXT:    ret
+  %fcvtms_scalar = tail call i64 @llvm.aarch64.neon.fcvtms.i64.f64(double %a)
+  %fcvtms_vector = insertelement <1 x i64> poison, i64 %fcvtms_scalar, i32 0
+  ret <1 x i64> %fcvtms_vector
+}
+
+define <2 x i64> @fcvtms_v2i64_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtms_v2i64_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtms d0, h0
+; CHECK-NEXT:    ret
+  %fcvtms_scalar = tail call i64 @llvm.aarch64.neon.fcvtms.i64.f16(half %a)
+  %fcvtms_vector = insertelement <2 x i64> poison, i64 %fcvtms_scalar, i32 0
+  ret <2 x i64> %fcvtms_vector
+}
+
+define <2 x i64> @fcvtms_v2i64_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtms_v2i64_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtms d0, s0
+; CHECK-NEXT:    ret
+  %fcvtms_scalar = call i64 @llvm.aarch64.neon.fcvtms.i64.f32(float %A)
+  %fcvtms_vector = insertelement <2 x i64> poison, i64 %fcvtms_scalar, i32 0
+  ret <2 x i64> %fcvtms_vector
+}
+
+define <2 x i64> @fcvtms_v2i64_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtms_v2i64_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtms d0, d0
+; CHECK-NEXT:    ret
+  %fcvtms_scalar = tail call i64 @llvm.aarch64.neon.fcvtms.i64.f64(double %a)
+  %fcvtms_vector = insertelement <2 x i64> poison, i64 %fcvtms_scalar, i32 0
+  ret <2 x i64> %fcvtms_vector
+}
+
+define <2 x i32> @fcvtmu_v2i32_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtmu_v2i32_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtmu s0, h0
+; CHECK-NEXT:    ret
+  %fcvtmu_scalar = tail call i32 @llvm.aarch64.neon.fcvtmu.i32.f16(half %a)
+  %fcvtmu_vector = insertelement <2 x i32> poison, i32 %fcvtmu_scalar, i32 0
+  ret <2 x i32> %fcvtmu_vector
+}
+
+define <2 x i32> @fcvtmu_v2i32_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtmu_v2i32_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtmu s0, s0
+; CHECK-NEXT:    ret
+  %fcvtmu_scalar = call i32 @llvm.aarch64.neon.fcvtmu.i32.f32(float %A)
+  %fcvtmu_vector = insertelement <2 x i32> poison, i32 %fcvtmu_scalar, i32 0
+  ret <2 x i32> %fcvtmu_vector
+}
+
+define <2 x i32> @fcvtmu_v2i32_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtmu_v2i32_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtmu s0, d0
+; CHECK-NEXT:    ret
+  %fcvtmu_scalar = tail call i32 @llvm.aarch64.neon.fcvtmu.i32.f64(double %a)
+  %fcvtmu_vector = insertelement <2 x i32> poison, i32 %fcvtmu_scalar, i32 0
+  ret <2 x i32> %fcvtmu_vector
+}
+
+define <4 x i32> @fcvtmu_v4i32_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtmu_v4i32_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtmu s0, h0
+; CHECK-NEXT:    ret
+  %fcvtmu_scalar = tail call i32 @llvm.aarch64.neon.fcvtmu.i32.f16(half %a)
+  %fcvtmu_vector = insertelement <4 x i32> poison, i32 %fcvtmu_scalar, i32 0
+  ret <4 x i32> %fcvtmu_vector
+}
+
+define <4 x i32> @fcvtmu_v4i32_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtmu_v4i32_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtmu s0, s0
+; CHECK-NEXT:    ret
+  %fcvtmu_scalar = call i32 @llvm.aarch64.neon.fcvtmu.i32.f32(float %A)
+  %fcvtmu_vector = insertelement <4 x i32> poison, i32 %fcvtmu_scalar, i32 0
+  ret <4 x i32> %fcvtmu_vector
+}
+
+define <4 x i32> @fcvtmu_v4i32_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtmu_v4i32_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtmu s0, d0
+; CHECK-NEXT:    ret
+  %fcvtmu_scalar = tail call i32 @llvm.aarch64.neon.fcvtmu.i32.f64(double %a)
+  %fcvtmu_vector = insertelement <4 x i32> poison, i32 %fcvtmu_scalar, i32 0
+  ret <4 x i32> %fcvtmu_vector
+}
+
+define <1 x i64> @fcvtmu_v1i64_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtmu_v1i64_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtmu d0, h0
+; CHECK-NEXT:    ret
+  %fcvtmu_scalar = tail call i64 @llvm.aarch64.neon.fcvtmu.i64.f16(half %a)
+  %fcvtmu_vector = insertelement <1 x i64> poison, i64 %fcvtmu_scalar, i32 0
+  ret <1 x i64> %fcvtmu_vector
+}
+
+define <1 x i64> @fcvtmu_v1i64_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtmu_v1i64_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtmu d0, s0
+; CHECK-NEXT:    ret
+  %fcvtmu_scalar = call i64 @llvm.aarch64.neon.fcvtmu.i64.f32(float %A)
+  %fcvtmu_vector = insertelement <1 x i64> poison, i64 %fcvtmu_scalar, i32 0
+  ret <1 x i64> %fcvtmu_vector
+}
+
+define <1 x i64> @fcvtmu_v1i64_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtmu_v1i64_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtmu d0, d0
+; CHECK-NEXT:    ret
+  %fcvtmu_scalar = tail call i64 @llvm.aarch64.neon.fcvtmu.i64.f64(double %a)
+  %fcvtmu_vector = insertelement <1 x i64> poison, i64 %fcvtmu_scalar, i32 0
+  ret <1 x i64> %fcvtmu_vector
+}
+
+define <2 x i64> @fcvtmu_v2i64_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtmu_v2i64_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtmu d0, h0
+; CHECK-NEXT:    ret
+  %fcvtmu_scalar = tail call i64 @llvm.aarch64.neon.fcvtmu.i64.f16(half %a)
+  %fcvtmu_vector = insertelement <2 x i64> poison, i64 %fcvtmu_scalar, i32 0
+  ret <2 x i64> %fcvtmu_vector
+}
+
+define <2 x i64> @fcvtmu_v2i64_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtmu_v2i64_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtmu d0, s0
+; CHECK-NEXT:    ret
+  %fcvtmu_scalar = call i64 @llvm.aarch64.neon.fcvtmu.i64.f32(float %A)
+  %fcvtmu_vector = insertelement <2 x i64> poison, i64 %fcvtmu_scalar, i32 0
+  ret <2 x i64> %fcvtmu_vector
+}
+
+define <2 x i64> @fcvtmu_v2i64_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtmu_v2i64_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtmu d0, d0
+; CHECK-NEXT:    ret
+  %fcvtmu_scalar = tail call i64 @llvm.aarch64.neon.fcvtmu.i64.f64(double %a)
+  %fcvtmu_vector = insertelement <2 x i64> poison, i64 %fcvtmu_scalar, i32 0
+  ret <2 x i64> %fcvtmu_vector
+}
+
+define <2 x i32> @fcvtns_v2i32_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtns_v2i32_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtns s0, h0
+; CHECK-NEXT:    ret
+  %fcvtns_scalar = tail call i32 @llvm.aarch64.neon.fcvtns.i32.f16(half %a)
+  %fcvtns_vector = insertelement <2 x i32> poison, i32 %fcvtns_scalar, i32 0
+  ret <2 x i32> %fcvtns_vector
+}
+
+define <2 x i32> @fcvtns_v2i32_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtns_v2i32_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtns s0, s0
+; CHECK-NEXT:    ret
+  %fcvtns_scalar = call i32 @llvm.aarch64.neon.fcvtns.i32.f32(float %A)
+  %fcvtns_vector = insertelement <2 x i32> poison, i32 %fcvtns_scalar, i32 0
+  ret <2 x i32> %fcvtns_vector
+}
+
+define <2 x i32> @fcvtns_v2i32_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtns_v2i32_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtns s0, d0
+; CHECK-NEXT:    ret
+  %fcvtns_scalar = tail call i32 @llvm.aarch64.neon.fcvtns.i32.f64(double %a)
+  %fcvtns_vector = insertelement <2 x i32> poison, i32 %fcvtns_scalar, i32 0
+  ret <2 x i32> %fcvtns_vector
+}
+
+define <4 x i32> @fcvtns_v4i32_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtns_v4i32_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtns s0, h0
+; CHECK-NEXT:    ret
+  %fcvtns_scalar = tail call i32 @llvm.aarch64.neon.fcvtns.i32.f16(half %a)
+  %fcvtns_vector = insertelement <4 x i32> poison, i32 %fcvtns_scalar, i32 0
+  ret <4 x i32> %fcvtns_vector
+}
+
+define <4 x i32> @fcvtns_v4i32_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtns_v4i32_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtns s0, s0
+; CHECK-NEXT:    ret
+  %fcvtns_scalar = call i32 @llvm.aarch64.neon.fcvtns.i32.f32(float %A)
+  %fcvtns_vector = insertelement <4 x i32> poison, i32 %fcvtns_scalar, i32 0
+  ret <4 x i32> %fcvtns_vector
+}
+
+define <4 x i32> @fcvtns_v4i32_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtns_v4i32_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtns s0, d0
+; CHECK-NEXT:    ret
+  %fcvtns_scalar = tail call i32 @llvm.aarch64.neon.fcvtns.i32.f64(double %a)
+  %fcvtns_vector = insertelement <4 x i32> poison, i32 %fcvtns_scalar, i32 0
+  ret <4 x i32> %fcvtns_vector
+}
+
+define <1 x i64> @fcvtns_v1i64_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtns_v1i64_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtns d0, h0
+; CHECK-NEXT:    ret
+  %fcvtns_scalar = tail call i64 @llvm.aarch64.neon.fcvtns.i64.f16(half %a)
+  %fcvtns_vector = insertelement <1 x i64> poison, i64 %fcvtns_scalar, i32 0
+  ret <1 x i64> %fcvtns_vector
+}
+
+define <1 x i64> @fcvtns_v1i64_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtns_v1i64_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtns d0, s0
+; CHECK-NEXT:    ret
+  %fcvtns_scalar = call i64 @llvm.aarch64.neon.fcvtns.i64.f32(float %A)
+  %fcvtns_vector = insertelement <1 x i64> poison, i64 %fcvtns_scalar, i32 0
+  ret <1 x i64> %fcvtns_vector
+}
+
+define <1 x i64> @fcvtns_v1i64_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtns_v1i64_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtns d0, d0
+; CHECK-NEXT:    ret
+  %fcvtns_scalar = tail call i64 @llvm.aarch64.neon.fcvtns.i64.f64(double %a)
+  %fcvtns_vector = insertelement <1 x i64> poison, i64 %fcvtns_scalar, i32 0
+  ret <1 x i64> %fcvtns_vector
+}
+
+define <2 x i64> @fcvtns_v2i64_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtns_v2i64_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtns d0, h0
+; CHECK-NEXT:    ret
+  %fcvtns_scalar = tail call i64 @llvm.aarch64.neon.fcvtns.i64.f16(half %a)
+  %fcvtns_vector = insertelement <2 x i64> poison, i64 %fcvtns_scalar, i32 0
+  ret <2 x i64> %fcvtns_vector
+}
+
+define <2 x i64> @fcvtns_v2i64_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtns_v2i64_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtns d0, s0
+; CHECK-NEXT:    ret
+  %fcvtns_scalar = call i64 @llvm.aarch64.neon.fcvtns.i64.f32(float %A)
+  %fcvtns_vector = insertelement <2 x i64> poison, i64 %fcvtns_scalar, i32 0
+  ret <2 x i64> %fcvtns_vector
+}
+
+define <2 x i64> @fcvtns_v2i64_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtns_v2i64_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtns d0, d0
+; CHECK-NEXT:    ret
+  %fcvtns_scalar = tail call i64 @llvm.aarch64.neon.fcvtns.i64.f64(double %a)
+  %fcvtns_vector = insertelement <2 x i64> poison, i64 %fcvtns_scalar, i32 0
+  ret <2 x i64> %fcvtns_vector
+}
+
+define <2 x i32> @fcvtnu_v2i32_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtnu_v2i32_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtnu s0, h0
+; CHECK-NEXT:    ret
+  %fcvtnu_scalar = tail call i32 @llvm.aarch64.neon.fcvtnu.i32.f16(half %a)
+  %fcvtnu_vector = insertelement <2 x i32> poison, i32 %fcvtnu_scalar, i32 0
+  ret <2 x i32> %fcvtnu_vector
+}
+
+define <2 x i32> @fcvtnu_v2i32_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtnu_v2i32_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtnu s0, s0
+; CHECK-NEXT:    ret
+  %fcvtnu_scalar = call i32 @llvm.aarch64.neon.fcvtnu.i32.f32(float %A)
+  %fcvtnu_vector = insertelement <2 x i32> poison, i32 %fcvtnu_scalar, i32 0
+  ret <2 x i32> %fcvtnu_vector
+}
+
+define <2 x i32> @fcvtnu_v2i32_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtnu_v2i32_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtnu s0, d0
+; CHECK-NEXT:    ret
+  %fcvtnu_scalar = tail call i32 @llvm.aarch64.neon.fcvtnu.i32.f64(double %a)
+  %fcvtnu_vector = insertelement <2 x i32> poison, i32 %fcvtnu_scalar, i32 0
+  ret <2 x i32> %fcvtnu_vector
+}
+
+define <4 x i32> @fcvtnu_v4i32_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtnu_v4i32_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtnu s0, h0
+; CHECK-NEXT:    ret
+  %fcvtnu_scalar = tail call i32 @llvm.aarch64.neon.fcvtnu.i32.f16(half %a)
+  %fcvtnu_vector = insertelement <4 x i32> poison, i32 %fcvtnu_scalar, i32 0
+  ret <4 x i32> %fcvtnu_vector
+}
+
+define <4 x i32> @fcvtnu_v4i32_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtnu_v4i32_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtnu s0, s0
+; CHECK-NEXT:    ret
+  %fcvtnu_scalar = call i32 @llvm.aarch64.neon.fcvtnu.i32.f32(float %A)
+  %fcvtnu_vector = insertelement <4 x i32> poison, i32 %fcvtnu_scalar, i32 0
+  ret <4 x i32> %fcvtnu_vector
+}
+
+define <4 x i32> @fcvtnu_v4i32_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtnu_v4i32_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtnu s0, d0
+; CHECK-NEXT:    ret
+  %fcvtnu_scalar = tail call i32 @llvm.aarch64.neon.fcvtnu.i32.f64(double %a)
+  %fcvtnu_vector = insertelement <4 x i32> poison, i32 %fcvtnu_scalar, i32 0
+  ret <4 x i32> %fcvtnu_vector
+}
+
+define <1 x i64> @fcvtnu_v1i64_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtnu_v1i64_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtnu d0, h0
+; CHECK-NEXT:    ret
+  %fcvtnu_scalar = tail call i64 @llvm.aarch64.neon.fcvtnu.i64.f16(half %a)
+  %fcvtnu_vector = insertelement <1 x i64> poison, i64 %fcvtnu_scalar, i32 0
+  ret <1 x i64> %fcvtnu_vector
+}
+
+define <1 x i64> @fcvtnu_v1i64_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtnu_v1i64_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtnu d0, s0
+; CHECK-NEXT:    ret
+  %fcvtnu_scalar = call i64 @llvm.aarch64.neon.fcvtnu.i64.f32(float %A)
+  %fcvtnu_vector = insertelement <1 x i64> poison, i64 %fcvtnu_scalar, i32 0
+  ret <1 x i64> %fcvtnu_vector
+}
+
+define <1 x i64> @fcvtnu_v1i64_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtnu_v1i64_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtnu d0, d0
+; CHECK-NEXT:    ret
+  %fcvtnu_scalar = tail call i64 @llvm.aarch64.neon.fcvtnu.i64.f64(double %a)
+  %fcvtnu_vector = insertelement <1 x i64> poison, i64 %fcvtnu_scalar, i32 0
+  ret <1 x i64> %fcvtnu_vector
+}
+
+define <2 x i64> @fcvtnu_v2i64_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtnu_v2i64_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtnu d0, h0
+; CHECK-NEXT:    ret
+  %fcvtnu_scalar = tail call i64 @llvm.aarch64.neon.fcvtnu.i64.f16(half %a)
+  %fcvtnu_vector = insertelement <2 x i64> poison, i64 %fcvtnu_scalar, i32 0
+  ret <2 x i64> %fcvtnu_vector
+}
+
+define <2 x i64> @fcvtnu_v2i64_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtnu_v2i64_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtnu d0, s0
+; CHECK-NEXT:    ret
+  %fcvtnu_scalar = call i64 @llvm.aarch64.neon.fcvtnu.i64.f32(float %A)
+  %fcvtnu_vector = insertelement <2 x i64> poison, i64 %fcvtnu_scalar, i32 0
+  ret <2 x i64> %fcvtnu_vector
+}
+
+define <2 x i64> @fcvtnu_v2i64_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtnu_v2i64_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtnu d0, d0
+; CHECK-NEXT:    ret
+  %fcvtnu_scalar = tail call i64 @llvm.aarch64.neon.fcvtnu.i64.f64(double %a)
+  %fcvtnu_vector = insertelement <2 x i64> poison, i64 %fcvtnu_scalar, i32 0
+  ret <2 x i64> %fcvtnu_vector
+}
+
+define <2 x i32> @fcvtps_v2i32_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtps_v2i32_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtps s0, h0
+; CHECK-NEXT:    ret
+  %fcvtps_scalar = tail call i32 @llvm.aarch64.neon.fcvtps.i32.f16(half %a)
+  %fcvtps_vector = insertelement <2 x i32> poison, i32 %fcvtps_scalar, i32 0
+  ret <2 x i32> %fcvtps_vector
+}
+
+define <2 x i32> @fcvtps_v2i32_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtps_v2i32_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtps s0, s0
+; CHECK-NEXT:    ret
+  %fcvtps_scalar = call i32 @llvm.aarch64.neon.fcvtps.i32.f32(float %A)
+  %fcvtps_vector = insertelement <2 x i32> poison, i32 %fcvtps_scalar, i32 0
+  ret <2 x i32> %fcvtps_vector
+}
+
+define <2 x i32> @fcvtps_v2i32_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtps_v2i32_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtps s0, d0
+; CHECK-NEXT:    ret
+  %fcvtps_scalar = tail call i32 @llvm.aarch64.neon.fcvtps.i32.f64(double %a)
+  %fcvtps_vector = insertelement <2 x i32> poison, i32 %fcvtps_scalar, i32 0
+  ret <2 x i32> %fcvtps_vector
+}
+
+define <4 x i32> @fcvtps_v4i32_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtps_v4i32_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtps s0, h0
+; CHECK-NEXT:    ret
+  %fcvtps_scalar = tail call i32 @llvm.aarch64.neon.fcvtps.i32.f16(half %a)
+  %fcvtps_vector = insertelement <4 x i32> poison, i32 %fcvtps_scalar, i32 0
+  ret <4 x i32> %fcvtps_vector
+}
+
+define <4 x i32> @fcvtps_v4i32_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtps_v4i32_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtps s0, s0
+; CHECK-NEXT:    ret
+  %fcvtps_scalar = call i32 @llvm.aarch64.neon.fcvtps.i32.f32(float %A)
+  %fcvtps_vector = insertelement <4 x i32> poison, i32 %fcvtps_scalar, i32 0
+  ret <4 x i32> %fcvtps_vector
+}
+
+define <4 x i32> @fcvtps_v4i32_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtps_v4i32_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtps s0, d0
+; CHECK-NEXT:    ret
+  %fcvtps_scalar = tail call i32 @llvm.aarch64.neon.fcvtps.i32.f64(double %a)
+  %fcvtps_vector = insertelement <4 x i32> poison, i32 %fcvtps_scalar, i32 0
+  ret <4 x i32> %fcvtps_vector
+}
+
+define <1 x i64> @fcvtps_v1i64_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtps_v1i64_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtps d0, h0
+; CHECK-NEXT:    ret
+  %fcvtps_scalar = tail call i64 @llvm.aarch64.neon.fcvtps.i64.f16(half %a)
+  %fcvtps_vector = insertelement <1 x i64> poison, i64 %fcvtps_scalar, i32 0
+  ret <1 x i64> %fcvtps_vector
+}
+
+define <1 x i64> @fcvtps_v1i64_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtps_v1i64_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtps d0, s0
+; CHECK-NEXT:    ret
+  %fcvtps_scalar = call i64 @llvm.aarch64.neon.fcvtps.i64.f32(float %A)
+  %fcvtps_vector = insertelement <1 x i64> poison, i64 %fcvtps_scalar, i32 0
+  ret <1 x i64> %fcvtps_vector
+}
+
+define <1 x i64> @fcvtps_v1i64_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtps_v1i64_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtps d0, d0
+; CHECK-NEXT:    ret
+  %fcvtps_scalar = tail call i64 @llvm.aarch64.neon.fcvtps.i64.f64(double %a)
+  %fcvtps_vector = insertelement <1 x i64> poison, i64 %fcvtps_scalar, i32 0
+  ret <1 x i64> %fcvtps_vector
+}
+
+define <2 x i64> @fcvtps_v2i64_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtps_v2i64_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtps d0, h0
+; CHECK-NEXT:    ret
+  %fcvtps_scalar = tail call i64 @llvm.aarch64.neon.fcvtps.i64.f16(half %a)
+  %fcvtps_vector = insertelement <2 x i64> poison, i64 %fcvtps_scalar, i32 0
+  ret <2 x i64> %fcvtps_vector
+}
+
+define <2 x i64> @fcvtps_v2i64_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtps_v2i64_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtps d0, s0
+; CHECK-NEXT:    ret
+  %fcvtps_scalar = call i64 @llvm.aarch64.neon.fcvtps.i64.f32(float %A)
+  %fcvtps_vector = insertelement <2 x i64> poison, i64 %fcvtps_scalar, i32 0
+  ret <2 x i64> %fcvtps_vector
+}
+
+define <2 x i64> @fcvtps_v2i64_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtps_v2i64_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtps d0, d0
+; CHECK-NEXT:    ret
+  %fcvtps_scalar = tail call i64 @llvm.aarch64.neon.fcvtps.i64.f64(double %a)
+  %fcvtps_vector = insertelement <2 x i64> poison, i64 %fcvtps_scalar, i32 0
+  ret <2 x i64> %fcvtps_vector
+}
+
+define <2 x i32> @fcvtpu_v2i32_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtpu_v2i32_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtpu s0, h0
+; CHECK-NEXT:    ret
+  %fcvtpu_scalar = tail call i32 @llvm.aarch64.neon.fcvtpu.i32.f16(half %a)
+  %fcvtpu_vector = insertelement <2 x i32> poison, i32 %fcvtpu_scalar, i32 0
+  ret <2 x i32> %fcvtpu_vector
+}
+
+define <2 x i32> @fcvtpu_v2i32_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtpu_v2i32_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtpu s0, s0
+; CHECK-NEXT:    ret
+  %fcvtpu_scalar = call i32 @llvm.aarch64.neon.fcvtpu.i32.f32(float %A)
+  %fcvtpu_vector = insertelement <2 x i32> poison, i32 %fcvtpu_scalar, i32 0
+  ret <2 x i32> %fcvtpu_vector
+}
+
+define <2 x i32> @fcvtpu_v2i32_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtpu_v2i32_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtpu s0, d0
+; CHECK-NEXT:    ret
+  %fcvtpu_scalar = tail call i32 @llvm.aarch64.neon.fcvtpu.i32.f64(double %a)
+  %fcvtpu_vector = insertelement <2 x i32> poison, i32 %fcvtpu_scalar, i32 0
+  ret <2 x i32> %fcvtpu_vector
+}
+
+define <4 x i32> @fcvtpu_v4i32_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtpu_v4i32_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtpu s0, h0
+; CHECK-NEXT:    ret
+  %fcvtpu_scalar = tail call i32 @llvm.aarch64.neon.fcvtpu.i32.f16(half %a)
+  %fcvtpu_vector = insertelement <4 x i32> poison, i32 %fcvtpu_scalar, i32 0
+  ret <4 x i32> %fcvtpu_vector
+}
+
+define <4 x i32> @fcvtpu_v4i32_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtpu_v4i32_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtpu s0, s0
+; CHECK-NEXT:    ret
+  %fcvtpu_scalar = call i32 @llvm.aarch64.neon.fcvtpu.i32.f32(float %A)
+  %fcvtpu_vector = insertelement <4 x i32> poison, i32 %fcvtpu_scalar, i32 0
+  ret <4 x i32> %fcvtpu_vector
+}
+
+define <4 x i32> @fcvtpu_v4i32_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtpu_v4i32_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtpu s0, d0
+; CHECK-NEXT:    ret
+  %fcvtpu_scalar = tail call i32 @llvm.aarch64.neon.fcvtpu.i32.f64(double %a)
+  %fcvtpu_vector = insertelement <4 x i32> poison, i32 %fcvtpu_scalar, i32 0
+  ret <4 x i32> %fcvtpu_vector
+}
+
+define <1 x i64> @fcvtpu_v1i64_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtpu_v1i64_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtpu d0, h0
+; CHECK-NEXT:    ret
+  %fcvtpu_scalar = tail call i64 @llvm.aarch64.neon.fcvtpu.i64.f16(half %a)
+  %fcvtpu_vector = insertelement <1 x i64> poison, i64 %fcvtpu_scalar, i32 0
+  ret <1 x i64> %fcvtpu_vector
+}
+
+define <1 x i64> @fcvtpu_v1i64_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtpu_v1i64_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtpu d0, s0
+; CHECK-NEXT:    ret
+  %fcvtpu_scalar = call i64 @llvm.aarch64.neon.fcvtpu.i64.f32(float %A)
+  %fcvtpu_vector = insertelement <1 x i64> poison, i64 %fcvtpu_scalar, i32 0
+  ret <1 x i64> %fcvtpu_vector
+}
+
+define <1 x i64> @fcvtpu_v1i64_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtpu_v1i64_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtpu d0, d0
+; CHECK-NEXT:    ret
+  %fcvtpu_scalar = tail call i64 @llvm.aarch64.neon.fcvtpu.i64.f64(double %a)
+  %fcvtpu_vector = insertelement <1 x i64> poison, i64 %fcvtpu_scalar, i32 0
+  ret <1 x i64> %fcvtpu_vector
+}
+
+define <2 x i64> @fcvtpu_v2i64_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtpu_v2i64_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtpu d0, h0
+; CHECK-NEXT:    ret
+  %fcvtpu_scalar = tail call i64 @llvm.aarch64.neon.fcvtpu.i64.f16(half %a)
+  %fcvtpu_vector = insertelement <2 x i64> poison, i64 %fcvtpu_scalar, i32 0
+  ret <2 x i64> %fcvtpu_vector
+}
+
+define <2 x i64> @fcvtpu_v2i64_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtpu_v2i64_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtpu d0, s0
+; CHECK-NEXT:    ret
+  %fcvtpu_scalar = call i64 @llvm.aarch64.neon.fcvtpu.i64.f32(float %A)
+  %fcvtpu_vector = insertelement <2 x i64> poison, i64 %fcvtpu_scalar, i32 0
+  ret <2 x i64> %fcvtpu_vector
+}
+
+define <2 x i64> @fcvtpu_v2i64_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtpu_v2i64_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtpu d0, d0
+; CHECK-NEXT:    ret
+  %fcvtpu_scalar = tail call i64 @llvm.aarch64.neon.fcvtpu.i64.f64(double %a)
+  %fcvtpu_vector = insertelement <2 x i64> poison, i64 %fcvtpu_scalar, i32 0
+  ret <2 x i64> %fcvtpu_vector
+}
+
+define <2 x i32> @fcvtzs_v2i32_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtzs_v2i32_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzs s0, h0
+; CHECK-NEXT:    ret
+  %fcvtzs_scalar = tail call i32 @llvm.aarch64.neon.fcvtzs.i32.f16(half %a)
+  %fcvtzs_vector = insertelement <2 x i32> poison, i32 %fcvtzs_scalar, i32 0
+  ret <2 x i32> %fcvtzs_vector
+}
+
+define <2 x i32> @fcvtzs_v2i32_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtzs_v2i32_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzs s0, s0
+; CHECK-NEXT:    ret
+  %fcvtzs_scalar = call i32 @llvm.aarch64.neon.fcvtzs.i32.f32(float %A)
+  %fcvtzs_vector = insertelement <2 x i32> poison, i32 %fcvtzs_scalar, i32 0
+  ret <2 x i32> %fcvtzs_vector
+}
+
+define <2 x i32> @fcvtzs_v2i32_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtzs_v2i32_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzs s0, d0
+; CHECK-NEXT:    ret
+  %fcvtzs_scalar = tail call i32 @llvm.aarch64.neon.fcvtzs.i32.f64(double %a)
+  %fcvtzs_vector = insertelement <2 x i32> poison, i32 %fcvtzs_scalar, i32 0
+  ret <2 x i32> %fcvtzs_vector
+}
+
+define <4 x i32> @fcvtzs_v4i32_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtzs_v4i32_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzs s0, h0
+; CHECK-NEXT:    ret
+  %fcvtzs_scalar = tail call i32 @llvm.aarch64.neon.fcvtzs.i32.f16(half %a)
+  %fcvtzs_vector = insertelement <4 x i32> poison, i32 %fcvtzs_scalar, i32 0
+  ret <4 x i32> %fcvtzs_vector
+}
+
+define <4 x i32> @fcvtzs_v4i32_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtzs_v4i32_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzs s0, s0
+; CHECK-NEXT:    ret
+  %fcvtzs_scalar = call i32 @llvm.aarch64.neon.fcvtzs.i32.f32(float %A)
+  %fcvtzs_vector = insertelement <4 x i32> poison, i32 %fcvtzs_scalar, i32 0
+  ret <4 x i32> %fcvtzs_vector
+}
+
+define <4 x i32> @fcvtzs_v4i32_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtzs_v4i32_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzs s0, d0
+; CHECK-NEXT:    ret
+  %fcvtzs_scalar = tail call i32 @llvm.aarch64.neon.fcvtzs.i32.f64(double %a)
+  %fcvtzs_vector = insertelement <4 x i32> poison, i32 %fcvtzs_scalar, i32 0
+  ret <4 x i32> %fcvtzs_vector
+}
+
+define <1 x i64> @fcvtzs_v1i64_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtzs_v1i64_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzs d0, h0
+; CHECK-NEXT:    ret
+  %fcvtzs_scalar = tail call i64 @llvm.aarch64.neon.fcvtzs.i64.f16(half %a)
+  %fcvtzs_vector = insertelement <1 x i64> poison, i64 %fcvtzs_scalar, i32 0
+  ret <1 x i64> %fcvtzs_vector
+}
+
+define <1 x i64> @fcvtzs_v1i64_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtzs_v1i64_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzs d0, s0
+; CHECK-NEXT:    ret
+  %fcvtzs_scalar = call i64 @llvm.aarch64.neon.fcvtzs.i64.f32(float %A)
+  %fcvtzs_vector = insertelement <1 x i64> poison, i64 %fcvtzs_scalar, i32 0
+  ret <1 x i64> %fcvtzs_vector
+}
+
+define <1 x i64> @fcvtzs_v1i64_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtzs_v1i64_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzs d0, d0
+; CHECK-NEXT:    ret
+  %fcvtzs_scalar = tail call i64 @llvm.aarch64.neon.fcvtzs.i64.f64(double %a)
+  %fcvtzs_vector = insertelement <1 x i64> poison, i64 %fcvtzs_scalar, i32 0
+  ret <1 x i64> %fcvtzs_vector
+}
+
+define <2 x i64> @fcvtzs_v2i64_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtzs_v2i64_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzs d0, h0
+; CHECK-NEXT:    ret
+  %fcvtzs_scalar = tail call i64 @llvm.aarch64.neon.fcvtzs.i64.f16(half %a)
+  %fcvtzs_vector = insertelement <2 x i64> poison, i64 %fcvtzs_scalar, i32 0
+  ret <2 x i64> %fcvtzs_vector
+}
+
+define <2 x i64> @fcvtzs_v2i64_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtzs_v2i64_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzs d0, s0
+; CHECK-NEXT:    ret
+  %fcvtzs_scalar = call i64 @llvm.aarch64.neon.fcvtzs.i64.f32(float %A)
+  %fcvtzs_vector = insertelement <2 x i64> poison, i64 %fcvtzs_scalar, i32 0
+  ret <2 x i64> %fcvtzs_vector
+}
+
+define <2 x i64> @fcvtzs_v2i64_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtzs_v2i64_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzs d0, d0
+; CHECK-NEXT:    ret
+  %fcvtzs_scalar = tail call i64 @llvm.aarch64.neon.fcvtzs.i64.f64(double %a)
+  %fcvtzs_vector = insertelement <2 x i64> poison, i64 %fcvtzs_scalar, i32 0
+  ret <2 x i64> %fcvtzs_vector
+}
+
+define <2 x i32> @fcvtzu_v2i32_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtzu_v2i32_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzu s0, h0
+; CHECK-NEXT:    ret
+  %fcvtzu_scalar = tail call i32 @llvm.aarch64.neon.fcvtzu.i32.f16(half %a)
+  %fcvtzu_vector = insertelement <2 x i32> poison, i32 %fcvtzu_scalar, i32 0
+  ret <2 x i32> %fcvtzu_vector
+}
+
+define <2 x i32> @fcvtzu_v2i32_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtzu_v2i32_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzu s0, s0
+; CHECK-NEXT:    ret
+  %fcvtzu_scalar = call i32 @llvm.aarch64.neon.fcvtzu.i32.f32(float %A)
+  %fcvtzu_vector = insertelement <2 x i32> poison, i32 %fcvtzu_scalar, i32 0
+  ret <2 x i32> %fcvtzu_vector
+}
+
+define <2 x i32> @fcvtzu_v2i32_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtzu_v2i32_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzu s0, d0
+; CHECK-NEXT:    ret
+  %fcvtzu_scalar = tail call i32 @llvm.aarch64.neon.fcvtzu.i32.f64(double %a)
+  %fcvtzu_vector = insertelement <2 x i32> poison, i32 %fcvtzu_scalar, i32 0
+  ret <2 x i32> %fcvtzu_vector
+}
+
+define <4 x i32> @fcvtzu_v4i32_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtzu_v4i32_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzu s0, h0
+; CHECK-NEXT:    ret
+  %fcvtzu_scalar = tail call i32 @llvm.aarch64.neon.fcvtzu.i32.f16(half %a)
+  %fcvtzu_vector = insertelement <4 x i32> poison, i32 %fcvtzu_scalar, i32 0
+  ret <4 x i32> %fcvtzu_vector
+}
+
+define <4 x i32> @fcvtzu_v4i32_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtzu_v4i32_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzu s0, s0
+; CHECK-NEXT:    ret
+  %fcvtzu_scalar = call i32 @llvm.aarch64.neon.fcvtzu.i32.f32(float %A)
+  %fcvtzu_vector = insertelement <4 x i32> poison, i32 %fcvtzu_scalar, i32 0
+  ret <4 x i32> %fcvtzu_vector
+}
+
+define <4 x i32> @fcvtzu_v4i32_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtzu_v4i32_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzu s0, d0
+; CHECK-NEXT:    ret
+  %fcvtzu_scalar = tail call i32 @llvm.aarch64.neon.fcvtzu.i32.f64(double %a)
+  %fcvtzu_vector = insertelement <4 x i32> poison, i32 %fcvtzu_scalar, i32 0
+  ret <4 x i32> %fcvtzu_vector
+}
+
+define <1 x i64> @fcvtzu_v1i64_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtzu_v1i64_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzu d0, h0
+; CHECK-NEXT:    ret
+  %fcvtzu_scalar = tail call i64 @llvm.aarch64.neon.fcvtzu.i64.f16(half %a)
+  %fcvtzu_vector = insertelement <1 x i64> poison, i64 %fcvtzu_scalar, i32 0
+  ret <1 x i64> %fcvtzu_vector
+}
+
+define <1 x i64> @fcvtzu_v1i64_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtzu_v1i64_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzu d0, s0
+; CHECK-NEXT:    ret
+  %fcvtzu_scalar = call i64 @llvm.aarch64.neon.fcvtzu.i64.f32(float %A)
+  %fcvtzu_vector = insertelement <1 x i64> poison, i64 %fcvtzu_scalar, i32 0
+  ret <1 x i64> %fcvtzu_vector
+}
+
+define <1 x i64> @fcvtzu_v1i64_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtzu_v1i64_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzu d0, d0
+; CHECK-NEXT:    ret
+  %fcvtzu_scalar = tail call i64 @llvm.aarch64.neon.fcvtzu.i64.f64(double %a)
+  %fcvtzu_vector = insertelement <1 x i64> poison, i64 %fcvtzu_scalar, i32 0
+  ret <1 x i64> %fcvtzu_vector
+}
+
+define <2 x i64> @fcvtzu_v2i64_from_f16_scalar_to_vector_simd(half %a) {
+; CHECK-LABEL: fcvtzu_v2i64_from_f16_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzu d0, h0
+; CHECK-NEXT:    ret
+  %fcvtzu_scalar = tail call i64 @llvm.aarch64.neon.fcvtzu.i64.f16(half %a)
+  %fcvtzu_vector = insertelement <2 x i64> poison, i64 %fcvtzu_scalar, i32 0
+  ret <2 x i64> %fcvtzu_vector
+}
+
+define <2 x i64> @fcvtzu_v2i64_from_f32_scalar_to_vector_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtzu_v2i64_from_f32_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzu d0, s0
+; CHECK-NEXT:    ret
+  %fcvtzu_scalar = call i64 @llvm.aarch64.neon.fcvtzu.i64.f32(float %A)
+  %fcvtzu_vector = insertelement <2 x i64> poison, i64 %fcvtzu_scalar, i32 0
+  ret <2 x i64> %fcvtzu_vector
+}
+
+define <2 x i64> @fcvtzu_v2i64_from_f64_scalar_to_vector_simd(double %a) {
+; CHECK-LABEL: fcvtzu_v2i64_from_f64_scalar_to_vector_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzu d0, d0
+; CHECK-NEXT:    ret
+  %fcvtzu_scalar = tail call i64 @llvm.aarch64.neon.fcvtzu.i64.f64(double %a)
+  %fcvtzu_vector = insertelement <2 x i64> poison, i64 %fcvtzu_scalar, i32 0
+  ret <2 x i64> %fcvtzu_vector
+}
diff --git a/llvm/test/CodeGen/AArch64/arm64-fixed-point-scalar-cvt-dagcombine.ll b/llvm/test/CodeGen/AArch64/arm64-fixed-point-scalar-cvt-dagcombine.ll
index b580c4921fb66..35f62e52ffd76 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fixed-point-scalar-cvt-dagcombine.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fixed-point-scalar-cvt-dagcombine.ll
@@ -21,8 +21,7 @@ define double @bar(ptr %iVals, ptr %fVals, ptr %dVals) {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr d0, [x2, #128]
 ; CHECK-NEXT:    frinti d0, d0
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fcvtzs d0, d0
 ; CHECK-NEXT:    sri d0, d0, #1
 ; CHECK-NEXT:    scvtf.2d v0, v0, #1
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll b/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll
index d8f370884c84a..c2f39fb14ee24 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll
@@ -980,18 +980,11 @@ define <1 x double> @test_bitcasti64tov1f64(i64 %in) {
 }
 
 define <1 x i64> @test_bitcastv8i8tov1f64(<8 x i8> %a) #0 {
-; CHECK-SD-LABEL: test_bitcastv8i8tov1f64:
-; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    neg v0.8b, v0.8b
-; CHECK-SD-NEXT:    fcvtzs x8, d0
-; CHECK-SD-NEXT:    fmov d0, x8
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: test_bitcastv8i8tov1f64:
-; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    neg v0.8b, v0.8b
-; CHECK-GI-NEXT:    fcvtzs d0, d0
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: test_bitcastv8i8tov1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg v0.8b, v0.8b
+; CHECK-NEXT:    fcvtzs d0, d0
+; CHECK-NEXT:    ret
   %sub.i = sub <8 x i8> zeroinitializer, %a
   %1 = bitcast <8 x i8> %sub.i to <1 x double>
   %vcvt.i = fptosi <1 x double> %1 to <1 x i64>
@@ -999,18 +992,11 @@ define <1 x i64> @test_bitcastv8i8tov1f64(<8 x i8> %a) #0 {
 }
 
 define <1 x i64> @test_bitcastv4i16tov1f64(<4 x i16> %a) #0 {
-; CHECK-SD-LABEL: test_bitcastv4i16tov1f64:
-; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    neg v0.4h, v0.4h
-; CHECK-SD-NEXT:    fcvtzs x8, d0
-; CHECK-SD-NEXT:    fmov d0, x8
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: test_bitcastv4i16tov1f64:
-; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    neg v0.4h, v0.4h
-; CHECK-GI-NEXT:    fcvtzs d0, d0
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: test_bitcastv4i16tov1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg v0.4h, v0.4h
+; CHECK-NEXT:    fcvtzs d0, d0
+; CHECK-NEXT:    ret
   %sub.i = sub <4 x i16> zeroinitializer, %a
   %1 = bitcast <4 x i16> %sub.i to <1 x double>
   %vcvt.i = fptosi <1 x double> %1 to <1 x i64>
@@ -1018,18 +1004,11 @@ define <1 x i64> @test_bitcastv4i16tov1f64(<4 x i16> %a) #0 {
 }
 
 define <1 x i64> @test_bitcastv2i32tov1f64(<2 x i32> %a) #0 {
-; CHECK-SD-LABEL: test_bitcastv2i32tov1f64:
-; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    neg v0.2s, v0.2s
-; CHECK-SD-NEXT:    fcvtzs x8, d0
-; CHECK-SD-NEXT:    fmov d0, x8
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: test_bitcastv2i32tov1f64:
-; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    neg v0.2s, v0.2s
-; CHECK-GI-NEXT:    fcvtzs d0, d0
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: test_bitcastv2i32tov1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzs d0, d0
+; CHECK-NEXT:    ret
   %sub.i = sub <2 x i32> zeroinitializer, %a
   %1 = bitcast <2 x i32> %sub.i to <1 x double>
   %vcvt.i = fptosi <1 x double> %1 to <1 x i64>
@@ -1040,8 +1019,7 @@ define <1 x i64> @test_bitcastv1i64tov1f64(<1 x i64> %a) #0 {
 ; CHECK-SD-LABEL: test_bitcastv1i64tov1f64:
 ; CHECK-SD:       // %bb.0:
 ; CHECK-SD-NEXT:    neg d0, d0
-; CHECK-SD-NEXT:    fcvtzs x8, d0
-; CHECK-SD-NEXT:    fmov d0, x8
+; CHECK-SD-NEXT:    fcvtzs d0, d0
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: test_bitcastv1i64tov1f64:
@@ -1061,8 +1039,7 @@ define <1 x i64> @test_bitcastv2f32tov1f64(<2 x float> %a) #0 {
 ; CHECK-LABEL: test_bitcastv2f32tov1f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fneg v0.2s, v0.2s
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fcvtzs d0, d0
 ; CHECK-NEXT:    ret
   %sub.i = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %a
   %1 = bitcast <2 x float> %sub.i to <1 x double>
diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-select_cc.ll b/llvm/test/CodeGen/AArch64/arm64-neon-select_cc.ll
index cad3fb58086d6..b72fbe0a91684 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-select_cc.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-select_cc.ll
@@ -114,9 +114,9 @@ define <8x i16> @test_select_cc_v8i16(i16 %a, i16 %b, <8x i16> %c, <8x i16> %d )
 define <2x i32> @test_select_cc_v2i32(i32 %a, i32 %b, <2x i32> %c, <2x i32> %d ) {
 ; CHECK-LABEL: test_select_cc_v2i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmov s2, w1
-; CHECK-NEXT:    fmov s3, w0
-; CHECK-NEXT:    cmeq v2.2s, v3.2s, v2.2s
+; CHECK-NEXT:    fmov s2, w0
+; CHECK-NEXT:    fmov s3, w1
+; CHECK-NEXT:    cmeq v2.2s, v2.2s, v3.2s
 ; CHECK-NEXT:    dup v2.2s, v2.s[0]
 ; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
 ; CHECK-NEXT:    ret
@@ -128,9 +128,9 @@ define <2x i32> @test_select_cc_v2i32(i32 %a, i32 %b, <2x i32> %c, <2x i32> %d )
 define <4x i32> @test_select_cc_v4i32(i32 %a, i32 %b, <4x i32> %c, <4x i32> %d ) {
 ; CHECK-LABEL: test_select_cc_v4i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmov s2, w1
-; CHECK-NEXT:    fmov s3, w0
-; CHECK-NEXT:    cmeq v2.4s, v3.4s, v2.4s
+; CHECK-NEXT:    fmov s2, w0
+; CHECK-NEXT:    fmov s3, w1
+; CHECK-NEXT:    cmeq v2.4s, v2.4s, v3.4s
 ; CHECK-NEXT:    dup v2.4s, v2.s[0]
 ; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
 ; CHECK-NEXT:    ret
@@ -155,9 +155,9 @@ define <1x i64> @test_select_cc_v1i64(i64 %a, i64 %b, <1x i64> %c, <1x i64> %d )
 define <2x i64> @test_select_cc_v2i64(i64 %a, i64 %b, <2x i64> %c, <2x i64> %d ) {
 ; CHECK-LABEL: test_select_cc_v2i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmov d2, x1
-; CHECK-NEXT:    fmov d3, x0
-; CHECK-NEXT:    cmeq v2.2d, v3.2d, v2.2d
+; CHECK-NEXT:    fmov d2, x0
+; CHECK-NEXT:    fmov d3, x1
+; CHECK-NEXT:    cmeq v2.2d, v2.2d, v3.2d
 ; CHECK-NEXT:    dup v2.2d, v2.d[0]
 ; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
 ; CHECK-NEXT:    ret
@@ -210,9 +210,9 @@ define <4x float> @test_select_cc_v4f32(float %a, float %b, <4x float> %c, <4x f
 define <4x float> @test_select_cc_v4f32_icmp(i32 %a, i32 %b, <4x float> %c, <4x float> %d ) {
 ; CHECK-LABEL: test_select_cc_v4f32_icmp:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmov s2, w1
-; CHECK-NEXT:    fmov s3, w0
-; CHECK-NEXT:    cmeq v2.4s, v3.4s, v2.4s
+; CHECK-NEXT:    fmov s2, w0
+; CHECK-NEXT:    fmov s3, w1
+; CHECK-NEXT:    cmeq v2.4s, v2.4s, v3.4s
 ; CHECK-NEXT:    dup v2.4s, v2.s[0]
 ; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-v8.1a.ll b/llvm/test/CodeGen/AArch64/arm64-neon-v8.1a.ll
index a4b9878579ebf..d9bc4b77f9061 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-v8.1a.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-v8.1a.ll
@@ -405,15 +405,25 @@ define i16 @test_sqrdmlah_v1i16(i16 %acc, i16 %x, i16 %y) {
 }
 
 define i32 @test_sqrdmlah_v1i32(i32 %acc, i32 %x, i32 %y) {
-; CHECK-LABEL: test_sqrdmlah_v1i32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmov s0, w1
-; CHECK-NEXT:    fmov s1, w2
-; CHECK-NEXT:    sqrdmulh v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    fmov s1, w0
-; CHECK-NEXT:    sqadd v0.4s, v1.4s, v0.4s
-; CHECK-NEXT:    fmov w0, s0
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: test_sqrdmlah_v1i32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fmov s0, w2
+; CHECK-SD-NEXT:    fmov s1, w1
+; CHECK-SD-NEXT:    sqrdmulh v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT:    fmov s1, w0
+; CHECK-SD-NEXT:    sqadd v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: test_sqrdmlah_v1i32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fmov s0, w1
+; CHECK-GI-NEXT:    fmov s1, w2
+; CHECK-GI-NEXT:    sqrdmulh v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT:    fmov s1, w0
+; CHECK-GI-NEXT:    sqadd v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT:    fmov w0, s0
+; CHECK-GI-NEXT:    ret
   %x_vec = insertelement <4 x i32> undef, i32 %x, i64 0
   %y_vec = insertelement <4 x i32> undef, i32 %y, i64 0
   %prod_vec = call <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32> %x_vec,  <4 x i32> %y_vec)
@@ -444,15 +454,25 @@ define i16 @test_sqrdmlsh_v1i16(i16 %acc, i16 %x, i16 %y) {
 }
 
 define i32 @test_sqrdmlsh_v1i32(i32 %acc, i32 %x, i32 %y) {
-; CHECK-LABEL: test_sqrdmlsh_v1i32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmov s0, w1
-; CHECK-NEXT:    fmov s1, w2
-; CHECK-NEXT:    sqrdmulh v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    fmov s1, w0
-; CHECK-NEXT:    sqsub v0.4s, v1.4s, v0.4s
-; CHECK-NEXT:    fmov w0, s0
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: test_sqrdmlsh_v1i32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fmov s0, w2
+; CHECK-SD-NEXT:    fmov s1, w1
+; CHECK-SD-NEXT:    sqrdmulh v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT:    fmov s1, w0
+; CHECK-SD-NEXT:    sqsub v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: test_sqrdmlsh_v1i32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fmov s0, w1
+; CHECK-GI-NEXT:    fmov s1, w2
+; CHECK-GI-NEXT:    sqrdmulh v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT:    fmov s1, w0
+; CHECK-GI-NEXT:    sqsub v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT:    fmov w0, s0
+; CHECK-GI-NEXT:    ret
   %x_vec = insertelement <4 x i32> undef, i32 %x, i64 0
   %y_vec = insertelement <4 x i32> undef, i32 %y, i64 0
   %prod_vec = call <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32> %x_vec,  <4 x i32> %y_vec)
diff --git a/llvm/test/CodeGen/AArch64/arm64-vcvt.ll b/llvm/test/CodeGen/AArch64/arm64-vcvt.ll
index 1e0cfa0201263..7f6b16d3ccbb4 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vcvt.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vcvt.ll
@@ -357,18 +357,11 @@ define <2 x i64> @fcvtzs_2d(<2 x double> %A) nounwind {
 	ret <2 x i64> %tmp3
 }
 
-; FIXME: Generate "fcvtzs d0, d0"?
 define <1 x i64> @fcvtzs_1d(<1 x double> %A) nounwind {
-; CHECK-SD-LABEL: fcvtzs_1d:
-; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    fcvtzs x8, d0
-; CHECK-SD-NEXT:    fmov d0, x8
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: fcvtzs_1d:
-; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    fcvtzs d0, d0
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: fcvtzs_1d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzs d0, d0
+; CHECK-NEXT:    ret
 	%tmp3 = fptosi <1 x double> %A to <1 x i64>
 	ret <1 x i64> %tmp3
 }
@@ -441,18 +434,11 @@ define <2 x i64> @fcvtzu_2d(<2 x double> %A) nounwind {
 	ret <2 x i64> %tmp3
 }
 
-; FIXME: Generate "fcvtzu d0, d0"?
 define <1 x i64> @fcvtzu_1d(<1 x double> %A) nounwind {
-; CHECK-SD-LABEL: fcvtzu_1d:
-; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    fcvtzu x8, d0
-; CHECK-SD-NEXT:    fmov d0, x8
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: fcvtzu_1d:
-; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    fcvtzu d0, d0
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: fcvtzu_1d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzu d0, d0
+; CHECK-NEXT:    ret
 	%tmp3 = fptoui <1 x double> %A to <1 x i64>
 	ret <1 x i64> %tmp3
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-vshift.ll b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
index 29c06b8fa228c..8d17836a2b761 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vshift.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
@@ -2507,8 +2507,7 @@ define <1 x i64> @neon_ushl_vscalar_constant_shift(ptr %A) nounwind {
 define i64 @neon_ushl_scalar_constant_shift(ptr %A) nounwind {
 ; CHECK-SD-LABEL: neon_ushl_scalar_constant_shift:
 ; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    ldr w8, [x0]
-; CHECK-SD-NEXT:    fmov d0, x8
+; CHECK-SD-NEXT:    ldr s0, [x0]
 ; CHECK-SD-NEXT:    shl d0, d0, #1
 ; CHECK-SD-NEXT:    fmov x0, d0
 ; CHECK-SD-NEXT:    ret
@@ -2800,8 +2799,7 @@ define <1 x i64> @neon_sshll_vscalar_constant_shift(ptr %A) nounwind {
 define i64 @neon_sshll_scalar_constant_shift(ptr %A) nounwind {
 ; CHECK-SD-LABEL: neon_sshll_scalar_constant_shift:
 ; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    ldr w8, [x0]
-; CHECK-SD-NEXT:    fmov d0, x8
+; CHECK-SD-NEXT:    ldr s0, [x0]
 ; CHECK-SD-NEXT:    shl d0, d0, #1
 ; CHECK-SD-NEXT:    fmov x0, d0
 ; CHECK-SD-NEXT:    ret
@@ -2824,8 +2822,7 @@ define i64 @neon_sshll_scalar_constant_shift(ptr %A) nounwind {
 define i64 @neon_sshll_scalar_constant_shift_m1(ptr %A) nounwind {
 ; CHECK-SD-LABEL: neon_sshll_scalar_constant_shift_m1:
 ; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    ldr w8, [x0]
-; CHECK-SD-NEXT:    fmov d0, x8
+; CHECK-SD-NEXT:    ldr s0, [x0]
 ; CHECK-SD-NEXT:    sshr d0, d0, #1
 ; CHECK-SD-NEXT:    fmov x0, d0
 ; CHECK-SD-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll b/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll
index d7a2a83cf3660..bd9162e36f299 100644
--- a/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll
@@ -107,20 +107,20 @@ define <4 x i32> @lower_trunc_4xi32(i64 %a, i64 %b, i64 %c, i64 %d) {
 define <8 x i32> @lower_trunc_8xi32(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i64 %g, i64 %h) {
 ; CHECK-LABEL: lower_trunc_8xi32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmov d0, x6
-; CHECK-NEXT:    fmov d1, x4
+; CHECK-NEXT:    fmov d0, x0
+; CHECK-NEXT:    fmov d1, x6
 ; CHECK-NEXT:    fmov d2, x2
-; CHECK-NEXT:    fmov d3, x0
-; CHECK-NEXT:    mov v0.d[1], x7
-; CHECK-NEXT:    mov v1.d[1], x5
+; CHECK-NEXT:    fmov d3, x4
+; CHECK-NEXT:    mov v1.d[1], x7
 ; CHECK-NEXT:    mov v2.d[1], x3
-; CHECK-NEXT:    mov v3.d[1], x1
-; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v0.4s
-; CHECK-NEXT:    uzp1 v2.4s, v3.4s, v2.4s
-; CHECK-NEXT:    add v3.4s, v1.4s, v1.4s
-; CHECK-NEXT:    add v0.4s, v2.4s, v2.4s
-; CHECK-NEXT:    eor v1.16b, v1.16b, v3.16b
-; CHECK-NEXT:    eor v0.16b, v2.16b, v0.16b
+; CHECK-NEXT:    mov v0.d[1], x1
+; CHECK-NEXT:    mov v3.d[1], x5
+; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    uzp1 v1.4s, v3.4s, v1.4s
+; CHECK-NEXT:    add v3.4s, v0.4s, v0.4s
+; CHECK-NEXT:    add v2.4s, v1.4s, v1.4s
+; CHECK-NEXT:    eor v0.16b, v0.16b, v3.16b
+; CHECK-NEXT:    eor v1.16b, v1.16b, v2.16b
 ; CHECK-NEXT:    ret
   %a1 = insertelement <8 x i64> poison, i64 %a, i64 0
   %b1 = insertelement <8 x i64> %a1, i64 %b, i64 1
diff --git a/llvm/test/CodeGen/AArch64/bitcast-extend.ll b/llvm/test/CodeGen/AArch64/bitcast-extend.ll
index 2bd91a8dc9a7d..c68e55d7a8aef 100644
--- a/llvm/test/CodeGen/AArch64/bitcast-extend.ll
+++ b/llvm/test/CodeGen/AArch64/bitcast-extend.ll
@@ -339,8 +339,7 @@ define <8 x i8> @load_sext_i32_v8i8(ptr %p) {
 define <16 x i8> @load_zext_v16i8(ptr %p) {
 ; CHECK-SD-LABEL: load_zext_v16i8:
 ; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    ldr w8, [x0]
-; CHECK-SD-NEXT:    fmov d0, x8
+; CHECK-SD-NEXT:    ldr s0, [x0]
 ; CHECK-SD-NEXT:    fmov d0, d0
 ; CHECK-SD-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll b/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll
index 545da98034527..171f74149c905 100644
--- a/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll
+++ b/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll
@@ -60,16 +60,16 @@ define i16 @combine_add_16xi16(i16 %a, i16 %b, i16 %c, i16 %d, i16 %e, i16 %f, i
 define i32 @combine_add_8xi32(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h) local_unnamed_addr #0 {
 ; CHECK-LABEL: combine_add_8xi32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmov s0, w4
-; CHECK-NEXT:    fmov s1, w0
-; CHECK-NEXT:    mov v0.s[1], w5
-; CHECK-NEXT:    mov v1.s[1], w1
-; CHECK-NEXT:    mov v0.s[2], w6
-; CHECK-NEXT:    mov v1.s[2], w2
-; CHECK-NEXT:    mov v0.s[3], w7
-; CHECK-NEXT:    mov v1.s[3], w3
-; CHECK-NEXT:    uzp2 v2.8h, v1.8h, v0.8h
-; CHECK-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NEXT:    fmov s0, w0
+; CHECK-NEXT:    fmov s1, w4
+; CHECK-NEXT:    mov v1.s[1], w5
+; CHECK-NEXT:    mov v0.s[1], w1
+; CHECK-NEXT:    mov v1.s[2], w6
+; CHECK-NEXT:    mov v0.s[2], w2
+; CHECK-NEXT:    mov v1.s[3], w7
+; CHECK-NEXT:    mov v0.s[3], w3
+; CHECK-NEXT:    uzp2 v2.8h, v0.8h, v1.8h
+; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    uhadd v0.8h, v0.8h, v2.8h
 ; CHECK-NEXT:    uaddlv s0, v0.8h
 ; CHECK-NEXT:    fmov w0, s0
diff --git a/llvm/test/CodeGen/AArch64/ctpop.ll b/llvm/test/CodeGen/AArch64/ctpop.ll
index df817afb12368..c2490c1e1b331 100644
--- a/llvm/test/CodeGen/AArch64/ctpop.ll
+++ b/llvm/test/CodeGen/AArch64/ctpop.ll
@@ -395,24 +395,24 @@ entry:
 define <3 x i128> @v3i128(<3 x i128> %d) {
 ; CHECK-SD-LABEL: v3i128:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    fmov d0, x4
+; CHECK-SD-NEXT:    fmov d0, x0
 ; CHECK-SD-NEXT:    fmov d1, x2
-; CHECK-SD-NEXT:    fmov d2, x0
-; CHECK-SD-NEXT:    mov v0.d[1], x5
+; CHECK-SD-NEXT:    fmov d2, x4
+; CHECK-SD-NEXT:    mov v2.d[1], x5
 ; CHECK-SD-NEXT:    mov v1.d[1], x3
-; CHECK-SD-NEXT:    mov v2.d[1], x1
+; CHECK-SD-NEXT:    mov v0.d[1], x1
 ; CHECK-SD-NEXT:    mov x1, xzr
 ; CHECK-SD-NEXT:    mov x3, xzr
 ; CHECK-SD-NEXT:    mov x5, xzr
-; CHECK-SD-NEXT:    cnt v0.16b, v0.16b
-; CHECK-SD-NEXT:    cnt v1.16b, v1.16b
 ; CHECK-SD-NEXT:    cnt v2.16b, v2.16b
-; CHECK-SD-NEXT:    addv b0, v0.16b
-; CHECK-SD-NEXT:    addv b1, v1.16b
+; CHECK-SD-NEXT:    cnt v1.16b, v1.16b
+; CHECK-SD-NEXT:    cnt v0.16b, v0.16b
 ; CHECK-SD-NEXT:    addv b2, v2.16b
-; CHECK-SD-NEXT:    fmov x0, d2
+; CHECK-SD-NEXT:    addv b1, v1.16b
+; CHECK-SD-NEXT:    addv b0, v0.16b
+; CHECK-SD-NEXT:    fmov x0, d0
 ; CHECK-SD-NEXT:    fmov x2, d1
-; CHECK-SD-NEXT:    fmov x4, d0
+; CHECK-SD-NEXT:    fmov x4, d2
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: v3i128:
@@ -444,30 +444,30 @@ entry:
 define <4 x i128> @v4i128(<4 x i128> %d) {
 ; CHECK-SD-LABEL: v4i128:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    fmov d0, x6
-; CHECK-SD-NEXT:    fmov d1, x4
-; CHECK-SD-NEXT:    fmov d2, x2
-; CHECK-SD-NEXT:    fmov d3, x0
-; CHECK-SD-NEXT:    mov v1.d[1], x5
-; CHECK-SD-NEXT:    mov v2.d[1], x3
-; CHECK-SD-NEXT:    mov v0.d[1], x7
-; CHECK-SD-NEXT:    mov v3.d[1], x1
+; CHECK-SD-NEXT:    fmov d0, x0
+; CHECK-SD-NEXT:    fmov d1, x2
+; CHECK-SD-NEXT:    fmov d2, x4
+; CHECK-SD-NEXT:    fmov d3, x6
+; CHECK-SD-NEXT:    mov v2.d[1], x5
+; CHECK-SD-NEXT:    mov v1.d[1], x3
+; CHECK-SD-NEXT:    mov v0.d[1], x1
+; CHECK-SD-NEXT:    mov v3.d[1], x7
 ; CHECK-SD-NEXT:    mov x1, xzr
 ; CHECK-SD-NEXT:    mov x3, xzr
 ; CHECK-SD-NEXT:    mov x5, xzr
 ; CHECK-SD-NEXT:    mov x7, xzr
-; CHECK-SD-NEXT:    cnt v1.16b, v1.16b
 ; CHECK-SD-NEXT:    cnt v2.16b, v2.16b
+; CHECK-SD-NEXT:    cnt v1.16b, v1.16b
 ; CHECK-SD-NEXT:    cnt v0.16b, v0.16b
 ; CHECK-SD-NEXT:    cnt v3.16b, v3.16b
-; CHECK-SD-NEXT:    addv b1, v1.16b
 ; CHECK-SD-NEXT:    addv b2, v2.16b
+; CHECK-SD-NEXT:    addv b1, v1.16b
 ; CHECK-SD-NEXT:    addv b0, v0.16b
 ; CHECK-SD-NEXT:    addv b3, v3.16b
-; CHECK-SD-NEXT:    fmov x2, d2
-; CHECK-SD-NEXT:    fmov x4, d1
-; CHECK-SD-NEXT:    fmov x6, d0
-; CHECK-SD-NEXT:    fmov x0, d3
+; CHECK-SD-NEXT:    fmov x0, d0
+; CHECK-SD-NEXT:    fmov x2, d1
+; CHECK-SD-NEXT:    fmov x4, d2
+; CHECK-SD-NEXT:    fmov x6, d3
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: v4i128:
diff --git a/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll b/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll
index c3da22757f1d2..0b05e00a1b0db 100644
--- a/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll
+++ b/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll
@@ -717,8 +717,7 @@ define <1 x i32> @fptoui_v1i32_v1f64(<1 x double> %x) #0 {
 define <1 x i64> @fptosi_v1i64_v1f64(<1 x double> %x) #0 {
 ; CHECK-LABEL: fptosi_v1i64_v1f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fcvtzs d0, d0
 ; CHECK-NEXT:    ret
   %val = call <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
   ret <1 x i64> %val
@@ -727,8 +726,7 @@ define <1 x i64> @fptosi_v1i64_v1f64(<1 x double> %x) #0 {
 define <1 x i64> @fptoui_v1i64_v1f64(<1 x double> %x) #0 {
 ; CHECK-LABEL: fptoui_v1i64_v1f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzu x8, d0
-; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fcvtzu d0, d0
 ; CHECK-NEXT:    ret
   %val = call <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
   ret <1 x i64> %val
diff --git a/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll b/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll
index f797fcd2a8823..6a06d99689df9 100644
--- a/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll
@@ -1161,24 +1161,17 @@ declare <7 x i32> @llvm.fptosi.sat.v7f16.v7i32 (<7 x half>)
 declare <8 x i32> @llvm.fptosi.sat.v8f16.v8i32 (<8 x half>)
 
 define <1 x i32> @test_signed_v1f16_v1i32(<1 x half> %f) {
-; CHECK-SD-CVT-LABEL: test_signed_v1f16_v1i32:
-; CHECK-SD-CVT:       // %bb.0:
-; CHECK-SD-CVT-NEXT:    fcvt s0, h0
-; CHECK-SD-CVT-NEXT:    fcvtzs w8, s0
-; CHECK-SD-CVT-NEXT:    fmov s0, w8
-; CHECK-SD-CVT-NEXT:    ret
+; CHECK-CVT-LABEL: test_signed_v1f16_v1i32:
+; CHECK-CVT:       // %bb.0:
+; CHECK-CVT-NEXT:    fcvt s0, h0
+; CHECK-CVT-NEXT:    fcvtzs s0, s0
+; CHECK-CVT-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: test_signed_v1f16_v1i32:
 ; CHECK-FP16:       // %bb.0:
 ; CHECK-FP16-NEXT:    fcvtzs w8, h0
 ; CHECK-FP16-NEXT:    fmov s0, w8
 ; CHECK-FP16-NEXT:    ret
-;
-; CHECK-GI-CVT-LABEL: test_signed_v1f16_v1i32:
-; CHECK-GI-CVT:       // %bb.0:
-; CHECK-GI-CVT-NEXT:    fcvt s0, h0
-; CHECK-GI-CVT-NEXT:    fcvtzs s0, s0
-; CHECK-GI-CVT-NEXT:    ret
     %x = call <1 x i32> @llvm.fptosi.sat.v1f16.v1i32(<1 x half> %f)
     ret <1 x i32> %x
 }
diff --git a/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll b/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
index ecca1165753bf..e461ace0ea0a2 100644
--- a/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
@@ -992,24 +992,17 @@ declare <7 x i32> @llvm.fptoui.sat.v7f16.v7i32 (<7 x half>)
 declare <8 x i32> @llvm.fptoui.sat.v8f16.v8i32 (<8 x half>)
 
 define <1 x i32> @test_unsigned_v1f16_v1i32(<1 x half> %f) {
-; CHECK-SD-CVT-LABEL: test_unsigned_v1f16_v1i32:
-; CHECK-SD-CVT:       // %bb.0:
-; CHECK-SD-CVT-NEXT:    fcvt s0, h0
-; CHECK-SD-CVT-NEXT:    fcvtzu w8, s0
-; CHECK-SD-CVT-NEXT:    fmov s0, w8
-; CHECK-SD-CVT-NEXT:    ret
+; CHECK-CVT-LABEL: test_unsigned_v1f16_v1i32:
+; CHECK-CVT:       // %bb.0:
+; CHECK-CVT-NEXT:    fcvt s0, h0
+; CHECK-CVT-NEXT:    fcvtzu s0, s0
+; CHECK-CVT-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: test_unsigned_v1f16_v1i32:
 ; CHECK-FP16:       // %bb.0:
 ; CHECK-FP16-NEXT:    fcvtzu w8, h0
 ; CHECK-FP16-NEXT:    fmov s0, w8
 ; CHECK-FP16-NEXT:    ret
-;
-; CHECK-GI-CVT-LABEL: test_unsigned_v1f16_v1i32:
-; CHECK-GI-CVT:       // %bb.0:
-; CHECK-GI-CVT-NEXT:    fcvt s0, h0
-; CHECK-GI-CVT-NEXT:    fcvtzu s0, s0
-; CHECK-GI-CVT-NEXT:    ret
     %x = call <1 x i32> @llvm.fptoui.sat.v1f16.v1i32(<1 x half> %f)
     ret <1 x i32> %x
 }
diff --git a/llvm/test/CodeGen/AArch64/fsh.ll b/llvm/test/CodeGen/AArch64/fsh.ll
index 15b8ce8226d71..7845742946d96 100644
--- a/llvm/test/CodeGen/AArch64/fsh.ll
+++ b/llvm/test/CodeGen/AArch64/fsh.ll
@@ -2609,25 +2609,25 @@ define <7 x i32> @fshr_v7i32(<7 x i32> %a, <7 x i32> %b, <7 x i32> %c) {
 ; CHECK-SD-NEXT:    mov v3.s[2], w6
 ; CHECK-SD-NEXT:    ld1 { v4.s }[2], [x9]
 ; CHECK-SD-NEXT:    ld1 { v6.s }[1], [x8]
-; CHECK-SD-NEXT:    bic v16.16b, v5.16b, v2.16b
-; CHECK-SD-NEXT:    and v2.16b, v2.16b, v5.16b
+; CHECK-SD-NEXT:    and v16.16b, v2.16b, v5.16b
 ; CHECK-SD-NEXT:    add x8, sp, #40
 ; CHECK-SD-NEXT:    add x9, sp, #16
+; CHECK-SD-NEXT:    bic v2.16b, v5.16b, v2.16b
 ; CHECK-SD-NEXT:    mov v1.s[3], w3
 ; CHECK-SD-NEXT:    and v7.16b, v0.16b, v5.16b
 ; CHECK-SD-NEXT:    bic v0.16b, v5.16b, v0.16b
 ; CHECK-SD-NEXT:    ld1 { v4.s }[3], [x9]
 ; CHECK-SD-NEXT:    ld1 { v6.s }[2], [x8]
 ; CHECK-SD-NEXT:    add v3.4s, v3.4s, v3.4s
-; CHECK-SD-NEXT:    neg v2.4s, v2.4s
 ; CHECK-SD-NEXT:    neg v5.4s, v7.4s
+; CHECK-SD-NEXT:    neg v7.4s, v16.4s
 ; CHECK-SD-NEXT:    add v1.4s, v1.4s, v1.4s
-; CHECK-SD-NEXT:    ushl v3.4s, v3.4s, v16.4s
-; CHECK-SD-NEXT:    ushl v2.4s, v6.4s, v2.4s
+; CHECK-SD-NEXT:    ushl v4.4s, v4.4s, v5.4s
 ; CHECK-SD-NEXT:    ushl v0.4s, v1.4s, v0.4s
-; CHECK-SD-NEXT:    ushl v1.4s, v4.4s, v5.4s
-; CHECK-SD-NEXT:    orr v0.16b, v0.16b, v1.16b
-; CHECK-SD-NEXT:    orr v1.16b, v3.16b, v2.16b
+; CHECK-SD-NEXT:    ushl v1.4s, v3.4s, v2.4s
+; CHECK-SD-NEXT:    ushl v2.4s, v6.4s, v7.4s
+; CHECK-SD-NEXT:    orr v0.16b, v0.16b, v4.16b
+; CHECK-SD-NEXT:    orr v1.16b, v1.16b, v2.16b
 ; CHECK-SD-NEXT:    mov w1, v0.s[1]
 ; CHECK-SD-NEXT:    mov w2, v0.s[2]
 ; CHECK-SD-NEXT:    mov w3, v0.s[3]
diff --git a/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll b/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
index ae71cd00b9aa4..b94a26fd0e80b 100644
--- a/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
+++ b/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
@@ -25,7 +25,7 @@ define dso_local void @run_test() local_unnamed_addr uwtable {
 ; CHECK-NEXT:    .cfi_offset b13, -64
 ; CHECK-NEXT:    .cfi_offset b14, -72
 ; CHECK-NEXT:    .cfi_offset b15, -80
-; CHECK-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-NEXT:    movi v7.2d, #0000000000000000
 ; CHECK-NEXT:    adrp x14, B+48
 ; CHECK-NEXT:    add x14, x14, :lo12:B+48
 ; CHECK-NEXT:    // implicit-def: $q18
@@ -43,15 +43,15 @@ define dso_local void @run_test() local_unnamed_addr uwtable {
 ; CHECK-NEXT:    // implicit-def: $q3
 ; CHECK-NEXT:    // implicit-def: $q4
 ; CHECK-NEXT:    // implicit-def: $q5
-; CHECK-NEXT:    // implicit-def: $q6
+; CHECK-NEXT:    // implicit-def: $q1
 ; CHECK-NEXT:    // implicit-def: $q16
 ; CHECK-NEXT:    // implicit-def: $q17
-; CHECK-NEXT:    // implicit-def: $q7
+; CHECK-NEXT:    // implicit-def: $q6
 ; CHECK-NEXT:    // implicit-def: $q19
 ; CHECK-NEXT:    // implicit-def: $q20
 ; CHECK-NEXT:    // implicit-def: $q21
 ; CHECK-NEXT:    // implicit-def: $q22
-; CHECK-NEXT:    // implicit-def: $q24
+; CHECK-NEXT:    // implicit-def: $q12
 ; CHECK-NEXT:    // implicit-def: $q23
 ; CHECK-NEXT:    // implicit-def: $q25
 ; CHECK-NEXT:    // implicit-def: $q26
@@ -59,7 +59,7 @@ define dso_local void @run_test() local_unnamed_addr uwtable {
 ; CHECK-NEXT:    // implicit-def: $q30
 ; CHECK-NEXT:    // implicit-def: $q8
 ; CHECK-NEXT:    // implicit-def: $q11
-; CHECK-NEXT:    // implicit-def: $q12
+; CHECK-NEXT:    // implicit-def: $q28
 ; CHECK-NEXT:    // implicit-def: $q29
 ; CHECK-NEXT:    // implicit-def: $q13
 ; CHECK-NEXT:    // implicit-def: $q10
@@ -69,111 +69,107 @@ define dso_local void @run_test() local_unnamed_addr uwtable {
 ; CHECK-NEXT:    // kill: killed $q18
 ; CHECK-NEXT:  .LBB0_1: // %for.cond1.preheader
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    ldr x17, [x8]
+; CHECK-NEXT:    ldr x18, [x8]
 ; CHECK-NEXT:    ldr x15, [x8]
 ; CHECK-NEXT:    mov v18.16b, v0.16b
 ; CHECK-NEXT:    ldr x16, [x9]
 ; CHECK-NEXT:    stp q15, q4, [sp] // 32-byte Folded Spill
 ; CHECK-NEXT:    add x5, x10, x11
-; CHECK-NEXT:    mul x1, x15, x17
+; CHECK-NEXT:    mul x1, x15, x18
 ; CHECK-NEXT:    ldr x2, [x13], #64
-; CHECK-NEXT:    ldr x5, [x5, #128]
-; CHECK-NEXT:    stp q7, q23, [sp, #32] // 32-byte Folded Spill
-; CHECK-NEXT:    ldr x14, [x14, #8]
-; CHECK-NEXT:    mul x0, x17, x17
+; CHECK-NEXT:    stp q6, q23, [sp, #32] // 32-byte Folded Spill
 ; CHECK-NEXT:    ldr q23, [sp, #80] // 16-byte Reload
+; CHECK-NEXT:    ldr x14, [x14, #8]
+; CHECK-NEXT:    mul x0, x18, x18
+; CHECK-NEXT:    ldr x5, [x5, #128]
 ; CHECK-NEXT:    mov v9.16b, v30.16b
 ; CHECK-NEXT:    mov v30.16b, v25.16b
 ; CHECK-NEXT:    mov v25.16b, v20.16b
-; CHECK-NEXT:    mov v20.16b, v6.16b
-; CHECK-NEXT:    mul x18, x16, x17
-; CHECK-NEXT:    mov v6.16b, v1.16b
-; CHECK-NEXT:    mov v28.16b, v24.16b
-; CHECK-NEXT:    fmov d14, x1
-; CHECK-NEXT:    mov v24.16b, v19.16b
-; CHECK-NEXT:    mov v19.16b, v5.16b
-; CHECK-NEXT:    mul x4, x2, x17
+; CHECK-NEXT:    mov v20.16b, v1.16b
+; CHECK-NEXT:    mul x17, x16, x18
 ; CHECK-NEXT:    mov v31.16b, v26.16b
 ; CHECK-NEXT:    mov v26.16b, v21.16b
-; CHECK-NEXT:    fmov d15, x0
+; CHECK-NEXT:    fmov d14, x1
 ; CHECK-NEXT:    mov v21.16b, v16.16b
 ; CHECK-NEXT:    mov v16.16b, v2.16b
-; CHECK-NEXT:    mov v0.16b, v14.16b
-; CHECK-NEXT:    mul x20, x2, x5
-; CHECK-NEXT:    mov v7.16b, v10.16b
+; CHECK-NEXT:    mul x4, x2, x18
+; CHECK-NEXT:    mov v6.16b, v10.16b
 ; CHECK-NEXT:    mov v10.16b, v17.16b
+; CHECK-NEXT:    fmov d15, x0
 ; CHECK-NEXT:    mov v17.16b, v3.16b
+; CHECK-NEXT:    mov v24.16b, v19.16b
+; CHECK-NEXT:    mov v0.16b, v14.16b
+; CHECK-NEXT:    mul x3, x14, x18
+; CHECK-NEXT:    mov v19.16b, v5.16b
 ; CHECK-NEXT:    add x11, x11, #8
-; CHECK-NEXT:    mov v15.d[1], x18
-; CHECK-NEXT:    mul x3, x14, x17
+; CHECK-NEXT:    add x12, x12, #1
+; CHECK-NEXT:    mov v15.d[1], x17
+; CHECK-NEXT:    mul x6, x15, x15
 ; CHECK-NEXT:    cmp x11, #64
 ; CHECK-NEXT:    mov v0.d[1], x1
 ; CHECK-NEXT:    fmov d1, x4
-; CHECK-NEXT:    add x12, x12, #1
-; CHECK-NEXT:    mul x17, x17, x5
-; CHECK-NEXT:    fmov d5, x20
-; CHECK-NEXT:    mul x6, x15, x15
+; CHECK-NEXT:    mul x7, x15, x5
+; CHECK-NEXT:    mul x18, x18, x5
+; CHECK-NEXT:    mov v1.d[1], x3
 ; CHECK-NEXT:    add v23.2d, v23.2d, v0.2d
 ; CHECK-NEXT:    ldr q0, [sp, #64] // 16-byte Reload
-; CHECK-NEXT:    mov v1.d[1], x3
-; CHECK-NEXT:    mul x7, x15, x5
-; CHECK-NEXT:    add v0.2d, v0.2d, v15.2d
-; CHECK-NEXT:    fmov d2, x17
-; CHECK-NEXT:    mul x0, x14, x5
 ; CHECK-NEXT:    fmov d4, x6
+; CHECK-NEXT:    mul x20, x2, x5
+; CHECK-NEXT:    add v0.2d, v0.2d, v15.2d
+; CHECK-NEXT:    fmov d3, x7
 ; CHECK-NEXT:    mul x19, x16, x5
+; CHECK-NEXT:    mov v4.d[1], x6
+; CHECK-NEXT:    fmov d2, x18
+; CHECK-NEXT:    mul x0, x14, x5
 ; CHECK-NEXT:    stp q0, q23, [sp, #64] // 32-byte Folded Spill
 ; CHECK-NEXT:    ldr q0, [sp, #96] // 16-byte Reload
-; CHECK-NEXT:    fmov d3, x7
+; CHECK-NEXT:    fmov d5, x20
+; CHECK-NEXT:    mov v3.d[1], x7
 ; CHECK-NEXT:    ldr q23, [sp, #48] // 16-byte Reload
 ; CHECK-NEXT:    mul x17, x2, x15
 ; CHECK-NEXT:    add v0.2d, v0.2d, v15.2d
 ; CHECK-NEXT:    ldr q15, [sp] // 16-byte Reload
-; CHECK-NEXT:    mov v5.d[1], x0
-; CHECK-NEXT:    mov v4.d[1], x6
+; CHECK-NEXT:    mov v2.d[1], x19
+; CHECK-NEXT:    add v13.2d, v13.2d, v4.2d
+; CHECK-NEXT:    add v12.2d, v12.2d, v4.2d
 ; CHECK-NEXT:    mul x16, x16, x15
-; CHECK-NEXT:    mov v3.d[1], x7
 ; CHECK-NEXT:    add v15.2d, v15.2d, v1.2d
-; CHECK-NEXT:    mov v2.d[1], x19
+; CHECK-NEXT:    mov v1.16b, v20.16b
+; CHECK-NEXT:    mov v5.d[1], x0
 ; CHECK-NEXT:    str q0, [sp, #96] // 16-byte Spill
-; CHECK-NEXT:    mov v1.16b, v6.16b
-; CHECK-NEXT:    mul x14, x14, x15
-; CHECK-NEXT:    mov v6.16b, v20.16b
 ; CHECK-NEXT:    mov v20.16b, v25.16b
-; CHECK-NEXT:    fmov d0, x17
+; CHECK-NEXT:    mul x14, x14, x15
 ; CHECK-NEXT:    mov v25.16b, v30.16b
-; CHECK-NEXT:    add v30.2d, v9.2d, v5.2d
-; CHECK-NEXT:    mov v5.16b, v19.16b
-; CHECK-NEXT:    mov v19.16b, v24.16b
 ; CHECK-NEXT:    add v11.2d, v11.2d, v3.2d
-; CHECK-NEXT:    mov v14.d[1], x16
+; CHECK-NEXT:    fmov d0, x17
 ; CHECK-NEXT:    mov v3.16b, v17.16b
 ; CHECK-NEXT:    mov v17.16b, v10.16b
-; CHECK-NEXT:    mov v10.16b, v7.16b
+; CHECK-NEXT:    mov v10.16b, v6.16b
 ; CHECK-NEXT:    add v8.2d, v8.2d, v2.2d
 ; CHECK-NEXT:    mov v2.16b, v16.16b
-; CHECK-NEXT:    mov v0.d[1], x14
+; CHECK-NEXT:    mov v14.d[1], x16
 ; CHECK-NEXT:    mov v16.16b, v21.16b
 ; CHECK-NEXT:    mov v21.16b, v26.16b
-; CHECK-NEXT:    add v13.2d, v13.2d, v4.2d
+; CHECK-NEXT:    add v30.2d, v9.2d, v5.2d
+; CHECK-NEXT:    mov v5.16b, v19.16b
 ; CHECK-NEXT:    add v26.2d, v31.2d, v4.2d
-; CHECK-NEXT:    add v24.2d, v28.2d, v4.2d
-; CHECK-NEXT:    add v19.2d, v19.2d, v4.2d
-; CHECK-NEXT:    add v6.2d, v6.2d, v4.2d
+; CHECK-NEXT:    mov v0.d[1], x14
+; CHECK-NEXT:    add v19.2d, v24.2d, v4.2d
 ; CHECK-NEXT:    add v1.2d, v1.2d, v4.2d
-; CHECK-NEXT:    ldp q4, q7, [sp, #16] // 32-byte Folded Reload
+; CHECK-NEXT:    add v7.2d, v7.2d, v4.2d
+; CHECK-NEXT:    ldp q4, q6, [sp, #16] // 32-byte Folded Reload
 ; CHECK-NEXT:    add v10.2d, v10.2d, v14.2d
 ; CHECK-NEXT:    add v29.2d, v29.2d, v14.2d
 ; CHECK-NEXT:    add v27.2d, v27.2d, v14.2d
 ; CHECK-NEXT:    add v23.2d, v23.2d, v14.2d
 ; CHECK-NEXT:    add v22.2d, v22.2d, v14.2d
 ; CHECK-NEXT:    add v20.2d, v20.2d, v14.2d
+; CHECK-NEXT:    add v6.2d, v6.2d, v14.2d
 ; CHECK-NEXT:    add v16.2d, v16.2d, v14.2d
-; CHECK-NEXT:    add v7.2d, v7.2d, v14.2d
 ; CHECK-NEXT:    add v5.2d, v5.2d, v14.2d
 ; CHECK-NEXT:    add v3.2d, v3.2d, v14.2d
 ; CHECK-NEXT:    add v2.2d, v2.2d, v14.2d
-; CHECK-NEXT:    add v12.2d, v12.2d, v0.2d
+; CHECK-NEXT:    add v28.2d, v28.2d, v0.2d
 ; CHECK-NEXT:    add v25.2d, v25.2d, v0.2d
 ; CHECK-NEXT:    add v21.2d, v21.2d, v0.2d
 ; CHECK-NEXT:    add v17.2d, v17.2d, v0.2d
@@ -182,30 +178,30 @@ define dso_local void @run_test() local_unnamed_addr uwtable {
 ; CHECK-NEXT:    mov x14, x13
 ; CHECK-NEXT:    b.ne .LBB0_1
 ; CHECK-NEXT:  // %bb.2: // %for.cond.cleanup
-; CHECK-NEXT:    ldp q28, q18, [sp, #64] // 32-byte Folded Reload
+; CHECK-NEXT:    ldp q24, q18, [sp, #64] // 32-byte Folded Reload
 ; CHECK-NEXT:    adrp x8, C
 ; CHECK-NEXT:    add x8, x8, :lo12:C
 ; CHECK-NEXT:    ldp x20, x19, [sp, #176] // 16-byte Folded Reload
 ; CHECK-NEXT:    stp q10, q13, [x8, #64]
-; CHECK-NEXT:    stp q28, q18, [x8]
+; CHECK-NEXT:    stp q24, q18, [x8]
 ; CHECK-NEXT:    ldr q18, [sp, #96] // 16-byte Reload
-; CHECK-NEXT:    stp q29, q12, [x8, #96]
-; CHECK-NEXT:    ldp d13, d12, [sp, #128] // 16-byte Folded Reload
+; CHECK-NEXT:    stp q29, q28, [x8, #96]
 ; CHECK-NEXT:    stp q18, q15, [x8, #32]
 ; CHECK-NEXT:    ldp d15, d14, [sp, #112] // 16-byte Folded Reload
 ; CHECK-NEXT:    stp q11, q8, [x8, #144]
 ; CHECK-NEXT:    ldp d9, d8, [sp, #160] // 16-byte Folded Reload
-; CHECK-NEXT:    stp q30, q27, [x8, #176]
+; CHECK-NEXT:    stp q12, q22, [x8, #272]
 ; CHECK-NEXT:    ldp d11, d10, [sp, #144] // 16-byte Folded Reload
+; CHECK-NEXT:    ldp d13, d12, [sp, #128] // 16-byte Folded Reload
+; CHECK-NEXT:    stp q30, q27, [x8, #176]
 ; CHECK-NEXT:    str q26, [x8, #208]
 ; CHECK-NEXT:    stp q25, q23, [x8, #240]
-; CHECK-NEXT:    stp q24, q22, [x8, #272]
 ; CHECK-NEXT:    stp q21, q20, [x8, #304]
-; CHECK-NEXT:    stp q19, q7, [x8, #336]
+; CHECK-NEXT:    stp q19, q6, [x8, #336]
 ; CHECK-NEXT:    stp q17, q16, [x8, #368]
-; CHECK-NEXT:    stp q6, q5, [x8, #400]
+; CHECK-NEXT:    stp q1, q5, [x8, #400]
 ; CHECK-NEXT:    stp q4, q3, [x8, #432]
-; CHECK-NEXT:    stp q1, q2, [x8, #464]
+; CHECK-NEXT:    stp q7, q2, [x8, #464]
 ; CHECK-NEXT:    str q0, [x8, #496]
 ; CHECK-NEXT:    add sp, sp, #192
 ; CHECK-NEXT:    .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/AArch64/sext.ll b/llvm/test/CodeGen/AArch64/sext.ll
index ef6b65cd50a1e..e6af0256fe6e2 100644
--- a/llvm/test/CodeGen/AArch64/sext.ll
+++ b/llvm/test/CodeGen/AArch64/sext.ll
@@ -1144,47 +1144,47 @@ entry:
 define <16 x i64> @sext_v16i10_v16i64(<16 x i10> %a) {
 ; CHECK-SD-LABEL: sext_v16i10_v16i64:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    fmov s0, w2
-; CHECK-SD-NEXT:    fmov s1, w0
-; CHECK-SD-NEXT:    ldr s2, [sp]
-; CHECK-SD-NEXT:    fmov s3, w4
-; CHECK-SD-NEXT:    fmov s4, w6
+; CHECK-SD-NEXT:    fmov s0, w6
+; CHECK-SD-NEXT:    fmov s2, w4
+; CHECK-SD-NEXT:    ldr s1, [sp]
+; CHECK-SD-NEXT:    fmov s3, w2
+; CHECK-SD-NEXT:    fmov s4, w0
 ; CHECK-SD-NEXT:    add x8, sp, #8
 ; CHECK-SD-NEXT:    ldr s5, [sp, #16]
 ; CHECK-SD-NEXT:    ldr s6, [sp, #32]
 ; CHECK-SD-NEXT:    ldr s7, [sp, #48]
-; CHECK-SD-NEXT:    mov v1.s[1], w1
-; CHECK-SD-NEXT:    mov v0.s[1], w3
-; CHECK-SD-NEXT:    ld1 { v2.s }[1], [x8]
-; CHECK-SD-NEXT:    mov v3.s[1], w5
-; CHECK-SD-NEXT:    mov v4.s[1], w7
+; CHECK-SD-NEXT:    ld1 { v1.s }[1], [x8]
+; CHECK-SD-NEXT:    mov v2.s[1], w5
+; CHECK-SD-NEXT:    mov v0.s[1], w7
+; CHECK-SD-NEXT:    mov v4.s[1], w1
+; CHECK-SD-NEXT:    mov v3.s[1], w3
 ; CHECK-SD-NEXT:    add x8, sp, #24
 ; CHECK-SD-NEXT:    add x9, sp, #40
 ; CHECK-SD-NEXT:    add x10, sp, #56
 ; CHECK-SD-NEXT:    ld1 { v5.s }[1], [x8]
 ; CHECK-SD-NEXT:    ld1 { v6.s }[1], [x9]
 ; CHECK-SD-NEXT:    ld1 { v7.s }[1], [x10]
-; CHECK-SD-NEXT:    ushll v2.2d, v2.2s, #0
 ; CHECK-SD-NEXT:    ushll v1.2d, v1.2s, #0
+; CHECK-SD-NEXT:    ushll v2.2d, v2.2s, #0
 ; CHECK-SD-NEXT:    ushll v0.2d, v0.2s, #0
-; CHECK-SD-NEXT:    ushll v3.2d, v3.2s, #0
 ; CHECK-SD-NEXT:    ushll v4.2d, v4.2s, #0
+; CHECK-SD-NEXT:    ushll v3.2d, v3.2s, #0
 ; CHECK-SD-NEXT:    ushll v5.2d, v5.2s, #0
 ; CHECK-SD-NEXT:    ushll v6.2d, v6.2s, #0
 ; CHECK-SD-NEXT:    ushll v7.2d, v7.2s, #0
-; CHECK-SD-NEXT:    shl v17.2d, v2.2d, #54
-; CHECK-SD-NEXT:    shl v1.2d, v1.2d, #54
-; CHECK-SD-NEXT:    shl v16.2d, v0.2d, #54
-; CHECK-SD-NEXT:    shl v3.2d, v3.2d, #54
+; CHECK-SD-NEXT:    shl v16.2d, v1.2d, #54
+; CHECK-SD-NEXT:    shl v2.2d, v2.2d, #54
+; CHECK-SD-NEXT:    shl v17.2d, v0.2d, #54
 ; CHECK-SD-NEXT:    shl v4.2d, v4.2d, #54
+; CHECK-SD-NEXT:    shl v3.2d, v3.2d, #54
 ; CHECK-SD-NEXT:    shl v5.2d, v5.2d, #54
 ; CHECK-SD-NEXT:    shl v6.2d, v6.2d, #54
 ; CHECK-SD-NEXT:    shl v7.2d, v7.2d, #54
-; CHECK-SD-NEXT:    sshr v0.2d, v1.2d, #54
-; CHECK-SD-NEXT:    sshr v1.2d, v16.2d, #54
-; CHECK-SD-NEXT:    sshr v2.2d, v3.2d, #54
-; CHECK-SD-NEXT:    sshr v3.2d, v4.2d, #54
-; CHECK-SD-NEXT:    sshr v4.2d, v17.2d, #54
+; CHECK-SD-NEXT:    sshr v2.2d, v2.2d, #54
+; CHECK-SD-NEXT:    sshr v0.2d, v4.2d, #54
+; CHECK-SD-NEXT:    sshr v1.2d, v3.2d, #54
+; CHECK-SD-NEXT:    sshr v4.2d, v16.2d, #54
+; CHECK-SD-NEXT:    sshr v3.2d, v17.2d, #54
 ; CHECK-SD-NEXT:    sshr v5.2d, v5.2d, #54
 ; CHECK-SD-NEXT:    sshr v6.2d, v6.2d, #54
 ; CHECK-SD-NEXT:    sshr v7.2d, v7.2d, #54
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-to-int.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-to-int.ll
index c8f6d98f5a63f..312d158cfb2b6 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-to-int.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-to-int.ll
@@ -815,8 +815,7 @@ define void @fcvtzu_v32f64_v32i32(ptr %a, ptr %b) vscale_range(16,0) #0 {
 define <1 x i64> @fcvtzu_v1f64_v1i64(<1 x double> %op1) vscale_range(2,0) #0 {
 ; CHECK-LABEL: fcvtzu_v1f64_v1i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzu x8, d0
-; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fcvtzu d0, d0
 ; CHECK-NEXT:    ret
   %res = fptoui <1 x double> %op1 to <1 x i64>
   ret <1 x i64> %res
@@ -1710,8 +1709,7 @@ define void @fcvtzs_v32f64_v32i32(ptr %a, ptr %b) vscale_range(16,0) #0 {
 define <1 x i64> @fcvtzs_v1f64_v1i64(<1 x double> %op1) vscale_range(2,0) #0 {
 ; CHECK-LABEL: fcvtzs_v1f64_v1i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fcvtzs d0, d0
 ; CHECK-NEXT:    ret
   %res = fptosi <1 x double> %op1 to <1 x i64>
   ret <1 x i64> %res
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-vector-llrint.ll b/llvm/test/CodeGen/AArch64/sve-fixed-vector-llrint.ll
index c95fa965cd4d2..9372f2a82a795 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-vector-llrint.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-vector-llrint.ll
@@ -57,28 +57,28 @@ define <8 x i64> @llrint_v8i64_v8f16(<8 x half> %x) nounwind {
 ; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
 ; CHECK-NEXT:    frintx v0.4h, v0.4h
 ; CHECK-NEXT:    frintx v1.4h, v1.4h
-; CHECK-NEXT:    mov h4, v0.h[2]
-; CHECK-NEXT:    mov h2, v0.h[1]
-; CHECK-NEXT:    mov h7, v0.h[3]
+; CHECK-NEXT:    mov h3, v0.h[2]
+; CHECK-NEXT:    mov h4, v0.h[1]
+; CHECK-NEXT:    mov h5, v0.h[3]
 ; CHECK-NEXT:    fcvtzs x8, h0
-; CHECK-NEXT:    mov h3, v1.h[2]
-; CHECK-NEXT:    mov h5, v1.h[3]
-; CHECK-NEXT:    mov h6, v1.h[1]
-; CHECK-NEXT:    fcvtzs x11, h1
+; CHECK-NEXT:    mov h2, v1.h[2]
+; CHECK-NEXT:    mov h6, v1.h[3]
+; CHECK-NEXT:    mov h7, v1.h[1]
+; CHECK-NEXT:    fcvtzs x10, h1
+; CHECK-NEXT:    fcvtzs x11, h3
 ; CHECK-NEXT:    fcvtzs x12, h4
-; CHECK-NEXT:    fcvtzs x9, h2
-; CHECK-NEXT:    fcvtzs x15, h7
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fcvtzs x10, h3
 ; CHECK-NEXT:    fcvtzs x13, h5
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fcvtzs x9, h2
 ; CHECK-NEXT:    fcvtzs x14, h6
-; CHECK-NEXT:    fmov d1, x12
-; CHECK-NEXT:    fmov d2, x11
-; CHECK-NEXT:    mov v0.d[1], x9
-; CHECK-NEXT:    fmov d3, x10
-; CHECK-NEXT:    mov v1.d[1], x15
-; CHECK-NEXT:    mov v2.d[1], x14
-; CHECK-NEXT:    mov v3.d[1], x13
+; CHECK-NEXT:    fcvtzs x15, h7
+; CHECK-NEXT:    fmov d2, x10
+; CHECK-NEXT:    fmov d1, x11
+; CHECK-NEXT:    mov v0.d[1], x12
+; CHECK-NEXT:    fmov d3, x9
+; CHECK-NEXT:    mov v1.d[1], x13
+; CHECK-NEXT:    mov v2.d[1], x15
+; CHECK-NEXT:    mov v3.d[1], x14
 ; CHECK-NEXT:    ret
   %a = call <8 x i64> @llvm.llrint.v8i64.v8f16(<8 x half> %x)
   ret <8 x i64> %a
@@ -89,55 +89,55 @@ define <16 x i64> @llrint_v16i64_v16f16(<16 x half> %x) nounwind {
 ; CHECK-LABEL: llrint_v16i64_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext v2.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    ext v3.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    frintx v0.4h, v0.4h
 ; CHECK-NEXT:    frintx v1.4h, v1.4h
-; CHECK-NEXT:    frintx v3.4h, v0.4h
-; CHECK-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
 ; CHECK-NEXT:    frintx v2.4h, v2.4h
+; CHECK-NEXT:    frintx v3.4h, v3.4h
+; CHECK-NEXT:    mov h5, v0.h[2]
 ; CHECK-NEXT:    mov h4, v1.h[2]
+; CHECK-NEXT:    mov h6, v0.h[1]
+; CHECK-NEXT:    fcvtzs x8, h1
+; CHECK-NEXT:    mov h16, v0.h[3]
+; CHECK-NEXT:    fcvtzs x9, h0
+; CHECK-NEXT:    mov h7, v1.h[1]
+; CHECK-NEXT:    mov h1, v1.h[3]
+; CHECK-NEXT:    mov h0, v2.h[3]
+; CHECK-NEXT:    mov h17, v2.h[2]
+; CHECK-NEXT:    fcvtzs x12, h5
 ; CHECK-NEXT:    mov h5, v3.h[2]
-; CHECK-NEXT:    frintx v0.4h, v0.4h
-; CHECK-NEXT:    mov h6, v3.h[1]
-; CHECK-NEXT:    fcvtzs x9, h3
-; CHECK-NEXT:    mov h16, v1.h[1]
-; CHECK-NEXT:    fcvtzs x12, h1
-; CHECK-NEXT:    mov h3, v3.h[3]
-; CHECK-NEXT:    mov h17, v1.h[3]
-; CHECK-NEXT:    mov h7, v2.h[3]
-; CHECK-NEXT:    fcvtzs x8, h4
-; CHECK-NEXT:    fcvtzs x10, h5
-; CHECK-NEXT:    mov h4, v2.h[2]
-; CHECK-NEXT:    mov h5, v0.h[2]
-; CHECK-NEXT:    fcvtzs x11, h6
-; CHECK-NEXT:    mov h6, v0.h[3]
-; CHECK-NEXT:    fcvtzs x15, h2
-; CHECK-NEXT:    mov h2, v2.h[1]
-; CHECK-NEXT:    fcvtzs x14, h0
-; CHECK-NEXT:    fcvtzs x17, h3
-; CHECK-NEXT:    fcvtzs x0, h17
-; CHECK-NEXT:    fcvtzs x13, h7
-; CHECK-NEXT:    mov h7, v0.h[1]
+; CHECK-NEXT:    fcvtzs x11, h2
+; CHECK-NEXT:    mov h18, v3.h[3]
+; CHECK-NEXT:    fcvtzs x14, h3
+; CHECK-NEXT:    mov h3, v3.h[1]
+; CHECK-NEXT:    mov h19, v2.h[1]
+; CHECK-NEXT:    fcvtzs x10, h4
+; CHECK-NEXT:    fmov d4, x8
+; CHECK-NEXT:    fcvtzs x13, h6
+; CHECK-NEXT:    fcvtzs x15, h0
+; CHECK-NEXT:    fcvtzs x8, h17
 ; CHECK-NEXT:    fmov d0, x9
-; CHECK-NEXT:    fcvtzs x16, h4
 ; CHECK-NEXT:    fcvtzs x9, h5
-; CHECK-NEXT:    fmov d4, x12
-; CHECK-NEXT:    fcvtzs x12, h16
-; CHECK-NEXT:    fmov d1, x10
-; CHECK-NEXT:    fcvtzs x10, h6
-; CHECK-NEXT:    fmov d5, x8
-; CHECK-NEXT:    fcvtzs x8, h2
+; CHECK-NEXT:    fcvtzs x16, h7
+; CHECK-NEXT:    fcvtzs x17, h16
+; CHECK-NEXT:    fmov d6, x11
+; CHECK-NEXT:    fcvtzs x11, h18
+; CHECK-NEXT:    fcvtzs x18, h3
 ; CHECK-NEXT:    fmov d2, x14
-; CHECK-NEXT:    fcvtzs x18, h7
-; CHECK-NEXT:    fmov d6, x15
-; CHECK-NEXT:    mov v0.d[1], x11
+; CHECK-NEXT:    fcvtzs x14, h19
+; CHECK-NEXT:    fcvtzs x0, h1
+; CHECK-NEXT:    fmov d5, x10
+; CHECK-NEXT:    fmov d1, x12
+; CHECK-NEXT:    fmov d7, x8
 ; CHECK-NEXT:    fmov d3, x9
-; CHECK-NEXT:    fmov d7, x16
+; CHECK-NEXT:    mov v0.d[1], x13
+; CHECK-NEXT:    mov v4.d[1], x16
+; CHECK-NEXT:    mov v2.d[1], x18
 ; CHECK-NEXT:    mov v1.d[1], x17
-; CHECK-NEXT:    mov v4.d[1], x12
 ; CHECK-NEXT:    mov v5.d[1], x0
-; CHECK-NEXT:    mov v6.d[1], x8
-; CHECK-NEXT:    mov v2.d[1], x18
-; CHECK-NEXT:    mov v3.d[1], x10
-; CHECK-NEXT:    mov v7.d[1], x13
+; CHECK-NEXT:    mov v6.d[1], x14
+; CHECK-NEXT:    mov v3.d[1], x11
+; CHECK-NEXT:    mov v7.d[1], x15
 ; CHECK-NEXT:    ret
   %a = call <16 x i64> @llvm.llrint.v16i64.v16f16(<16 x half> %x)
   ret <16 x i64> %a
@@ -324,27 +324,27 @@ declare <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float>)
 define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) nounwind {
 ; CHECK-LABEL: llrint_v8i64_v8f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintx v0.4s, v0.4s
 ; CHECK-NEXT:    frintx v1.4s, v1.4s
-; CHECK-NEXT:    mov s3, v1.s[2]
-; CHECK-NEXT:    mov s4, v0.s[2]
-; CHECK-NEXT:    mov s2, v0.s[1]
+; CHECK-NEXT:    frintx v0.4s, v0.4s
+; CHECK-NEXT:    mov s2, v1.s[2]
+; CHECK-NEXT:    mov s3, v0.s[2]
+; CHECK-NEXT:    mov s4, v0.s[1]
 ; CHECK-NEXT:    mov s5, v1.s[3]
 ; CHECK-NEXT:    mov s6, v1.s[1]
 ; CHECK-NEXT:    mov s7, v0.s[3]
 ; CHECK-NEXT:    fcvtzs x8, s0
 ; CHECK-NEXT:    fcvtzs x10, s1
+; CHECK-NEXT:    fcvtzs x9, s2
 ; CHECK-NEXT:    fcvtzs x11, s3
 ; CHECK-NEXT:    fcvtzs x12, s4
-; CHECK-NEXT:    fcvtzs x9, s2
 ; CHECK-NEXT:    fcvtzs x13, s5
 ; CHECK-NEXT:    fcvtzs x14, s6
 ; CHECK-NEXT:    fcvtzs x15, s7
 ; CHECK-NEXT:    fmov d0, x8
 ; CHECK-NEXT:    fmov d2, x10
-; CHECK-NEXT:    fmov d1, x12
-; CHECK-NEXT:    fmov d3, x11
-; CHECK-NEXT:    mov v0.d[1], x9
+; CHECK-NEXT:    fmov d3, x9
+; CHECK-NEXT:    fmov d1, x11
+; CHECK-NEXT:    mov v0.d[1], x12
 ; CHECK-NEXT:    mov v2.d[1], x14
 ; CHECK-NEXT:    mov v1.d[1], x15
 ; CHECK-NEXT:    mov v3.d[1], x13
@@ -363,48 +363,48 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) nounwind {
 ; CHECK-NEXT:    frintx v0.4s, v0.4s
 ; CHECK-NEXT:    mov s4, v3.s[2]
 ; CHECK-NEXT:    mov s5, v2.s[2]
-; CHECK-NEXT:    mov s6, v1.s[2]
-; CHECK-NEXT:    mov s7, v0.s[2]
-; CHECK-NEXT:    fcvtzs x10, s1
-; CHECK-NEXT:    fcvtzs x11, s0
-; CHECK-NEXT:    mov s16, v0.s[1]
-; CHECK-NEXT:    mov s17, v1.s[1]
-; CHECK-NEXT:    mov s18, v3.s[1]
-; CHECK-NEXT:    fcvtzs x14, s3
-; CHECK-NEXT:    fcvtzs x16, s2
-; CHECK-NEXT:    fcvtzs x8, s4
-; CHECK-NEXT:    mov s4, v2.s[1]
-; CHECK-NEXT:    fcvtzs x9, s5
-; CHECK-NEXT:    mov s5, v1.s[3]
-; CHECK-NEXT:    fcvtzs x12, s6
-; CHECK-NEXT:    mov s6, v0.s[3]
-; CHECK-NEXT:    fcvtzs x13, s7
-; CHECK-NEXT:    mov s7, v3.s[3]
-; CHECK-NEXT:    fmov d0, x11
-; CHECK-NEXT:    fcvtzs x17, s16
-; CHECK-NEXT:    fcvtzs x18, s18
-; CHECK-NEXT:    fcvtzs x15, s4
-; CHECK-NEXT:    mov s4, v2.s[3]
-; CHECK-NEXT:    fmov d2, x10
+; CHECK-NEXT:    mov s6, v2.s[1]
+; CHECK-NEXT:    mov s7, v1.s[2]
+; CHECK-NEXT:    fcvtzs x8, s3
+; CHECK-NEXT:    mov s16, v0.s[2]
+; CHECK-NEXT:    fcvtzs x9, s2
+; CHECK-NEXT:    mov s17, v1.s[3]
+; CHECK-NEXT:    mov s18, v0.s[1]
+; CHECK-NEXT:    mov s19, v3.s[3]
+; CHECK-NEXT:    fcvtzs x14, s1
+; CHECK-NEXT:    mov s1, v1.s[1]
+; CHECK-NEXT:    fcvtzs x10, s4
 ; CHECK-NEXT:    fcvtzs x11, s5
-; CHECK-NEXT:    fcvtzs x10, s6
-; CHECK-NEXT:    fmov d3, x12
-; CHECK-NEXT:    fmov d1, x13
-; CHECK-NEXT:    fcvtzs x12, s17
+; CHECK-NEXT:    mov s5, v0.s[3]
+; CHECK-NEXT:    mov s3, v3.s[1]
+; CHECK-NEXT:    mov s2, v2.s[3]
+; CHECK-NEXT:    fcvtzs x12, s6
 ; CHECK-NEXT:    fcvtzs x13, s7
-; CHECK-NEXT:    fmov d5, x9
-; CHECK-NEXT:    fmov d6, x14
-; CHECK-NEXT:    fmov d7, x8
-; CHECK-NEXT:    fcvtzs x0, s4
-; CHECK-NEXT:    fmov d4, x16
+; CHECK-NEXT:    fcvtzs x15, s16
+; CHECK-NEXT:    fmov d6, x8
+; CHECK-NEXT:    fcvtzs x8, s0
+; CHECK-NEXT:    fmov d4, x9
+; CHECK-NEXT:    fcvtzs x9, s17
+; CHECK-NEXT:    fcvtzs x16, s5
+; CHECK-NEXT:    fcvtzs x17, s18
+; CHECK-NEXT:    fmov d7, x10
+; CHECK-NEXT:    fmov d5, x11
+; CHECK-NEXT:    fcvtzs x10, s1
+; CHECK-NEXT:    fcvtzs x11, s19
+; CHECK-NEXT:    fcvtzs x18, s3
+; CHECK-NEXT:    fcvtzs x0, s2
+; CHECK-NEXT:    fmov d3, x13
+; CHECK-NEXT:    fmov d1, x15
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fmov d2, x14
+; CHECK-NEXT:    mov v4.d[1], x12
+; CHECK-NEXT:    mov v3.d[1], x9
+; CHECK-NEXT:    mov v7.d[1], x11
 ; CHECK-NEXT:    mov v0.d[1], x17
-; CHECK-NEXT:    mov v1.d[1], x10
-; CHECK-NEXT:    mov v3.d[1], x11
-; CHECK-NEXT:    mov v2.d[1], x12
-; CHECK-NEXT:    mov v6.d[1], x18
-; CHECK-NEXT:    mov v7.d[1], x13
-; CHECK-NEXT:    mov v4.d[1], x15
+; CHECK-NEXT:    mov v1.d[1], x16
+; CHECK-NEXT:    mov v2.d[1], x10
 ; CHECK-NEXT:    mov v5.d[1], x0
+; CHECK-NEXT:    mov v6.d[1], x18
 ; CHECK-NEXT:    ret
   %a = call <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float> %x)
   ret <16 x i64> %a
@@ -542,8 +542,7 @@ define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) nounwind {
 ; CHECK-LABEL: llrint_v1i64_v1f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    frintx d0, d0
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fcvtzs d0, d0
 ; CHECK-NEXT:    ret
   %a = call <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double> %x)
   ret <1 x i64> %a
@@ -570,17 +569,15 @@ define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) nounwind {
 ; CHECK-NEXT:    splice z0.d, p0, z0.d, z1.d
 ; CHECK-NEXT:    ptrue p0.d, vl4
 ; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
-; CHECK-NEXT:    mov z1.d, z0.d[2]
-; CHECK-NEXT:    mov z2.d, z0.d[3]
+; CHECK-NEXT:    mov z1.d, z0.d[3]
+; CHECK-NEXT:    mov z2.d, z0.d[2]
 ; CHECK-NEXT:    mov z3.d, z0.d[1]
-; CHECK-NEXT:    fcvtzs x9, d0
+; CHECK-NEXT:    fcvtzs d0, d0
 ; CHECK-NEXT:    fcvtzs x8, d1
-; CHECK-NEXT:    fcvtzs x10, d2
-; CHECK-NEXT:    fcvtzs x11, d3
-; CHECK-NEXT:    fmov d0, x9
-; CHECK-NEXT:    fmov d1, x8
-; CHECK-NEXT:    mov v0.d[1], x11
-; CHECK-NEXT:    mov v1.d[1], x10
+; CHECK-NEXT:    fcvtzs d1, d2
+; CHECK-NEXT:    fcvtzs x9, d3
+; CHECK-NEXT:    mov v0.d[1], x9
+; CHECK-NEXT:    mov v1.d[1], x8
 ; CHECK-NEXT:    ret
   %a = call <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double> %x)
   ret <4 x i64> %a
@@ -598,30 +595,26 @@ define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) nounwind {
 ; CHECK-NEXT:    splice z0.d, p0, z0.d, z1.d
 ; CHECK-NEXT:    splice z2.d, p0, z2.d, z3.d
 ; CHECK-NEXT:    ptrue p0.d, vl4
-; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
 ; CHECK-NEXT:    frintx z2.d, p0/m, z2.d
-; CHECK-NEXT:    mov z4.d, z2.d[2]
-; CHECK-NEXT:    mov z5.d, z0.d[2]
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    mov z3.d, z2.d[3]
-; CHECK-NEXT:    mov z6.d, z0.d[3]
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    mov z0.d, z2.d[1]
-; CHECK-NEXT:    fcvtzs x10, d2
-; CHECK-NEXT:    fcvtzs x11, d4
-; CHECK-NEXT:    fcvtzs x12, d5
-; CHECK-NEXT:    fcvtzs x9, d1
-; CHECK-NEXT:    fcvtzs x13, d3
-; CHECK-NEXT:    fcvtzs x14, d6
-; CHECK-NEXT:    fcvtzs x15, d0
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d2, x10
-; CHECK-NEXT:    fmov d1, x12
-; CHECK-NEXT:    fmov d3, x11
-; CHECK-NEXT:    mov v0.d[1], x9
-; CHECK-NEXT:    mov v2.d[1], x15
-; CHECK-NEXT:    mov v1.d[1], x14
-; CHECK-NEXT:    mov v3.d[1], x13
+; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
+; CHECK-NEXT:    mov z1.d, z2.d[3]
+; CHECK-NEXT:    mov z3.d, z0.d[3]
+; CHECK-NEXT:    mov z4.d, z0.d[1]
+; CHECK-NEXT:    mov z5.d, z2.d[2]
+; CHECK-NEXT:    mov z6.d, z0.d[2]
+; CHECK-NEXT:    mov z7.d, z2.d[1]
+; CHECK-NEXT:    fcvtzs d2, d2
+; CHECK-NEXT:    fcvtzs d0, d0
+; CHECK-NEXT:    fcvtzs x8, d1
+; CHECK-NEXT:    fcvtzs x9, d3
+; CHECK-NEXT:    fcvtzs x10, d4
+; CHECK-NEXT:    fcvtzs d3, d5
+; CHECK-NEXT:    fcvtzs d1, d6
+; CHECK-NEXT:    fcvtzs x11, d7
+; CHECK-NEXT:    mov v0.d[1], x10
+; CHECK-NEXT:    mov v2.d[1], x11
+; CHECK-NEXT:    mov v1.d[1], x9
+; CHECK-NEXT:    mov v3.d[1], x8
 ; CHECK-NEXT:    ret
   %a = call <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double> %x)
   ret <8 x i64> %a
@@ -631,68 +624,60 @@ declare <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double>)
 define <16 x i64> @llrint_v16f64(<16 x double> %x) nounwind {
 ; CHECK-LABEL: llrint_v16f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.d, vl2
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    // kill: def $q6 killed $q6 def $z6
 ; CHECK-NEXT:    // kill: def $q4 killed $q4 def $z4
 ; CHECK-NEXT:    // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT:    // kill: def $q7 killed $q7 def $z7
 ; CHECK-NEXT:    // kill: def $q5 killed $q5 def $z5
 ; CHECK-NEXT:    // kill: def $q3 killed $q3 def $z3
-; CHECK-NEXT:    // kill: def $q6 killed $q6 def $z6
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
-; CHECK-NEXT:    // kill: def $q7 killed $q7 def $z7
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    splice z6.d, p0, z6.d, z7.d
+; CHECK-NEXT:    splice z2.d, p0, z2.d, z3.d
+; CHECK-NEXT:    splice z4.d, p0, z4.d, z5.d
+; CHECK-NEXT:    splice z0.d, p0, z0.d, z1.d
 ; CHECK-NEXT:    ptrue p0.d, vl4
-; CHECK-NEXT:    splice z4.d, p1, z4.d, z5.d
-; CHECK-NEXT:    splice z2.d, p1, z2.d, z3.d
-; CHECK-NEXT:    splice z6.d, p1, z6.d, z7.d
-; CHECK-NEXT:    splice z0.d, p1, z0.d, z1.d
+; CHECK-NEXT:    frintx z6.d, p0/m, z6.d
 ; CHECK-NEXT:    frintx z4.d, p0/m, z4.d
 ; CHECK-NEXT:    frintx z2.d, p0/m, z2.d
-; CHECK-NEXT:    frintx z6.d, p0/m, z6.d
 ; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
-; CHECK-NEXT:    mov z3.d, z4.d[2]
+; CHECK-NEXT:    mov z1.d, z6.d[3]
+; CHECK-NEXT:    mov z3.d, z4.d[3]
 ; CHECK-NEXT:    mov z5.d, z2.d[3]
-; CHECK-NEXT:    mov z1.d, z6.d[2]
-; CHECK-NEXT:    fcvtzs x11, d0
-; CHECK-NEXT:    fcvtzs x12, d4
-; CHECK-NEXT:    fcvtzs x13, d2
-; CHECK-NEXT:    fcvtzs x14, d6
-; CHECK-NEXT:    mov z7.d, z6.d[3]
-; CHECK-NEXT:    mov z16.d, z0.d[3]
-; CHECK-NEXT:    fcvtzs x10, d3
-; CHECK-NEXT:    mov z3.d, z2.d[2]
-; CHECK-NEXT:    fcvtzs x8, d5
-; CHECK-NEXT:    mov z5.d, z0.d[2]
-; CHECK-NEXT:    fcvtzs x9, d1
-; CHECK-NEXT:    mov z1.d, z4.d[3]
-; CHECK-NEXT:    mov z2.d, z2.d[1]
-; CHECK-NEXT:    mov z17.d, z6.d[1]
-; CHECK-NEXT:    fcvtzs x17, d7
-; CHECK-NEXT:    fcvtzs x15, d3
+; CHECK-NEXT:    mov z16.d, z4.d[1]
+; CHECK-NEXT:    mov z7.d, z0.d[3]
+; CHECK-NEXT:    mov z17.d, z0.d[2]
+; CHECK-NEXT:    mov z18.d, z4.d[2]
+; CHECK-NEXT:    mov z19.d, z6.d[1]
+; CHECK-NEXT:    fcvtzs d4, d4
+; CHECK-NEXT:    fcvtzs x8, d1
+; CHECK-NEXT:    mov z1.d, z2.d[1]
+; CHECK-NEXT:    fcvtzs x9, d3
 ; CHECK-NEXT:    mov z3.d, z0.d[1]
-; CHECK-NEXT:    fmov d0, x11
-; CHECK-NEXT:    fcvtzs x16, d5
-; CHECK-NEXT:    mov z5.d, z4.d[1]
-; CHECK-NEXT:    fmov d4, x12
-; CHECK-NEXT:    fcvtzs x11, d2
-; CHECK-NEXT:    fmov d2, x13
+; CHECK-NEXT:    fcvtzs x10, d5
+; CHECK-NEXT:    mov z5.d, z6.d[2]
 ; CHECK-NEXT:    fcvtzs x12, d16
-; CHECK-NEXT:    fcvtzs x13, d3
-; CHECK-NEXT:    fmov d6, x14
-; CHECK-NEXT:    fcvtzs x18, d1
-; CHECK-NEXT:    fcvtzs x14, d5
-; CHECK-NEXT:    fcvtzs x0, d17
-; CHECK-NEXT:    fmov d3, x15
-; CHECK-NEXT:    fmov d1, x16
-; CHECK-NEXT:    fmov d5, x10
-; CHECK-NEXT:    fmov d7, x9
-; CHECK-NEXT:    mov v2.d[1], x11
-; CHECK-NEXT:    mov v0.d[1], x13
-; CHECK-NEXT:    mov v3.d[1], x8
-; CHECK-NEXT:    mov v4.d[1], x14
-; CHECK-NEXT:    mov v1.d[1], x12
-; CHECK-NEXT:    mov v6.d[1], x0
-; CHECK-NEXT:    mov v5.d[1], x18
-; CHECK-NEXT:    mov v7.d[1], x17
+; CHECK-NEXT:    mov z16.d, z2.d[2]
+; CHECK-NEXT:    fcvtzs x11, d7
+; CHECK-NEXT:    fcvtzs x13, d1
+; CHECK-NEXT:    fcvtzs d1, d17
+; CHECK-NEXT:    fcvtzs d0, d0
+; CHECK-NEXT:    fcvtzs x14, d3
+; CHECK-NEXT:    fcvtzs d7, d5
+; CHECK-NEXT:    fcvtzs d2, d2
+; CHECK-NEXT:    fcvtzs d3, d16
+; CHECK-NEXT:    fcvtzs d5, d18
+; CHECK-NEXT:    fcvtzs x15, d19
+; CHECK-NEXT:    fcvtzs d6, d6
+; CHECK-NEXT:    mov v4.d[1], x12
+; CHECK-NEXT:    mov v1.d[1], x11
+; CHECK-NEXT:    mov v0.d[1], x14
+; CHECK-NEXT:    mov v2.d[1], x13
+; CHECK-NEXT:    mov v7.d[1], x8
+; CHECK-NEXT:    mov v3.d[1], x10
+; CHECK-NEXT:    mov v5.d[1], x9
+; CHECK-NEXT:    mov v6.d[1], x15
 ; CHECK-NEXT:    ret
   %a = call <16 x i64> @llvm.llrint.v16i64.v16f64(<16 x double> %x)
   ret <16 x i64> %a
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-vector-lrint.ll b/llvm/test/CodeGen/AArch64/sve-fixed-vector-lrint.ll
index 2b8e340a1dc0d..2d7fe4a22ad0a 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-vector-lrint.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-vector-lrint.ll
@@ -97,17 +97,17 @@ define <8 x iXLen> @lrint_v8f16(<8 x half> %x) nounwind {
 ; CHECK-i32-NEXT:    mov h3, v2.h[7]
 ; CHECK-i32-NEXT:    fcvtzs w12, h4
 ; CHECK-i32-NEXT:    mov h2, v2.h[3]
-; CHECK-i32-NEXT:    fcvtzs w13, h0
-; CHECK-i32-NEXT:    fmov s0, w9
 ; CHECK-i32-NEXT:    fmov s1, w8
-; CHECK-i32-NEXT:    fcvtzs w8, h3
-; CHECK-i32-NEXT:    fcvtzs w9, h2
+; CHECK-i32-NEXT:    fcvtzs w8, h0
+; CHECK-i32-NEXT:    fmov s0, w9
+; CHECK-i32-NEXT:    fcvtzs w9, h3
 ; CHECK-i32-NEXT:    mov v0.s[1], w11
 ; CHECK-i32-NEXT:    mov v1.s[1], w10
-; CHECK-i32-NEXT:    mov v0.s[2], w13
+; CHECK-i32-NEXT:    fcvtzs w10, h2
+; CHECK-i32-NEXT:    mov v0.s[2], w8
 ; CHECK-i32-NEXT:    mov v1.s[2], w12
-; CHECK-i32-NEXT:    mov v0.s[3], w9
-; CHECK-i32-NEXT:    mov v1.s[3], w8
+; CHECK-i32-NEXT:    mov v0.s[3], w10
+; CHECK-i32-NEXT:    mov v1.s[3], w9
 ; CHECK-i32-NEXT:    ret
 ;
 ; CHECK-i64-LABEL: lrint_v8f16:
@@ -115,28 +115,28 @@ define <8 x iXLen> @lrint_v8f16(<8 x half> %x) nounwind {
 ; CHECK-i64-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
 ; CHECK-i64-NEXT:    frintx v0.4h, v0.4h
 ; CHECK-i64-NEXT:    frintx v1.4h, v1.4h
-; CHECK-i64-NEXT:    mov h4, v0.h[2]
-; CHECK-i64-NEXT:    mov h2, v0.h[1]
-; CHECK-i64-NEXT:    mov h7, v0.h[3]
+; CHECK-i64-NEXT:    mov h3, v0.h[2]
+; CHECK-i64-NEXT:    mov h4, v0.h[1]
+; CHECK-i64-NEXT:    mov h5, v0.h[3]
 ; CHECK-i64-NEXT:    fcvtzs x8, h0
-; CHECK-i64-NEXT:    mov h3, v1.h[2]
-; CHECK-i64-NEXT:    mov h5, v1.h[3]
-; CHECK-i64-NEXT:    mov h6, v1.h[1]
-; CHECK-i64-NEXT:    fcvtzs x11, h1
+; CHECK-i64-NEXT:    mov h2, v1.h[2]
+; CHECK-i64-NEXT:    mov h6, v1.h[3]
+; CHECK-i64-NEXT:    mov h7, v1.h[1]
+; CHECK-i64-NEXT:    fcvtzs x10, h1
+; CHECK-i64-NEXT:    fcvtzs x11, h3
 ; CHECK-i64-NEXT:    fcvtzs x12, h4
-; CHECK-i64-NEXT:    fcvtzs x9, h2
-; CHECK-i64-NEXT:    fcvtzs x15, h7
-; CHECK-i64-NEXT:    fmov d0, x8
-; CHECK-i64-NEXT:    fcvtzs x10, h3
 ; CHECK-i64-NEXT:    fcvtzs x13, h5
+; CHECK-i64-NEXT:    fmov d0, x8
+; CHECK-i64-NEXT:    fcvtzs x9, h2
 ; CHECK-i64-NEXT:    fcvtzs x14, h6
-; CHECK-i64-NEXT:    fmov d1, x12
-; CHECK-i64-NEXT:    fmov d2, x11
-; CHECK-i64-NEXT:    mov v0.d[1], x9
-; CHECK-i64-NEXT:    fmov d3, x10
-; CHECK-i64-NEXT:    mov v1.d[1], x15
-; CHECK-i64-NEXT:    mov v2.d[1], x14
-; CHECK-i64-NEXT:    mov v3.d[1], x13
+; CHECK-i64-NEXT:    fcvtzs x15, h7
+; CHECK-i64-NEXT:    fmov d2, x10
+; CHECK-i64-NEXT:    fmov d1, x11
+; CHECK-i64-NEXT:    mov v0.d[1], x12
+; CHECK-i64-NEXT:    fmov d3, x9
+; CHECK-i64-NEXT:    mov v1.d[1], x13
+; CHECK-i64-NEXT:    mov v2.d[1], x15
+; CHECK-i64-NEXT:    mov v3.d[1], x14
 ; CHECK-i64-NEXT:    ret
   %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f16(<8 x half> %x)
   ret <8 x iXLen> %a
@@ -147,107 +147,107 @@ define <16 x iXLen> @lrint_v16f16(<16 x half> %x) nounwind {
 ; CHECK-i32-LABEL: lrint_v16f16:
 ; CHECK-i32:       // %bb.0:
 ; CHECK-i32-NEXT:    frintx v1.8h, v1.8h
-; CHECK-i32-NEXT:    frintx v0.8h, v0.8h
-; CHECK-i32-NEXT:    mov h3, v1.h[4]
+; CHECK-i32-NEXT:    frintx v4.8h, v0.8h
+; CHECK-i32-NEXT:    mov h0, v1.h[6]
 ; CHECK-i32-NEXT:    mov h2, v1.h[5]
-; CHECK-i32-NEXT:    mov h5, v0.h[4]
-; CHECK-i32-NEXT:    mov h4, v1.h[1]
-; CHECK-i32-NEXT:    mov h6, v0.h[1]
-; CHECK-i32-NEXT:    fcvtzs w11, h0
-; CHECK-i32-NEXT:    fcvtzs w14, h1
-; CHECK-i32-NEXT:    mov h7, v1.h[6]
+; CHECK-i32-NEXT:    mov h3, v1.h[4]
+; CHECK-i32-NEXT:    mov h5, v4.h[4]
+; CHECK-i32-NEXT:    mov h7, v4.h[1]
+; CHECK-i32-NEXT:    fcvtzs w10, h1
+; CHECK-i32-NEXT:    fcvtzs w13, h4
+; CHECK-i32-NEXT:    mov h6, v1.h[2]
 ; CHECK-i32-NEXT:    mov h16, v1.h[3]
-; CHECK-i32-NEXT:    mov h17, v0.h[7]
-; CHECK-i32-NEXT:    mov h18, v0.h[3]
-; CHECK-i32-NEXT:    fcvtzs w9, h3
-; CHECK-i32-NEXT:    mov h3, v0.h[5]
-; CHECK-i32-NEXT:    fcvtzs w8, h2
-; CHECK-i32-NEXT:    mov h2, v1.h[2]
+; CHECK-i32-NEXT:    mov h17, v4.h[7]
+; CHECK-i32-NEXT:    fcvtzs w8, h0
+; CHECK-i32-NEXT:    mov h0, v1.h[1]
+; CHECK-i32-NEXT:    fcvtzs w9, h2
+; CHECK-i32-NEXT:    mov h2, v4.h[5]
+; CHECK-i32-NEXT:    fcvtzs w11, h3
+; CHECK-i32-NEXT:    mov h3, v4.h[6]
 ; CHECK-i32-NEXT:    fcvtzs w12, h5
-; CHECK-i32-NEXT:    fcvtzs w10, h4
-; CHECK-i32-NEXT:    mov h4, v0.h[6]
-; CHECK-i32-NEXT:    mov h5, v0.h[2]
-; CHECK-i32-NEXT:    fcvtzs w13, h6
-; CHECK-i32-NEXT:    mov h6, v1.h[7]
-; CHECK-i32-NEXT:    fmov s0, w11
-; CHECK-i32-NEXT:    fcvtzs w16, h7
-; CHECK-i32-NEXT:    fcvtzs w15, h3
-; CHECK-i32-NEXT:    fmov s3, w9
-; CHECK-i32-NEXT:    fcvtzs w9, h16
-; CHECK-i32-NEXT:    fcvtzs w17, h2
+; CHECK-i32-NEXT:    mov h5, v4.h[2]
+; CHECK-i32-NEXT:    fcvtzs w14, h7
+; CHECK-i32-NEXT:    mov h7, v1.h[7]
+; CHECK-i32-NEXT:    fcvtzs w17, h6
+; CHECK-i32-NEXT:    mov h4, v4.h[3]
+; CHECK-i32-NEXT:    fcvtzs w15, h0
+; CHECK-i32-NEXT:    fmov s0, w13
+; CHECK-i32-NEXT:    fcvtzs w16, h2
+; CHECK-i32-NEXT:    fmov s2, w10
+; CHECK-i32-NEXT:    fcvtzs w10, h3
+; CHECK-i32-NEXT:    fmov s3, w11
 ; CHECK-i32-NEXT:    fmov s1, w12
-; CHECK-i32-NEXT:    fmov s2, w14
-; CHECK-i32-NEXT:    fcvtzs w11, h4
 ; CHECK-i32-NEXT:    fcvtzs w18, h5
-; CHECK-i32-NEXT:    mov v0.s[1], w13
-; CHECK-i32-NEXT:    mov v3.s[1], w8
-; CHECK-i32-NEXT:    fcvtzs w8, h6
-; CHECK-i32-NEXT:    fcvtzs w12, h18
-; CHECK-i32-NEXT:    mov v1.s[1], w15
-; CHECK-i32-NEXT:    mov v2.s[1], w10
-; CHECK-i32-NEXT:    fcvtzs w10, h17
+; CHECK-i32-NEXT:    mov v0.s[1], w14
+; CHECK-i32-NEXT:    fcvtzs w11, h16
+; CHECK-i32-NEXT:    fcvtzs w12, h17
+; CHECK-i32-NEXT:    mov v2.s[1], w15
+; CHECK-i32-NEXT:    fcvtzs w13, h4
+; CHECK-i32-NEXT:    mov v1.s[1], w16
+; CHECK-i32-NEXT:    mov v3.s[1], w9
+; CHECK-i32-NEXT:    fcvtzs w9, h7
 ; CHECK-i32-NEXT:    mov v0.s[2], w18
-; CHECK-i32-NEXT:    mov v3.s[2], w16
-; CHECK-i32-NEXT:    mov v1.s[2], w11
 ; CHECK-i32-NEXT:    mov v2.s[2], w17
-; CHECK-i32-NEXT:    mov v0.s[3], w12
-; CHECK-i32-NEXT:    mov v3.s[3], w8
-; CHECK-i32-NEXT:    mov v1.s[3], w10
-; CHECK-i32-NEXT:    mov v2.s[3], w9
+; CHECK-i32-NEXT:    mov v1.s[2], w10
+; CHECK-i32-NEXT:    mov v3.s[2], w8
+; CHECK-i32-NEXT:    mov v0.s[3], w13
+; CHECK-i32-NEXT:    mov v2.s[3], w11
+; CHECK-i32-NEXT:    mov v1.s[3], w12
+; CHECK-i32-NEXT:    mov v3.s[3], w9
 ; CHECK-i32-NEXT:    ret
 ;
 ; CHECK-i64-LABEL: lrint_v16f16:
 ; CHECK-i64:       // %bb.0:
 ; CHECK-i64-NEXT:    ext v2.16b, v1.16b, v1.16b, #8
+; CHECK-i64-NEXT:    ext v3.16b, v0.16b, v0.16b, #8
+; CHECK-i64-NEXT:    frintx v0.4h, v0.4h
 ; CHECK-i64-NEXT:    frintx v1.4h, v1.4h
-; CHECK-i64-NEXT:    frintx v3.4h, v0.4h
-; CHECK-i64-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
 ; CHECK-i64-NEXT:    frintx v2.4h, v2.4h
+; CHECK-i64-NEXT:    frintx v3.4h, v3.4h
+; CHECK-i64-NEXT:    mov h5, v0.h[2]
 ; CHECK-i64-NEXT:    mov h4, v1.h[2]
+; CHECK-i64-NEXT:    mov h6, v0.h[1]
+; CHECK-i64-NEXT:    fcvtzs x8, h1
+; CHECK-i64-NEXT:    mov h16, v0.h[3]
+; CHECK-i64-NEXT:    fcvtzs x9, h0
+; CHECK-i64-NEXT:    mov h7, v1.h[1]
+; CHECK-i64-NEXT:    mov h1, v1.h[3]
+; CHECK-i64-NEXT:    mov h0, v2.h[3]
+; CHECK-i64-NEXT:    mov h17, v2.h[2]
+; CHECK-i64-NEXT:    fcvtzs x12, h5
 ; CHECK-i64-NEXT:    mov h5, v3.h[2]
-; CHECK-i64-NEXT:    frintx v0.4h, v0.4h
-; CHECK-i64-NEXT:    mov h6, v3.h[1]
-; CHECK-i64-NEXT:    fcvtzs x9, h3
-; CHECK-i64-NEXT:    mov h16, v1.h[1]
-; CHECK-i64-NEXT:    fcvtzs x12, h1
-; CHECK-i64-NEXT:    mov h3, v3.h[3]
-; CHECK-i64-NEXT:    mov h17, v1.h[3]
-; CHECK-i64-NEXT:    mov h7, v2.h[3]
-; CHECK-i64-NEXT:    fcvtzs x8, h4
-; CHECK-i64-NEXT:    fcvtzs x10, h5
-; CHECK-i64-NEXT:    mov h4, v2.h[2]
-; CHECK-i64-NEXT:    mov h5, v0.h[2]
-; CHECK-i64-NEXT:    fcvtzs x11, h6
-; CHECK-i64-NEXT:    mov h6, v0.h[3]
-; CHECK-i64-NEXT:    fcvtzs x15, h2
-; CHECK-i64-NEXT:    mov h2, v2.h[1]
-; CHECK-i64-NEXT:    fcvtzs x14, h0
-; CHECK-i64-NEXT:    fcvtzs x17, h3
-; CHECK-i64-NEXT:    fcvtzs x0, h17
-; CHECK-i64-NEXT:    fcvtzs x13, h7
-; CHECK-i64-NEXT:    mov h7, v0.h[1]
+; CHECK-i64-NEXT:    fcvtzs x11, h2
+; CHECK-i64-NEXT:    mov h18, v3.h[3]
+; CHECK-i64-NEXT:    fcvtzs x14, h3
+; CHECK-i64-NEXT:    mov h3, v3.h[1]
+; CHECK-i64-NEXT:    mov h19, v2.h[1]
+; CHECK-i64-NEXT:    fcvtzs x10, h4
+; CHECK-i64-NEXT:    fmov d4, x8
+; CHECK-i64-NEXT:    fcvtzs x13, h6
+; CHECK-i64-NEXT:    fcvtzs x15, h0
+; CHECK-i64-NEXT:    fcvtzs x8, h17
 ; CHECK-i64-NEXT:    fmov d0, x9
-; CHECK-i64-NEXT:    fcvtzs x16, h4
 ; CHECK-i64-NEXT:    fcvtzs x9, h5
-; CHECK-i64-NEXT:    fmov d4, x12
-; CHECK-i64-NEXT:    fcvtzs x12, h16
-; CHECK-i64-NEXT:    fmov d1, x10
-; CHECK-i64-NEXT:    fcvtzs x10, h6
-; CHECK-i64-NEXT:    fmov d5, x8
-; CHECK-i64-NEXT:    fcvtzs x8, h2
+; CHECK-i64-NEXT:    fcvtzs x16, h7
+; CHECK-i64-NEXT:    fcvtzs x17, h16
+; CHECK-i64-NEXT:    fmov d6, x11
+; CHECK-i64-NEXT:    fcvtzs x11, h18
+; CHECK-i64-NEXT:    fcvtzs x18, h3
 ; CHECK-i64-NEXT:    fmov d2, x14
-; CHECK-i64-NEXT:    fcvtzs x18, h7
-; CHECK-i64-NEXT:    fmov d6, x15
-; CHECK-i64-NEXT:    mov v0.d[1], x11
+; CHECK-i64-NEXT:    fcvtzs x14, h19
+; CHECK-i64-NEXT:    fcvtzs x0, h1
+; CHECK-i64-NEXT:    fmov d5, x10
+; CHECK-i64-NEXT:    fmov d1, x12
+; CHECK-i64-NEXT:    fmov d7, x8
 ; CHECK-i64-NEXT:    fmov d3, x9
-; CHECK-i64-NEXT:    fmov d7, x16
+; CHECK-i64-NEXT:    mov v0.d[1], x13
+; CHECK-i64-NEXT:    mov v4.d[1], x16
+; CHECK-i64-NEXT:    mov v2.d[1], x18
 ; CHECK-i64-NEXT:    mov v1.d[1], x17
-; CHECK-i64-NEXT:    mov v4.d[1], x12
 ; CHECK-i64-NEXT:    mov v5.d[1], x0
-; CHECK-i64-NEXT:    mov v6.d[1], x8
-; CHECK-i64-NEXT:    mov v2.d[1], x18
-; CHECK-i64-NEXT:    mov v3.d[1], x10
-; CHECK-i64-NEXT:    mov v7.d[1], x13
+; CHECK-i64-NEXT:    mov v6.d[1], x14
+; CHECK-i64-NEXT:    mov v3.d[1], x11
+; CHECK-i64-NEXT:    mov v7.d[1], x15
 ; CHECK-i64-NEXT:    ret
   %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f16(<16 x half> %x)
   ret <16 x iXLen> %a
@@ -257,110 +257,104 @@ declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f16(<16 x half>)
 define <32 x iXLen> @lrint_v32f16(<32 x half> %x) nounwind {
 ; CHECK-i32-LABEL: lrint_v32f16:
 ; CHECK-i32:       // %bb.0:
-; CHECK-i32-NEXT:    stp x26, x25, [sp, #-64]! // 16-byte Folded Spill
+; CHECK-i32-NEXT:    stp x20, x19, [sp, #-16]! // 16-byte Folded Spill
 ; CHECK-i32-NEXT:    frintx v3.8h, v3.8h
 ; CHECK-i32-NEXT:    frintx v2.8h, v2.8h
-; CHECK-i32-NEXT:    stp x20, x19, [sp, #48] // 16-byte Folded Spill
 ; CHECK-i32-NEXT:    frintx v1.8h, v1.8h
-; CHECK-i32-NEXT:    frintx v0.8h, v0.8h
-; CHECK-i32-NEXT:    stp x24, x23, [sp, #16] // 16-byte Folded Spill
-; CHECK-i32-NEXT:    stp x22, x21, [sp, #32] // 16-byte Folded Spill
+; CHECK-i32-NEXT:    mov h16, v3.h[3]
+; CHECK-i32-NEXT:    mov h17, v3.h[2]
 ; CHECK-i32-NEXT:    mov h4, v3.h[7]
 ; CHECK-i32-NEXT:    mov h5, v3.h[6]
 ; CHECK-i32-NEXT:    mov h6, v3.h[5]
 ; CHECK-i32-NEXT:    mov h7, v3.h[4]
-; CHECK-i32-NEXT:    mov h16, v3.h[3]
-; CHECK-i32-NEXT:    mov h17, v3.h[2]
 ; CHECK-i32-NEXT:    mov h18, v3.h[1]
-; CHECK-i32-NEXT:    mov h19, v2.h[7]
-; CHECK-i32-NEXT:    fcvtzs w1, h3
-; CHECK-i32-NEXT:    mov h3, v1.h[6]
-; CHECK-i32-NEXT:    fcvtzs w7, h2
-; CHECK-i32-NEXT:    fcvtzs w22, h0
+; CHECK-i32-NEXT:    fcvtzs w13, h3
+; CHECK-i32-NEXT:    mov h3, v2.h[7]
+; CHECK-i32-NEXT:    mov h19, v2.h[4]
+; CHECK-i32-NEXT:    fcvtzs w18, h2
+; CHECK-i32-NEXT:    mov h20, v2.h[3]
+; CHECK-i32-NEXT:    fcvtzs w9, h16
+; CHECK-i32-NEXT:    fcvtzs w11, h17
+; CHECK-i32-NEXT:    mov h16, v2.h[1]
+; CHECK-i32-NEXT:    frintx v17.8h, v0.8h
 ; CHECK-i32-NEXT:    fcvtzs w8, h4
 ; CHECK-i32-NEXT:    mov h4, v2.h[6]
+; CHECK-i32-NEXT:    mov h0, v1.h[6]
 ; CHECK-i32-NEXT:    fcvtzs w10, h5
 ; CHECK-i32-NEXT:    mov h5, v2.h[5]
+; CHECK-i32-NEXT:    mov h21, v2.h[2]
+; CHECK-i32-NEXT:    mov h2, v1.h[4]
+; CHECK-i32-NEXT:    fcvtzs w15, h7
+; CHECK-i32-NEXT:    fcvtzs w1, h16
 ; CHECK-i32-NEXT:    fcvtzs w12, h6
-; CHECK-i32-NEXT:    mov h6, v2.h[4]
-; CHECK-i32-NEXT:    fcvtzs w13, h7
-; CHECK-i32-NEXT:    mov h7, v2.h[3]
-; CHECK-i32-NEXT:    fcvtzs w9, h16
-; CHECK-i32-NEXT:    fcvtzs w11, h17
-; CHECK-i32-NEXT:    mov h16, v2.h[2]
-; CHECK-i32-NEXT:    mov h17, v2.h[1]
-; CHECK-i32-NEXT:    fcvtzs w17, h4
-; CHECK-i32-NEXT:    mov h4, v1.h[5]
-; CHECK-i32-NEXT:    mov h2, v0.h[5]
+; CHECK-i32-NEXT:    fcvtzs w17, h19
+; CHECK-i32-NEXT:    mov h16, v17.h[4]
+; CHECK-i32-NEXT:    fcvtzs w14, h18
+; CHECK-i32-NEXT:    fmov s6, w13
+; CHECK-i32-NEXT:    fcvtzs w13, h3
+; CHECK-i32-NEXT:    fcvtzs w16, h4
+; CHECK-i32-NEXT:    mov h3, v1.h[5]
+; CHECK-i32-NEXT:    mov h18, v17.h[5]
+; CHECK-i32-NEXT:    fmov s4, w18
+; CHECK-i32-NEXT:    fcvtzs w18, h0
+; CHECK-i32-NEXT:    mov h0, v17.h[1]
+; CHECK-i32-NEXT:    mov h19, v1.h[1]
+; CHECK-i32-NEXT:    fcvtzs w2, h2
+; CHECK-i32-NEXT:    mov h2, v1.h[2]
+; CHECK-i32-NEXT:    fcvtzs w4, h1
+; CHECK-i32-NEXT:    fcvtzs w6, h16
+; CHECK-i32-NEXT:    fcvtzs w7, h17
+; CHECK-i32-NEXT:    fmov s7, w15
 ; CHECK-i32-NEXT:    fcvtzs w0, h5
-; CHECK-i32-NEXT:    fcvtzs w3, h6
-; CHECK-i32-NEXT:    mov h5, v1.h[4]
-; CHECK-i32-NEXT:    mov h6, v0.h[4]
-; CHECK-i32-NEXT:    fcvtzs w16, h7
-; CHECK-i32-NEXT:    mov h7, v0.h[1]
-; CHECK-i32-NEXT:    fcvtzs w15, h18
-; CHECK-i32-NEXT:    fcvtzs w2, h3
-; CHECK-i32-NEXT:    mov h3, v1.h[2]
-; CHECK-i32-NEXT:    fcvtzs w19, h4
-; CHECK-i32-NEXT:    mov h4, v1.h[1]
-; CHECK-i32-NEXT:    mov h18, v0.h[6]
-; CHECK-i32-NEXT:    fcvtzs w20, h5
-; CHECK-i32-NEXT:    fcvtzs w23, h2
-; CHECK-i32-NEXT:    mov h2, v0.h[2]
-; CHECK-i32-NEXT:    fcvtzs w21, h6
-; CHECK-i32-NEXT:    fcvtzs w25, h1
-; CHECK-i32-NEXT:    fcvtzs w4, h17
-; CHECK-i32-NEXT:    fcvtzs w24, h7
-; CHECK-i32-NEXT:    fcvtzs w14, h19
-; CHECK-i32-NEXT:    fcvtzs w18, h16
-; CHECK-i32-NEXT:    fcvtzs w26, h4
+; CHECK-i32-NEXT:    fcvtzs w15, h20
+; CHECK-i32-NEXT:    fcvtzs w3, h3
+; CHECK-i32-NEXT:    mov h20, v17.h[6]
+; CHECK-i32-NEXT:    fcvtzs w5, h18
+; CHECK-i32-NEXT:    mov h18, v17.h[2]
+; CHECK-i32-NEXT:    fcvtzs w19, h0
+; CHECK-i32-NEXT:    fcvtzs w20, h19
+; CHECK-i32-NEXT:    fmov s5, w17
+; CHECK-i32-NEXT:    fcvtzs w17, h21
 ; CHECK-i32-NEXT:    mov h16, v1.h[7]
-; CHECK-i32-NEXT:    mov h17, v1.h[3]
-; CHECK-i32-NEXT:    fcvtzs w5, h3
-; CHECK-i32-NEXT:    mov h19, v0.h[7]
+; CHECK-i32-NEXT:    fmov s3, w2
+; CHECK-i32-NEXT:    mov h21, v1.h[3]
+; CHECK-i32-NEXT:    fcvtzs w2, h2
+; CHECK-i32-NEXT:    fmov s2, w4
+; CHECK-i32-NEXT:    fmov s1, w6
+; CHECK-i32-NEXT:    fmov s0, w7
+; CHECK-i32-NEXT:    mov h19, v17.h[7]
+; CHECK-i32-NEXT:    fcvtzs w4, h20
 ; CHECK-i32-NEXT:    fcvtzs w6, h18
-; CHECK-i32-NEXT:    mov h18, v0.h[3]
-; CHECK-i32-NEXT:    fmov s0, w22
-; CHECK-i32-NEXT:    fmov s1, w21
-; CHECK-i32-NEXT:    fcvtzs w21, h2
-; CHECK-i32-NEXT:    fmov s2, w25
-; CHECK-i32-NEXT:    fmov s3, w20
-; CHECK-i32-NEXT:    fmov s4, w7
-; CHECK-i32-NEXT:    fmov s5, w3
-; CHECK-i32-NEXT:    fmov s6, w1
-; CHECK-i32-NEXT:    fmov s7, w13
-; CHECK-i32-NEXT:    mov v0.s[1], w24
-; CHECK-i32-NEXT:    mov v1.s[1], w23
-; CHECK-i32-NEXT:    ldp x24, x23, [sp, #16] // 16-byte Folded Reload
-; CHECK-i32-NEXT:    mov v2.s[1], w26
-; CHECK-i32-NEXT:    mov v3.s[1], w19
-; CHECK-i32-NEXT:    ldp x20, x19, [sp, #48] // 16-byte Folded Reload
-; CHECK-i32-NEXT:    mov v4.s[1], w4
+; CHECK-i32-NEXT:    mov h17, v17.h[3]
+; CHECK-i32-NEXT:    mov v3.s[1], w3
+; CHECK-i32-NEXT:    mov v1.s[1], w5
+; CHECK-i32-NEXT:    mov v2.s[1], w20
+; CHECK-i32-NEXT:    mov v4.s[1], w1
+; CHECK-i32-NEXT:    mov v0.s[1], w19
 ; CHECK-i32-NEXT:    mov v5.s[1], w0
-; CHECK-i32-NEXT:    mov v6.s[1], w15
+; CHECK-i32-NEXT:    mov v6.s[1], w14
 ; CHECK-i32-NEXT:    mov v7.s[1], w12
 ; CHECK-i32-NEXT:    fcvtzs w12, h16
-; CHECK-i32-NEXT:    fcvtzs w13, h17
-; CHECK-i32-NEXT:    fcvtzs w15, h19
-; CHECK-i32-NEXT:    fcvtzs w0, h18
-; CHECK-i32-NEXT:    mov v0.s[2], w21
-; CHECK-i32-NEXT:    ldp x22, x21, [sp, #32] // 16-byte Folded Reload
-; CHECK-i32-NEXT:    mov v1.s[2], w6
-; CHECK-i32-NEXT:    mov v2.s[2], w5
-; CHECK-i32-NEXT:    mov v3.s[2], w2
-; CHECK-i32-NEXT:    mov v4.s[2], w18
-; CHECK-i32-NEXT:    mov v5.s[2], w17
+; CHECK-i32-NEXT:    fcvtzs w14, h21
+; CHECK-i32-NEXT:    fcvtzs w0, h19
+; CHECK-i32-NEXT:    fcvtzs w1, h17
+; CHECK-i32-NEXT:    mov v3.s[2], w18
+; CHECK-i32-NEXT:    mov v1.s[2], w4
+; CHECK-i32-NEXT:    mov v2.s[2], w2
+; CHECK-i32-NEXT:    mov v4.s[2], w17
+; CHECK-i32-NEXT:    mov v0.s[2], w6
+; CHECK-i32-NEXT:    mov v5.s[2], w16
 ; CHECK-i32-NEXT:    mov v6.s[2], w11
 ; CHECK-i32-NEXT:    mov v7.s[2], w10
-; CHECK-i32-NEXT:    mov v0.s[3], w0
-; CHECK-i32-NEXT:    mov v1.s[3], w15
-; CHECK-i32-NEXT:    mov v2.s[3], w13
 ; CHECK-i32-NEXT:    mov v3.s[3], w12
-; CHECK-i32-NEXT:    mov v4.s[3], w16
-; CHECK-i32-NEXT:    mov v5.s[3], w14
+; CHECK-i32-NEXT:    mov v1.s[3], w0
+; CHECK-i32-NEXT:    mov v2.s[3], w14
+; CHECK-i32-NEXT:    mov v4.s[3], w15
+; CHECK-i32-NEXT:    mov v0.s[3], w1
+; CHECK-i32-NEXT:    mov v5.s[3], w13
 ; CHECK-i32-NEXT:    mov v6.s[3], w9
 ; CHECK-i32-NEXT:    mov v7.s[3], w8
-; CHECK-i32-NEXT:    ldp x26, x25, [sp], #64 // 16-byte Folded Reload
+; CHECK-i32-NEXT:    ldp x20, x19, [sp], #16 // 16-byte Folded Reload
 ; CHECK-i32-NEXT:    ret
 ;
 ; CHECK-i64-LABEL: lrint_v32f16:
@@ -567,54 +561,52 @@ define <8 x iXLen> @lrint_v8f32(<8 x float> %x) nounwind {
 ; CHECK-i32-NEXT:    ptrue p0.s, vl8
 ; CHECK-i32-NEXT:    movprfx z2, z0
 ; CHECK-i32-NEXT:    frintx z2.s, p0/m, z0.s
-; CHECK-i32-NEXT:    mov z0.s, z2.s[4]
-; CHECK-i32-NEXT:    mov z1.s, z2.s[5]
+; CHECK-i32-NEXT:    mov z0.s, z2.s[5]
+; CHECK-i32-NEXT:    mov z1.s, z2.s[4]
 ; CHECK-i32-NEXT:    mov z3.s, z2.s[1]
-; CHECK-i32-NEXT:    fcvtzs w9, s2
+; CHECK-i32-NEXT:    mov z4.s, z2.s[6]
+; CHECK-i32-NEXT:    mov z5.s, z2.s[2]
 ; CHECK-i32-NEXT:    fcvtzs w8, s0
-; CHECK-i32-NEXT:    mov z0.s, z2.s[6]
-; CHECK-i32-NEXT:    fcvtzs w10, s1
-; CHECK-i32-NEXT:    mov z1.s, z2.s[2]
-; CHECK-i32-NEXT:    fcvtzs w11, s3
+; CHECK-i32-NEXT:    fcvtzs s1, s1
+; CHECK-i32-NEXT:    fcvtzs w9, s3
+; CHECK-i32-NEXT:    fcvtzs s0, s2
+; CHECK-i32-NEXT:    fcvtzs w10, s4
+; CHECK-i32-NEXT:    fcvtzs w11, s5
 ; CHECK-i32-NEXT:    mov z3.s, z2.s[7]
 ; CHECK-i32-NEXT:    mov z2.s, z2.s[3]
-; CHECK-i32-NEXT:    fcvtzs w12, s0
-; CHECK-i32-NEXT:    fmov s0, w9
-; CHECK-i32-NEXT:    fcvtzs w13, s1
-; CHECK-i32-NEXT:    fmov s1, w8
+; CHECK-i32-NEXT:    mov v1.s[1], w8
+; CHECK-i32-NEXT:    mov v0.s[1], w9
 ; CHECK-i32-NEXT:    fcvtzs w8, s3
 ; CHECK-i32-NEXT:    fcvtzs w9, s2
-; CHECK-i32-NEXT:    mov v0.s[1], w11
-; CHECK-i32-NEXT:    mov v1.s[1], w10
-; CHECK-i32-NEXT:    mov v0.s[2], w13
-; CHECK-i32-NEXT:    mov v1.s[2], w12
-; CHECK-i32-NEXT:    mov v0.s[3], w9
+; CHECK-i32-NEXT:    mov v1.s[2], w10
+; CHECK-i32-NEXT:    mov v0.s[2], w11
 ; CHECK-i32-NEXT:    mov v1.s[3], w8
+; CHECK-i32-NEXT:    mov v0.s[3], w9
 ; CHECK-i32-NEXT:    ret
 ;
 ; CHECK-i64-LABEL: lrint_v8f32:
 ; CHECK-i64:       // %bb.0:
-; CHECK-i64-NEXT:    frintx v0.4s, v0.4s
 ; CHECK-i64-NEXT:    frintx v1.4s, v1.4s
-; CHECK-i64-NEXT:    mov s3, v1.s[2]
-; CHECK-i64-NEXT:    mov s4, v0.s[2]
-; CHECK-i64-NEXT:    mov s2, v0.s[1]
+; CHECK-i64-NEXT:    frintx v0.4s, v0.4s
+; CHECK-i64-NEXT:    mov s2, v1.s[2]
+; CHECK-i64-NEXT:    mov s3, v0.s[2]
+; CHECK-i64-NEXT:    mov s4, v0.s[1]
 ; CHECK-i64-NEXT:    mov s5, v1.s[3]
 ; CHECK-i64-NEXT:    mov s6, v1.s[1]
 ; CHECK-i64-NEXT:    mov s7, v0.s[3]
 ; CHECK-i64-NEXT:    fcvtzs x8, s0
 ; CHECK-i64-NEXT:    fcvtzs x10, s1
+; CHECK-i64-NEXT:    fcvtzs x9, s2
 ; CHECK-i64-NEXT:    fcvtzs x11, s3
 ; CHECK-i64-NEXT:    fcvtzs x12, s4
-; CHECK-i64-NEXT:    fcvtzs x9, s2
 ; CHECK-i64-NEXT:    fcvtzs x13, s5
 ; CHECK-i64-NEXT:    fcvtzs x14, s6
 ; CHECK-i64-NEXT:    fcvtzs x15, s7
 ; CHECK-i64-NEXT:    fmov d0, x8
 ; CHECK-i64-NEXT:    fmov d2, x10
-; CHECK-i64-NEXT:    fmov d1, x12
-; CHECK-i64-NEXT:    fmov d3, x11
-; CHECK-i64-NEXT:    mov v0.d[1], x9
+; CHECK-i64-NEXT:    fmov d3, x9
+; CHECK-i64-NEXT:    fmov d1, x11
+; CHECK-i64-NEXT:    mov v0.d[1], x12
 ; CHECK-i64-NEXT:    mov v2.d[1], x14
 ; CHECK-i64-NEXT:    mov v1.d[1], x15
 ; CHECK-i64-NEXT:    mov v3.d[1], x13
@@ -629,60 +621,58 @@ define <16 x iXLen> @lrint_v16f32(<16 x float> %x) nounwind {
 ; CHECK-i32:       // %bb.0:
 ; CHECK-i32-NEXT:    ptrue p0.d, vl2
 ; CHECK-i32-NEXT:    // kill: def $q2 killed $q2 def $z2
-; CHECK-i32-NEXT:    // kill: def $q3 killed $q3 def $z3
 ; CHECK-i32-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-i32-NEXT:    // kill: def $q3 killed $q3 def $z3
 ; CHECK-i32-NEXT:    // kill: def $q1 killed $q1 def $z1
 ; CHECK-i32-NEXT:    splice z2.d, p0, z2.d, z3.d
 ; CHECK-i32-NEXT:    splice z0.d, p0, z0.d, z1.d
 ; CHECK-i32-NEXT:    ptrue p0.s, vl8
-; CHECK-i32-NEXT:    frintx z2.s, p0/m, z2.s
-; CHECK-i32-NEXT:    frintx z0.s, p0/m, z0.s
-; CHECK-i32-NEXT:    mov z1.s, z2.s[5]
-; CHECK-i32-NEXT:    mov z3.s, z2.s[4]
-; CHECK-i32-NEXT:    mov z5.s, z0.s[5]
-; CHECK-i32-NEXT:    mov z7.s, z0.s[1]
-; CHECK-i32-NEXT:    fcvtzs w11, s0
-; CHECK-i32-NEXT:    fcvtzs w13, s2
-; CHECK-i32-NEXT:    mov z4.s, z2.s[7]
-; CHECK-i32-NEXT:    mov z6.s, z2.s[6]
-; CHECK-i32-NEXT:    mov z16.s, z0.s[7]
-; CHECK-i32-NEXT:    fcvtzs w8, s1
-; CHECK-i32-NEXT:    mov z1.s, z0.s[4]
-; CHECK-i32-NEXT:    fcvtzs w9, s3
-; CHECK-i32-NEXT:    mov z3.s, z2.s[1]
-; CHECK-i32-NEXT:    fcvtzs w10, s5
-; CHECK-i32-NEXT:    fcvtzs w12, s7
-; CHECK-i32-NEXT:    mov z5.s, z0.s[6]
-; CHECK-i32-NEXT:    mov z7.s, z2.s[2]
-; CHECK-i32-NEXT:    mov z17.s, z2.s[3]
-; CHECK-i32-NEXT:    fcvtzs w14, s1
-; CHECK-i32-NEXT:    mov z1.s, z0.s[2]
-; CHECK-i32-NEXT:    mov z18.s, z0.s[3]
-; CHECK-i32-NEXT:    fcvtzs w15, s3
-; CHECK-i32-NEXT:    fmov s0, w11
-; CHECK-i32-NEXT:    fmov s2, w13
-; CHECK-i32-NEXT:    fmov s3, w9
-; CHECK-i32-NEXT:    fcvtzs w16, s6
-; CHECK-i32-NEXT:    fcvtzs w17, s5
-; CHECK-i32-NEXT:    fcvtzs w18, s1
+; CHECK-i32-NEXT:    movprfx z4, z2
+; CHECK-i32-NEXT:    frintx z4.s, p0/m, z2.s
+; CHECK-i32-NEXT:    movprfx z5, z0
+; CHECK-i32-NEXT:    frintx z5.s, p0/m, z0.s
+; CHECK-i32-NEXT:    mov z0.s, z4.s[5]
+; CHECK-i32-NEXT:    mov z1.s, z5.s[5]
+; CHECK-i32-NEXT:    mov z3.s, z4.s[4]
+; CHECK-i32-NEXT:    mov z2.s, z4.s[1]
+; CHECK-i32-NEXT:    mov z7.s, z5.s[1]
+; CHECK-i32-NEXT:    mov z17.s, z5.s[4]
+; CHECK-i32-NEXT:    mov z6.s, z4.s[6]
+; CHECK-i32-NEXT:    mov z16.s, z5.s[6]
+; CHECK-i32-NEXT:    mov z18.s, z4.s[2]
+; CHECK-i32-NEXT:    fcvtzs w8, s0
+; CHECK-i32-NEXT:    fcvtzs w9, s1
+; CHECK-i32-NEXT:    fcvtzs s0, s5
+; CHECK-i32-NEXT:    fcvtzs w10, s2
 ; CHECK-i32-NEXT:    fcvtzs w11, s7
-; CHECK-i32-NEXT:    fcvtzs w9, s16
-; CHECK-i32-NEXT:    fmov s1, w14
-; CHECK-i32-NEXT:    mov v0.s[1], w12
-; CHECK-i32-NEXT:    fcvtzs w12, s18
-; CHECK-i32-NEXT:    mov v2.s[1], w15
+; CHECK-i32-NEXT:    fcvtzs s2, s4
+; CHECK-i32-NEXT:    fcvtzs s3, s3
+; CHECK-i32-NEXT:    fcvtzs s1, s17
+; CHECK-i32-NEXT:    mov z19.s, z5.s[2]
+; CHECK-i32-NEXT:    fcvtzs w12, s6
+; CHECK-i32-NEXT:    fcvtzs w13, s16
+; CHECK-i32-NEXT:    fcvtzs w14, s18
+; CHECK-i32-NEXT:    mov z6.s, z4.s[7]
+; CHECK-i32-NEXT:    mov z7.s, z5.s[7]
+; CHECK-i32-NEXT:    mov z4.s, z4.s[3]
+; CHECK-i32-NEXT:    fcvtzs w15, s19
+; CHECK-i32-NEXT:    mov v0.s[1], w11
+; CHECK-i32-NEXT:    mov v2.s[1], w10
+; CHECK-i32-NEXT:    mov v1.s[1], w9
 ; CHECK-i32-NEXT:    mov v3.s[1], w8
-; CHECK-i32-NEXT:    fcvtzs w8, s4
-; CHECK-i32-NEXT:    mov v1.s[1], w10
-; CHECK-i32-NEXT:    fcvtzs w10, s17
-; CHECK-i32-NEXT:    mov v0.s[2], w18
-; CHECK-i32-NEXT:    mov v2.s[2], w11
-; CHECK-i32-NEXT:    mov v3.s[2], w16
-; CHECK-i32-NEXT:    mov v1.s[2], w17
-; CHECK-i32-NEXT:    mov v0.s[3], w12
+; CHECK-i32-NEXT:    mov z5.s, z5.s[3]
+; CHECK-i32-NEXT:    fcvtzs w8, s6
+; CHECK-i32-NEXT:    fcvtzs w9, s7
+; CHECK-i32-NEXT:    fcvtzs w10, s4
+; CHECK-i32-NEXT:    fcvtzs w11, s5
+; CHECK-i32-NEXT:    mov v0.s[2], w15
+; CHECK-i32-NEXT:    mov v2.s[2], w14
+; CHECK-i32-NEXT:    mov v1.s[2], w13
+; CHECK-i32-NEXT:    mov v3.s[2], w12
+; CHECK-i32-NEXT:    mov v0.s[3], w11
 ; CHECK-i32-NEXT:    mov v2.s[3], w10
-; CHECK-i32-NEXT:    mov v3.s[3], w8
 ; CHECK-i32-NEXT:    mov v1.s[3], w9
+; CHECK-i32-NEXT:    mov v3.s[3], w8
 ; CHECK-i32-NEXT:    ret
 ;
 ; CHECK-i64-LABEL: lrint_v16f32:
@@ -693,48 +683,48 @@ define <16 x iXLen> @lrint_v16f32(<16 x float> %x) nounwind {
 ; CHECK-i64-NEXT:    frintx v0.4s, v0.4s
 ; CHECK-i64-NEXT:    mov s4, v3.s[2]
 ; CHECK-i64-NEXT:    mov s5, v2.s[2]
-; CHECK-i64-NEXT:    mov s6, v1.s[2]
-; CHECK-i64-NEXT:    mov s7, v0.s[2]
-; CHECK-i64-NEXT:    fcvtzs x10, s1
-; CHECK-i64-NEXT:    fcvtzs x11, s0
-; CHECK-i64-NEXT:    mov s16, v0.s[1]
-; CHECK-i64-NEXT:    mov s17, v1.s[1]
-; CHECK-i64-NEXT:    mov s18, v3.s[1]
-; CHECK-i64-NEXT:    fcvtzs x14, s3
-; CHECK-i64-NEXT:    fcvtzs x16, s2
-; CHECK-i64-NEXT:    fcvtzs x8, s4
-; CHECK-i64-NEXT:    mov s4, v2.s[1]
-; CHECK-i64-NEXT:    fcvtzs x9, s5
-; CHECK-i64-NEXT:    mov s5, v1.s[3]
-; CHECK-i64-NEXT:    fcvtzs x12, s6
-; CHECK-i64-NEXT:    mov s6, v0.s[3]
-; CHECK-i64-NEXT:    fcvtzs x13, s7
-; CHECK-i64-NEXT:    mov s7, v3.s[3]
-; CHECK-i64-NEXT:    fmov d0, x11
-; CHECK-i64-NEXT:    fcvtzs x17, s16
-; CHECK-i64-NEXT:    fcvtzs x18, s18
-; CHECK-i64-NEXT:    fcvtzs x15, s4
-; CHECK-i64-NEXT:    mov s4, v2.s[3]
-; CHECK-i64-NEXT:    fmov d2, x10
+; CHECK-i64-NEXT:    mov s6, v2.s[1]
+; CHECK-i64-NEXT:    mov s7, v1.s[2]
+; CHECK-i64-NEXT:    fcvtzs x8, s3
+; CHECK-i64-NEXT:    mov s16, v0.s[2]
+; CHECK-i64-NEXT:    fcvtzs x9, s2
+; CHECK-i64-NEXT:    mov s17, v1.s[3]
+; CHECK-i64-NEXT:    mov s18, v0.s[1]
+; CHECK-i64-NEXT:    mov s19, v3.s[3]
+; CHECK-i64-NEXT:    fcvtzs x14, s1
+; CHECK-i64-NEXT:    mov s1, v1.s[1]
+; CHECK-i64-NEXT:    fcvtzs x10, s4
 ; CHECK-i64-NEXT:    fcvtzs x11, s5
-; CHECK-i64-NEXT:    fcvtzs x10, s6
-; CHECK-i64-NEXT:    fmov d3, x12
-; CHECK-i64-NEXT:    fmov d1, x13
-; CHECK-i64-NEXT:    fcvtzs x12, s17
+; CHECK-i64-NEXT:    mov s5, v0.s[3]
+; CHECK-i64-NEXT:    mov s3, v3.s[1]
+; CHECK-i64-NEXT:    mov s2, v2.s[3]
+; CHECK-i64-NEXT:    fcvtzs x12, s6
 ; CHECK-i64-NEXT:    fcvtzs x13, s7
-; CHECK-i64-NEXT:    fmov d5, x9
-; CHECK-i64-NEXT:    fmov d6, x14
-; CHECK-i64-NEXT:    fmov d7, x8
-; CHECK-i64-NEXT:    fcvtzs x0, s4
-; CHECK-i64-NEXT:    fmov d4, x16
+; CHECK-i64-NEXT:    fcvtzs x15, s16
+; CHECK-i64-NEXT:    fmov d6, x8
+; CHECK-i64-NEXT:    fcvtzs x8, s0
+; CHECK-i64-NEXT:    fmov d4, x9
+; CHECK-i64-NEXT:    fcvtzs x9, s17
+; CHECK-i64-NEXT:    fcvtzs x16, s5
+; CHECK-i64-NEXT:    fcvtzs x17, s18
+; CHECK-i64-NEXT:    fmov d7, x10
+; CHECK-i64-NEXT:    fmov d5, x11
+; CHECK-i64-NEXT:    fcvtzs x10, s1
+; CHECK-i64-NEXT:    fcvtzs x11, s19
+; CHECK-i64-NEXT:    fcvtzs x18, s3
+; CHECK-i64-NEXT:    fcvtzs x0, s2
+; CHECK-i64-NEXT:    fmov d3, x13
+; CHECK-i64-NEXT:    fmov d1, x15
+; CHECK-i64-NEXT:    fmov d0, x8
+; CHECK-i64-NEXT:    fmov d2, x14
+; CHECK-i64-NEXT:    mov v4.d[1], x12
+; CHECK-i64-NEXT:    mov v3.d[1], x9
+; CHECK-i64-NEXT:    mov v7.d[1], x11
 ; CHECK-i64-NEXT:    mov v0.d[1], x17
-; CHECK-i64-NEXT:    mov v1.d[1], x10
-; CHECK-i64-NEXT:    mov v3.d[1], x11
-; CHECK-i64-NEXT:    mov v2.d[1], x12
-; CHECK-i64-NEXT:    mov v6.d[1], x18
-; CHECK-i64-NEXT:    mov v7.d[1], x13
-; CHECK-i64-NEXT:    mov v4.d[1], x15
+; CHECK-i64-NEXT:    mov v1.d[1], x16
+; CHECK-i64-NEXT:    mov v2.d[1], x10
 ; CHECK-i64-NEXT:    mov v5.d[1], x0
+; CHECK-i64-NEXT:    mov v6.d[1], x18
 ; CHECK-i64-NEXT:    ret
   %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f32(<16 x float> %x)
   ret <16 x iXLen> %a
@@ -744,126 +734,114 @@ declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f32(<16 x float>)
 define <32 x iXLen> @lrint_v32f32(<32 x float> %x) nounwind {
 ; CHECK-i32-LABEL: lrint_v32f32:
 ; CHECK-i32:       // %bb.0:
-; CHECK-i32-NEXT:    str x27, [sp, #-80]! // 8-byte Folded Spill
+; CHECK-i32-NEXT:    str x19, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-i32-NEXT:    ptrue p1.d, vl2
 ; CHECK-i32-NEXT:    // kill: def $q6 killed $q6 def $z6
 ; CHECK-i32-NEXT:    // kill: def $q7 killed $q7 def $z7
-; CHECK-i32-NEXT:    // kill: def $q2 killed $q2 def $z2
-; CHECK-i32-NEXT:    // kill: def $q3 killed $q3 def $z3
 ; CHECK-i32-NEXT:    // kill: def $q4 killed $q4 def $z4
+; CHECK-i32-NEXT:    // kill: def $q2 killed $q2 def $z2
+; CHECK-i32-NEXT:    // kill: def $q0 killed $q0 def $z0
 ; CHECK-i32-NEXT:    // kill: def $q5 killed $q5 def $z5
+; CHECK-i32-NEXT:    // kill: def $q3 killed $q3 def $z3
 ; CHECK-i32-NEXT:    // kill: def $q1 killed $q1 def $z1
-; CHECK-i32-NEXT:    // kill: def $q0 killed $q0 def $z0
-; CHECK-i32-NEXT:    stp x24, x23, [sp, #32] // 16-byte Folded Spill
 ; CHECK-i32-NEXT:    ptrue p0.s, vl8
-; CHECK-i32-NEXT:    stp x22, x21, [sp, #48] // 16-byte Folded Spill
 ; CHECK-i32-NEXT:    splice z6.d, p1, z6.d, z7.d
-; CHECK-i32-NEXT:    splice z2.d, p1, z2.d, z3.d
 ; CHECK-i32-NEXT:    splice z4.d, p1, z4.d, z5.d
+; CHECK-i32-NEXT:    splice z2.d, p1, z2.d, z3.d
 ; CHECK-i32-NEXT:    splice z0.d, p1, z0.d, z1.d
-; CHECK-i32-NEXT:    stp x26, x25, [sp, #16] // 16-byte Folded Spill
-; CHECK-i32-NEXT:    stp x20, x19, [sp, #64] // 16-byte Folded Spill
-; CHECK-i32-NEXT:    frintx z6.s, p0/m, z6.s
-; CHECK-i32-NEXT:    frintx z2.s, p0/m, z2.s
-; CHECK-i32-NEXT:    frintx z4.s, p0/m, z4.s
-; CHECK-i32-NEXT:    frintx z0.s, p0/m, z0.s
-; CHECK-i32-NEXT:    mov z1.s, z6.s[7]
-; CHECK-i32-NEXT:    mov z3.s, z6.s[6]
-; CHECK-i32-NEXT:    mov z5.s, z6.s[5]
-; CHECK-i32-NEXT:    mov z16.s, z4.s[7]
-; CHECK-i32-NEXT:    mov z7.s, z6.s[4]
-; CHECK-i32-NEXT:    mov z17.s, z4.s[6]
-; CHECK-i32-NEXT:    mov z18.s, z4.s[5]
-; CHECK-i32-NEXT:    mov z19.s, z4.s[4]
-; CHECK-i32-NEXT:    fcvtzs w7, s6
-; CHECK-i32-NEXT:    fcvtzs w8, s1
-; CHECK-i32-NEXT:    mov z1.s, z2.s[7]
-; CHECK-i32-NEXT:    fcvtzs w10, s3
-; CHECK-i32-NEXT:    mov z3.s, z2.s[6]
-; CHECK-i32-NEXT:    fcvtzs w13, s5
-; CHECK-i32-NEXT:    fcvtzs w9, s16
-; CHECK-i32-NEXT:    mov z5.s, z2.s[4]
-; CHECK-i32-NEXT:    mov z16.s, z0.s[6]
-; CHECK-i32-NEXT:    fcvtzs w14, s7
-; CHECK-i32-NEXT:    fcvtzs w11, s1
-; CHECK-i32-NEXT:    mov z1.s, z2.s[5]
-; CHECK-i32-NEXT:    mov z7.s, z0.s[7]
-; CHECK-i32-NEXT:    fcvtzs w16, s3
-; CHECK-i32-NEXT:    mov z3.s, z0.s[4]
-; CHECK-i32-NEXT:    fcvtzs w12, s17
-; CHECK-i32-NEXT:    fcvtzs w15, s18
-; CHECK-i32-NEXT:    fcvtzs w17, s19
-; CHECK-i32-NEXT:    mov z17.s, z0.s[5]
-; CHECK-i32-NEXT:    fcvtzs w3, s1
-; CHECK-i32-NEXT:    mov z1.s, z6.s[1]
-; CHECK-i32-NEXT:    mov z18.s, z6.s[2]
-; CHECK-i32-NEXT:    fcvtzs w4, s5
-; CHECK-i32-NEXT:    fcvtzs w0, s16
-; CHECK-i32-NEXT:    fcvtzs w6, s3
-; CHECK-i32-NEXT:    mov z16.s, z6.s[3]
-; CHECK-i32-NEXT:    mov z5.s, z4.s[1]
-; CHECK-i32-NEXT:    mov z6.s, z2.s[1]
-; CHECK-i32-NEXT:    fcvtzs w2, s1
-; CHECK-i32-NEXT:    mov z1.s, z0.s[1]
-; CHECK-i32-NEXT:    fcvtzs w21, s4
-; CHECK-i32-NEXT:    fcvtzs w22, s0
-; CHECK-i32-NEXT:    fcvtzs w23, s2
-; CHECK-i32-NEXT:    fcvtzs w18, s7
-; CHECK-i32-NEXT:    mov z3.s, z4.s[2]
-; CHECK-i32-NEXT:    mov z7.s, z2.s[2]
-; CHECK-i32-NEXT:    fcvtzs w5, s17
-; CHECK-i32-NEXT:    fcvtzs w24, s1
-; CHECK-i32-NEXT:    fcvtzs w25, s5
-; CHECK-i32-NEXT:    fcvtzs w26, s6
-; CHECK-i32-NEXT:    fcvtzs w1, s18
-; CHECK-i32-NEXT:    mov z18.s, z0.s[2]
-; CHECK-i32-NEXT:    mov z17.s, z4.s[3]
-; CHECK-i32-NEXT:    fcvtzs w19, s3
-; CHECK-i32-NEXT:    mov z19.s, z2.s[3]
-; CHECK-i32-NEXT:    fcvtzs w20, s7
-; CHECK-i32-NEXT:    mov z20.s, z0.s[3]
-; CHECK-i32-NEXT:    fmov s0, w22
-; CHECK-i32-NEXT:    fmov s2, w23
-; CHECK-i32-NEXT:    fmov s4, w21
-; CHECK-i32-NEXT:    ldp x22, x21, [sp, #48] // 16-byte Folded Reload
-; CHECK-i32-NEXT:    fmov s1, w6
-; CHECK-i32-NEXT:    fmov s6, w7
-; CHECK-i32-NEXT:    fmov s3, w4
-; CHECK-i32-NEXT:    fmov s5, w17
-; CHECK-i32-NEXT:    fmov s7, w14
-; CHECK-i32-NEXT:    fcvtzs w27, s18
-; CHECK-i32-NEXT:    mov v0.s[1], w24
-; CHECK-i32-NEXT:    ldp x24, x23, [sp, #32] // 16-byte Folded Reload
-; CHECK-i32-NEXT:    mov v2.s[1], w26
-; CHECK-i32-NEXT:    mov v4.s[1], w25
-; CHECK-i32-NEXT:    mov v1.s[1], w5
-; CHECK-i32-NEXT:    ldp x26, x25, [sp, #16] // 16-byte Folded Reload
-; CHECK-i32-NEXT:    mov v3.s[1], w3
-; CHECK-i32-NEXT:    mov v6.s[1], w2
-; CHECK-i32-NEXT:    mov v5.s[1], w15
+; CHECK-i32-NEXT:    movprfx z16, z6
+; CHECK-i32-NEXT:    frintx z16.s, p0/m, z6.s
+; CHECK-i32-NEXT:    movprfx z17, z4
+; CHECK-i32-NEXT:    frintx z17.s, p0/m, z4.s
+; CHECK-i32-NEXT:    movprfx z18, z2
+; CHECK-i32-NEXT:    frintx z18.s, p0/m, z2.s
+; CHECK-i32-NEXT:    movprfx z19, z0
+; CHECK-i32-NEXT:    frintx z19.s, p0/m, z0.s
+; CHECK-i32-NEXT:    mov z0.s, z16.s[7]
+; CHECK-i32-NEXT:    mov z2.s, z16.s[5]
+; CHECK-i32-NEXT:    mov z3.s, z16.s[4]
+; CHECK-i32-NEXT:    mov z1.s, z16.s[6]
+; CHECK-i32-NEXT:    mov z4.s, z17.s[7]
+; CHECK-i32-NEXT:    mov z6.s, z17.s[6]
+; CHECK-i32-NEXT:    mov z20.s, z17.s[5]
+; CHECK-i32-NEXT:    mov z5.s, z17.s[4]
+; CHECK-i32-NEXT:    mov z21.s, z19.s[1]
+; CHECK-i32-NEXT:    fcvtzs w8, s0
+; CHECK-i32-NEXT:    mov z0.s, z18.s[7]
+; CHECK-i32-NEXT:    fcvtzs w13, s2
+; CHECK-i32-NEXT:    mov z2.s, z18.s[5]
+; CHECK-i32-NEXT:    fcvtzs s7, s3
+; CHECK-i32-NEXT:    mov z3.s, z19.s[7]
+; CHECK-i32-NEXT:    fcvtzs w10, s1
+; CHECK-i32-NEXT:    mov z1.s, z18.s[6]
+; CHECK-i32-NEXT:    fcvtzs w9, s4
+; CHECK-i32-NEXT:    fcvtzs w12, s6
+; CHECK-i32-NEXT:    fcvtzs w11, s0
+; CHECK-i32-NEXT:    mov z0.s, z19.s[6]
+; CHECK-i32-NEXT:    mov z4.s, z19.s[5]
+; CHECK-i32-NEXT:    fcvtzs w18, s2
+; CHECK-i32-NEXT:    mov z2.s, z18.s[4]
+; CHECK-i32-NEXT:    fcvtzs w15, s3
+; CHECK-i32-NEXT:    mov z3.s, z16.s[1]
+; CHECK-i32-NEXT:    mov z6.s, z17.s[1]
+; CHECK-i32-NEXT:    fcvtzs w14, s1
+; CHECK-i32-NEXT:    mov z1.s, z16.s[2]
+; CHECK-i32-NEXT:    fcvtzs w17, s0
+; CHECK-i32-NEXT:    fcvtzs w1, s4
+; CHECK-i32-NEXT:    mov z0.s, z17.s[2]
+; CHECK-i32-NEXT:    mov z4.s, z19.s[4]
+; CHECK-i32-NEXT:    fcvtzs w2, s3
+; CHECK-i32-NEXT:    fcvtzs s3, s2
+; CHECK-i32-NEXT:    mov z2.s, z18.s[1]
+; CHECK-i32-NEXT:    fcvtzs w6, s6
+; CHECK-i32-NEXT:    mov z6.s, z19.s[2]
+; CHECK-i32-NEXT:    fcvtzs w16, s20
+; CHECK-i32-NEXT:    fcvtzs w0, s1
+; CHECK-i32-NEXT:    fcvtzs w3, s0
+; CHECK-i32-NEXT:    fcvtzs s1, s4
+; CHECK-i32-NEXT:    fcvtzs w7, s21
+; CHECK-i32-NEXT:    fcvtzs s0, s19
+; CHECK-i32-NEXT:    fcvtzs s4, s17
+; CHECK-i32-NEXT:    fcvtzs w19, s2
+; CHECK-i32-NEXT:    fcvtzs s2, s18
+; CHECK-i32-NEXT:    fcvtzs s5, s5
+; CHECK-i32-NEXT:    fcvtzs w5, s6
+; CHECK-i32-NEXT:    fcvtzs s6, s16
+; CHECK-i32-NEXT:    mov z20.s, z18.s[2]
+; CHECK-i32-NEXT:    mov v1.s[1], w1
+; CHECK-i32-NEXT:    mov v3.s[1], w18
 ; CHECK-i32-NEXT:    mov v7.s[1], w13
+; CHECK-i32-NEXT:    mov v0.s[1], w7
+; CHECK-i32-NEXT:    mov v4.s[1], w6
+; CHECK-i32-NEXT:    mov z16.s, z16.s[3]
+; CHECK-i32-NEXT:    fcvtzs w4, s20
+; CHECK-i32-NEXT:    mov v2.s[1], w19
+; CHECK-i32-NEXT:    mov v5.s[1], w16
+; CHECK-i32-NEXT:    mov v6.s[1], w2
+; CHECK-i32-NEXT:    mov z17.s, z17.s[3]
+; CHECK-i32-NEXT:    mov z18.s, z18.s[3]
+; CHECK-i32-NEXT:    mov z19.s, z19.s[3]
 ; CHECK-i32-NEXT:    fcvtzs w13, s16
-; CHECK-i32-NEXT:    fcvtzs w14, s17
-; CHECK-i32-NEXT:    fcvtzs w15, s19
-; CHECK-i32-NEXT:    fcvtzs w17, s20
-; CHECK-i32-NEXT:    mov v0.s[2], w27
-; CHECK-i32-NEXT:    mov v1.s[2], w0
-; CHECK-i32-NEXT:    mov v2.s[2], w20
-; CHECK-i32-NEXT:    mov v4.s[2], w19
-; CHECK-i32-NEXT:    mov v3.s[2], w16
-; CHECK-i32-NEXT:    ldp x20, x19, [sp, #64] // 16-byte Folded Reload
-; CHECK-i32-NEXT:    mov v6.s[2], w1
+; CHECK-i32-NEXT:    mov v1.s[2], w17
+; CHECK-i32-NEXT:    mov v0.s[2], w5
+; CHECK-i32-NEXT:    mov v4.s[2], w3
+; CHECK-i32-NEXT:    mov v3.s[2], w14
+; CHECK-i32-NEXT:    fcvtzs w16, s17
+; CHECK-i32-NEXT:    fcvtzs w18, s18
+; CHECK-i32-NEXT:    mov v2.s[2], w4
+; CHECK-i32-NEXT:    fcvtzs w1, s19
+; CHECK-i32-NEXT:    mov v6.s[2], w0
 ; CHECK-i32-NEXT:    mov v5.s[2], w12
 ; CHECK-i32-NEXT:    mov v7.s[2], w10
-; CHECK-i32-NEXT:    mov v0.s[3], w17
-; CHECK-i32-NEXT:    mov v1.s[3], w18
-; CHECK-i32-NEXT:    mov v2.s[3], w15
-; CHECK-i32-NEXT:    mov v4.s[3], w14
+; CHECK-i32-NEXT:    mov v1.s[3], w15
 ; CHECK-i32-NEXT:    mov v3.s[3], w11
+; CHECK-i32-NEXT:    mov v2.s[3], w18
+; CHECK-i32-NEXT:    mov v4.s[3], w16
+; CHECK-i32-NEXT:    mov v0.s[3], w1
 ; CHECK-i32-NEXT:    mov v6.s[3], w13
 ; CHECK-i32-NEXT:    mov v5.s[3], w9
 ; CHECK-i32-NEXT:    mov v7.s[3], w8
-; CHECK-i32-NEXT:    ldr x27, [sp], #80 // 8-byte Folded Reload
+; CHECK-i32-NEXT:    ldr x19, [sp], #16 // 8-byte Folded Reload
 ; CHECK-i32-NEXT:    ret
 ;
 ; CHECK-i64-LABEL: lrint_v32f32:
@@ -1003,8 +981,7 @@ define <1 x iXLen> @lrint_v1f64(<1 x double> %x) nounwind {
 ; CHECK-i64-LABEL: lrint_v1f64:
 ; CHECK-i64:       // %bb.0:
 ; CHECK-i64-NEXT:    frintx d0, d0
-; CHECK-i64-NEXT:    fcvtzs x8, d0
-; CHECK-i64-NEXT:    fmov d0, x8
+; CHECK-i64-NEXT:    fcvtzs d0, d0
 ; CHECK-i64-NEXT:    ret
   %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f64(<1 x double> %x)
   ret <1 x iXLen> %a
@@ -1064,17 +1041,15 @@ define <4 x iXLen> @lrint_v4f64(<4 x double> %x) nounwind {
 ; CHECK-i64-NEXT:    splice z0.d, p0, z0.d, z1.d
 ; CHECK-i64-NEXT:    ptrue p0.d, vl4
 ; CHECK-i64-NEXT:    frintx z0.d, p0/m, z0.d
-; CHECK-i64-NEXT:    mov z1.d, z0.d[2]
-; CHECK-i64-NEXT:    mov z2.d, z0.d[3]
+; CHECK-i64-NEXT:    mov z1.d, z0.d[3]
+; CHECK-i64-NEXT:    mov z2.d, z0.d[2]
 ; CHECK-i64-NEXT:    mov z3.d, z0.d[1]
-; CHECK-i64-NEXT:    fcvtzs x9, d0
+; CHECK-i64-NEXT:    fcvtzs d0, d0
 ; CHECK-i64-NEXT:    fcvtzs x8, d1
-; CHECK-i64-NEXT:    fcvtzs x10, d2
-; CHECK-i64-NEXT:    fcvtzs x11, d3
-; CHECK-i64-NEXT:    fmov d0, x9
-; CHECK-i64-NEXT:    fmov d1, x8
-; CHECK-i64-NEXT:    mov v0.d[1], x11
-; CHECK-i64-NEXT:    mov v1.d[1], x10
+; CHECK-i64-NEXT:    fcvtzs d1, d2
+; CHECK-i64-NEXT:    fcvtzs x9, d3
+; CHECK-i64-NEXT:    mov v0.d[1], x9
+; CHECK-i64-NEXT:    mov v1.d[1], x8
 ; CHECK-i64-NEXT:    ret
   %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f64(<4 x double> %x)
   ret <4 x iXLen> %a
@@ -1129,30 +1104,26 @@ define <8 x iXLen> @lrint_v8f64(<8 x double> %x) nounwind {
 ; CHECK-i64-NEXT:    splice z0.d, p0, z0.d, z1.d
 ; CHECK-i64-NEXT:    splice z2.d, p0, z2.d, z3.d
 ; CHECK-i64-NEXT:    ptrue p0.d, vl4
-; CHECK-i64-NEXT:    frintx z0.d, p0/m, z0.d
 ; CHECK-i64-NEXT:    frintx z2.d, p0/m, z2.d
-; CHECK-i64-NEXT:    mov z4.d, z2.d[2]
-; CHECK-i64-NEXT:    mov z5.d, z0.d[2]
-; CHECK-i64-NEXT:    mov z1.d, z0.d[1]
-; CHECK-i64-NEXT:    mov z3.d, z2.d[3]
-; CHECK-i64-NEXT:    mov z6.d, z0.d[3]
-; CHECK-i64-NEXT:    fcvtzs x8, d0
-; CHECK-i64-NEXT:    mov z0.d, z2.d[1]
-; CHECK-i64-NEXT:    fcvtzs x10, d2
-; CHECK-i64-NEXT:    fcvtzs x11, d4
-; CHECK-i64-NEXT:    fcvtzs x12, d5
-; CHECK-i64-NEXT:    fcvtzs x9, d1
-; CHECK-i64-NEXT:    fcvtzs x13, d3
-; CHECK-i64-NEXT:    fcvtzs x14, d6
-; CHECK-i64-NEXT:    fcvtzs x15, d0
-; CHECK-i64-NEXT:    fmov d0, x8
-; CHECK-i64-NEXT:    fmov d2, x10
-; CHECK-i64-NEXT:    fmov d1, x12
-; CHECK-i64-NEXT:    fmov d3, x11
-; CHECK-i64-NEXT:    mov v0.d[1], x9
-; CHECK-i64-NEXT:    mov v2.d[1], x15
-; CHECK-i64-NEXT:    mov v1.d[1], x14
-; CHECK-i64-NEXT:    mov v3.d[1], x13
+; CHECK-i64-NEXT:    frintx z0.d, p0/m, z0.d
+; CHECK-i64-NEXT:    mov z1.d, z2.d[3]
+; CHECK-i64-NEXT:    mov z3.d, z0.d[3]
+; CHECK-i64-NEXT:    mov z4.d, z0.d[1]
+; CHECK-i64-NEXT:    mov z5.d, z2.d[2]
+; CHECK-i64-NEXT:    mov z6.d, z0.d[2]
+; CHECK-i64-NEXT:    mov z7.d, z2.d[1]
+; CHECK-i64-NEXT:    fcvtzs d2, d2
+; CHECK-i64-NEXT:    fcvtzs d0, d0
+; CHECK-i64-NEXT:    fcvtzs x8, d1
+; CHECK-i64-NEXT:    fcvtzs x9, d3
+; CHECK-i64-NEXT:    fcvtzs x10, d4
+; CHECK-i64-NEXT:    fcvtzs d3, d5
+; CHECK-i64-NEXT:    fcvtzs d1, d6
+; CHECK-i64-NEXT:    fcvtzs x11, d7
+; CHECK-i64-NEXT:    mov v0.d[1], x10
+; CHECK-i64-NEXT:    mov v2.d[1], x11
+; CHECK-i64-NEXT:    mov v1.d[1], x9
+; CHECK-i64-NEXT:    mov v3.d[1], x8
 ; CHECK-i64-NEXT:    ret
   %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f64(<8 x double> %x)
   ret <8 x iXLen> %a
@@ -1230,68 +1201,60 @@ define <16 x iXLen> @lrint_v16f64(<16 x double> %x) nounwind {
 ;
 ; CHECK-i64-LABEL: lrint_v16f64:
 ; CHECK-i64:       // %bb.0:
-; CHECK-i64-NEXT:    ptrue p1.d, vl2
+; CHECK-i64-NEXT:    ptrue p0.d, vl2
+; CHECK-i64-NEXT:    // kill: def $q6 killed $q6 def $z6
 ; CHECK-i64-NEXT:    // kill: def $q4 killed $q4 def $z4
 ; CHECK-i64-NEXT:    // kill: def $q2 killed $q2 def $z2
+; CHECK-i64-NEXT:    // kill: def $q7 killed $q7 def $z7
 ; CHECK-i64-NEXT:    // kill: def $q5 killed $q5 def $z5
 ; CHECK-i64-NEXT:    // kill: def $q3 killed $q3 def $z3
-; CHECK-i64-NEXT:    // kill: def $q6 killed $q6 def $z6
 ; CHECK-i64-NEXT:    // kill: def $q0 killed $q0 def $z0
-; CHECK-i64-NEXT:    // kill: def $q7 killed $q7 def $z7
 ; CHECK-i64-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-i64-NEXT:    splice z6.d, p0, z6.d, z7.d
+; CHECK-i64-NEXT:    splice z2.d, p0, z2.d, z3.d
+; CHECK-i64-NEXT:    splice z4.d, p0, z4.d, z5.d
+; CHECK-i64-NEXT:    splice z0.d, p0, z0.d, z1.d
 ; CHECK-i64-NEXT:    ptrue p0.d, vl4
-; CHECK-i64-NEXT:    splice z4.d, p1, z4.d, z5.d
-; CHECK-i64-NEXT:    splice z2.d, p1, z2.d, z3.d
-; CHECK-i64-NEXT:    splice z6.d, p1, z6.d, z7.d
-; CHECK-i64-NEXT:    splice z0.d, p1, z0.d, z1.d
+; CHECK-i64-NEXT:    frintx z6.d, p0/m, z6.d
 ; CHECK-i64-NEXT:    frintx z4.d, p0/m, z4.d
 ; CHECK-i64-NEXT:    frintx z2.d, p0/m, z2.d
-; CHECK-i64-NEXT:    frintx z6.d, p0/m, z6.d
 ; CHECK-i64-NEXT:    frintx z0.d, p0/m, z0.d
-; CHECK-i64-NEXT:    mov z3.d, z4.d[2]
+; CHECK-i64-NEXT:    mov z1.d, z6.d[3]
+; CHECK-i64-NEXT:    mov z3.d, z4.d[3]
 ; CHECK-i64-NEXT:    mov z5.d, z2.d[3]
-; CHECK-i64-NEXT:    mov z1.d, z6.d[2]
-; CHECK-i64-NEXT:    fcvtzs x11, d0
-; CHECK-i64-NEXT:    fcvtzs x12, d4
-; CHECK-i64-NEXT:    fcvtzs x13, d2
-; CHECK-i64-NEXT:    fcvtzs x14, d6
-; CHECK-i64-NEXT:    mov z7.d, z6.d[3]
-; CHECK-i64-NEXT:    mov z16.d, z0.d[3]
-; CHECK-i64-NEXT:    fcvtzs x10, d3
-; CHECK-i64-NEXT:    mov z3.d, z2.d[2]
-; CHECK-i64-NEXT:    fcvtzs x8, d5
-; CHECK-i64-NEXT:    mov z5.d, z0.d[2]
-; CHECK-i64-NEXT:    fcvtzs x9, d1
-; CHECK-i64-NEXT:    mov z1.d, z4.d[3]
-; CHECK-i64-NEXT:    mov z2.d, z2.d[1]
-; CHECK-i64-NEXT:    mov z17.d, z6.d[1]
-; CHECK-i64-NEXT:    fcvtzs x17, d7
-; CHECK-i64-NEXT:    fcvtzs x15, d3
+; CHECK-i64-NEXT:    mov z16.d, z4.d[1]
+; CHECK-i64-NEXT:    mov z7.d, z0.d[3]
+; CHECK-i64-NEXT:    mov z17.d, z0.d[2]
+; CHECK-i64-NEXT:    mov z18.d, z4.d[2]
+; CHECK-i64-NEXT:    mov z19.d, z6.d[1]
+; CHECK-i64-NEXT:    fcvtzs d4, d4
+; CHECK-i64-NEXT:    fcvtzs x8, d1
+; CHECK-i64-NEXT:    mov z1.d, z2.d[1]
+; CHECK-i64-NEXT:    fcvtzs x9, d3
 ; CHECK-i64-NEXT:    mov z3.d, z0.d[1]
-; CHECK-i64-NEXT:    fmov d0, x11
-; CHECK-i64-NEXT:    fcvtzs x16, d5
-; CHECK-i64-NEXT:    mov z5.d, z4.d[1]
-; CHECK-i64-NEXT:    fmov d4, x12
-; CHECK-i64-NEXT:    fcvtzs x11, d2
-; CHECK-i64-NEXT:    fmov d2, x13
+; CHECK-i64-NEXT:    fcvtzs x10, d5
+; CHECK-i64-NEXT:    mov z5.d, z6.d[2]
 ; CHECK-i64-NEXT:    fcvtzs x12, d16
-; CHECK-i64-NEXT:    fcvtzs x13, d3
-; CHECK-i64-NEXT:    fmov d6, x14
-; CHECK-i64-NEXT:    fcvtzs x18, d1
-; CHECK-i64-NEXT:    fcvtzs x14, d5
-; CHECK-i64-NEXT:    fcvtzs x0, d17
-; CHECK-i64-NEXT:    fmov d3, x15
-; CHECK-i64-NEXT:    fmov d1, x16
-; CHECK-i64-NEXT:    fmov d5, x10
-; CHECK-i64-NEXT:    fmov d7, x9
-; CHECK-i64-NEXT:    mov v2.d[1], x11
-; CHECK-i64-NEXT:    mov v0.d[1], x13
-; CHECK-i64-NEXT:    mov v3.d[1], x8
-; CHECK-i64-NEXT:    mov v4.d[1], x14
-; CHECK-i64-NEXT:    mov v1.d[1], x12
-; CHECK-i64-NEXT:    mov v6.d[1], x0
-; CHECK-i64-NEXT:    mov v5.d[1], x18
-; CHECK-i64-NEXT:    mov v7.d[1], x17
+; CHECK-i64-NEXT:    mov z16.d, z2.d[2]
+; CHECK-i64-NEXT:    fcvtzs x11, d7
+; CHECK-i64-NEXT:    fcvtzs x13, d1
+; CHECK-i64-NEXT:    fcvtzs d1, d17
+; CHECK-i64-NEXT:    fcvtzs d0, d0
+; CHECK-i64-NEXT:    fcvtzs x14, d3
+; CHECK-i64-NEXT:    fcvtzs d7, d5
+; CHECK-i64-NEXT:    fcvtzs d2, d2
+; CHECK-i64-NEXT:    fcvtzs d3, d16
+; CHECK-i64-NEXT:    fcvtzs d5, d18
+; CHECK-i64-NEXT:    fcvtzs x15, d19
+; CHECK-i64-NEXT:    fcvtzs d6, d6
+; CHECK-i64-NEXT:    mov v4.d[1], x12
+; CHECK-i64-NEXT:    mov v1.d[1], x11
+; CHECK-i64-NEXT:    mov v0.d[1], x14
+; CHECK-i64-NEXT:    mov v2.d[1], x13
+; CHECK-i64-NEXT:    mov v7.d[1], x8
+; CHECK-i64-NEXT:    mov v3.d[1], x10
+; CHECK-i64-NEXT:    mov v5.d[1], x9
+; CHECK-i64-NEXT:    mov v6.d[1], x15
 ; CHECK-i64-NEXT:    ret
   %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f64(<16 x double> %x)
   ret <16 x iXLen> %a
@@ -1716,13 +1679,13 @@ define <8 x iXLen> @lrint_v8fp128(<8 x fp128> %x) nounwind {
 ; CHECK-i32-NEXT:    sub sp, sp, #176
 ; CHECK-i32-NEXT:    str q0, [sp, #96] // 16-byte Spill
 ; CHECK-i32-NEXT:    mov v0.16b, v7.16b
-; CHECK-i32-NEXT:    stp x30, x25, [sp, #112] // 16-byte Folded Spill
+; CHECK-i32-NEXT:    str x30, [sp, #112] // 8-byte Spill
 ; CHECK-i32-NEXT:    stp x24, x23, [sp, #128] // 16-byte Folded Spill
 ; CHECK-i32-NEXT:    stp x22, x21, [sp, #144] // 16-byte Folded Spill
 ; CHECK-i32-NEXT:    stp x20, x19, [sp, #160] // 16-byte Folded Spill
 ; CHECK-i32-NEXT:    stp q6, q5, [sp] // 32-byte Folded Spill
-; CHECK-i32-NEXT:    stp q4, q3, [sp, #32] // 32-byte Folded Spill
-; CHECK-i32-NEXT:    stp q2, q1, [sp, #64] // 32-byte Folded Spill
+; CHECK-i32-NEXT:    stp q3, q2, [sp, #32] // 32-byte Folded Spill
+; CHECK-i32-NEXT:    stp q1, q4, [sp, #64] // 32-byte Folded Spill
 ; CHECK-i32-NEXT:    bl lrintl
 ; CHECK-i32-NEXT:    ldr q0, [sp] // 16-byte Reload
 ; CHECK-i32-NEXT:    mov w19, w0
@@ -1742,21 +1705,22 @@ define <8 x iXLen> @lrint_v8fp128(<8 x fp128> %x) nounwind {
 ; CHECK-i32-NEXT:    ldr q0, [sp, #80] // 16-byte Reload
 ; CHECK-i32-NEXT:    mov w24, w0
 ; CHECK-i32-NEXT:    bl lrintl
+; CHECK-i32-NEXT:    fmov s1, w0
 ; CHECK-i32-NEXT:    ldr q0, [sp, #96] // 16-byte Reload
-; CHECK-i32-NEXT:    mov w25, w0
+; CHECK-i32-NEXT:    str q1, [sp, #96] // 16-byte Spill
 ; CHECK-i32-NEXT:    bl lrintl
-; CHECK-i32-NEXT:    fmov s1, w22
 ; CHECK-i32-NEXT:    fmov s0, w0
-; CHECK-i32-NEXT:    mov v0.s[1], w25
+; CHECK-i32-NEXT:    ldr q1, [sp, #96] // 16-byte Reload
+; CHECK-i32-NEXT:    ldr x30, [sp, #112] // 8-byte Reload
 ; CHECK-i32-NEXT:    mov v1.s[1], w21
-; CHECK-i32-NEXT:    ldp x22, x21, [sp, #144] // 16-byte Folded Reload
-; CHECK-i32-NEXT:    ldp x30, x25, [sp, #112] // 16-byte Folded Reload
-; CHECK-i32-NEXT:    mov v0.s[2], w24
+; CHECK-i32-NEXT:    mov v0.s[1], w24
 ; CHECK-i32-NEXT:    mov v1.s[2], w20
-; CHECK-i32-NEXT:    mov v0.s[3], w23
+; CHECK-i32-NEXT:    mov v0.s[2], w23
+; CHECK-i32-NEXT:    ldp x24, x23, [sp, #128] // 16-byte Folded Reload
 ; CHECK-i32-NEXT:    mov v1.s[3], w19
 ; CHECK-i32-NEXT:    ldp x20, x19, [sp, #160] // 16-byte Folded Reload
-; CHECK-i32-NEXT:    ldp x24, x23, [sp, #128] // 16-byte Folded Reload
+; CHECK-i32-NEXT:    mov v0.s[3], w22
+; CHECK-i32-NEXT:    ldp x22, x21, [sp, #144] // 16-byte Folded Reload
 ; CHECK-i32-NEXT:    add sp, sp, #176
 ; CHECK-i32-NEXT:    ret
 ;
@@ -1840,11 +1804,10 @@ define <16 x iXLen> @lrint_v16fp128(<16 x fp128> %x) nounwind {
 ; CHECK-i32-LABEL: lrint_v16fp128:
 ; CHECK-i32:       // %bb.0:
 ; CHECK-i32-NEXT:    sub sp, sp, #368
-; CHECK-i32-NEXT:    stp q3, q0, [sp, #144] // 32-byte Folded Spill
-; CHECK-i32-NEXT:    stp q2, q1, [sp, #176] // 32-byte Folded Spill
+; CHECK-i32-NEXT:    stp q2, q1, [sp, #128] // 32-byte Folded Spill
 ; CHECK-i32-NEXT:    ldr q1, [sp, #368]
 ; CHECK-i32-NEXT:    stp x29, x30, [sp, #272] // 16-byte Folded Spill
-; CHECK-i32-NEXT:    str q1, [sp, #64] // 16-byte Spill
+; CHECK-i32-NEXT:    str q1, [sp, #160] // 16-byte Spill
 ; CHECK-i32-NEXT:    ldr q1, [sp, #384]
 ; CHECK-i32-NEXT:    stp x28, x27, [sp, #288] // 16-byte Folded Spill
 ; CHECK-i32-NEXT:    str q1, [sp, #48] // 16-byte Spill
@@ -1853,43 +1816,40 @@ define <16 x iXLen> @lrint_v16fp128(<16 x fp128> %x) nounwind {
 ; CHECK-i32-NEXT:    str q1, [sp, #32] // 16-byte Spill
 ; CHECK-i32-NEXT:    ldr q1, [sp, #416]
 ; CHECK-i32-NEXT:    stp x24, x23, [sp, #320] // 16-byte Folded Spill
-; CHECK-i32-NEXT:    str q1, [sp, #208] // 16-byte Spill
-; CHECK-i32-NEXT:    ldr q1, [sp, #432]
 ; CHECK-i32-NEXT:    stp x22, x21, [sp, #336] // 16-byte Folded Spill
-; CHECK-i32-NEXT:    str q1, [sp, #16] // 16-byte Spill
-; CHECK-i32-NEXT:    ldr q1, [sp, #448]
 ; CHECK-i32-NEXT:    stp x20, x19, [sp, #352] // 16-byte Folded Spill
-; CHECK-i32-NEXT:    str q1, [sp, #224] // 16-byte Spill
+; CHECK-i32-NEXT:    stp q7, q6, [sp, #64] // 32-byte Folded Spill
+; CHECK-i32-NEXT:    stp q5, q3, [sp, #96] // 32-byte Folded Spill
+; CHECK-i32-NEXT:    stp q0, q1, [sp, #208] // 32-byte Folded Spill
+; CHECK-i32-NEXT:    ldr q1, [sp, #432]
+; CHECK-i32-NEXT:    stp q1, q4, [sp, #176] // 32-byte Folded Spill
+; CHECK-i32-NEXT:    ldr q1, [sp, #448]
+; CHECK-i32-NEXT:    str q1, [sp, #16] // 16-byte Spill
 ; CHECK-i32-NEXT:    ldr q1, [sp, #464]
-; CHECK-i32-NEXT:    stp q7, q6, [sp, #80] // 32-byte Folded Spill
 ; CHECK-i32-NEXT:    str q1, [sp, #240] // 16-byte Spill
 ; CHECK-i32-NEXT:    ldr q1, [sp, #480]
-; CHECK-i32-NEXT:    stp q5, q4, [sp, #112] // 32-byte Folded Spill
 ; CHECK-i32-NEXT:    mov v0.16b, v1.16b
 ; CHECK-i32-NEXT:    bl lrintl
 ; CHECK-i32-NEXT:    ldr q0, [sp, #240] // 16-byte Reload
 ; CHECK-i32-NEXT:    str w0, [sp, #268] // 4-byte Spill
 ; CHECK-i32-NEXT:    bl lrintl
-; CHECK-i32-NEXT:    ldr q0, [sp, #224] // 16-byte Reload
-; CHECK-i32-NEXT:    str w0, [sp, #240] // 4-byte Spill
-; CHECK-i32-NEXT:    bl lrintl
 ; CHECK-i32-NEXT:    ldr q0, [sp, #16] // 16-byte Reload
-; CHECK-i32-NEXT:    str w0, [sp, #224] // 4-byte Spill
+; CHECK-i32-NEXT:    str w0, [sp, #240] // 4-byte Spill
 ; CHECK-i32-NEXT:    bl lrintl
-; CHECK-i32-NEXT:    ldr q0, [sp, #208] // 16-byte Reload
-; CHECK-i32-NEXT:    mov w23, w0
+; CHECK-i32-NEXT:    ldr q0, [sp, #224] // 16-byte Reload
+; CHECK-i32-NEXT:    mov w22, w0
 ; CHECK-i32-NEXT:    bl lrintl
 ; CHECK-i32-NEXT:    ldr q0, [sp, #32] // 16-byte Reload
-; CHECK-i32-NEXT:    str w0, [sp, #208] // 4-byte Spill
+; CHECK-i32-NEXT:    str w0, [sp, #224] // 4-byte Spill
 ; CHECK-i32-NEXT:    bl lrintl
 ; CHECK-i32-NEXT:    ldr q0, [sp, #48] // 16-byte Reload
-; CHECK-i32-NEXT:    mov w24, w0
+; CHECK-i32-NEXT:    mov w23, w0
 ; CHECK-i32-NEXT:    bl lrintl
 ; CHECK-i32-NEXT:    ldr q0, [sp, #64] // 16-byte Reload
 ; CHECK-i32-NEXT:    mov w25, w0
 ; CHECK-i32-NEXT:    bl lrintl
 ; CHECK-i32-NEXT:    ldr q0, [sp, #80] // 16-byte Reload
-; CHECK-i32-NEXT:    mov w27, w0
+; CHECK-i32-NEXT:    mov w24, w0
 ; CHECK-i32-NEXT:    bl lrintl
 ; CHECK-i32-NEXT:    ldr q0, [sp, #96] // 16-byte Reload
 ; CHECK-i32-NEXT:    mov w26, w0
@@ -1898,46 +1858,52 @@ define <16 x iXLen> @lrint_v16fp128(<16 x fp128> %x) nounwind {
 ; CHECK-i32-NEXT:    mov w28, w0
 ; CHECK-i32-NEXT:    bl lrintl
 ; CHECK-i32-NEXT:    ldr q0, [sp, #128] // 16-byte Reload
-; CHECK-i32-NEXT:    mov w29, w0
+; CHECK-i32-NEXT:    mov w27, w0
 ; CHECK-i32-NEXT:    bl lrintl
 ; CHECK-i32-NEXT:    ldr q0, [sp, #144] // 16-byte Reload
+; CHECK-i32-NEXT:    mov w29, w0
+; CHECK-i32-NEXT:    bl lrintl
+; CHECK-i32-NEXT:    ldr q0, [sp, #192] // 16-byte Reload
 ; CHECK-i32-NEXT:    mov w19, w0
 ; CHECK-i32-NEXT:    bl lrintl
-; CHECK-i32-NEXT:    ldr q0, [sp, #176] // 16-byte Reload
+; CHECK-i32-NEXT:    ldr q0, [sp, #160] // 16-byte Reload
 ; CHECK-i32-NEXT:    mov w20, w0
 ; CHECK-i32-NEXT:    bl lrintl
-; CHECK-i32-NEXT:    ldr q0, [sp, #192] // 16-byte Reload
+; CHECK-i32-NEXT:    ldr q0, [sp, #176] // 16-byte Reload
 ; CHECK-i32-NEXT:    mov w21, w0
 ; CHECK-i32-NEXT:    bl lrintl
-; CHECK-i32-NEXT:    ldr q0, [sp, #160] // 16-byte Reload
-; CHECK-i32-NEXT:    mov w22, w0
+; CHECK-i32-NEXT:    fmov s1, w0
+; CHECK-i32-NEXT:    ldr q0, [sp, #208] // 16-byte Reload
+; CHECK-i32-NEXT:    str q1, [sp, #208] // 16-byte Spill
+; CHECK-i32-NEXT:    fmov s1, w21
+; CHECK-i32-NEXT:    str q1, [sp, #192] // 16-byte Spill
+; CHECK-i32-NEXT:    fmov s1, w20
+; CHECK-i32-NEXT:    str q1, [sp, #176] // 16-byte Spill
 ; CHECK-i32-NEXT:    bl lrintl
-; CHECK-i32-NEXT:    fmov s1, w19
 ; CHECK-i32-NEXT:    fmov s0, w0
-; CHECK-i32-NEXT:    ldr w8, [sp, #224] // 4-byte Reload
-; CHECK-i32-NEXT:    fmov s2, w27
-; CHECK-i32-NEXT:    fmov s3, w23
-; CHECK-i32-NEXT:    mov v0.s[1], w22
-; CHECK-i32-NEXT:    mov v1.s[1], w29
-; CHECK-i32-NEXT:    mov v2.s[1], w25
-; CHECK-i32-NEXT:    mov v3.s[1], w8
+; CHECK-i32-NEXT:    ldp q1, q2, [sp, #176] // 32-byte Folded Reload
+; CHECK-i32-NEXT:    ldr q3, [sp, #208] // 16-byte Reload
 ; CHECK-i32-NEXT:    ldr w8, [sp, #240] // 4-byte Reload
-; CHECK-i32-NEXT:    ldp x29, x30, [sp, #272] // 16-byte Folded Reload
-; CHECK-i32-NEXT:    mov v0.s[2], w21
-; CHECK-i32-NEXT:    mov v1.s[2], w28
-; CHECK-i32-NEXT:    mov v2.s[2], w24
-; CHECK-i32-NEXT:    mov v3.s[2], w8
-; CHECK-i32-NEXT:    ldr w8, [sp, #208] // 4-byte Reload
+; CHECK-i32-NEXT:    mov v0.s[1], w19
+; CHECK-i32-NEXT:    mov v1.s[1], w28
+; CHECK-i32-NEXT:    mov v2.s[1], w25
+; CHECK-i32-NEXT:    mov v3.s[1], w22
+; CHECK-i32-NEXT:    ldp x20, x19, [sp, #352] // 16-byte Folded Reload
 ; CHECK-i32-NEXT:    ldp x22, x21, [sp, #336] // 16-byte Folded Reload
-; CHECK-i32-NEXT:    ldp x24, x23, [sp, #320] // 16-byte Folded Reload
-; CHECK-i32-NEXT:    mov v0.s[3], w20
-; CHECK-i32-NEXT:    mov v1.s[3], w26
+; CHECK-i32-NEXT:    mov v0.s[2], w29
+; CHECK-i32-NEXT:    mov v1.s[2], w26
+; CHECK-i32-NEXT:    mov v2.s[2], w23
+; CHECK-i32-NEXT:    mov v3.s[2], w8
+; CHECK-i32-NEXT:    ldr w8, [sp, #224] // 4-byte Reload
+; CHECK-i32-NEXT:    ldp x26, x25, [sp, #304] // 16-byte Folded Reload
+; CHECK-i32-NEXT:    ldp x29, x30, [sp, #272] // 16-byte Folded Reload
+; CHECK-i32-NEXT:    mov v0.s[3], w27
+; CHECK-i32-NEXT:    mov v1.s[3], w24
 ; CHECK-i32-NEXT:    mov v2.s[3], w8
 ; CHECK-i32-NEXT:    ldr w8, [sp, #268] // 4-byte Reload
-; CHECK-i32-NEXT:    ldp x20, x19, [sp, #352] // 16-byte Folded Reload
-; CHECK-i32-NEXT:    ldp x26, x25, [sp, #304] // 16-byte Folded Reload
-; CHECK-i32-NEXT:    mov v3.s[3], w8
+; CHECK-i32-NEXT:    ldp x24, x23, [sp, #320] // 16-byte Folded Reload
 ; CHECK-i32-NEXT:    ldp x28, x27, [sp, #288] // 16-byte Folded Reload
+; CHECK-i32-NEXT:    mov v3.s[3], w8
 ; CHECK-i32-NEXT:    add sp, sp, #368
 ; CHECK-i32-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/AArch64/vector-llrint.ll b/llvm/test/CodeGen/AArch64/vector-llrint.ll
index ae7617d9c0b66..d9a9e57fe0a63 100644
--- a/llvm/test/CodeGen/AArch64/vector-llrint.ll
+++ b/llvm/test/CodeGen/AArch64/vector-llrint.ll
@@ -806,18 +806,11 @@ define <32 x i64> @llrint_v32i64_v32f32(<32 x float> %x) nounwind {
 declare <32 x i64> @llvm.llrint.v32i64.v32f32(<32 x float>)
 
 define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) nounwind {
-; CHECK-SD-LABEL: llrint_v1i64_v1f64:
-; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    frintx d0, d0
-; CHECK-SD-NEXT:    fcvtzs x8, d0
-; CHECK-SD-NEXT:    fmov d0, x8
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: llrint_v1i64_v1f64:
-; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    frintx d0, d0
-; CHECK-GI-NEXT:    fcvtzs d0, d0
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: llrint_v1i64_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    fcvtzs d0, d0
+; CHECK-NEXT:    ret
   %a = call <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double> %x)
   ret <1 x i64> %a
 }
diff --git a/llvm/test/CodeGen/AArch64/vector-lrint.ll b/llvm/test/CodeGen/AArch64/vector-lrint.ll
index 9eaad687fb4a2..2abe0b7ae2106 100644
--- a/llvm/test/CodeGen/AArch64/vector-lrint.ll
+++ b/llvm/test/CodeGen/AArch64/vector-lrint.ll
@@ -11,13 +11,12 @@
 ; RUN:   FileCheck %s --check-prefixes=CHECK-i64,CHECK-i64-GI
 
 define <1 x iXLen> @lrint_v1f16(<1 x half> %x) nounwind {
-; CHECK-i32-SD-LABEL: lrint_v1f16:
-; CHECK-i32-SD:       // %bb.0:
-; CHECK-i32-SD-NEXT:    fcvt s0, h0
-; CHECK-i32-SD-NEXT:    frintx s0, s0
-; CHECK-i32-SD-NEXT:    fcvtzs w8, s0
-; CHECK-i32-SD-NEXT:    fmov s0, w8
-; CHECK-i32-SD-NEXT:    ret
+; CHECK-i32-LABEL: lrint_v1f16:
+; CHECK-i32:       // %bb.0:
+; CHECK-i32-NEXT:    fcvt s0, h0
+; CHECK-i32-NEXT:    frintx s0, s0
+; CHECK-i32-NEXT:    fcvtzs s0, s0
+; CHECK-i32-NEXT:    ret
 ;
 ; CHECK-i64-LABEL: lrint_v1f16:
 ; CHECK-i64:       // %bb.0:
@@ -26,13 +25,6 @@ define <1 x iXLen> @lrint_v1f16(<1 x half> %x) nounwind {
 ; CHECK-i64-NEXT:    fcvtzs x8, s0
 ; CHECK-i64-NEXT:    fmov d0, x8
 ; CHECK-i64-NEXT:    ret
-;
-; CHECK-i32-GI-LABEL: lrint_v1f16:
-; CHECK-i32-GI:       // %bb.0:
-; CHECK-i32-GI-NEXT:    fcvt s0, h0
-; CHECK-i32-GI-NEXT:    frintx s0, s0
-; CHECK-i32-GI-NEXT:    fcvtzs s0, s0
-; CHECK-i32-GI-NEXT:    ret
   %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f16(<1 x half> %x)
   ret <1 x iXLen> %a
 }
@@ -47,10 +39,9 @@ define <2 x iXLen> @lrint_v2f16(<2 x half> %x) nounwind {
 ; CHECK-i32-SD-NEXT:    fcvt s1, h1
 ; CHECK-i32-SD-NEXT:    frintx s0, s0
 ; CHECK-i32-SD-NEXT:    frintx s1, s1
-; CHECK-i32-SD-NEXT:    fcvtzs w8, s0
-; CHECK-i32-SD-NEXT:    fcvtzs w9, s1
-; CHECK-i32-SD-NEXT:    fmov s0, w8
-; CHECK-i32-SD-NEXT:    mov v0.s[1], w9
+; CHECK-i32-SD-NEXT:    fcvtzs s0, s0
+; CHECK-i32-SD-NEXT:    fcvtzs w8, s1
+; CHECK-i32-SD-NEXT:    mov v0.s[1], w8
 ; CHECK-i32-SD-NEXT:    // kill: def $d0 killed $d0 killed $q0
 ; CHECK-i32-SD-NEXT:    ret
 ;
@@ -98,17 +89,16 @@ define <4 x iXLen> @lrint_v4f16(<4 x half> %x) nounwind {
 ; CHECK-i32-SD-NEXT:    mov h1, v0.h[1]
 ; CHECK-i32-SD-NEXT:    fcvt s2, h0
 ; CHECK-i32-SD-NEXT:    mov h3, v0.h[2]
-; CHECK-i32-SD-NEXT:    mov h0, v0.h[3]
+; CHECK-i32-SD-NEXT:    mov h4, v0.h[3]
 ; CHECK-i32-SD-NEXT:    fcvt s1, h1
-; CHECK-i32-SD-NEXT:    frintx s2, s2
-; CHECK-i32-SD-NEXT:    fcvt s3, h3
+; CHECK-i32-SD-NEXT:    frintx s0, s2
+; CHECK-i32-SD-NEXT:    fcvt s2, h3
 ; CHECK-i32-SD-NEXT:    frintx s1, s1
-; CHECK-i32-SD-NEXT:    fcvtzs w8, s2
-; CHECK-i32-SD-NEXT:    fcvt s2, h0
-; CHECK-i32-SD-NEXT:    fcvtzs w9, s1
-; CHECK-i32-SD-NEXT:    frintx s1, s3
-; CHECK-i32-SD-NEXT:    fmov s0, w8
-; CHECK-i32-SD-NEXT:    mov v0.s[1], w9
+; CHECK-i32-SD-NEXT:    fcvtzs s0, s0
+; CHECK-i32-SD-NEXT:    fcvtzs w8, s1
+; CHECK-i32-SD-NEXT:    frintx s1, s2
+; CHECK-i32-SD-NEXT:    fcvt s2, h4
+; CHECK-i32-SD-NEXT:    mov v0.s[1], w8
 ; CHECK-i32-SD-NEXT:    fcvtzs w8, s1
 ; CHECK-i32-SD-NEXT:    frintx s1, s2
 ; CHECK-i32-SD-NEXT:    mov v0.s[2], w8
@@ -169,41 +159,39 @@ define <8 x iXLen> @lrint_v8f16(<8 x half> %x) nounwind {
 ; CHECK-i32-SD-LABEL: lrint_v8f16:
 ; CHECK-i32-SD:       // %bb.0:
 ; CHECK-i32-SD-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-i32-SD-NEXT:    mov h3, v0.h[1]
-; CHECK-i32-SD-NEXT:    fcvt s6, h0
+; CHECK-i32-SD-NEXT:    mov h2, v0.h[1]
 ; CHECK-i32-SD-NEXT:    mov h4, v0.h[2]
+; CHECK-i32-SD-NEXT:    fcvt s7, h0
 ; CHECK-i32-SD-NEXT:    mov h0, v0.h[3]
-; CHECK-i32-SD-NEXT:    mov h2, v1.h[1]
+; CHECK-i32-SD-NEXT:    mov h3, v1.h[1]
+; CHECK-i32-SD-NEXT:    fcvt s2, h2
 ; CHECK-i32-SD-NEXT:    fcvt s5, h1
-; CHECK-i32-SD-NEXT:    mov h7, v1.h[2]
-; CHECK-i32-SD-NEXT:    fcvt s3, h3
-; CHECK-i32-SD-NEXT:    frintx s6, s6
+; CHECK-i32-SD-NEXT:    mov h6, v1.h[2]
 ; CHECK-i32-SD-NEXT:    fcvt s4, h4
-; CHECK-i32-SD-NEXT:    mov h1, v1.h[3]
-; CHECK-i32-SD-NEXT:    fcvt s2, h2
+; CHECK-i32-SD-NEXT:    mov h16, v1.h[3]
+; CHECK-i32-SD-NEXT:    frintx s7, s7
+; CHECK-i32-SD-NEXT:    fcvt s3, h3
+; CHECK-i32-SD-NEXT:    frintx s2, s2
 ; CHECK-i32-SD-NEXT:    frintx s5, s5
-; CHECK-i32-SD-NEXT:    fcvt s7, h7
+; CHECK-i32-SD-NEXT:    fcvt s6, h6
 ; CHECK-i32-SD-NEXT:    frintx s3, s3
-; CHECK-i32-SD-NEXT:    fcvtzs w9, s6
-; CHECK-i32-SD-NEXT:    frintx s4, s4
-; CHECK-i32-SD-NEXT:    frintx s2, s2
-; CHECK-i32-SD-NEXT:    fcvtzs w8, s5
-; CHECK-i32-SD-NEXT:    fcvt s5, h1
-; CHECK-i32-SD-NEXT:    fcvtzs w11, s3
-; CHECK-i32-SD-NEXT:    fcvt s3, h0
-; CHECK-i32-SD-NEXT:    fmov s0, w9
-; CHECK-i32-SD-NEXT:    fcvtzs w12, s4
-; CHECK-i32-SD-NEXT:    fcvtzs w10, s2
-; CHECK-i32-SD-NEXT:    frintx s2, s7
-; CHECK-i32-SD-NEXT:    fmov s1, w8
-; CHECK-i32-SD-NEXT:    mov v0.s[1], w11
 ; CHECK-i32-SD-NEXT:    fcvtzs w8, s2
-; CHECK-i32-SD-NEXT:    mov v1.s[1], w10
-; CHECK-i32-SD-NEXT:    frintx s2, s3
+; CHECK-i32-SD-NEXT:    frintx s2, s4
+; CHECK-i32-SD-NEXT:    fcvtzs s1, s5
+; CHECK-i32-SD-NEXT:    fcvt s4, h0
+; CHECK-i32-SD-NEXT:    fcvt s5, h16
+; CHECK-i32-SD-NEXT:    fcvtzs s0, s7
+; CHECK-i32-SD-NEXT:    fcvtzs w9, s3
+; CHECK-i32-SD-NEXT:    frintx s3, s6
+; CHECK-i32-SD-NEXT:    fcvtzs w10, s2
+; CHECK-i32-SD-NEXT:    frintx s2, s4
+; CHECK-i32-SD-NEXT:    mov v0.s[1], w8
+; CHECK-i32-SD-NEXT:    fcvtzs w11, s3
+; CHECK-i32-SD-NEXT:    mov v1.s[1], w9
 ; CHECK-i32-SD-NEXT:    frintx s3, s5
-; CHECK-i32-SD-NEXT:    mov v0.s[2], w12
-; CHECK-i32-SD-NEXT:    mov v1.s[2], w8
 ; CHECK-i32-SD-NEXT:    fcvtzs w9, s2
+; CHECK-i32-SD-NEXT:    mov v0.s[2], w10
+; CHECK-i32-SD-NEXT:    mov v1.s[2], w11
 ; CHECK-i32-SD-NEXT:    fcvtzs w8, s3
 ; CHECK-i32-SD-NEXT:    mov v0.s[3], w9
 ; CHECK-i32-SD-NEXT:    mov v1.s[3], w8
@@ -295,82 +283,78 @@ define <16 x iXLen> @lrint_v16f16(<16 x half> %x) nounwind {
 ; CHECK-i32-SD:       // %bb.0:
 ; CHECK-i32-SD-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
 ; CHECK-i32-SD-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
-; CHECK-i32-SD-NEXT:    mov h18, v0.h[1]
-; CHECK-i32-SD-NEXT:    mov h19, v1.h[1]
-; CHECK-i32-SD-NEXT:    fcvt s20, h0
-; CHECK-i32-SD-NEXT:    mov h21, v0.h[2]
+; CHECK-i32-SD-NEXT:    mov h4, v0.h[1]
+; CHECK-i32-SD-NEXT:    mov h5, v1.h[1]
+; CHECK-i32-SD-NEXT:    mov h16, v0.h[2]
+; CHECK-i32-SD-NEXT:    fcvt s19, h0
+; CHECK-i32-SD-NEXT:    mov h20, v1.h[2]
 ; CHECK-i32-SD-NEXT:    mov h0, v0.h[3]
-; CHECK-i32-SD-NEXT:    mov h4, v2.h[1]
-; CHECK-i32-SD-NEXT:    mov h5, v2.h[2]
-; CHECK-i32-SD-NEXT:    fcvt s6, h2
-; CHECK-i32-SD-NEXT:    fcvt s7, h3
-; CHECK-i32-SD-NEXT:    mov h16, v3.h[1]
-; CHECK-i32-SD-NEXT:    mov h17, v3.h[2]
-; CHECK-i32-SD-NEXT:    fcvt s18, h18
-; CHECK-i32-SD-NEXT:    fcvt s19, h19
-; CHECK-i32-SD-NEXT:    mov h2, v2.h[3]
+; CHECK-i32-SD-NEXT:    mov h21, v1.h[3]
+; CHECK-i32-SD-NEXT:    mov h6, v2.h[1]
+; CHECK-i32-SD-NEXT:    mov h7, v3.h[1]
 ; CHECK-i32-SD-NEXT:    fcvt s4, h4
 ; CHECK-i32-SD-NEXT:    fcvt s5, h5
+; CHECK-i32-SD-NEXT:    mov h17, v2.h[2]
+; CHECK-i32-SD-NEXT:    mov h18, v3.h[2]
+; CHECK-i32-SD-NEXT:    frintx s19, s19
+; CHECK-i32-SD-NEXT:    fcvt s23, h0
+; CHECK-i32-SD-NEXT:    fcvt s6, h6
+; CHECK-i32-SD-NEXT:    fcvt s7, h7
+; CHECK-i32-SD-NEXT:    frintx s4, s4
+; CHECK-i32-SD-NEXT:    frintx s5, s5
+; CHECK-i32-SD-NEXT:    fcvtzs s0, s19
 ; CHECK-i32-SD-NEXT:    frintx s6, s6
 ; CHECK-i32-SD-NEXT:    frintx s7, s7
-; CHECK-i32-SD-NEXT:    fcvt s16, h16
-; CHECK-i32-SD-NEXT:    fcvt s17, h17
+; CHECK-i32-SD-NEXT:    fcvtzs w8, s4
+; CHECK-i32-SD-NEXT:    fcvt s4, h2
+; CHECK-i32-SD-NEXT:    fcvtzs w9, s5
+; CHECK-i32-SD-NEXT:    fcvt s5, h3
+; CHECK-i32-SD-NEXT:    mov h2, v2.h[3]
+; CHECK-i32-SD-NEXT:    fcvtzs w10, s6
+; CHECK-i32-SD-NEXT:    fcvt s6, h1
+; CHECK-i32-SD-NEXT:    fcvtzs w11, s7
+; CHECK-i32-SD-NEXT:    fcvt s7, h16
+; CHECK-i32-SD-NEXT:    fcvt s16, h17
+; CHECK-i32-SD-NEXT:    fcvt s17, h18
+; CHECK-i32-SD-NEXT:    fcvt s18, h20
+; CHECK-i32-SD-NEXT:    frintx s4, s4
+; CHECK-i32-SD-NEXT:    frintx s5, s5
+; CHECK-i32-SD-NEXT:    mov h20, v3.h[3]
+; CHECK-i32-SD-NEXT:    fcvt s22, h2
+; CHECK-i32-SD-NEXT:    mov v0.s[1], w8
+; CHECK-i32-SD-NEXT:    frintx s6, s6
+; CHECK-i32-SD-NEXT:    frintx s16, s16
+; CHECK-i32-SD-NEXT:    frintx s17, s17
+; CHECK-i32-SD-NEXT:    frintx s7, s7
 ; CHECK-i32-SD-NEXT:    frintx s18, s18
-; CHECK-i32-SD-NEXT:    fcvt s2, h2
+; CHECK-i32-SD-NEXT:    fcvtzs s1, s4
+; CHECK-i32-SD-NEXT:    fcvtzs s3, s5
+; CHECK-i32-SD-NEXT:    fcvt s4, h20
+; CHECK-i32-SD-NEXT:    fcvt s5, h21
+; CHECK-i32-SD-NEXT:    fcvtzs s2, s6
+; CHECK-i32-SD-NEXT:    frintx s6, s22
+; CHECK-i32-SD-NEXT:    fcvtzs w12, s16
+; CHECK-i32-SD-NEXT:    fcvtzs w13, s17
+; CHECK-i32-SD-NEXT:    fcvtzs w14, s7
+; CHECK-i32-SD-NEXT:    fcvtzs w15, s18
+; CHECK-i32-SD-NEXT:    frintx s7, s23
+; CHECK-i32-SD-NEXT:    mov v1.s[1], w10
+; CHECK-i32-SD-NEXT:    mov v3.s[1], w11
 ; CHECK-i32-SD-NEXT:    frintx s4, s4
 ; CHECK-i32-SD-NEXT:    frintx s5, s5
+; CHECK-i32-SD-NEXT:    mov v2.s[1], w9
 ; CHECK-i32-SD-NEXT:    fcvtzs w8, s6
-; CHECK-i32-SD-NEXT:    fcvt s6, h1
+; CHECK-i32-SD-NEXT:    mov v0.s[2], w14
 ; CHECK-i32-SD-NEXT:    fcvtzs w9, s7
-; CHECK-i32-SD-NEXT:    mov h7, v1.h[2]
-; CHECK-i32-SD-NEXT:    frintx s16, s16
-; CHECK-i32-SD-NEXT:    fcvtzs w15, s18
+; CHECK-i32-SD-NEXT:    mov v1.s[2], w12
+; CHECK-i32-SD-NEXT:    mov v3.s[2], w13
 ; CHECK-i32-SD-NEXT:    fcvtzs w10, s4
-; CHECK-i32-SD-NEXT:    frintx s4, s17
 ; CHECK-i32-SD-NEXT:    fcvtzs w11, s5
-; CHECK-i32-SD-NEXT:    frintx s5, s20
-; CHECK-i32-SD-NEXT:    fcvt s17, h21
-; CHECK-i32-SD-NEXT:    frintx s6, s6
-; CHECK-i32-SD-NEXT:    fcvtzs w12, s16
-; CHECK-i32-SD-NEXT:    frintx s16, s19
-; CHECK-i32-SD-NEXT:    fcvt s7, h7
-; CHECK-i32-SD-NEXT:    mov h19, v1.h[3]
-; CHECK-i32-SD-NEXT:    fmov s1, w8
-; CHECK-i32-SD-NEXT:    fcvtzs w13, s4
-; CHECK-i32-SD-NEXT:    mov h4, v3.h[3]
-; CHECK-i32-SD-NEXT:    fmov s3, w9
-; CHECK-i32-SD-NEXT:    fcvtzs w14, s5
-; CHECK-i32-SD-NEXT:    frintx s5, s17
-; CHECK-i32-SD-NEXT:    fcvtzs w16, s6
-; CHECK-i32-SD-NEXT:    fcvt s17, h0
-; CHECK-i32-SD-NEXT:    fcvtzs w8, s16
-; CHECK-i32-SD-NEXT:    frintx s6, s7
-; CHECK-i32-SD-NEXT:    fcvt s7, h19
-; CHECK-i32-SD-NEXT:    mov v1.s[1], w10
-; CHECK-i32-SD-NEXT:    mov v3.s[1], w12
-; CHECK-i32-SD-NEXT:    fcvt s4, h4
-; CHECK-i32-SD-NEXT:    fcvtzs w9, s5
-; CHECK-i32-SD-NEXT:    fmov s0, w14
-; CHECK-i32-SD-NEXT:    frintx s5, s2
-; CHECK-i32-SD-NEXT:    fmov s2, w16
-; CHECK-i32-SD-NEXT:    frintx s16, s17
-; CHECK-i32-SD-NEXT:    fcvtzs w10, s6
-; CHECK-i32-SD-NEXT:    frintx s6, s7
-; CHECK-i32-SD-NEXT:    mov v1.s[2], w11
-; CHECK-i32-SD-NEXT:    mov v3.s[2], w13
-; CHECK-i32-SD-NEXT:    mov v0.s[1], w15
-; CHECK-i32-SD-NEXT:    frintx s4, s4
-; CHECK-i32-SD-NEXT:    mov v2.s[1], w8
-; CHECK-i32-SD-NEXT:    fcvtzs w8, s5
-; CHECK-i32-SD-NEXT:    fcvtzs w12, s16
-; CHECK-i32-SD-NEXT:    mov v0.s[2], w9
-; CHECK-i32-SD-NEXT:    fcvtzs w9, s4
-; CHECK-i32-SD-NEXT:    mov v2.s[2], w10
-; CHECK-i32-SD-NEXT:    fcvtzs w10, s6
+; CHECK-i32-SD-NEXT:    mov v2.s[2], w15
+; CHECK-i32-SD-NEXT:    mov v0.s[3], w9
 ; CHECK-i32-SD-NEXT:    mov v1.s[3], w8
-; CHECK-i32-SD-NEXT:    mov v0.s[3], w12
-; CHECK-i32-SD-NEXT:    mov v3.s[3], w9
-; CHECK-i32-SD-NEXT:    mov v2.s[3], w10
+; CHECK-i32-SD-NEXT:    mov v3.s[3], w10
+; CHECK-i32-SD-NEXT:    mov v2.s[3], w11
 ; CHECK-i32-SD-NEXT:    ret
 ;
 ; CHECK-i64-SD-LABEL: lrint_v16f16:
@@ -522,164 +506,156 @@ declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f16(<16 x half>)
 define <32 x iXLen> @lrint_v32f16(<32 x half> %x) nounwind {
 ; CHECK-i32-SD-LABEL: lrint_v32f16:
 ; CHECK-i32-SD:       // %bb.0:
-; CHECK-i32-SD-NEXT:    ext v5.16b, v0.16b, v0.16b, #8
-; CHECK-i32-SD-NEXT:    ext v4.16b, v1.16b, v1.16b, #8
-; CHECK-i32-SD-NEXT:    ext v17.16b, v2.16b, v2.16b, #8
-; CHECK-i32-SD-NEXT:    mov h6, v5.h[1]
-; CHECK-i32-SD-NEXT:    fcvt s7, h5
-; CHECK-i32-SD-NEXT:    mov h16, v5.h[2]
-; CHECK-i32-SD-NEXT:    mov h5, v5.h[3]
-; CHECK-i32-SD-NEXT:    mov h18, v4.h[1]
-; CHECK-i32-SD-NEXT:    mov h20, v4.h[3]
-; CHECK-i32-SD-NEXT:    mov h19, v4.h[2]
-; CHECK-i32-SD-NEXT:    fcvt s21, h4
-; CHECK-i32-SD-NEXT:    mov h23, v17.h[1]
-; CHECK-i32-SD-NEXT:    ext v4.16b, v3.16b, v3.16b, #8
-; CHECK-i32-SD-NEXT:    fcvt s22, h17
-; CHECK-i32-SD-NEXT:    fcvt s6, h6
-; CHECK-i32-SD-NEXT:    frintx s7, s7
-; CHECK-i32-SD-NEXT:    fcvt s16, h16
+; CHECK-i32-SD-NEXT:    str x19, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-i32-SD-NEXT:    ext v4.16b, v0.16b, v0.16b, #8
+; CHECK-i32-SD-NEXT:    ext v5.16b, v1.16b, v1.16b, #8
+; CHECK-i32-SD-NEXT:    ext v6.16b, v2.16b, v2.16b, #8
+; CHECK-i32-SD-NEXT:    mov h27, v3.h[2]
+; CHECK-i32-SD-NEXT:    mov h16, v4.h[2]
+; CHECK-i32-SD-NEXT:    mov h17, v4.h[3]
+; CHECK-i32-SD-NEXT:    mov h18, v5.h[1]
+; CHECK-i32-SD-NEXT:    mov h7, v4.h[1]
+; CHECK-i32-SD-NEXT:    mov h19, v5.h[2]
+; CHECK-i32-SD-NEXT:    mov h20, v5.h[3]
+; CHECK-i32-SD-NEXT:    mov h21, v6.h[1]
+; CHECK-i32-SD-NEXT:    mov h22, v6.h[2]
+; CHECK-i32-SD-NEXT:    fcvt s4, h4
 ; CHECK-i32-SD-NEXT:    fcvt s5, h5
+; CHECK-i32-SD-NEXT:    fcvt s16, h16
+; CHECK-i32-SD-NEXT:    fcvt s17, h17
 ; CHECK-i32-SD-NEXT:    fcvt s18, h18
-; CHECK-i32-SD-NEXT:    fcvt s20, h20
+; CHECK-i32-SD-NEXT:    fcvt s23, h7
 ; CHECK-i32-SD-NEXT:    fcvt s19, h19
-; CHECK-i32-SD-NEXT:    frintx s22, s22
-; CHECK-i32-SD-NEXT:    frintx s6, s6
-; CHECK-i32-SD-NEXT:    fcvtzs w12, s7
-; CHECK-i32-SD-NEXT:    frintx s7, s16
+; CHECK-i32-SD-NEXT:    ext v7.16b, v3.16b, v3.16b, #8
+; CHECK-i32-SD-NEXT:    fcvt s20, h20
+; CHECK-i32-SD-NEXT:    fcvt s21, h21
+; CHECK-i32-SD-NEXT:    fcvt s22, h22
+; CHECK-i32-SD-NEXT:    frintx s4, s4
 ; CHECK-i32-SD-NEXT:    frintx s5, s5
-; CHECK-i32-SD-NEXT:    frintx s16, s21
-; CHECK-i32-SD-NEXT:    fcvt s21, h23
+; CHECK-i32-SD-NEXT:    frintx s24, s16
+; CHECK-i32-SD-NEXT:    frintx s17, s17
 ; CHECK-i32-SD-NEXT:    frintx s18, s18
-; CHECK-i32-SD-NEXT:    frintx s20, s20
+; CHECK-i32-SD-NEXT:    mov h16, v6.h[3]
 ; CHECK-i32-SD-NEXT:    frintx s19, s19
-; CHECK-i32-SD-NEXT:    fcvtzs w15, s22
-; CHECK-i32-SD-NEXT:    mov h22, v1.h[2]
-; CHECK-i32-SD-NEXT:    fcvtzs w17, s6
-; CHECK-i32-SD-NEXT:    mov h6, v17.h[2]
-; CHECK-i32-SD-NEXT:    mov h17, v17.h[3]
-; CHECK-i32-SD-NEXT:    fcvtzs w9, s7
-; CHECK-i32-SD-NEXT:    mov h7, v4.h[2]
-; CHECK-i32-SD-NEXT:    fcvtzs w8, s5
-; CHECK-i32-SD-NEXT:    mov h5, v4.h[1]
-; CHECK-i32-SD-NEXT:    fcvtzs w13, s16
-; CHECK-i32-SD-NEXT:    frintx s16, s21
-; CHECK-i32-SD-NEXT:    fcvtzs w14, s18
-; CHECK-i32-SD-NEXT:    fcvtzs w10, s20
-; CHECK-i32-SD-NEXT:    fcvt s18, h4
+; CHECK-i32-SD-NEXT:    mov h25, v7.h[1]
+; CHECK-i32-SD-NEXT:    mov h26, v7.h[2]
+; CHECK-i32-SD-NEXT:    frintx s20, s20
+; CHECK-i32-SD-NEXT:    frintx s21, s21
+; CHECK-i32-SD-NEXT:    frintx s22, s22
+; CHECK-i32-SD-NEXT:    frintx s23, s23
 ; CHECK-i32-SD-NEXT:    fcvt s6, h6
+; CHECK-i32-SD-NEXT:    fcvtzs w8, s17
+; CHECK-i32-SD-NEXT:    fcvtzs w12, s18
+; CHECK-i32-SD-NEXT:    mov h17, v0.h[1]
+; CHECK-i32-SD-NEXT:    mov h18, v0.h[2]
+; CHECK-i32-SD-NEXT:    fcvt s16, h16
+; CHECK-i32-SD-NEXT:    fcvtzs w9, s24
+; CHECK-i32-SD-NEXT:    fcvtzs w10, s19
+; CHECK-i32-SD-NEXT:    fcvtzs w13, s20
+; CHECK-i32-SD-NEXT:    mov h19, v0.h[3]
+; CHECK-i32-SD-NEXT:    mov h20, v1.h[1]
+; CHECK-i32-SD-NEXT:    fcvtzs w15, s21
+; CHECK-i32-SD-NEXT:    fcvtzs w14, s22
+; CHECK-i32-SD-NEXT:    fcvt s21, h25
+; CHECK-i32-SD-NEXT:    fcvt s22, h26
 ; CHECK-i32-SD-NEXT:    fcvt s17, h17
-; CHECK-i32-SD-NEXT:    mov h20, v0.h[2]
-; CHECK-i32-SD-NEXT:    fcvt s7, h7
-; CHECK-i32-SD-NEXT:    fcvtzs w11, s19
-; CHECK-i32-SD-NEXT:    mov h19, v0.h[1]
-; CHECK-i32-SD-NEXT:    fcvt s5, h5
-; CHECK-i32-SD-NEXT:    fcvtzs w0, s16
-; CHECK-i32-SD-NEXT:    mov h21, v1.h[1]
-; CHECK-i32-SD-NEXT:    frintx s18, s18
-; CHECK-i32-SD-NEXT:    mov h4, v4.h[3]
-; CHECK-i32-SD-NEXT:    frintx s6, s6
-; CHECK-i32-SD-NEXT:    frintx s16, s17
-; CHECK-i32-SD-NEXT:    mov h17, v0.h[3]
-; CHECK-i32-SD-NEXT:    fcvt s0, h0
+; CHECK-i32-SD-NEXT:    fcvt s18, h18
+; CHECK-i32-SD-NEXT:    mov h24, v2.h[1]
+; CHECK-i32-SD-NEXT:    mov h25, v2.h[2]
+; CHECK-i32-SD-NEXT:    frintx s16, s16
+; CHECK-i32-SD-NEXT:    mov h26, v3.h[1]
+; CHECK-i32-SD-NEXT:    fcvtzs w11, s23
+; CHECK-i32-SD-NEXT:    mov h23, v1.h[2]
 ; CHECK-i32-SD-NEXT:    fcvt s19, h19
-; CHECK-i32-SD-NEXT:    frintx s5, s5
-; CHECK-i32-SD-NEXT:    fcvtzs w2, s18
-; CHECK-i32-SD-NEXT:    fcvt s18, h21
-; CHECK-i32-SD-NEXT:    fcvt s21, h2
-; CHECK-i32-SD-NEXT:    fcvtzs w18, s6
-; CHECK-i32-SD-NEXT:    frintx s6, s7
-; CHECK-i32-SD-NEXT:    fcvt s7, h20
-; CHECK-i32-SD-NEXT:    fcvtzs w16, s16
-; CHECK-i32-SD-NEXT:    fcvt s16, h17
-; CHECK-i32-SD-NEXT:    fcvt s17, h1
-; CHECK-i32-SD-NEXT:    frintx s0, s0
-; CHECK-i32-SD-NEXT:    fcvtzs w3, s5
-; CHECK-i32-SD-NEXT:    frintx s5, s19
-; CHECK-i32-SD-NEXT:    fcvt s19, h22
-; CHECK-i32-SD-NEXT:    mov h1, v1.h[3]
-; CHECK-i32-SD-NEXT:    fcvtzs w1, s6
-; CHECK-i32-SD-NEXT:    frintx s6, s7
-; CHECK-i32-SD-NEXT:    mov h7, v2.h[1]
+; CHECK-i32-SD-NEXT:    fcvt s20, h20
 ; CHECK-i32-SD-NEXT:    frintx s17, s17
-; CHECK-i32-SD-NEXT:    frintx s20, s16
-; CHECK-i32-SD-NEXT:    fmov s16, w12
-; CHECK-i32-SD-NEXT:    fcvtzs w4, s0
-; CHECK-i32-SD-NEXT:    frintx s0, s18
-; CHECK-i32-SD-NEXT:    fcvtzs w5, s5
-; CHECK-i32-SD-NEXT:    frintx s5, s19
-; CHECK-i32-SD-NEXT:    frintx s18, s21
-; CHECK-i32-SD-NEXT:    fcvt s19, h3
-; CHECK-i32-SD-NEXT:    fcvtzs w12, s6
-; CHECK-i32-SD-NEXT:    fcvt s6, h7
-; CHECK-i32-SD-NEXT:    mov h7, v3.h[1]
-; CHECK-i32-SD-NEXT:    fcvtzs w6, s17
-; CHECK-i32-SD-NEXT:    fmov s17, w13
-; CHECK-i32-SD-NEXT:    mov v16.s[1], w17
-; CHECK-i32-SD-NEXT:    fcvtzs w17, s20
-; CHECK-i32-SD-NEXT:    fcvtzs w7, s0
-; CHECK-i32-SD-NEXT:    mov h0, v2.h[2]
-; CHECK-i32-SD-NEXT:    mov h20, v3.h[2]
-; CHECK-i32-SD-NEXT:    fcvtzs w13, s5
-; CHECK-i32-SD-NEXT:    fmov s5, w15
-; CHECK-i32-SD-NEXT:    frintx s6, s6
-; CHECK-i32-SD-NEXT:    fcvt s7, h7
-; CHECK-i32-SD-NEXT:    mov v17.s[1], w14
-; CHECK-i32-SD-NEXT:    fcvtzs w14, s18
-; CHECK-i32-SD-NEXT:    frintx s18, s19
-; CHECK-i32-SD-NEXT:    mov h2, v2.h[3]
+; CHECK-i32-SD-NEXT:    frintx s21, s21
+; CHECK-i32-SD-NEXT:    frintx s22, s22
+; CHECK-i32-SD-NEXT:    frintx s18, s18
+; CHECK-i32-SD-NEXT:    fcvt s24, h24
+; CHECK-i32-SD-NEXT:    fcvt s25, h25
+; CHECK-i32-SD-NEXT:    fcvtzs w16, s16
+; CHECK-i32-SD-NEXT:    fcvt s16, h26
+; CHECK-i32-SD-NEXT:    fcvt s26, h27
+; CHECK-i32-SD-NEXT:    fcvt s23, h23
+; CHECK-i32-SD-NEXT:    frintx s19, s19
+; CHECK-i32-SD-NEXT:    frintx s20, s20
+; CHECK-i32-SD-NEXT:    fcvtzs w2, s17
+; CHECK-i32-SD-NEXT:    fcvtzs w1, s21
 ; CHECK-i32-SD-NEXT:    fcvt s0, h0
-; CHECK-i32-SD-NEXT:    mov h3, v3.h[3]
-; CHECK-i32-SD-NEXT:    mov v5.s[1], w0
-; CHECK-i32-SD-NEXT:    fcvt s19, h20
-; CHECK-i32-SD-NEXT:    fcvt s1, h1
-; CHECK-i32-SD-NEXT:    mov v16.s[2], w9
-; CHECK-i32-SD-NEXT:    fcvtzs w15, s6
-; CHECK-i32-SD-NEXT:    frintx s6, s7
-; CHECK-i32-SD-NEXT:    fmov s7, w2
 ; CHECK-i32-SD-NEXT:    fcvtzs w0, s18
-; CHECK-i32-SD-NEXT:    fcvt s20, h2
-; CHECK-i32-SD-NEXT:    fcvt s18, h4
-; CHECK-i32-SD-NEXT:    frintx s21, s0
-; CHECK-i32-SD-NEXT:    fcvt s3, h3
-; CHECK-i32-SD-NEXT:    fmov s0, w4
-; CHECK-i32-SD-NEXT:    frintx s19, s19
-; CHECK-i32-SD-NEXT:    fmov s2, w6
-; CHECK-i32-SD-NEXT:    fmov s4, w14
-; CHECK-i32-SD-NEXT:    fcvtzs w2, s6
-; CHECK-i32-SD-NEXT:    mov v7.s[1], w3
-; CHECK-i32-SD-NEXT:    frintx s1, s1
-; CHECK-i32-SD-NEXT:    fmov s6, w0
-; CHECK-i32-SD-NEXT:    mov v0.s[1], w5
+; CHECK-i32-SD-NEXT:    frintx s17, s24
+; CHECK-i32-SD-NEXT:    frintx s18, s25
+; CHECK-i32-SD-NEXT:    frintx s16, s16
+; CHECK-i32-SD-NEXT:    fcvtzs w18, s22
+; CHECK-i32-SD-NEXT:    frintx s6, s6
+; CHECK-i32-SD-NEXT:    frintx s21, s23
+; CHECK-i32-SD-NEXT:    fcvtzs w17, s19
+; CHECK-i32-SD-NEXT:    fcvtzs w3, s20
+; CHECK-i32-SD-NEXT:    frintx s19, s26
+; CHECK-i32-SD-NEXT:    fcvt s20, h7
+; CHECK-i32-SD-NEXT:    frintx s0, s0
+; CHECK-i32-SD-NEXT:    fcvtzs w5, s17
+; CHECK-i32-SD-NEXT:    fcvt s17, h1
+; CHECK-i32-SD-NEXT:    fcvtzs w6, s18
+; CHECK-i32-SD-NEXT:    fcvt s18, h2
+; CHECK-i32-SD-NEXT:    fcvtzs w7, s16
+; CHECK-i32-SD-NEXT:    fcvt s16, h3
+; CHECK-i32-SD-NEXT:    fcvtzs w4, s21
+; CHECK-i32-SD-NEXT:    mov h2, v2.h[3]
+; CHECK-i32-SD-NEXT:    mov h21, v7.h[3]
+; CHECK-i32-SD-NEXT:    fcvtzs w19, s19
+; CHECK-i32-SD-NEXT:    mov h19, v1.h[3]
 ; CHECK-i32-SD-NEXT:    frintx s20, s20
-; CHECK-i32-SD-NEXT:    mov v2.s[1], w7
-; CHECK-i32-SD-NEXT:    fcvtzs w3, s21
-; CHECK-i32-SD-NEXT:    mov v4.s[1], w15
-; CHECK-i32-SD-NEXT:    fcvtzs w14, s19
+; CHECK-i32-SD-NEXT:    frintx s17, s17
+; CHECK-i32-SD-NEXT:    mov h22, v3.h[3]
+; CHECK-i32-SD-NEXT:    fcvtzs s1, s4
 ; CHECK-i32-SD-NEXT:    frintx s18, s18
-; CHECK-i32-SD-NEXT:    frintx s3, s3
-; CHECK-i32-SD-NEXT:    mov v6.s[1], w2
-; CHECK-i32-SD-NEXT:    mov v17.s[2], w11
-; CHECK-i32-SD-NEXT:    fcvtzs w15, s1
-; CHECK-i32-SD-NEXT:    fcvtzs w0, s20
-; CHECK-i32-SD-NEXT:    mov v5.s[2], w18
-; CHECK-i32-SD-NEXT:    mov v0.s[2], w12
-; CHECK-i32-SD-NEXT:    mov v7.s[2], w1
-; CHECK-i32-SD-NEXT:    mov v2.s[2], w13
-; CHECK-i32-SD-NEXT:    mov v4.s[2], w3
-; CHECK-i32-SD-NEXT:    fcvtzs w9, s18
-; CHECK-i32-SD-NEXT:    fcvtzs w11, s3
-; CHECK-i32-SD-NEXT:    mov v16.s[3], w8
-; CHECK-i32-SD-NEXT:    mov v6.s[2], w14
-; CHECK-i32-SD-NEXT:    mov v17.s[3], w10
+; CHECK-i32-SD-NEXT:    frintx s16, s16
+; CHECK-i32-SD-NEXT:    fcvtzs s3, s5
+; CHECK-i32-SD-NEXT:    fcvt s23, h2
+; CHECK-i32-SD-NEXT:    fcvtzs s5, s6
+; CHECK-i32-SD-NEXT:    fcvtzs s0, s0
+; CHECK-i32-SD-NEXT:    fcvt s19, h19
+; CHECK-i32-SD-NEXT:    fcvtzs s7, s20
+; CHECK-i32-SD-NEXT:    fcvtzs s2, s17
+; CHECK-i32-SD-NEXT:    fcvt s17, h21
+; CHECK-i32-SD-NEXT:    mov v1.s[1], w11
+; CHECK-i32-SD-NEXT:    fcvtzs s4, s18
+; CHECK-i32-SD-NEXT:    fcvtzs s6, s16
+; CHECK-i32-SD-NEXT:    fcvt s16, h22
+; CHECK-i32-SD-NEXT:    mov v3.s[1], w12
+; CHECK-i32-SD-NEXT:    mov v5.s[1], w15
+; CHECK-i32-SD-NEXT:    mov v0.s[1], w2
+; CHECK-i32-SD-NEXT:    frintx s18, s19
+; CHECK-i32-SD-NEXT:    frintx s19, s23
+; CHECK-i32-SD-NEXT:    mov v7.s[1], w1
+; CHECK-i32-SD-NEXT:    mov v2.s[1], w3
+; CHECK-i32-SD-NEXT:    frintx s17, s17
+; CHECK-i32-SD-NEXT:    mov v1.s[2], w9
+; CHECK-i32-SD-NEXT:    mov v4.s[1], w5
+; CHECK-i32-SD-NEXT:    mov v6.s[1], w7
+; CHECK-i32-SD-NEXT:    frintx s16, s16
+; CHECK-i32-SD-NEXT:    mov v3.s[2], w10
+; CHECK-i32-SD-NEXT:    mov v5.s[2], w14
+; CHECK-i32-SD-NEXT:    mov v0.s[2], w0
+; CHECK-i32-SD-NEXT:    fcvtzs w11, s18
+; CHECK-i32-SD-NEXT:    fcvtzs w12, s19
+; CHECK-i32-SD-NEXT:    mov v7.s[2], w18
+; CHECK-i32-SD-NEXT:    mov v2.s[2], w4
+; CHECK-i32-SD-NEXT:    fcvtzs w9, s17
+; CHECK-i32-SD-NEXT:    mov v1.s[3], w8
+; CHECK-i32-SD-NEXT:    mov v4.s[2], w6
+; CHECK-i32-SD-NEXT:    mov v6.s[2], w19
+; CHECK-i32-SD-NEXT:    fcvtzs w10, s16
 ; CHECK-i32-SD-NEXT:    mov v0.s[3], w17
+; CHECK-i32-SD-NEXT:    mov v3.s[3], w13
 ; CHECK-i32-SD-NEXT:    mov v5.s[3], w16
-; CHECK-i32-SD-NEXT:    mov v2.s[3], w15
-; CHECK-i32-SD-NEXT:    mov v4.s[3], w0
+; CHECK-i32-SD-NEXT:    mov v2.s[3], w11
 ; CHECK-i32-SD-NEXT:    mov v7.s[3], w9
-; CHECK-i32-SD-NEXT:    mov v1.16b, v16.16b
-; CHECK-i32-SD-NEXT:    mov v6.s[3], w11
-; CHECK-i32-SD-NEXT:    mov v3.16b, v17.16b
+; CHECK-i32-SD-NEXT:    mov v4.s[3], w12
+; CHECK-i32-SD-NEXT:    mov v6.s[3], w10
+; CHECK-i32-SD-NEXT:    ldr x19, [sp], #16 // 8-byte Folded Reload
 ; CHECK-i32-SD-NEXT:    ret
 ;
 ; CHECK-i64-SD-LABEL: lrint_v32f16:
@@ -1326,18 +1302,11 @@ define <1 x iXLen> @lrint_v1f64(<1 x double> %x) nounwind {
 ; CHECK-i32-NEXT:    fmov s0, w8
 ; CHECK-i32-NEXT:    ret
 ;
-; CHECK-i64-SD-LABEL: lrint_v1f64:
-; CHECK-i64-SD:       // %bb.0:
-; CHECK-i64-SD-NEXT:    frintx d0, d0
-; CHECK-i64-SD-NEXT:    fcvtzs x8, d0
-; CHECK-i64-SD-NEXT:    fmov d0, x8
-; CHECK-i64-SD-NEXT:    ret
-;
-; CHECK-i64-GI-LABEL: lrint_v1f64:
-; CHECK-i64-GI:       // %bb.0:
-; CHECK-i64-GI-NEXT:    frintx d0, d0
-; CHECK-i64-GI-NEXT:    fcvtzs d0, d0
-; CHECK-i64-GI-NEXT:    ret
+; CHECK-i64-LABEL: lrint_v1f64:
+; CHECK-i64:       // %bb.0:
+; CHECK-i64-NEXT:    frintx d0, d0
+; CHECK-i64-NEXT:    fcvtzs d0, d0
+; CHECK-i64-NEXT:    ret
   %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f64(<1 x double> %x)
   ret <1 x iXLen> %a
 }
diff --git a/llvm/test/CodeGen/AArch64/zext.ll b/llvm/test/CodeGen/AArch64/zext.ll
index 7078d9b2586a8..292b7b28903ee 100644
--- a/llvm/test/CodeGen/AArch64/zext.ll
+++ b/llvm/test/CodeGen/AArch64/zext.ll
@@ -1127,41 +1127,41 @@ entry:
 define <16 x i64> @zext_v16i10_v16i64(<16 x i10> %a) {
 ; CHECK-SD-LABEL: zext_v16i10_v16i64:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    fmov s0, w2
-; CHECK-SD-NEXT:    fmov s1, w0
+; CHECK-SD-NEXT:    fmov s0, w6
+; CHECK-SD-NEXT:    fmov s1, w4
 ; CHECK-SD-NEXT:    ldr s2, [sp]
-; CHECK-SD-NEXT:    fmov s3, w4
-; CHECK-SD-NEXT:    fmov s4, w6
-; CHECK-SD-NEXT:    add x9, sp, #8
+; CHECK-SD-NEXT:    fmov s3, w2
+; CHECK-SD-NEXT:    fmov s4, w0
 ; CHECK-SD-NEXT:    ldr s5, [sp, #16]
 ; CHECK-SD-NEXT:    ldr s6, [sp, #32]
 ; CHECK-SD-NEXT:    ldr s7, [sp, #48]
-; CHECK-SD-NEXT:    mov v1.s[1], w1
-; CHECK-SD-NEXT:    mov v0.s[1], w3
-; CHECK-SD-NEXT:    ld1 { v2.s }[1], [x9]
-; CHECK-SD-NEXT:    mov v3.s[1], w5
-; CHECK-SD-NEXT:    mov v4.s[1], w7
+; CHECK-SD-NEXT:    add x8, sp, #8
+; CHECK-SD-NEXT:    mov v1.s[1], w5
+; CHECK-SD-NEXT:    mov v0.s[1], w7
 ; CHECK-SD-NEXT:    add x9, sp, #24
+; CHECK-SD-NEXT:    mov v4.s[1], w1
+; CHECK-SD-NEXT:    mov v3.s[1], w3
 ; CHECK-SD-NEXT:    add x10, sp, #40
 ; CHECK-SD-NEXT:    add x11, sp, #56
+; CHECK-SD-NEXT:    ld1 { v2.s }[1], [x8]
 ; CHECK-SD-NEXT:    ld1 { v5.s }[1], [x9]
 ; CHECK-SD-NEXT:    ld1 { v6.s }[1], [x10]
 ; CHECK-SD-NEXT:    ld1 { v7.s }[1], [x11]
 ; CHECK-SD-NEXT:    mov w8, #1023 // =0x3ff
-; CHECK-SD-NEXT:    ushll v1.2d, v1.2s, #0
 ; CHECK-SD-NEXT:    dup v16.2d, x8
-; CHECK-SD-NEXT:    ushll v17.2d, v0.2s, #0
-; CHECK-SD-NEXT:    ushll v3.2d, v3.2s, #0
+; CHECK-SD-NEXT:    ushll v17.2d, v1.2s, #0
+; CHECK-SD-NEXT:    ushll v18.2d, v0.2s, #0
 ; CHECK-SD-NEXT:    ushll v4.2d, v4.2s, #0
-; CHECK-SD-NEXT:    ushll v18.2d, v2.2s, #0
+; CHECK-SD-NEXT:    ushll v3.2d, v3.2s, #0
+; CHECK-SD-NEXT:    ushll v2.2d, v2.2s, #0
 ; CHECK-SD-NEXT:    ushll v5.2d, v5.2s, #0
 ; CHECK-SD-NEXT:    ushll v6.2d, v6.2s, #0
 ; CHECK-SD-NEXT:    ushll v7.2d, v7.2s, #0
-; CHECK-SD-NEXT:    and v0.16b, v1.16b, v16.16b
-; CHECK-SD-NEXT:    and v1.16b, v17.16b, v16.16b
-; CHECK-SD-NEXT:    and v2.16b, v3.16b, v16.16b
-; CHECK-SD-NEXT:    and v3.16b, v4.16b, v16.16b
-; CHECK-SD-NEXT:    and v4.16b, v18.16b, v16.16b
+; CHECK-SD-NEXT:    and v0.16b, v4.16b, v16.16b
+; CHECK-SD-NEXT:    and v1.16b, v3.16b, v16.16b
+; CHECK-SD-NEXT:    and v4.16b, v2.16b, v16.16b
+; CHECK-SD-NEXT:    and v2.16b, v17.16b, v16.16b
+; CHECK-SD-NEXT:    and v3.16b, v18.16b, v16.16b
 ; CHECK-SD-NEXT:    and v5.16b, v5.16b, v16.16b
 ; CHECK-SD-NEXT:    and v6.16b, v6.16b, v16.16b
 ; CHECK-SD-NEXT:    and v7.16b, v7.16b, v16.16b

>From 3a58355796ea5ebd1569284167a175fdab3bd0c5 Mon Sep 17 00:00:00 2001
From: Marian Lukac <Marian.Lukac at arm.com>
Date: Wed, 4 Mar 2026 12:30:55 +0000
Subject: [PATCH 2/2] Regenerate tests

---
 llvm/test/CodeGen/AArch64/bitcnt-i256.ll    | 32 ++++++------
 llvm/test/CodeGen/AArch64/select-bitcast.ll | 56 ++++++++++-----------
 2 files changed, 44 insertions(+), 44 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/bitcnt-i256.ll b/llvm/test/CodeGen/AArch64/bitcnt-i256.ll
index 0c5de61bbf649..3376f44a0e68f 100644
--- a/llvm/test/CodeGen/AArch64/bitcnt-i256.ll
+++ b/llvm/test/CodeGen/AArch64/bitcnt-i256.ll
@@ -60,17 +60,17 @@ define i64 @hamming_i256(i256 %a, i256 %b) nounwind {
 ; NEON-NEXT:    eor x8, x0, x4
 ; NEON-NEXT:    eor x10, x2, x6
 ; NEON-NEXT:    eor x9, x1, x5
-; NEON-NEXT:    fmov d0, x10
-; NEON-NEXT:    fmov d1, x8
-; NEON-NEXT:    eor x11, x3, x7
-; NEON-NEXT:    mov v0.d[1], x11
-; NEON-NEXT:    mov v1.d[1], x9
-; NEON-NEXT:    cnt v0.16b, v0.16b
+; NEON-NEXT:    fmov d0, x8
+; NEON-NEXT:    fmov d1, x10
+; NEON-NEXT:    eor x8, x3, x7
+; NEON-NEXT:    mov v1.d[1], x8
+; NEON-NEXT:    mov v0.d[1], x9
 ; NEON-NEXT:    cnt v1.16b, v1.16b
-; NEON-NEXT:    addv b0, v0.16b
+; NEON-NEXT:    cnt v0.16b, v0.16b
 ; NEON-NEXT:    addv b1, v1.16b
-; NEON-NEXT:    fmov x8, d0
-; NEON-NEXT:    fmov x9, d1
+; NEON-NEXT:    addv b0, v0.16b
+; NEON-NEXT:    fmov x8, d1
+; NEON-NEXT:    fmov x9, d0
 ; NEON-NEXT:    add x0, x9, x8
 ; NEON-NEXT:    ret
 ;
@@ -79,15 +79,15 @@ define i64 @hamming_i256(i256 %a, i256 %b) nounwind {
 ; SVE-NEXT:    eor x8, x0, x4
 ; SVE-NEXT:    eor x10, x2, x6
 ; SVE-NEXT:    eor x9, x1, x5
-; SVE-NEXT:    fmov d0, x10
-; SVE-NEXT:    fmov d1, x8
-; SVE-NEXT:    eor x11, x3, x7
+; SVE-NEXT:    fmov d0, x8
+; SVE-NEXT:    fmov d1, x10
+; SVE-NEXT:    eor x8, x3, x7
 ; SVE-NEXT:    ptrue p0.d
-; SVE-NEXT:    mov v0.d[1], x11
-; SVE-NEXT:    mov v1.d[1], x9
-; SVE-NEXT:    cnt z0.d, p0/m, z0.d
+; SVE-NEXT:    mov v1.d[1], x8
+; SVE-NEXT:    mov v0.d[1], x9
 ; SVE-NEXT:    cnt z1.d, p0/m, z1.d
-; SVE-NEXT:    add v0.2d, v1.2d, v0.2d
+; SVE-NEXT:    cnt z0.d, p0/m, z0.d
+; SVE-NEXT:    add v0.2d, v0.2d, v1.2d
 ; SVE-NEXT:    addp d0, v0.2d
 ; SVE-NEXT:    fmov x0, d0
 ; SVE-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/select-bitcast.ll b/llvm/test/CodeGen/AArch64/select-bitcast.ll
index 1028288ec0ec8..8a79182182ab4 100644
--- a/llvm/test/CodeGen/AArch64/select-bitcast.ll
+++ b/llvm/test/CodeGen/AArch64/select-bitcast.ll
@@ -424,42 +424,42 @@ define void @if_then_else64(ptr %out, i64 %mask, ptr %if_true, ptr %if_false) no
 ; CHECK-BE-LABEL: if_then_else64:
 ; CHECK-BE:       // %bb.0: // %start
 ; CHECK-BE-NEXT:    stp d11, d10, [sp, #-32]! // 16-byte Folded Spill
-; CHECK-BE-NEXT:    fmov d4, x1
+; CHECK-BE-NEXT:    fmov d5, x1
 ; CHECK-BE-NEXT:    add x9, x2, #224
 ; CHECK-BE-NEXT:    add x8, x2, #240
 ; CHECK-BE-NEXT:    ld1 { v1.4s }, [x9]
+; CHECK-BE-NEXT:    add x9, x2, #192
+; CHECK-BE-NEXT:    adrp x10, .LCPI3_3
+; CHECK-BE-NEXT:    add x10, x10, :lo12:.LCPI3_3
+; CHECK-BE-NEXT:    ld1 { v3.4s }, [x9]
 ; CHECK-BE-NEXT:    add x9, x2, #128
-; CHECK-BE-NEXT:    ld1 { v2.4s }, [x8]
+; CHECK-BE-NEXT:    rev64 v17.4s, v5.4s
 ; CHECK-BE-NEXT:    ld1 { v18.4s }, [x9]
 ; CHECK-BE-NEXT:    add x9, x2, #80
-; CHECK-BE-NEXT:    add x8, x2, #208
-; CHECK-BE-NEXT:    rev64 v17.4s, v4.4s
-; CHECK-BE-NEXT:    adrp x10, .LCPI3_3
-; CHECK-BE-NEXT:    add x10, x10, :lo12:.LCPI3_3
 ; CHECK-BE-NEXT:    ld1 { v19.4s }, [x9]
 ; CHECK-BE-NEXT:    adrp x9, .LCPI3_4
 ; CHECK-BE-NEXT:    add x9, x9, :lo12:.LCPI3_4
-; CHECK-BE-NEXT:    ld1 { v0.4s }, [x8]
-; CHECK-BE-NEXT:    add x8, x2, #192
+; CHECK-BE-NEXT:    ld1 { v2.4s }, [x8]
+; CHECK-BE-NEXT:    add x8, x2, #208
 ; CHECK-BE-NEXT:    ld1 { v24.4s }, [x10]
 ; CHECK-BE-NEXT:    ld1 { v28.4s }, [x9]
 ; CHECK-BE-NEXT:    add x9, x3, #96
-; CHECK-BE-NEXT:    ld1 { v3.4s }, [x8]
-; CHECK-BE-NEXT:    dup v6.4s, v17.s[0]
+; CHECK-BE-NEXT:    ld1 { v0.4s }, [x8]
+; CHECK-BE-NEXT:    dup v7.4s, v17.s[0]
 ; CHECK-BE-NEXT:    add x8, x2, #176
 ; CHECK-BE-NEXT:    ld1 { v23.4s }, [x9]
 ; CHECK-BE-NEXT:    add x9, x3, #48
-; CHECK-BE-NEXT:    ld1 { v7.4s }, [x8]
+; CHECK-BE-NEXT:    ld1 { v4.4s }, [x8]
 ; CHECK-BE-NEXT:    add x8, x2, #160
 ; CHECK-BE-NEXT:    ld1 { v29.4s }, [x9]
 ; CHECK-BE-NEXT:    adrp x9, .LCPI3_5
 ; CHECK-BE-NEXT:    add x9, x9, :lo12:.LCPI3_5
-; CHECK-BE-NEXT:    ld1 { v4.4s }, [x8]
+; CHECK-BE-NEXT:    ld1 { v5.4s }, [x8]
 ; CHECK-BE-NEXT:    add x8, x2, #144
-; CHECK-BE-NEXT:    and v16.16b, v6.16b, v24.16b
-; CHECK-BE-NEXT:    and v30.16b, v6.16b, v28.16b
+; CHECK-BE-NEXT:    and v16.16b, v7.16b, v24.16b
+; CHECK-BE-NEXT:    and v30.16b, v7.16b, v28.16b
 ; CHECK-BE-NEXT:    ld1 { v25.4s }, [x9]
-; CHECK-BE-NEXT:    ld1 { v5.4s }, [x8]
+; CHECK-BE-NEXT:    ld1 { v6.4s }, [x8]
 ; CHECK-BE-NEXT:    add x8, x2, #96
 ; CHECK-BE-NEXT:    adrp x9, .LCPI3_6
 ; CHECK-BE-NEXT:    add x9, x9, :lo12:.LCPI3_6
@@ -469,7 +469,7 @@ define void @if_then_else64(ptr %out, i64 %mask, ptr %if_true, ptr %if_false) no
 ; CHECK-BE-NEXT:    stp d9, d8, [sp, #16] // 16-byte Folded Spill
 ; CHECK-BE-NEXT:    ld1 { v20.4s }, [x8]
 ; CHECK-BE-NEXT:    cmeq v16.4s, v16.4s, #0
-; CHECK-BE-NEXT:    and v8.16b, v6.16b, v25.16b
+; CHECK-BE-NEXT:    and v8.16b, v7.16b, v25.16b
 ; CHECK-BE-NEXT:    cmeq v30.4s, v30.4s, #0
 ; CHECK-BE-NEXT:    add x8, x3, #80
 ; CHECK-BE-NEXT:    adrp x9, .LCPI3_7
@@ -480,7 +480,7 @@ define void @if_then_else64(ptr %out, i64 %mask, ptr %if_true, ptr %if_false) no
 ; CHECK-BE-NEXT:    ld1 { v31.4s }, [x9]
 ; CHECK-BE-NEXT:    adrp x9, .LCPI3_0
 ; CHECK-BE-NEXT:    add x9, x9, :lo12:.LCPI3_0
-; CHECK-BE-NEXT:    and v10.16b, v6.16b, v27.16b
+; CHECK-BE-NEXT:    and v10.16b, v7.16b, v27.16b
 ; CHECK-BE-NEXT:    ld1 { v21.4s }, [x10]
 ; CHECK-BE-NEXT:    ld1 { v9.4s }, [x8]
 ; CHECK-BE-NEXT:    bsl v16.16b, v29.16b, v20.16b
@@ -489,7 +489,7 @@ define void @if_then_else64(ptr %out, i64 %mask, ptr %if_true, ptr %if_false) no
 ; CHECK-BE-NEXT:    dup v29.4s, v17.s[1]
 ; CHECK-BE-NEXT:    mov v17.16b, v30.16b
 ; CHECK-BE-NEXT:    add x8, x2, #112
-; CHECK-BE-NEXT:    and v11.16b, v6.16b, v31.16b
+; CHECK-BE-NEXT:    and v11.16b, v7.16b, v31.16b
 ; CHECK-BE-NEXT:    cmeq v30.4s, v10.4s, #0
 ; CHECK-BE-NEXT:    add x9, x3, #112
 ; CHECK-BE-NEXT:    bit v19.16b, v26.16b, v8.16b
@@ -522,7 +522,7 @@ define void @if_then_else64(ptr %out, i64 %mask, ptr %if_true, ptr %if_false) no
 ; CHECK-BE-NEXT:    ld1 { v10.4s }, [x8]
 ; CHECK-BE-NEXT:    add x8, x3, #192
 ; CHECK-BE-NEXT:    cmeq v28.4s, v28.4s, #0
-; CHECK-BE-NEXT:    bit v7.16b, v9.16b, v30.16b
+; CHECK-BE-NEXT:    bit v4.16b, v9.16b, v30.16b
 ; CHECK-BE-NEXT:    ld1 { v30.4s }, [x8]
 ; CHECK-BE-NEXT:    add x8, x3, #208
 ; CHECK-BE-NEXT:    ld1 { v9.4s }, [x8]
@@ -562,33 +562,33 @@ define void @if_then_else64(ptr %out, i64 %mask, ptr %if_true, ptr %if_false) no
 ; CHECK-BE-NEXT:    add x8, x0, #208
 ; CHECK-BE-NEXT:    st1 { v0.4s }, [x8]
 ; CHECK-BE-NEXT:    add x8, x0, #192
-; CHECK-BE-NEXT:    and v0.16b, v6.16b, v28.16b
-; CHECK-BE-NEXT:    bsl v2.16b, v31.16b, v4.16b
-; CHECK-BE-NEXT:    bsl v1.16b, v27.16b, v5.16b
+; CHECK-BE-NEXT:    and v0.16b, v7.16b, v28.16b
+; CHECK-BE-NEXT:    bsl v2.16b, v31.16b, v5.16b
+; CHECK-BE-NEXT:    bsl v1.16b, v27.16b, v6.16b
 ; CHECK-BE-NEXT:    st1 { v3.4s }, [x8]
 ; CHECK-BE-NEXT:    add x8, x0, #176
-; CHECK-BE-NEXT:    and v3.16b, v6.16b, v30.16b
-; CHECK-BE-NEXT:    and v4.16b, v6.16b, v20.16b
-; CHECK-BE-NEXT:    st1 { v7.4s }, [x8]
+; CHECK-BE-NEXT:    and v3.16b, v7.16b, v30.16b
+; CHECK-BE-NEXT:    st1 { v4.4s }, [x8]
 ; CHECK-BE-NEXT:    add x8, x0, #160
+; CHECK-BE-NEXT:    and v4.16b, v7.16b, v20.16b
 ; CHECK-BE-NEXT:    cmeq v0.4s, v0.4s, #0
 ; CHECK-BE-NEXT:    st1 { v2.4s }, [x8]
 ; CHECK-BE-NEXT:    add x8, x0, #144
-; CHECK-BE-NEXT:    cmeq v2.4s, v4.4s, #0
 ; CHECK-BE-NEXT:    st1 { v1.4s }, [x8]
 ; CHECK-BE-NEXT:    add x8, x0, #128
 ; CHECK-BE-NEXT:    cmeq v1.4s, v3.4s, #0
 ; CHECK-BE-NEXT:    st1 { v18.4s }, [x8]
 ; CHECK-BE-NEXT:    add x8, x0, #112
-; CHECK-BE-NEXT:    bsl v0.16b, v10.16b, v21.16b
+; CHECK-BE-NEXT:    cmeq v2.4s, v4.4s, #0
 ; CHECK-BE-NEXT:    st1 { v26.4s }, [x8]
 ; CHECK-BE-NEXT:    add x8, x0, #96
-; CHECK-BE-NEXT:    bsl v2.16b, v9.16b, v24.16b
+; CHECK-BE-NEXT:    bsl v0.16b, v10.16b, v21.16b
 ; CHECK-BE-NEXT:    st1 { v23.4s }, [x8]
 ; CHECK-BE-NEXT:    add x8, x0, #80
 ; CHECK-BE-NEXT:    bsl v1.16b, v25.16b, v22.16b
 ; CHECK-BE-NEXT:    st1 { v19.4s }, [x8]
 ; CHECK-BE-NEXT:    add x8, x0, #64
+; CHECK-BE-NEXT:    bsl v2.16b, v9.16b, v24.16b
 ; CHECK-BE-NEXT:    st1 { v17.4s }, [x8]
 ; CHECK-BE-NEXT:    add x8, x0, #48
 ; CHECK-BE-NEXT:    st1 { v16.4s }, [x8]



More information about the llvm-commits mailing list