[llvm] 61faf7d - [AArch64][GlobalISel] Use GPR for illegal fconstants and extend < 32 bit GPR constants to 32 bits (#178692)

via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 2 02:10:49 PST 2026


Author: Ryan Cowan
Date: 2026-03-02T10:10:44Z
New Revision: 61faf7d3db72dcfe8d19f9bd926133be1be0f62b

URL: https://github.com/llvm/llvm-project/commit/61faf7d3db72dcfe8d19f9bd926133be1be0f62b
DIFF: https://github.com/llvm/llvm-project/commit/61faf7d3db72dcfe8d19f9bd926133be1be0f62b.diff

LOG: [AArch64][GlobalISel] Use GPR for illegal fconstants and extend < 32 bit GPR constants to 32 bits (#178692)

In a similar fashion to
https://github.com/llvm/llvm-project/pull/175810, this PR aims to
simplify the handling of constants by extending smaller than 32 bit
integer constants to 32 bits in regbankselect. This is only done for
constants that were going to be assigned to a GPR and aims to simplify
the selection of these.

In addition, fconstants that would have led to a constant pool load
because they are illegal as immediates (except for 128 bit) are now
converted to a constant in GPR registers before being copied to the FPR.
This hopefully reduces cache pressure and simplifies selection.

I have tried to rely on tablegen as much as possible for selecting the
instructions for this.

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/lib/Target/AArch64/AArch64ISelLowering.h
    llvm/lib/Target/AArch64/AArch64InstrInfo.td
    llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp
    llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
    llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
    llvm/test/CodeGen/AArch64/GlobalISel/inttoptr_add.ll
    llvm/test/CodeGen/AArch64/GlobalISel/preselect-process-phis.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-build-vector.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-constant.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-dup.mir
    llvm/test/CodeGen/AArch64/aarch64-mops.ll
    llvm/test/CodeGen/AArch64/arm64-fp-imm-size.ll
    llvm/test/CodeGen/AArch64/arm64-fp-imm.ll
    llvm/test/CodeGen/AArch64/arm64-fp128.ll
    llvm/test/CodeGen/AArch64/arm64-vhadd.ll
    llvm/test/CodeGen/AArch64/dup.ll
    llvm/test/CodeGen/AArch64/fcvt_combine.ll
    llvm/test/CodeGen/AArch64/fpimm.ll
    llvm/test/CodeGen/AArch64/fptoui-sat-scalar.ll
    llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
    llvm/test/CodeGen/AArch64/frem-power2.ll
    llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
    llvm/test/CodeGen/AArch64/neon-extadd-extract.ll
    llvm/test/CodeGen/AArch64/rem-by-const.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index dc5756f950cc9..01e07a70aaaf4 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -13188,9 +13188,8 @@ bool AArch64TargetLowering::isOffsetFoldingLegal(
   return false;
 }
 
-bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
-                                         bool OptForSize) const {
-  bool IsLegal = false;
+bool AArch64TargetLowering::isFPImmLegalAsFMov(const APFloat &Imm,
+                                               EVT VT) const {
   // We can materialize #0.0 as fmov $Rd, XZR for 64-bit, 32-bit cases, and
   // 16-bit case when target has full fp16 support.
   // We encode bf16 bit patterns as if they were fp16. This results in very
@@ -13200,14 +13199,24 @@ bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
   // FP16 1.9375 which shares the same bit pattern as BF16 1.5.
   // FIXME: We should be able to handle f128 as well with a clever lowering.
   const APInt ImmInt = Imm.bitcastToAPInt();
+
   if (VT == MVT::f64)
-    IsLegal = AArch64_AM::getFP64Imm(ImmInt) != -1 || Imm.isPosZero();
-  else if (VT == MVT::f32)
-    IsLegal = AArch64_AM::getFP32Imm(ImmInt) != -1 || Imm.isPosZero();
-  else if (VT == MVT::f16 || VT == MVT::bf16)
-    IsLegal =
-        (Subtarget->hasFullFP16() && AArch64_AM::getFP16Imm(ImmInt) != -1) ||
-        Imm.isPosZero();
+    return AArch64_AM::getFP64Imm(ImmInt) != -1 || Imm.isPosZero();
+
+  if (VT == MVT::f32)
+    return AArch64_AM::getFP32Imm(ImmInt) != -1 || Imm.isPosZero();
+
+  if (VT == MVT::f16 || VT == MVT::bf16)
+    return (Subtarget->hasFullFP16() && AArch64_AM::getFP16Imm(ImmInt) != -1) ||
+           Imm.isPosZero();
+
+  return false;
+}
+
+bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
+                                         bool OptForSize) const {
+  bool IsLegal = isFPImmLegalAsFMov(Imm, VT);
+  const APInt ImmInt = Imm.bitcastToAPInt();
 
   // If we can not materialize in immediate field for fmov, check if the
   // value can be encoded as the immediate operand of a logical instruction.
@@ -13219,7 +13228,8 @@ bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
     // however the mov+fmov sequence is always better because of the reduced
     // cache pressure. The timings are still the same if you consider
     // movw+movk+fmov vs. adrp+ldr (it's one instruction longer, but the
-    // movw+movk is fused). So we limit up to 2 instrdduction at most.
+    // movw+movk is fused). So by default we limit up to 2 instructions
+    // or 4 with hasFuseLiterals.
     SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
     AArch64_IMM::expandMOVImm(ImmInt.getZExtValue(), VT.getSizeInBits(), Insn);
     assert(Insn.size() <= 4 &&

diff  --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index b1df977d43fcf..d8b4d98b921fa 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -144,6 +144,8 @@ class AArch64TargetLowering : public TargetLowering {
 
   bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
 
+  bool isFPImmLegalAsFMov(const APFloat &Imm, EVT VT) const;
+
   bool isFPImmLegal(const APFloat &Imm, EVT VT,
                     bool ForCodeSize) const override;
 

diff  --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index ace85b04595b8..21c0b0502d0d1 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -8660,6 +8660,8 @@ def MOVIv2d_ns   : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1110, V128,
                    [(set (v2i64 V128:$Rd), (AArch64movi_edit imm0_255:$imm8))]>;
 
 let Predicates = [HasNEON] in {
+def : Pat<(f128 fpimm0), (f128 (MOVIv2d_ns (i32 0)))>;
+
 def : Pat<(v2i64 immAllZerosV), (MOVIv2d_ns (i32 0))>;
 def : Pat<(v4i32 immAllZerosV), (MOVIv2d_ns (i32 0))>;
 def : Pat<(v8i16 immAllZerosV), (MOVIv2d_ns (i32 0))>;

diff  --git a/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp b/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp
index d51466c623347..74cb5e9bb0729 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp
@@ -23,8 +23,8 @@ AArch64GISelUtils::getAArch64VectorSplat(const MachineInstr &MI,
   if (MI.getOpcode() != AArch64::G_DUP)
     return std::nullopt;
   Register Src = MI.getOperand(1).getReg();
-  if (auto ValAndVReg =
-          getAnyConstantVRegValWithLookThrough(MI.getOperand(1).getReg(), MRI))
+  if (auto ValAndVReg = getAnyConstantVRegValWithLookThrough(
+          Src, MRI, /*LookThroughInstrs=*/true, /*LookThroughAnyExt=*/true))
     return RegOrConstant(ValAndVReg->Value.getSExtValue());
   return RegOrConstant(Src);
 }

diff  --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index 32c91831d9fb7..734948d55766b 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -2131,6 +2131,18 @@ bool AArch64InstructionSelector::preISelLower(MachineInstr &I) {
   MachineRegisterInfo &MRI = MF.getRegInfo();
 
   switch (I.getOpcode()) {
+  case TargetOpcode::G_CONSTANT: {
+    Register DefReg = I.getOperand(0).getReg();
+    const LLT DefTy = MRI.getType(DefReg);
+    if (!DefTy.isPointer())
+      return false;
+    const unsigned PtrSize = DefTy.getSizeInBits();
+    if (PtrSize != 32 && PtrSize != 64)
+      return false;
+    // Convert pointer typed constants to integers so TableGen can select.
+    MRI.setType(DefReg, LLT::scalar(PtrSize));
+    return true;
+  }
   case TargetOpcode::G_STORE: {
     bool Changed = contractCrossBankCopyIntoStore(I, MRI);
     MachineOperand &SrcOp = I.getOperand(0);
@@ -2355,7 +2367,8 @@ bool AArch64InstructionSelector::earlySelect(MachineInstr &I) {
     // Before selecting a DUP instruction, check if it is better selected as a
     // MOV or load from a constant pool.
     Register Src = I.getOperand(1).getReg();
-    auto ValAndVReg = getAnyConstantVRegValWithLookThrough(Src, MRI);
+    auto ValAndVReg = getAnyConstantVRegValWithLookThrough(
+        Src, MRI, /*LookThroughInstrs=*/true, /*LookThroughAnyExt=*/true);
     if (!ValAndVReg)
       return false;
     LLVMContext &Ctx = MF.getFunction().getContext();
@@ -2679,118 +2692,60 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
     return true;
   }
 
-  case TargetOpcode::G_FCONSTANT:
-  case TargetOpcode::G_CONSTANT: {
-    const bool isFP = Opcode == TargetOpcode::G_FCONSTANT;
-
-    const LLT s8 = LLT::scalar(8);
-    const LLT s16 = LLT::scalar(16);
-    const LLT s32 = LLT::scalar(32);
-    const LLT s64 = LLT::scalar(64);
-    const LLT s128 = LLT::scalar(128);
-    const LLT p0 = LLT::pointer(0, 64);
-
+  case TargetOpcode::G_FCONSTANT: {
     const Register DefReg = I.getOperand(0).getReg();
     const LLT DefTy = MRI.getType(DefReg);
     const unsigned DefSize = DefTy.getSizeInBits();
     const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
 
-    // FIXME: Redundant check, but even less readable when factored out.
-    if (isFP) {
-      if (Ty != s16 && Ty != s32 && Ty != s64 && Ty != s128) {
-        LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
-                          << " constant, expected: " << s16 << " or " << s32
-                          << " or " << s64 << " or " << s128 << '\n');
-        return false;
-      }
-
-      if (RB.getID() != AArch64::FPRRegBankID) {
-        LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
-                          << " constant on bank: " << RB
-                          << ", expected: FPR\n");
-        return false;
-      }
-
-      // The case when we have 0.0 is covered by tablegen. Reject it here so we
-      // can be sure tablegen works correctly and isn't rescued by this code.
-      // 0.0 is not covered by tablegen for FP128. So we will handle this
-      // scenario in the code here.
-      if (DefSize != 128 && I.getOperand(1).getFPImm()->isExactlyValue(0.0))
-        return false;
-    } else {
-      // s32 and s64 are covered by tablegen.
-      if (Ty != p0 && Ty != s8 && Ty != s16) {
-        LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
-                          << " constant, expected: " << s32 << ", " << s64
-                          << ", or " << p0 << '\n');
-        return false;
-      }
-
-      if (RB.getID() != AArch64::GPRRegBankID) {
-        LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
-                          << " constant on bank: " << RB
-                          << ", expected: GPR\n");
+    const TargetRegisterClass &FPRRC = *getRegClassForTypeOnBank(DefTy, RB);
+    // For 16, 64, and 128b values, emit a constant pool load.
+    switch (DefSize) {
+    default:
+      llvm_unreachable("Unexpected destination size for G_FCONSTANT?");
+    case 32:
+    case 64: {
+      bool OptForSize = shouldOptForSize(&MF);
+      const auto &TLI = MF.getSubtarget().getTargetLowering();
+      // If TLI says that this fpimm is illegal, then we'll expand to a
+      // constant pool load.
+      if (TLI->isFPImmLegal(I.getOperand(1).getFPImm()->getValueAPF(),
+                            EVT::getFloatingPointVT(DefSize), OptForSize))
+        break;
+      [[fallthrough]];
+    }
+    case 16:
+    case 128: {
+      auto *FPImm = I.getOperand(1).getFPImm();
+      auto *LoadMI = emitLoadFromConstantPool(FPImm, MIB);
+      if (!LoadMI) {
+        LLVM_DEBUG(dbgs() << "Failed to load double constant pool entry\n");
         return false;
       }
+      MIB.buildCopy({DefReg}, {LoadMI->getOperand(0).getReg()});
+      I.eraseFromParent();
+      return RBI.constrainGenericRegister(DefReg, FPRRC, MRI);
+    }
     }
 
-    if (isFP) {
-      const TargetRegisterClass &FPRRC = *getRegClassForTypeOnBank(DefTy, RB);
-      // For 16, 64, and 128b values, emit a constant pool load.
-      switch (DefSize) {
-      default:
-        llvm_unreachable("Unexpected destination size for G_FCONSTANT?");
-      case 32:
-      case 64: {
-        bool OptForSize = shouldOptForSize(&MF);
-        const auto &TLI = MF.getSubtarget().getTargetLowering();
-        // If TLI says that this fpimm is illegal, then we'll expand to a
-        // constant pool load.
-        if (TLI->isFPImmLegal(I.getOperand(1).getFPImm()->getValueAPF(),
-                              EVT::getFloatingPointVT(DefSize), OptForSize))
-          break;
-        [[fallthrough]];
-      }
-      case 16:
-      case 128: {
-        auto *FPImm = I.getOperand(1).getFPImm();
-        auto *LoadMI = emitLoadFromConstantPool(FPImm, MIB);
-        if (!LoadMI) {
-          LLVM_DEBUG(dbgs() << "Failed to load double constant pool entry\n");
-          return false;
-        }
-        MIB.buildCopy({DefReg}, {LoadMI->getOperand(0).getReg()});
-        I.eraseFromParent();
-        return RBI.constrainGenericRegister(DefReg, FPRRC, MRI);
-      }
-      }
-
-      assert((DefSize == 32 || DefSize == 64) && "Unexpected const def size");
-      // Either emit a FMOV, or emit a copy to emit a normal mov.
-      const Register DefGPRReg = MRI.createVirtualRegister(
-          DefSize == 32 ? &AArch64::GPR32RegClass : &AArch64::GPR64RegClass);
-      MachineOperand &RegOp = I.getOperand(0);
-      RegOp.setReg(DefGPRReg);
-      MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator()));
-      MIB.buildCopy({DefReg}, {DefGPRReg});
-
-      if (!RBI.constrainGenericRegister(DefReg, FPRRC, MRI)) {
-        LLVM_DEBUG(dbgs() << "Failed to constrain G_FCONSTANT def operand\n");
-        return false;
-      }
+    assert((DefSize == 32 || DefSize == 64) && "Unexpected const def size");
+    // Either emit a FMOV, or emit a copy to emit a normal mov.
+    const Register DefGPRReg = MRI.createVirtualRegister(
+        DefSize == 32 ? &AArch64::GPR32RegClass : &AArch64::GPR64RegClass);
+    MachineOperand &RegOp = I.getOperand(0);
+    RegOp.setReg(DefGPRReg);
+    MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator()));
+    MIB.buildCopy({DefReg}, {DefGPRReg});
 
-      MachineOperand &ImmOp = I.getOperand(1);
-      // FIXME: Is going through int64_t always correct?
-      ImmOp.ChangeToImmediate(
-          ImmOp.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
-    } else if (I.getOperand(1).isCImm()) {
-      uint64_t Val = I.getOperand(1).getCImm()->getZExtValue();
-      I.getOperand(1).ChangeToImmediate(Val);
-    } else if (I.getOperand(1).isImm()) {
-      uint64_t Val = I.getOperand(1).getImm();
-      I.getOperand(1).ChangeToImmediate(Val);
+    if (!RBI.constrainGenericRegister(DefReg, FPRRC, MRI)) {
+      LLVM_DEBUG(dbgs() << "Failed to constrain G_FCONSTANT def operand\n");
+      return false;
     }
 
+    MachineOperand &ImmOp = I.getOperand(1);
+    ImmOp.ChangeToImmediate(
+        ImmOp.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
+
     const unsigned MovOpc =
         DefSize == 64 ? AArch64::MOVi64imm : AArch64::MOVi32imm;
     I.setDesc(TII.get(MovOpc));
@@ -5819,18 +5774,26 @@ bool AArch64InstructionSelector::tryOptConstantBuildVec(
   // generate a constant pool load instead of a vector insert sequence.
   SmallVector<Constant *, 16> Csts;
   for (unsigned Idx = 1; Idx < I.getNumOperands(); ++Idx) {
-    // Try to find G_CONSTANT or G_FCONSTANT
-    auto *OpMI =
-        getOpcodeDef(TargetOpcode::G_CONSTANT, I.getOperand(Idx).getReg(), MRI);
-    if (OpMI)
-      Csts.emplace_back(
-          const_cast<ConstantInt *>(OpMI->getOperand(1).getCImm()));
-    else if ((OpMI = getOpcodeDef(TargetOpcode::G_FCONSTANT,
-                                  I.getOperand(Idx).getReg(), MRI)))
-      Csts.emplace_back(
-          const_cast<ConstantFP *>(OpMI->getOperand(1).getFPImm()));
-    else
-      return false;
+    Register OpReg = I.getOperand(Idx).getReg();
+    if (auto AnyConst = getAnyConstantVRegValWithLookThrough(
+            OpReg, MRI, /*LookThroughInstrs=*/true,
+            /*LookThroughAnyExt=*/true)) {
+      MachineInstr *DefMI = MRI.getVRegDef(AnyConst->VReg);
+
+      if (DefMI->getOpcode() == TargetOpcode::G_CONSTANT) {
+        Csts.emplace_back(
+            ConstantInt::get(MIB.getMF().getFunction().getContext(),
+                             std::move(AnyConst->Value)));
+        continue;
+      }
+
+      if (DefMI->getOpcode() == TargetOpcode::G_FCONSTANT) {
+        Csts.emplace_back(
+            const_cast<ConstantFP *>(DefMI->getOperand(1).getFPImm()));
+        continue;
+      }
+    }
+    return false;
   }
   Constant *CV = ConstantVector::get(Csts);
   if (!emitConstantVector(I.getOperand(0).getReg(), CV, MIB, MRI))

diff  --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
index a192f788ead71..4389c9ab55256 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
@@ -14,7 +14,9 @@
 #include "AArch64RegisterBankInfo.h"
 #include "AArch64RegisterInfo.h"
 #include "AArch64Subtarget.h"
+#include "MCTargetDesc/AArch64AddressingModes.h"
 #include "MCTargetDesc/AArch64MCTargetDesc.h"
+#include "llvm/ADT/APInt.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
@@ -25,11 +27,13 @@
 #include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/CodeGen/MachineOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineSizeOpts.h"
 #include "llvm/CodeGen/RegisterBank.h"
 #include "llvm/CodeGen/RegisterBankInfo.h"
 #include "llvm/CodeGen/TargetOpcodes.h"
 #include "llvm/CodeGen/TargetRegisterInfo.h"
 #include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/IR/Constants.h"
 #include "llvm/IR/IntrinsicsAArch64.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/Threading.h"
@@ -358,17 +362,129 @@ AArch64RegisterBankInfo::getInstrAlternativeMappings(
   return RegisterBankInfo::getInstrAlternativeMappings(MI);
 }
 
+static bool preferGPRForFPImm(const MachineInstr &MI,
+                              const MachineRegisterInfo &MRI,
+                              const AArch64Subtarget &STI) {
+  assert(MI.getOpcode() == TargetOpcode::G_FCONSTANT);
+  Register Dst = MI.getOperand(0).getReg();
+  LLT Ty = MRI.getType(Dst);
+
+  unsigned Size = Ty.getSizeInBits();
+  if (Size != 16 && Size != 32 && Size != 64)
+    return false;
+
+  EVT VT = EVT::getFloatingPointVT(Size);
+  const AArch64TargetLowering *TLI = STI.getTargetLowering();
+
+  const APFloat Imm = MI.getOperand(1).getFPImm()->getValueAPF();
+  const APInt ImmBits = Imm.bitcastToAPInt();
+
+  // Check if we can encode this as a movi. Note, we only have one pattern so
+  // far for movis, hence the one check.
+  if (Size == 32) {
+    uint64_t Val = APInt::getSplat(64, ImmBits).getZExtValue();
+    if (AArch64_AM::isAdvSIMDModImmType4(Val))
+      return false;
+  }
+
+  // We want to use GPR when the value cannot be encoded as the immediate value
+  // of a fmov and when it will not result in a constant pool load. As
+  // AArch64TargetLowering::isFPImmLegal is used by the instruction selector
+  // to choose whether to emit a constant pool load, negating this check will
+  // ensure it would not have become a constant pool load.
+  bool OptForSize =
+      shouldOptimizeForSize(&MI.getMF()->getFunction(), nullptr, nullptr);
+  bool IsLegal = TLI->isFPImmLegal(Imm, VT, OptForSize);
+  bool IsFMov = TLI->isFPImmLegalAsFMov(Imm, VT);
+  return !IsFMov && IsLegal;
+}
+
+// Some of the instructions in applyMappingImpl attempt to anyext small values.
+// It may be that these values come from a G_CONSTANT that has been expanded to
+// 32 bits and then truncated. If this is the case, we shouldn't insert an
+// anyext and should instead make use of the G_CONSTANT directly, deleting the
+// trunc if possible.
+static bool foldTruncOfI32Constant(MachineInstr &MI, unsigned OpIdx,
+                                   MachineRegisterInfo &MRI,
+                                   const AArch64RegisterBankInfo &RBI) {
+  MachineOperand &Op = MI.getOperand(OpIdx);
+
+  Register ScalarReg = Op.getReg();
+  MachineInstr *TruncMI = MRI.getVRegDef(ScalarReg);
+  if (!TruncMI || TruncMI->getOpcode() != TargetOpcode::G_TRUNC)
+    return false;
+
+  Register TruncSrc = TruncMI->getOperand(1).getReg();
+  MachineInstr *SrcDef = MRI.getVRegDef(TruncSrc);
+  if (!SrcDef || SrcDef->getOpcode() != TargetOpcode::G_CONSTANT)
+    return false;
+
+  LLT TruncSrcTy = MRI.getType(TruncSrc);
+  if (!TruncSrcTy.isScalar() || TruncSrcTy.getSizeInBits() != 32)
+    return false;
+
+  // Avoid truncating and extending a constant, this helps with selection.
+  Op.setReg(TruncSrc);
+  MRI.setRegBank(TruncSrc, RBI.getRegBank(AArch64::GPRRegBankID));
+
+  if (MRI.use_empty(ScalarReg))
+    TruncMI->eraseFromParent();
+
+  return true;
+}
+
 void AArch64RegisterBankInfo::applyMappingImpl(
     MachineIRBuilder &Builder, const OperandsMapper &OpdMapper) const {
   MachineInstr &MI = OpdMapper.getMI();
   MachineRegisterInfo &MRI = OpdMapper.getMRI();
 
   switch (MI.getOpcode()) {
+  case TargetOpcode::G_CONSTANT: {
+    Register Dst = MI.getOperand(0).getReg();
+    LLT DstTy = MRI.getType(Dst);
+    assert(MRI.getRegBank(Dst) == &AArch64::GPRRegBank && DstTy.isScalar() &&
+           DstTy.getSizeInBits() < 32 &&
+           "Expected a scalar smaller than 32 bits on a GPR.");
+    Builder.setInsertPt(*MI.getParent(), std::next(MI.getIterator()));
+    Register ExtReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
+    Builder.buildTrunc(Dst, ExtReg);
+
+    APInt Val = MI.getOperand(1).getCImm()->getValue().zext(32);
+    LLVMContext &Ctx = Builder.getMF().getFunction().getContext();
+    MI.getOperand(1).setCImm(ConstantInt::get(Ctx, Val));
+    MI.getOperand(0).setReg(ExtReg);
+    MRI.setRegBank(ExtReg, AArch64::GPRRegBank);
+
+    return applyDefaultMapping(OpdMapper);
+  }
+  case TargetOpcode::G_FCONSTANT: {
+    Register Dst = MI.getOperand(0).getReg();
+    assert(MRI.getRegBank(Dst) == &AArch64::GPRRegBank &&
+           "Expected Dst to be on a GPR.");
+    const APFloat &Imm = MI.getOperand(1).getFPImm()->getValueAPF();
+    APInt Bits = Imm.bitcastToAPInt();
+    Builder.setInsertPt(*MI.getParent(), MI.getIterator());
+    if (Bits.getBitWidth() < 32) {
+      Register ExtReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
+      Builder.buildConstant(ExtReg, Bits.zext(32));
+      Builder.buildTrunc(Dst, ExtReg);
+      MRI.setRegBank(ExtReg, AArch64::GPRRegBank);
+    } else {
+      Builder.buildConstant(Dst, Bits);
+    }
+    MI.eraseFromParent();
+    return;
+  }
   case TargetOpcode::G_STORE: {
     Register Dst = MI.getOperand(0).getReg();
     LLT Ty = MRI.getType(Dst);
+
     if (MRI.getRegBank(Dst) == &AArch64::GPRRegBank && Ty.isScalar() &&
         Ty.getSizeInBits() < 32) {
+
+      if (foldTruncOfI32Constant(MI, 0, MRI, *this))
+        return applyDefaultMapping(OpdMapper);
+
       Builder.setInsertPt(*MI.getParent(), MI.getIterator());
       auto Ext = Builder.buildAnyExt(LLT::scalar(32), Dst);
       MI.getOperand(0).setReg(Ext.getReg(0));
@@ -397,6 +513,9 @@ void AArch64RegisterBankInfo::applyMappingImpl(
            "Don't know how to handle that ID");
     return applyDefaultMapping(OpdMapper);
   case TargetOpcode::G_INSERT_VECTOR_ELT: {
+    if (foldTruncOfI32Constant(MI, 2, MRI, *this))
+      return applyDefaultMapping(OpdMapper);
+
     // Extend smaller gpr operands to 32 bit.
     Builder.setInsertPt(*MI.getParent(), MI.getIterator());
     auto Ext = Builder.buildAnyExt(LLT::scalar(32), MI.getOperand(2).getReg());
@@ -405,23 +524,20 @@ void AArch64RegisterBankInfo::applyMappingImpl(
     return applyDefaultMapping(OpdMapper);
   }
   case AArch64::G_DUP: {
+    if (foldTruncOfI32Constant(MI, 1, MRI, *this))
+      return applyDefaultMapping(OpdMapper);
+
     // Extend smaller gpr to 32-bits
     assert(MRI.getType(MI.getOperand(1).getReg()).getSizeInBits() < 32 &&
            "Expected sources smaller than 32-bits");
     Builder.setInsertPt(*MI.getParent(), MI.getIterator());
 
-    Register ConstReg;
-    auto ConstMI = MRI.getVRegDef(MI.getOperand(1).getReg());
-    if (ConstMI->getOpcode() == TargetOpcode::G_CONSTANT) {
-      auto CstVal = ConstMI->getOperand(1).getCImm()->getValue();
-      ConstReg =
-          Builder.buildConstant(LLT::scalar(32), CstVal.sext(32)).getReg(0);
-    } else {
-      ConstReg = Builder.buildAnyExt(LLT::scalar(32), MI.getOperand(1).getReg())
-                     .getReg(0);
-    }
+    Register ConstReg =
+        Builder.buildAnyExt(LLT::scalar(32), MI.getOperand(1).getReg())
+            .getReg(0);
     MRI.setRegBank(ConstReg, getRegBank(AArch64::GPRRegBankID));
     MI.getOperand(1).setReg(ConstReg);
+
     return applyDefaultMapping(OpdMapper);
   }
   default:
@@ -855,6 +971,21 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
   // Some of the floating-point instructions have mixed GPR and FPR operands:
   // fine-tune the computed mapping.
   switch (Opc) {
+  case TargetOpcode::G_CONSTANT: {
+    Register Dst = MI.getOperand(0).getReg();
+    LLT DstTy = MRI.getType(Dst);
+    if (DstTy.isScalar() && DstTy.getSizeInBits() < 32)
+      MappingID = CustomMappingID;
+    break;
+  }
+  case TargetOpcode::G_FCONSTANT: {
+    if (preferGPRForFPImm(MI, MRI, STI)) {
+      // Materialize in GPR and rely on later bank copies for FP uses.
+      MappingID = CustomMappingID;
+      OpRegBankIdx = {PMI_FirstGPR};
+    }
+    break;
+  }
   case AArch64::G_DUP: {
     Register ScalarReg = MI.getOperand(1).getReg();
     LLT ScalarTy = MRI.getType(ScalarReg);

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/inttoptr_add.ll b/llvm/test/CodeGen/AArch64/GlobalISel/inttoptr_add.ll
index 38b9558f426f2..21402fade53dd 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/inttoptr_add.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/inttoptr_add.ll
@@ -4,9 +4,9 @@
 define dso_local void @fn() {
 ; CHECK-LABEL: fn:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov x8, #4132
-; CHECK-NEXT:    mov w9, #1
-; CHECK-NEXT:    movk x8, #65489, lsl #16
+; CHECK-NEXT:    mov w8, #4132 // =0x1024
+; CHECK-NEXT:    mov w9, #1 // =0x1
+; CHECK-NEXT:    movk w8, #65489, lsl #16
 ; CHECK-NEXT:    str w9, [x8]
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/preselect-process-phis.mir b/llvm/test/CodeGen/AArch64/GlobalISel/preselect-process-phis.mir
index 7620c729d580e..4bc3b5de3ffae 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/preselect-process-phis.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/preselect-process-phis.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -verify-machineinstrs -mtriple aarch64--- -run-pass=instruction-select -global-isel-abort=1 %s -o - | FileCheck %s
+# RUN: llc -verify-machineinstrs -mtriple aarch64--- --run-pass=instruction-select -global-isel-abort=1 %s -o - | FileCheck %s
 ---
 name:            test_loop_phi_fpr_to_gpr
 alignment:       4

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-build-vector.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-build-vector.mir
index 015949ed8de95..55f316facfca3 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-build-vector.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-build-vector.mir
@@ -44,16 +44,41 @@ body:             |
     ; They're all constant, so we can select it via a constant-pool load if needed
     ; and this form is more amenable to selection by patterns (without x-bank copies).
     ; CHECK-LABEL: name: g_constant_operands_on_gpr
-    ; CHECK: [[C:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 4
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 10
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 3
-    ; CHECK-NEXT: [[C3:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 11
-    ; CHECK-NEXT: [[C4:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 15
-    ; CHECK-NEXT: [[C5:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 44
-    ; CHECK-NEXT: [[C6:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 22
-    ; CHECK-NEXT: [[C7:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 19
-    ; CHECK-NEXT: [[C8:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 55
-    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:fpr(<16 x s8>) = G_BUILD_VECTOR [[C]](s8), [[C1]](s8), [[C2]](s8), [[C3]](s8), [[C4]](s8), [[C]](s8), [[C1]](s8), [[C5]](s8), [[C6]](s8), [[C4]](s8), [[C]](s8), [[C7]](s8), [[C2]](s8), [[C3]](s8), [[C4]](s8), [[C8]](s8)
+    ; CHECK: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 4
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:gpr(s8) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 10
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:gpr(s8) = G_TRUNC [[C1]](s32)
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 3
+    ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:gpr(s8) = G_TRUNC [[C2]](s32)
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 11
+    ; CHECK-NEXT: [[TRUNC3:%[0-9]+]]:gpr(s8) = G_TRUNC [[C3]](s32)
+    ; CHECK-NEXT: [[C4:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 15
+    ; CHECK-NEXT: [[TRUNC4:%[0-9]+]]:gpr(s8) = G_TRUNC [[C4]](s32)
+    ; CHECK-NEXT: [[C5:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 44
+    ; CHECK-NEXT: [[TRUNC5:%[0-9]+]]:gpr(s8) = G_TRUNC [[C5]](s32)
+    ; CHECK-NEXT: [[C6:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 22
+    ; CHECK-NEXT: [[TRUNC6:%[0-9]+]]:gpr(s8) = G_TRUNC [[C6]](s32)
+    ; CHECK-NEXT: [[C7:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 19
+    ; CHECK-NEXT: [[TRUNC7:%[0-9]+]]:gpr(s8) = G_TRUNC [[C7]](s32)
+    ; CHECK-NEXT: [[C8:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 55
+    ; CHECK-NEXT: [[TRUNC8:%[0-9]+]]:gpr(s8) = G_TRUNC [[C8]](s32)
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s8) = COPY [[TRUNC]](s8)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr(s8) = COPY [[TRUNC1]](s8)
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr(s8) = COPY [[TRUNC2]](s8)
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:fpr(s8) = COPY [[TRUNC3]](s8)
+    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:fpr(s8) = COPY [[TRUNC4]](s8)
+    ; CHECK-NEXT: [[COPY5:%[0-9]+]]:fpr(s8) = COPY [[TRUNC]](s8)
+    ; CHECK-NEXT: [[COPY6:%[0-9]+]]:fpr(s8) = COPY [[TRUNC1]](s8)
+    ; CHECK-NEXT: [[COPY7:%[0-9]+]]:fpr(s8) = COPY [[TRUNC5]](s8)
+    ; CHECK-NEXT: [[COPY8:%[0-9]+]]:fpr(s8) = COPY [[TRUNC6]](s8)
+    ; CHECK-NEXT: [[COPY9:%[0-9]+]]:fpr(s8) = COPY [[TRUNC4]](s8)
+    ; CHECK-NEXT: [[COPY10:%[0-9]+]]:fpr(s8) = COPY [[TRUNC]](s8)
+    ; CHECK-NEXT: [[COPY11:%[0-9]+]]:fpr(s8) = COPY [[TRUNC7]](s8)
+    ; CHECK-NEXT: [[COPY12:%[0-9]+]]:fpr(s8) = COPY [[TRUNC2]](s8)
+    ; CHECK-NEXT: [[COPY13:%[0-9]+]]:fpr(s8) = COPY [[TRUNC3]](s8)
+    ; CHECK-NEXT: [[COPY14:%[0-9]+]]:fpr(s8) = COPY [[TRUNC4]](s8)
+    ; CHECK-NEXT: [[COPY15:%[0-9]+]]:fpr(s8) = COPY [[TRUNC8]](s8)
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:fpr(<16 x s8>) = G_BUILD_VECTOR [[COPY]](s8), [[COPY1]](s8), [[COPY2]](s8), [[COPY3]](s8), [[COPY4]](s8), [[COPY5]](s8), [[COPY6]](s8), [[COPY7]](s8), [[COPY8]](s8), [[COPY9]](s8), [[COPY10]](s8), [[COPY11]](s8), [[COPY12]](s8), [[COPY13]](s8), [[COPY14]](s8), [[COPY15]](s8)
     ; CHECK-NEXT: $q0 = COPY [[BUILD_VECTOR]](<16 x s8>)
     ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %1:_(s8) = G_CONSTANT i8 4

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-constant.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-constant.mir
index 72e691bf520ea..8ce47079d9e7e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-constant.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-constant.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: llc -mtriple=aarch64-- -run-pass=regbankselect,instruction-select -verify-machineinstrs %s -o - | FileCheck %s
 
 --- |
   target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-dup.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-dup.mir
index cf2bab78fe5a6..2007ff68e73c8 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-dup.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-dup.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=aarch64 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: llc -mtriple=aarch64 -run-pass=regbankselect,instruction-select -verify-machineinstrs %s -o - | FileCheck %s
 #
 # GPR variants should not use INSERT_SUBREG. FPR variants (DUP<ty>lane) should.
 
@@ -444,7 +444,9 @@ body:             |
     ; CHECK-LABEL: name: cst_v2p0
     ; CHECK: liveins: $w0
     ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: %cst:gpr64 = MOVi64imm 3
+    ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 3
+    ; CHECK-NEXT: %cst:gpr64all = SUBREG_TO_REG [[MOVi32imm]], %subreg.sub_32
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY %cst
     ; CHECK-NEXT: [[ADRP:%[0-9]+]]:gpr64common = ADRP target-flags(aarch64-page) %const.0
     ; CHECK-NEXT: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[ADRP]], target-flags(aarch64-pageoff, aarch64-nc) %const.0 :: (load (s128) from constant-pool)
     ; CHECK-NEXT: $q0 = COPY [[LDRQui]]

diff  --git a/llvm/test/CodeGen/AArch64/aarch64-mops.ll b/llvm/test/CodeGen/AArch64/aarch64-mops.ll
index d082c583faeae..cefe25e322529 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-mops.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-mops.ll
@@ -60,31 +60,17 @@ entry:
 }
 
 define void @memset_10_zeroval(ptr %dst) {
-; GISel-WITHOUT-MOPS-O0-LABEL: memset_10_zeroval:
-; GISel-WITHOUT-MOPS-O0:       // %bb.0: // %entry
-; GISel-WITHOUT-MOPS-O0-NEXT:    str xzr, [x0]
-; GISel-WITHOUT-MOPS-O0-NEXT:    mov w8, wzr
-; GISel-WITHOUT-MOPS-O0-NEXT:    strh w8, [x0, #8]
-; GISel-WITHOUT-MOPS-O0-NEXT:    ret
-;
-; GISel-WITHOUT-MOPS-O3-LABEL: memset_10_zeroval:
-; GISel-WITHOUT-MOPS-O3:       // %bb.0: // %entry
-; GISel-WITHOUT-MOPS-O3-NEXT:    str xzr, [x0]
-; GISel-WITHOUT-MOPS-O3-NEXT:    strh wzr, [x0, #8]
-; GISel-WITHOUT-MOPS-O3-NEXT:    ret
-;
-; GISel-MOPS-O0-LABEL: memset_10_zeroval:
-; GISel-MOPS-O0:       // %bb.0: // %entry
-; GISel-MOPS-O0-NEXT:    str xzr, [x0]
-; GISel-MOPS-O0-NEXT:    mov w8, wzr
-; GISel-MOPS-O0-NEXT:    strh w8, [x0, #8]
-; GISel-MOPS-O0-NEXT:    ret
+; GISel-WITHOUT-MOPS-LABEL: memset_10_zeroval:
+; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
+; GISel-WITHOUT-MOPS-NEXT:    str xzr, [x0]
+; GISel-WITHOUT-MOPS-NEXT:    strh wzr, [x0, #8]
+; GISel-WITHOUT-MOPS-NEXT:    ret
 ;
-; GISel-MOPS-O3-LABEL: memset_10_zeroval:
-; GISel-MOPS-O3:       // %bb.0: // %entry
-; GISel-MOPS-O3-NEXT:    str xzr, [x0]
-; GISel-MOPS-O3-NEXT:    strh wzr, [x0, #8]
-; GISel-MOPS-O3-NEXT:    ret
+; GISel-MOPS-LABEL: memset_10_zeroval:
+; GISel-MOPS:       // %bb.0: // %entry
+; GISel-MOPS-NEXT:    str xzr, [x0]
+; GISel-MOPS-NEXT:    strh wzr, [x0, #8]
+; GISel-MOPS-NEXT:    ret
 ;
 ; SDAG-WITHOUT-MOPS-O2-LABEL: memset_10_zeroval:
 ; SDAG-WITHOUT-MOPS-O2:       // %bb.0: // %entry
@@ -103,31 +89,17 @@ entry:
 }
 
 define void @memset_10_zeroval_volatile(ptr %dst) {
-; GISel-WITHOUT-MOPS-O0-LABEL: memset_10_zeroval_volatile:
-; GISel-WITHOUT-MOPS-O0:       // %bb.0: // %entry
-; GISel-WITHOUT-MOPS-O0-NEXT:    str xzr, [x0]
-; GISel-WITHOUT-MOPS-O0-NEXT:    mov w8, wzr
-; GISel-WITHOUT-MOPS-O0-NEXT:    strh w8, [x0, #8]
-; GISel-WITHOUT-MOPS-O0-NEXT:    ret
-;
-; GISel-WITHOUT-MOPS-O3-LABEL: memset_10_zeroval_volatile:
-; GISel-WITHOUT-MOPS-O3:       // %bb.0: // %entry
-; GISel-WITHOUT-MOPS-O3-NEXT:    str xzr, [x0]
-; GISel-WITHOUT-MOPS-O3-NEXT:    strh wzr, [x0, #8]
-; GISel-WITHOUT-MOPS-O3-NEXT:    ret
-;
-; GISel-MOPS-O0-LABEL: memset_10_zeroval_volatile:
-; GISel-MOPS-O0:       // %bb.0: // %entry
-; GISel-MOPS-O0-NEXT:    str xzr, [x0]
-; GISel-MOPS-O0-NEXT:    mov w8, wzr
-; GISel-MOPS-O0-NEXT:    strh w8, [x0, #8]
-; GISel-MOPS-O0-NEXT:    ret
+; GISel-WITHOUT-MOPS-LABEL: memset_10_zeroval_volatile:
+; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
+; GISel-WITHOUT-MOPS-NEXT:    str xzr, [x0]
+; GISel-WITHOUT-MOPS-NEXT:    strh wzr, [x0, #8]
+; GISel-WITHOUT-MOPS-NEXT:    ret
 ;
-; GISel-MOPS-O3-LABEL: memset_10_zeroval_volatile:
-; GISel-MOPS-O3:       // %bb.0: // %entry
-; GISel-MOPS-O3-NEXT:    str xzr, [x0]
-; GISel-MOPS-O3-NEXT:    strh wzr, [x0, #8]
-; GISel-MOPS-O3-NEXT:    ret
+; GISel-MOPS-LABEL: memset_10_zeroval_volatile:
+; GISel-MOPS:       // %bb.0: // %entry
+; GISel-MOPS-NEXT:    str xzr, [x0]
+; GISel-MOPS-NEXT:    strh wzr, [x0, #8]
+; GISel-MOPS-NEXT:    ret
 ;
 ; SDAG-WITHOUT-MOPS-O2-LABEL: memset_10_zeroval_volatile:
 ; SDAG-WITHOUT-MOPS-O2:       // %bb.0: // %entry
@@ -459,16 +431,16 @@ define void @memset_10(ptr %dst, i32 %value) {
 ;
 ; SDAG-WITHOUT-MOPS-O2-LABEL: memset_10:
 ; SDAG-WITHOUT-MOPS-O2:       // %bb.0: // %entry
-; SDAG-WITHOUT-MOPS-O2-NEXT:    dup	v0.16b, w1
-; SDAG-WITHOUT-MOPS-O2-NEXT:    str	h0, [x0, #8]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    str	d0, [x0]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    dup v0.16b, w1
+; SDAG-WITHOUT-MOPS-O2-NEXT:    str h0, [x0, #8]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    str d0, [x0]
 ; SDAG-WITHOUT-MOPS-O2-NEXT:    ret
 ;
 ; SDAG-MOPS-O2-LABEL: memset_10:
 ; SDAG-MOPS-O2:       // %bb.0: // %entry
-; SDAG-MOPS-O2-NEXT:    dup	v0.16b, w1
-; SDAG-MOPS-O2-NEXT:    str	h0, [x0, #8]
-; SDAG-MOPS-O2-NEXT:    str	d0, [x0]
+; SDAG-MOPS-O2-NEXT:    dup v0.16b, w1
+; SDAG-MOPS-O2-NEXT:    str h0, [x0, #8]
+; SDAG-MOPS-O2-NEXT:    str d0, [x0]
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
   %value_trunc = trunc i32 %value to i8
@@ -519,16 +491,16 @@ define void @memset_10_volatile(ptr %dst, i32 %value) {
 ;
 ; SDAG-WITHOUT-MOPS-O2-LABEL: memset_10_volatile:
 ; SDAG-WITHOUT-MOPS-O2:       // %bb.0: // %entry
-; SDAG-WITHOUT-MOPS-O2-NEXT:    dup	v0.16b, w1
-; SDAG-WITHOUT-MOPS-O2-NEXT:    str	h0, [x0, #8]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    str	d0, [x0]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    dup v0.16b, w1
+; SDAG-WITHOUT-MOPS-O2-NEXT:    str h0, [x0, #8]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    str d0, [x0]
 ; SDAG-WITHOUT-MOPS-O2-NEXT:    ret
 ;
 ; SDAG-MOPS-O2-LABEL: memset_10_volatile:
 ; SDAG-MOPS-O2:       // %bb.0: // %entry
-; SDAG-MOPS-O2-NEXT:    dup	v0.16b, w1
-; SDAG-MOPS-O2-NEXT:    str	h0, [x0, #8]
-; SDAG-MOPS-O2-NEXT:    str	d0, [x0]
+; SDAG-MOPS-O2-NEXT:    dup v0.16b, w1
+; SDAG-MOPS-O2-NEXT:    str h0, [x0, #8]
+; SDAG-MOPS-O2-NEXT:    str d0, [x0]
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
   %value_trunc = trunc i32 %value to i8
@@ -1526,28 +1498,28 @@ define void @memcpy_inline_300(ptr %dst, ptr %src, i32 %value) {
 ;
 ; SDAG-WITHOUT-MOPS-O2-LABEL: memcpy_inline_300:
 ; SDAG-WITHOUT-MOPS-O2:       // %bb.0: // %entry
-; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp	q1, q0, [x1, #32]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    add	x8, x1, #284
-; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp	q2, q3, [x1]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    stp	q1, q0, [x0, #32]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    stp	q2, q3, [x0]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp	q1, q0, [x1, #96]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp	q2, q3, [x1, #64]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    stp	q1, q0, [x0, #96]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    stp	q2, q3, [x0, #64]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp	q1, q0, [x1, #160]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp	q2, q3, [x1, #128]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    stp	q1, q0, [x0, #160]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    stp	q2, q3, [x0, #128]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp	q1, q0, [x1, #224]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp	q2, q3, [x1, #192]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    stp	q1, q0, [x0, #224]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    stp	q2, q3, [x0, #192]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp	q2, q1, [x1, #256]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    ldr	q0, [x8]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    add	x8, x0, #284
-; SDAG-WITHOUT-MOPS-O2-NEXT:    str	q0, [x8]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    stp	q2, q1, [x0, #256]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp q1, q0, [x1, #32]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    add x8, x1, #284
+; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp q2, q3, [x1]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    stp q1, q0, [x0, #32]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    stp q2, q3, [x0]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp q1, q0, [x1, #96]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp q2, q3, [x1, #64]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    stp q1, q0, [x0, #96]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    stp q2, q3, [x0, #64]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp q1, q0, [x1, #160]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp q2, q3, [x1, #128]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    stp q1, q0, [x0, #160]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    stp q2, q3, [x0, #128]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp q1, q0, [x1, #224]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp q2, q3, [x1, #192]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    stp q1, q0, [x0, #224]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    stp q2, q3, [x0, #192]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp q2, q1, [x1, #256]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    ldr q0, [x8]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    add x8, x0, #284
+; SDAG-WITHOUT-MOPS-O2-NEXT:    str q0, [x8]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    stp q2, q1, [x0, #256]
 ; SDAG-WITHOUT-MOPS-O2-NEXT:    ret
 ;
 ; SDAG-MOPS-O2-LABEL: memcpy_inline_300:
@@ -1708,80 +1680,52 @@ entry:
 }
 
 define void @memcpy_inline_65(ptr %dst, ptr %src, i32 %value) {
-; GISel-WITHOUT-MOPS-O0-LABEL: memcpy_inline_65:
-; GISel-WITHOUT-MOPS-O0:       // %bb.0: // %entry
-; GISel-WITHOUT-MOPS-O0-NEXT:    ldr	q0, [x1]
-; GISel-WITHOUT-MOPS-O0-NEXT:    str	q0, [x0]
-; GISel-WITHOUT-MOPS-O0-NEXT:    ldr	q0, [x1, #16]
-; GISel-WITHOUT-MOPS-O0-NEXT:    str	q0, [x0, #16]
-; GISel-WITHOUT-MOPS-O0-NEXT:    ldr	q0, [x1, #32]
-; GISel-WITHOUT-MOPS-O0-NEXT:    str	q0, [x0, #32]
-; GISel-WITHOUT-MOPS-O0-NEXT:    ldr	q0, [x1, #48]
-; GISel-WITHOUT-MOPS-O0-NEXT:    str	q0, [x0, #48]
-; GISel-WITHOUT-MOPS-O0-NEXT:    ldrb	w8, [x1, #64]
-; GISel-WITHOUT-MOPS-O0-NEXT:    strb	w8, [x0, #64]
-; GISel-WITHOUT-MOPS-O0-NEXT:    ret
-;
-; GISel-WITHOUT-MOPS-O3-LABEL: memcpy_inline_65:
-; GISel-WITHOUT-MOPS-O3:       // %bb.0: // %entry
-; GISel-WITHOUT-MOPS-O3-NEXT:    ldr	q0, [x1]
-; GISel-WITHOUT-MOPS-O3-NEXT:    str	q0, [x0]
-; GISel-WITHOUT-MOPS-O3-NEXT:    ldr	q0, [x1, #16]
-; GISel-WITHOUT-MOPS-O3-NEXT:    str	q0, [x0, #16]
-; GISel-WITHOUT-MOPS-O3-NEXT:    ldr	q0, [x1, #32]
-; GISel-WITHOUT-MOPS-O3-NEXT:    str	q0, [x0, #32]
-; GISel-WITHOUT-MOPS-O3-NEXT:    ldr	q0, [x1, #48]
-; GISel-WITHOUT-MOPS-O3-NEXT:    str	q0, [x0, #48]
-; GISel-WITHOUT-MOPS-O3-NEXT:    ldrb	w8, [x1, #64]
-; GISel-WITHOUT-MOPS-O3-NEXT:    strb	w8, [x0, #64]
-; GISel-WITHOUT-MOPS-O3-NEXT:    ret
-;
-; GISel-MOPS-O0-LABEL: memcpy_inline_65:
-; GISel-MOPS-O0:       // %bb.0: // %entry
-; GISel-MOPS-O0-NEXT:    ldr	q0, [x1]
-; GISel-MOPS-O0-NEXT:    str	q0, [x0]
-; GISel-MOPS-O0-NEXT:    ldr	q0, [x1, #16]
-; GISel-MOPS-O0-NEXT:    str	q0, [x0, #16]
-; GISel-MOPS-O0-NEXT:    ldr	q0, [x1, #32]
-; GISel-MOPS-O0-NEXT:    str	q0, [x0, #32]
-; GISel-MOPS-O0-NEXT:    ldr	q0, [x1, #48]
-; GISel-MOPS-O0-NEXT:    str	q0, [x0, #48]
-; GISel-MOPS-O0-NEXT:    ldrb	w8, [x1, #64]
-; GISel-MOPS-O0-NEXT:    strb	w8, [x0, #64]
-; GISel-MOPS-O0-NEXT:    ret
+; GISel-WITHOUT-MOPS-LABEL: memcpy_inline_65:
+; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
+; GISel-WITHOUT-MOPS-NEXT:    ldr q0, [x1]
+; GISel-WITHOUT-MOPS-NEXT:    str q0, [x0]
+; GISel-WITHOUT-MOPS-NEXT:    ldr q0, [x1, #16]
+; GISel-WITHOUT-MOPS-NEXT:    str q0, [x0, #16]
+; GISel-WITHOUT-MOPS-NEXT:    ldr q0, [x1, #32]
+; GISel-WITHOUT-MOPS-NEXT:    str q0, [x0, #32]
+; GISel-WITHOUT-MOPS-NEXT:    ldr q0, [x1, #48]
+; GISel-WITHOUT-MOPS-NEXT:    str q0, [x0, #48]
+; GISel-WITHOUT-MOPS-NEXT:    ldrb w8, [x1, #64]
+; GISel-WITHOUT-MOPS-NEXT:    strb w8, [x0, #64]
+; GISel-WITHOUT-MOPS-NEXT:    ret
 ;
-; GISel-MOPS-O3-LABEL: memcpy_inline_65:
-; GISel-MOPS-O3:       // %bb.0: // %entry
-; GISel-MOPS-O3-NEXT:    ldr	q0, [x1]
-; GISel-MOPS-O3-NEXT:    str	q0, [x0]
-; GISel-MOPS-O3-NEXT:    ldr	q0, [x1, #16]
-; GISel-MOPS-O3-NEXT:    str	q0, [x0, #16]
-; GISel-MOPS-O3-NEXT:    ldr	q0, [x1, #32]
-; GISel-MOPS-O3-NEXT:    str	q0, [x0, #32]
-; GISel-MOPS-O3-NEXT:    ldr	q0, [x1, #48]
-; GISel-MOPS-O3-NEXT:    str	q0, [x0, #48]
-; GISel-MOPS-O3-NEXT:    ldrb	w8, [x1, #64]
-; GISel-MOPS-O3-NEXT:    strb	w8, [x0, #64]
-; GISel-MOPS-O3-NEXT:    ret
+; GISel-MOPS-LABEL: memcpy_inline_65:
+; GISel-MOPS:       // %bb.0: // %entry
+; GISel-MOPS-NEXT:    ldr q0, [x1]
+; GISel-MOPS-NEXT:    str q0, [x0]
+; GISel-MOPS-NEXT:    ldr q0, [x1, #16]
+; GISel-MOPS-NEXT:    str q0, [x0, #16]
+; GISel-MOPS-NEXT:    ldr q0, [x1, #32]
+; GISel-MOPS-NEXT:    str q0, [x0, #32]
+; GISel-MOPS-NEXT:    ldr q0, [x1, #48]
+; GISel-MOPS-NEXT:    str q0, [x0, #48]
+; GISel-MOPS-NEXT:    ldrb w8, [x1, #64]
+; GISel-MOPS-NEXT:    strb w8, [x0, #64]
+; GISel-MOPS-NEXT:    ret
 ;
 ; SDAG-WITHOUT-MOPS-O2-LABEL: memcpy_inline_65:
 ; SDAG-WITHOUT-MOPS-O2:       // %bb.0: // %entry
-; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp	q1, q0, [x1, #32]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp	q2, q3, [x1]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    stp	q1, q0, [x0, #32]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    stp	q2, q3, [x0]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    ldrb	w8, [x1, #64]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    strb	w8, [x0, #64]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp q1, q0, [x1, #32]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp q2, q3, [x1]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    stp q1, q0, [x0, #32]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    stp q2, q3, [x0]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    ldrb w8, [x1, #64]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    strb w8, [x0, #64]
 ; SDAG-WITHOUT-MOPS-O2-NEXT:    ret
 ;
 ; SDAG-MOPS-O2-LABEL: memcpy_inline_65:
 ; SDAG-MOPS-O2:       // %bb.0: // %entry
-; SDAG-MOPS-O2-NEXT:    ldp	q1, q0, [x1, #32]
-; SDAG-MOPS-O2-NEXT:    ldp	q2, q3, [x1]
-; SDAG-MOPS-O2-NEXT:    stp	q1, q0, [x0, #32]
-; SDAG-MOPS-O2-NEXT:    stp	q2, q3, [x0]
-; SDAG-MOPS-O2-NEXT:    ldrb	w8, [x1, #64]
-; SDAG-MOPS-O2-NEXT:    strb	w8, [x0, #64]
+; SDAG-MOPS-O2-NEXT:    ldp q1, q0, [x1, #32]
+; SDAG-MOPS-O2-NEXT:    ldp q2, q3, [x1]
+; SDAG-MOPS-O2-NEXT:    stp q1, q0, [x0, #32]
+; SDAG-MOPS-O2-NEXT:    stp q2, q3, [x0]
+; SDAG-MOPS-O2-NEXT:    ldrb w8, [x1, #64]
+; SDAG-MOPS-O2-NEXT:    strb w8, [x0, #64]
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
   call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 65, i1 false)
@@ -1789,68 +1733,44 @@ entry:
 }
 
 define void @memcpy_inline_64(ptr %dst, ptr %src, i32 %value) {
-; GISel-WITHOUT-MOPS-O0-LABEL: memcpy_inline_64:
-; GISel-WITHOUT-MOPS-O0:       // %bb.0: // %entry
-; GISel-WITHOUT-MOPS-O0-NEXT:    ldr	q0, [x1]
-; GISel-WITHOUT-MOPS-O0-NEXT:    str	q0, [x0]
-; GISel-WITHOUT-MOPS-O0-NEXT:    ldr	q0, [x1, #16]
-; GISel-WITHOUT-MOPS-O0-NEXT:    str	q0, [x0, #16]
-; GISel-WITHOUT-MOPS-O0-NEXT:    ldr	q0, [x1, #32]
-; GISel-WITHOUT-MOPS-O0-NEXT:    str	q0, [x0, #32]
-; GISel-WITHOUT-MOPS-O0-NEXT:    ldr	q0, [x1, #48]
-; GISel-WITHOUT-MOPS-O0-NEXT:    str	q0, [x0, #48]
-; GISel-WITHOUT-MOPS-O0-NEXT:    ret
-;
-; GISel-WITHOUT-MOPS-O3-LABEL: memcpy_inline_64:
-; GISel-WITHOUT-MOPS-O3:       // %bb.0: // %entry
-; GISel-WITHOUT-MOPS-O3-NEXT:    ldr	q0, [x1]
-; GISel-WITHOUT-MOPS-O3-NEXT:    str	q0, [x0]
-; GISel-WITHOUT-MOPS-O3-NEXT:    ldr	q0, [x1, #16]
-; GISel-WITHOUT-MOPS-O3-NEXT:    str	q0, [x0, #16]
-; GISel-WITHOUT-MOPS-O3-NEXT:    ldr	q0, [x1, #32]
-; GISel-WITHOUT-MOPS-O3-NEXT:    str	q0, [x0, #32]
-; GISel-WITHOUT-MOPS-O3-NEXT:    ldr	q0, [x1, #48]
-; GISel-WITHOUT-MOPS-O3-NEXT:    str	q0, [x0, #48]
-; GISel-WITHOUT-MOPS-O3-NEXT:    ret
-;
-; GISel-MOPS-O0-LABEL: memcpy_inline_64:
-; GISel-MOPS-O0:       // %bb.0: // %entry
-; GISel-MOPS-O0-NEXT:    ldr	q0, [x1]
-; GISel-MOPS-O0-NEXT:    str	q0, [x0]
-; GISel-MOPS-O0-NEXT:    ldr	q0, [x1, #16]
-; GISel-MOPS-O0-NEXT:    str	q0, [x0, #16]
-; GISel-MOPS-O0-NEXT:    ldr	q0, [x1, #32]
-; GISel-MOPS-O0-NEXT:    str	q0, [x0, #32]
-; GISel-MOPS-O0-NEXT:    ldr	q0, [x1, #48]
-; GISel-MOPS-O0-NEXT:    str	q0, [x0, #48]
-; GISel-MOPS-O0-NEXT:    ret
+; GISel-WITHOUT-MOPS-LABEL: memcpy_inline_64:
+; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
+; GISel-WITHOUT-MOPS-NEXT:    ldr q0, [x1]
+; GISel-WITHOUT-MOPS-NEXT:    str q0, [x0]
+; GISel-WITHOUT-MOPS-NEXT:    ldr q0, [x1, #16]
+; GISel-WITHOUT-MOPS-NEXT:    str q0, [x0, #16]
+; GISel-WITHOUT-MOPS-NEXT:    ldr q0, [x1, #32]
+; GISel-WITHOUT-MOPS-NEXT:    str q0, [x0, #32]
+; GISel-WITHOUT-MOPS-NEXT:    ldr q0, [x1, #48]
+; GISel-WITHOUT-MOPS-NEXT:    str q0, [x0, #48]
+; GISel-WITHOUT-MOPS-NEXT:    ret
 ;
-; GISel-MOPS-O3-LABEL: memcpy_inline_64:
-; GISel-MOPS-O3:       // %bb.0: // %entry
-; GISel-MOPS-O3-NEXT:    ldr	q0, [x1]
-; GISel-MOPS-O3-NEXT:    str	q0, [x0]
-; GISel-MOPS-O3-NEXT:    ldr	q0, [x1, #16]
-; GISel-MOPS-O3-NEXT:    str	q0, [x0, #16]
-; GISel-MOPS-O3-NEXT:    ldr	q0, [x1, #32]
-; GISel-MOPS-O3-NEXT:    str	q0, [x0, #32]
-; GISel-MOPS-O3-NEXT:    ldr	q0, [x1, #48]
-; GISel-MOPS-O3-NEXT:    str	q0, [x0, #48]
-; GISel-MOPS-O3-NEXT:    ret
+; GISel-MOPS-LABEL: memcpy_inline_64:
+; GISel-MOPS:       // %bb.0: // %entry
+; GISel-MOPS-NEXT:    ldr q0, [x1]
+; GISel-MOPS-NEXT:    str q0, [x0]
+; GISel-MOPS-NEXT:    ldr q0, [x1, #16]
+; GISel-MOPS-NEXT:    str q0, [x0, #16]
+; GISel-MOPS-NEXT:    ldr q0, [x1, #32]
+; GISel-MOPS-NEXT:    str q0, [x0, #32]
+; GISel-MOPS-NEXT:    ldr q0, [x1, #48]
+; GISel-MOPS-NEXT:    str q0, [x0, #48]
+; GISel-MOPS-NEXT:    ret
 ;
 ; SDAG-WITHOUT-MOPS-O2-LABEL: memcpy_inline_64:
 ; SDAG-WITHOUT-MOPS-O2:       // %bb.0: // %entry
-; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp	q1, q0, [x1, #32]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp	q2, q3, [x1]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    stp	q1, q0, [x0, #32]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    stp	q2, q3, [x0]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp q1, q0, [x1, #32]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp q2, q3, [x1]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    stp q1, q0, [x0, #32]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    stp q2, q3, [x0]
 ; SDAG-WITHOUT-MOPS-O2-NEXT:    ret
 ;
 ; SDAG-MOPS-O2-LABEL: memcpy_inline_64:
 ; SDAG-MOPS-O2:       // %bb.0: // %entry
-; SDAG-MOPS-O2-NEXT:    ldp	q1, q0, [x1, #32]
-; SDAG-MOPS-O2-NEXT:    ldp	q2, q3, [x1]
-; SDAG-MOPS-O2-NEXT:    stp	q1, q0, [x0, #32]
-; SDAG-MOPS-O2-NEXT:    stp	q2, q3, [x0]
+; SDAG-MOPS-O2-NEXT:    ldp q1, q0, [x1, #32]
+; SDAG-MOPS-O2-NEXT:    ldp q2, q3, [x1]
+; SDAG-MOPS-O2-NEXT:    stp q1, q0, [x0, #32]
+; SDAG-MOPS-O2-NEXT:    stp q2, q3, [x0]
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
   call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 64, i1 false)
@@ -1858,72 +1778,48 @@ entry:
 }
 
 define void @memcpy_inline_63(ptr %dst, ptr %src, i32 %value) {
-; GISel-WITHOUT-MOPS-O0-LABEL: memcpy_inline_63:
-; GISel-WITHOUT-MOPS-O0:       // %bb.0: // %entry
-; GISel-WITHOUT-MOPS-O0-NEXT:    ldr	q0, [x1]
-; GISel-WITHOUT-MOPS-O0-NEXT:    str	q0, [x0]
-; GISel-WITHOUT-MOPS-O0-NEXT:    ldr	q0, [x1, #16]
-; GISel-WITHOUT-MOPS-O0-NEXT:    str	q0, [x0, #16]
-; GISel-WITHOUT-MOPS-O0-NEXT:    ldr	q0, [x1, #32]
-; GISel-WITHOUT-MOPS-O0-NEXT:    str	q0, [x0, #32]
-; GISel-WITHOUT-MOPS-O0-NEXT:    ldur	q0, [x1, #47]
-; GISel-WITHOUT-MOPS-O0-NEXT:    stur	q0, [x0, #47]
-; GISel-WITHOUT-MOPS-O0-NEXT:    ret
-;
-; GISel-WITHOUT-MOPS-O3-LABEL: memcpy_inline_63:
-; GISel-WITHOUT-MOPS-O3:       // %bb.0: // %entry
-; GISel-WITHOUT-MOPS-O3-NEXT:    ldr	q0, [x1]
-; GISel-WITHOUT-MOPS-O3-NEXT:    str	q0, [x0]
-; GISel-WITHOUT-MOPS-O3-NEXT:    ldr	q0, [x1, #16]
-; GISel-WITHOUT-MOPS-O3-NEXT:    str	q0, [x0, #16]
-; GISel-WITHOUT-MOPS-O3-NEXT:    ldr	q0, [x1, #32]
-; GISel-WITHOUT-MOPS-O3-NEXT:    str	q0, [x0, #32]
-; GISel-WITHOUT-MOPS-O3-NEXT:    ldur	q0, [x1, #47]
-; GISel-WITHOUT-MOPS-O3-NEXT:    stur	q0, [x0, #47]
-; GISel-WITHOUT-MOPS-O3-NEXT:    ret
-;
-; GISel-MOPS-O0-LABEL: memcpy_inline_63:
-; GISel-MOPS-O0:       // %bb.0: // %entry
-; GISel-MOPS-O0-NEXT:    ldr	q0, [x1]
-; GISel-MOPS-O0-NEXT:    str	q0, [x0]
-; GISel-MOPS-O0-NEXT:    ldr	q0, [x1, #16]
-; GISel-MOPS-O0-NEXT:    str	q0, [x0, #16]
-; GISel-MOPS-O0-NEXT:    ldr	q0, [x1, #32]
-; GISel-MOPS-O0-NEXT:    str	q0, [x0, #32]
-; GISel-MOPS-O0-NEXT:    ldur	q0, [x1, #47]
-; GISel-MOPS-O0-NEXT:    stur	q0, [x0, #47]
-; GISel-MOPS-O0-NEXT:    ret
+; GISel-WITHOUT-MOPS-LABEL: memcpy_inline_63:
+; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
+; GISel-WITHOUT-MOPS-NEXT:    ldr q0, [x1]
+; GISel-WITHOUT-MOPS-NEXT:    str q0, [x0]
+; GISel-WITHOUT-MOPS-NEXT:    ldr q0, [x1, #16]
+; GISel-WITHOUT-MOPS-NEXT:    str q0, [x0, #16]
+; GISel-WITHOUT-MOPS-NEXT:    ldr q0, [x1, #32]
+; GISel-WITHOUT-MOPS-NEXT:    str q0, [x0, #32]
+; GISel-WITHOUT-MOPS-NEXT:    ldur q0, [x1, #47]
+; GISel-WITHOUT-MOPS-NEXT:    stur q0, [x0, #47]
+; GISel-WITHOUT-MOPS-NEXT:    ret
 ;
-; GISel-MOPS-O3-LABEL: memcpy_inline_63:
-; GISel-MOPS-O3:       // %bb.0: // %entry
-; GISel-MOPS-O3-NEXT:    ldr	q0, [x1]
-; GISel-MOPS-O3-NEXT:    str	q0, [x0]
-; GISel-MOPS-O3-NEXT:    ldr	q0, [x1, #16]
-; GISel-MOPS-O3-NEXT:    str	q0, [x0, #16]
-; GISel-MOPS-O3-NEXT:    ldr	q0, [x1, #32]
-; GISel-MOPS-O3-NEXT:    str	q0, [x0, #32]
-; GISel-MOPS-O3-NEXT:    ldur	q0, [x1, #47]
-; GISel-MOPS-O3-NEXT:    stur	q0, [x0, #47]
-; GISel-MOPS-O3-NEXT:    ret
+; GISel-MOPS-LABEL: memcpy_inline_63:
+; GISel-MOPS:       // %bb.0: // %entry
+; GISel-MOPS-NEXT:    ldr q0, [x1]
+; GISel-MOPS-NEXT:    str q0, [x0]
+; GISel-MOPS-NEXT:    ldr q0, [x1, #16]
+; GISel-MOPS-NEXT:    str q0, [x0, #16]
+; GISel-MOPS-NEXT:    ldr q0, [x1, #32]
+; GISel-MOPS-NEXT:    str q0, [x0, #32]
+; GISel-MOPS-NEXT:    ldur q0, [x1, #47]
+; GISel-MOPS-NEXT:    stur q0, [x0, #47]
+; GISel-MOPS-NEXT:    ret
 ;
 ; SDAG-WITHOUT-MOPS-O2-LABEL: memcpy_inline_63:
 ; SDAG-WITHOUT-MOPS-O2:       // %bb.0: // %entry
-; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp	q3, q1, [x1, #16]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    ldur	q0, [x1, #47]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    ldr	q2, [x1]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    stur	q0, [x0, #47]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    stp	q3, q1, [x0, #16]
-; SDAG-WITHOUT-MOPS-O2-NEXT:    str	q2, [x0]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    ldp q3, q1, [x1, #16]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    ldur q0, [x1, #47]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    ldr q2, [x1]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    stur q0, [x0, #47]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    stp q3, q1, [x0, #16]
+; SDAG-WITHOUT-MOPS-O2-NEXT:    str q2, [x0]
 ; SDAG-WITHOUT-MOPS-O2-NEXT:    ret
 ;
 ; SDAG-MOPS-O2-LABEL: memcpy_inline_63:
 ; SDAG-MOPS-O2:       // %bb.0: // %entry
-; SDAG-MOPS-O2-NEXT:    ldp	q3, q1, [x1, #16]
-; SDAG-MOPS-O2-NEXT:    ldur	q0, [x1, #47]
-; SDAG-MOPS-O2-NEXT:    ldr	q2, [x1]
-; SDAG-MOPS-O2-NEXT:    stur	q0, [x0, #47]
-; SDAG-MOPS-O2-NEXT:    stp	q3, q1, [x0, #16]
-; SDAG-MOPS-O2-NEXT:    str	q2, [x0]
+; SDAG-MOPS-O2-NEXT:    ldp q3, q1, [x1, #16]
+; SDAG-MOPS-O2-NEXT:    ldur q0, [x1, #47]
+; SDAG-MOPS-O2-NEXT:    ldr q2, [x1]
+; SDAG-MOPS-O2-NEXT:    stur q0, [x0, #47]
+; SDAG-MOPS-O2-NEXT:    stp q3, q1, [x0, #16]
+; SDAG-MOPS-O2-NEXT:    str q2, [x0]
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
   call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 63, i1 false)

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fp-imm-size.ll b/llvm/test/CodeGen/AArch64/arm64-fp-imm-size.ll
index cfb7c60f5a8b0..9e187378eea68 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fp-imm-size.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fp-imm-size.ll
@@ -1,60 +1,95 @@
-; RUN: llc < %s -mtriple=arm64-apple-darwin | FileCheck %s
-; RUN: llc < %s -mtriple=arm64-apple-darwin -global-isel | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=arm64-apple-darwin | FileCheck --check-prefixes=CHECK,CHECK-SD %s
+; RUN: llc < %s -mtriple=arm64-apple-darwin -global-isel | FileCheck --check-prefixes=CHECK,CHECK-GI %s
 
-; CHECK: literal8
-; CHECK: .quad  0x400921fb54442d18
 define double @foo() optsize {
-; CHECK: _foo:
-; CHECK: adrp x[[REG:[0-9]+]], lCPI0_0 at PAGE
-; CHECK: ldr  d0, [x[[REG]], lCPI0_0 at PAGEOFF]
-; CHECK-NEXT: ret
+; CHECK-LABEL: foo:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh0:
+; CHECK-NEXT:    adrp x8, lCPI0_0 at PAGE
+; CHECK-NEXT:  Lloh1:
+; CHECK-NEXT:    ldr d0, [x8, lCPI0_0 at PAGEOFF]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh0, Lloh1
   ret double 0x400921FB54442D18
 }
 
-; CHECK: literal8
-; CHECK: .quad 0x0000001fffffffc
 define double @foo2() optsize {
-; CHECK: _foo2:
-; CHECK: adrp x[[REG:[0-9]+]], lCPI1_0 at PAGE
-; CHECK: ldr  d0, [x[[REG]], lCPI1_0 at PAGEOFF]
-; CHECK-NEXT: ret
+; CHECK-LABEL: foo2:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh2:
+; CHECK-NEXT:    adrp x8, lCPI1_0 at PAGE
+; CHECK-NEXT:  Lloh3:
+; CHECK-NEXT:    ldr d0, [x8, lCPI1_0 at PAGEOFF]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh2, Lloh3
   ret double 0x1FFFFFFFC1
 }
 
 define float @bar() optsize {
-; CHECK: _bar:
-; CHECK: adrp x[[REG:[0-9]+]], lCPI2_0 at PAGE
-; CHECK: ldr  s0, [x[[REG]], lCPI2_0 at PAGEOFF]
-; CHECK-NEXT:  ret
+; CHECK-LABEL: bar:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh4:
+; CHECK-NEXT:    adrp x8, lCPI2_0 at PAGE
+; CHECK-NEXT:  Lloh5:
+; CHECK-NEXT:    ldr s0, [x8, lCPI2_0 at PAGEOFF]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh4, Lloh5
   ret float 0x400921FB60000000
 }
 
-; CHECK: literal16
-; CHECK: .quad 0
-; CHECK: .quad 0
 define fp128 @baz() optsize {
-; CHECK: _baz:
-; CHECK:  adrp x[[REG:[0-9]+]], lCPI3_0 at PAGE
-; CHECK:  ldr  q0, [x[[REG]], lCPI3_0 at PAGEOFF]
-; CHECK-NEXT:  ret
+; CHECK-SD-LABEL: baz:
+; CHECK-SD:       ; %bb.0:
+; CHECK-SD-NEXT:  Lloh6:
+; CHECK-SD-NEXT:    adrp x8, lCPI3_0 at PAGE
+; CHECK-SD-NEXT:  Lloh7:
+; CHECK-SD-NEXT:    ldr q0, [x8, lCPI3_0 at PAGEOFF]
+; CHECK-SD-NEXT:    ret
+; CHECK-SD-NEXT:    .loh AdrpLdr Lloh6, Lloh7
+;
+; CHECK-GI-LABEL: baz:
+; CHECK-GI:       ; %bb.0:
+; CHECK-GI-NEXT:    movi.2d v0, #0000000000000000
+; CHECK-GI-NEXT:    ret
   ret fp128 0xL00000000000000000000000000000000
 }
 
-; CHECK: literal8
-; CHECK: .quad 0x0000001fffffffd
 define double @foo2_pgso() !prof !14 {
-; CHECK: _foo2_pgso:
-; CHECK: adrp x[[REG:[0-9]+]], lCPI4_0 at PAGE
-; CHECK: ldr  d0, [x[[REG]], lCPI4_0 at PAGEOFF]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: foo2_pgso:
+; CHECK-SD:       ; %bb.0:
+; CHECK-SD-NEXT:  Lloh8:
+; CHECK-SD-NEXT:    adrp x8, lCPI4_0 at PAGE
+; CHECK-SD-NEXT:  Lloh9:
+; CHECK-SD-NEXT:    ldr d0, [x8, lCPI4_0 at PAGEOFF]
+; CHECK-SD-NEXT:    ret
+; CHECK-SD-NEXT:    .loh AdrpLdr Lloh8, Lloh9
+;
+; CHECK-GI-LABEL: foo2_pgso:
+; CHECK-GI:       ; %bb.0:
+; CHECK-GI-NEXT:    mov x8, #137438887936 ; =0x1fffff0000
+; CHECK-GI-NEXT:    movk x8, #65489
+; CHECK-GI-NEXT:    fmov d0, x8
+; CHECK-GI-NEXT:    ret
   ret double 0x1FFFFFFFd1
 }
 
 define float @bar_pgso() !prof !14 {
-; CHECK: _bar_pgso:
-; CHECK: adrp x[[REG:[0-9]+]], lCPI5_0 at PAGE
-; CHECK: ldr  s0, [x[[REG]], lCPI5_0 at PAGEOFF]
-; CHECK-NEXT:  ret
+; CHECK-SD-LABEL: bar_pgso:
+; CHECK-SD:       ; %bb.0:
+; CHECK-SD-NEXT:  Lloh10:
+; CHECK-SD-NEXT:    adrp x8, lCPI5_0 at PAGE
+; CHECK-SD-NEXT:  Lloh11:
+; CHECK-SD-NEXT:    ldr s0, [x8, lCPI5_0 at PAGEOFF]
+; CHECK-SD-NEXT:    ret
+; CHECK-SD-NEXT:    .loh AdrpLdr Lloh10, Lloh11
+;
+; CHECK-GI-LABEL: bar_pgso:
+; CHECK-GI:       ; %bb.0:
+; CHECK-GI-NEXT:    mov w8, #4060 ; =0xfdc
+; CHECK-GI-NEXT:    movk w8, #16457, lsl #16
+; CHECK-GI-NEXT:    fmov s0, w8
+; CHECK-GI-NEXT:    ret
   ret float 0x400921FB80000000
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fp-imm.ll b/llvm/test/CodeGen/AArch64/arm64-fp-imm.ll
index 61eb67486ae3d..d7c5f2ae35766 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fp-imm.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fp-imm.ll
@@ -1,32 +1,42 @@
-; RUN: llc < %s -mtriple=arm64-apple-darwin | FileCheck %s
-; RUN: llc < %s -mtriple=arm64-apple-darwin -global-isel | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=arm64-apple-darwin | FileCheck --check-prefixes=CHECK,CHECK-SD %s
+; RUN: llc < %s -mtriple=arm64-apple-darwin -global-isel | FileCheck --check-prefixes=CHECK,CHECK-GI %s
 
-; CHECK: literal8
-; CHECK: .quad 0x400921fb54442d18
 define double @foo() {
-; CHECK: _foo:
-; CHECK: adrp x[[REG:[0-9]+]], lCPI0_0 at PAGE
-; CHECK: ldr  d0, [x[[REG]], lCPI0_0 at PAGEOFF]
-; CHECK-NEXT: ret
+; CHECK-LABEL: foo:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh0:
+; CHECK-NEXT:    adrp x8, lCPI0_0 at PAGE
+; CHECK-NEXT:  Lloh1:
+; CHECK-NEXT:    ldr d0, [x8, lCPI0_0 at PAGEOFF]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh0, Lloh1
   ret double 0x400921FB54442D18
 }
 
 define float @bar() {
-; CHECK: _bar:
-; CHECK:  mov  [[REG:w[0-9]+]], #4059
-; CHECK:  movk [[REG]], #16457, lsl #16
-; CHECK:  fmov s0, [[REG]]
-; CHECK-NEXT:  ret
+; CHECK-LABEL: bar:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    mov w8, #4059 ; =0xfdb
+; CHECK-NEXT:    movk w8, #16457, lsl #16
+; CHECK-NEXT:    fmov s0, w8
+; CHECK-NEXT:    ret
   ret float 0x400921FB60000000
 }
 
-; CHECK: literal16
-; CHECK: .quad 0
-; CHECK: .quad 0
 define fp128 @baz() {
-; CHECK: _baz:
-; CHECK:  adrp x[[REG:[0-9]+]], lCPI2_0 at PAGE
-; CHECK:  ldr  q0, [x[[REG]], lCPI2_0 at PAGEOFF]
-; CHECK-NEXT:  ret
+; CHECK-SD-LABEL: baz:
+; CHECK-SD:       ; %bb.0:
+; CHECK-SD-NEXT:  Lloh2:
+; CHECK-SD-NEXT:    adrp x8, lCPI2_0 at PAGE
+; CHECK-SD-NEXT:  Lloh3:
+; CHECK-SD-NEXT:    ldr q0, [x8, lCPI2_0 at PAGEOFF]
+; CHECK-SD-NEXT:    ret
+; CHECK-SD-NEXT:    .loh AdrpLdr Lloh2, Lloh3
+;
+; CHECK-GI-LABEL: baz:
+; CHECK-GI:       ; %bb.0:
+; CHECK-GI-NEXT:    movi.2d v0, #0000000000000000
+; CHECK-GI-NEXT:    ret
   ret fp128 0xL00000000000000000000000000000000
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fp128.ll b/llvm/test/CodeGen/AArch64/arm64-fp128.ll
index 498dce138febf..c4f91c66fb9a6 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fp128.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fp128.ll
@@ -1470,25 +1470,23 @@ define <2 x fp128> @vec_neg_sub(<2 x fp128> %in) {
 ;
 ; CHECK-GI-LABEL: vec_neg_sub:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    sub sp, sp, #64
-; CHECK-GI-NEXT:    str x30, [sp, #48] // 8-byte Spill
-; CHECK-GI-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-GI-NEXT:    sub sp, sp, #48
+; CHECK-GI-NEXT:    str x30, [sp, #32] // 8-byte Spill
+; CHECK-GI-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-GI-NEXT:    .cfi_offset w30, -16
 ; CHECK-GI-NEXT:    mov v2.16b, v0.16b
-; CHECK-GI-NEXT:    adrp x8, .LCPI47_0
-; CHECK-GI-NEXT:    str q1, [sp, #32] // 16-byte Spill
-; CHECK-GI-NEXT:    ldr q0, [x8, :lo12:.LCPI47_0]
+; CHECK-GI-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-GI-NEXT:    str q1, [sp, #16] // 16-byte Spill
 ; CHECK-GI-NEXT:    mov v1.16b, v2.16b
-; CHECK-GI-NEXT:    str q0, [sp] // 16-byte Spill
 ; CHECK-GI-NEXT:    bl __subtf3
-; CHECK-GI-NEXT:    str q0, [sp, #16] // 16-byte Spill
-; CHECK-GI-NEXT:    ldr q0, [sp] // 16-byte Reload
-; CHECK-GI-NEXT:    ldr q1, [sp, #32] // 16-byte Reload
+; CHECK-GI-NEXT:    str q0, [sp] // 16-byte Spill
+; CHECK-GI-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-GI-NEXT:    ldr q1, [sp, #16] // 16-byte Reload
 ; CHECK-GI-NEXT:    bl __subtf3
 ; CHECK-GI-NEXT:    mov v1.16b, v0.16b
-; CHECK-GI-NEXT:    ldr q0, [sp, #16] // 16-byte Reload
-; CHECK-GI-NEXT:    ldr x30, [sp, #48] // 8-byte Reload
-; CHECK-GI-NEXT:    add sp, sp, #64
+; CHECK-GI-NEXT:    ldr q0, [sp] // 16-byte Reload
+; CHECK-GI-NEXT:    ldr x30, [sp, #32] // 8-byte Reload
+; CHECK-GI-NEXT:    add sp, sp, #48
 ; CHECK-GI-NEXT:    ret
   %ret = fsub <2 x fp128> zeroinitializer, %in
   ret <2 x fp128> %ret

diff  --git a/llvm/test/CodeGen/AArch64/arm64-vhadd.ll b/llvm/test/CodeGen/AArch64/arm64-vhadd.ll
index 09ea9eeb03914..4723867bc99f0 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vhadd.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vhadd.ll
@@ -1437,7 +1437,7 @@ define <2 x i16> @rhadd8x2_sext_asr(<2 x i8> %src1, <2 x i8> %src2) {
 ; CHECK-GI-NEXT:    shl.2s v1, v1, #24
 ; CHECK-GI-NEXT:    shl.2s v0, v0, #24
 ; CHECK-GI-NEXT:    mov w8, #1 // =0x1
-; CHECK-GI-NEXT:    dup.2s v2, w8
+; CHECK-GI-NEXT:    movi.2s v2, #1
 ; CHECK-GI-NEXT:    sshr.2s v1, v1, #24
 ; CHECK-GI-NEXT:    ssra.2s v1, v0, #24
 ; CHECK-GI-NEXT:    fmov s0, w8
@@ -1472,7 +1472,7 @@ define <2 x i16> @rhadd8x2_zext_asr(<2 x i8> %src1, <2 x i8> %src2) {
 ; CHECK-GI-NEXT:    mov w8, #1 // =0x1
 ; CHECK-GI-NEXT:    and.8b v0, v0, v2
 ; CHECK-GI-NEXT:    and.8b v1, v1, v2
-; CHECK-GI-NEXT:    dup.2s v2, w8
+; CHECK-GI-NEXT:    movi.2s v2, #1
 ; CHECK-GI-NEXT:    add.2s v0, v0, v1
 ; CHECK-GI-NEXT:    fmov s1, w8
 ; CHECK-GI-NEXT:    add.2s v0, v0, v2
@@ -1510,7 +1510,7 @@ define <2 x i16> @rhadd8x2_sext_lsr(<2 x i8> %src1, <2 x i8> %src2) {
 ; CHECK-GI-NEXT:    shl.2s v1, v1, #24
 ; CHECK-GI-NEXT:    shl.2s v0, v0, #24
 ; CHECK-GI-NEXT:    mov w8, #1 // =0x1
-; CHECK-GI-NEXT:    dup.2s v2, w8
+; CHECK-GI-NEXT:    movi.2s v2, #1
 ; CHECK-GI-NEXT:    sshr.2s v1, v1, #24
 ; CHECK-GI-NEXT:    ssra.2s v1, v0, #24
 ; CHECK-GI-NEXT:    fmov s0, w8
@@ -1545,7 +1545,7 @@ define <2 x i16> @rhadd8x2_zext_lsr(<2 x i8> %src1, <2 x i8> %src2) {
 ; CHECK-GI-NEXT:    mov w8, #1 // =0x1
 ; CHECK-GI-NEXT:    and.8b v0, v0, v2
 ; CHECK-GI-NEXT:    and.8b v1, v1, v2
-; CHECK-GI-NEXT:    dup.2s v2, w8
+; CHECK-GI-NEXT:    movi.2s v2, #1
 ; CHECK-GI-NEXT:    add.2s v0, v0, v1
 ; CHECK-GI-NEXT:    fmov s1, w8
 ; CHECK-GI-NEXT:    add.2s v0, v0, v2

diff  --git a/llvm/test/CodeGen/AArch64/dup.ll b/llvm/test/CodeGen/AArch64/dup.ll
index fa30815fdec30..9897a89dbfbd0 100644
--- a/llvm/test/CodeGen/AArch64/dup.ll
+++ b/llvm/test/CodeGen/AArch64/dup.ll
@@ -2564,14 +2564,22 @@ entry:
 }
 
 define <2 x fp128> @loaddup_str_v2fp128(ptr %p) {
-; CHECK-LABEL: loaddup_str_v2fp128:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ldr q0, [x0]
-; CHECK-NEXT:    adrp x8, .LCPI155_0
-; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI155_0]
-; CHECK-NEXT:    mov v1.16b, v0.16b
-; CHECK-NEXT:    str q2, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: loaddup_str_v2fp128:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    ldr q0, [x0]
+; CHECK-SD-NEXT:    adrp x8, .LCPI155_0
+; CHECK-SD-NEXT:    ldr q2, [x8, :lo12:.LCPI155_0]
+; CHECK-SD-NEXT:    mov v1.16b, v0.16b
+; CHECK-SD-NEXT:    str q2, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: loaddup_str_v2fp128:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    movi v2.2d, #0000000000000000
+; CHECK-GI-NEXT:    mov v1.16b, v0.16b
+; CHECK-GI-NEXT:    str q2, [x0]
+; CHECK-GI-NEXT:    ret
 entry:
   %a = load fp128, ptr %p
   %b = insertelement <2 x fp128> poison, fp128 %a, i64 0
@@ -2618,15 +2626,24 @@ entry:
 }
 
 define <3 x fp128> @loaddup_str_v3fp128(ptr %p) {
-; CHECK-LABEL: loaddup_str_v3fp128:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ldr q0, [x0]
-; CHECK-NEXT:    adrp x8, .LCPI159_0
-; CHECK-NEXT:    ldr q3, [x8, :lo12:.LCPI159_0]
-; CHECK-NEXT:    mov v1.16b, v0.16b
-; CHECK-NEXT:    mov v2.16b, v0.16b
-; CHECK-NEXT:    str q3, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: loaddup_str_v3fp128:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    ldr q0, [x0]
+; CHECK-SD-NEXT:    adrp x8, .LCPI159_0
+; CHECK-SD-NEXT:    ldr q3, [x8, :lo12:.LCPI159_0]
+; CHECK-SD-NEXT:    mov v1.16b, v0.16b
+; CHECK-SD-NEXT:    mov v2.16b, v0.16b
+; CHECK-SD-NEXT:    str q3, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: loaddup_str_v3fp128:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    movi v3.2d, #0000000000000000
+; CHECK-GI-NEXT:    mov v1.16b, v0.16b
+; CHECK-GI-NEXT:    mov v2.16b, v0.16b
+; CHECK-GI-NEXT:    str q3, [x0]
+; CHECK-GI-NEXT:    ret
 entry:
   %a = load fp128, ptr %p
   %b = insertelement <3 x fp128> poison, fp128 %a, i64 0
@@ -2676,16 +2693,26 @@ entry:
 }
 
 define <4 x fp128> @loaddup_str_v4fp128(ptr %p) {
-; CHECK-LABEL: loaddup_str_v4fp128:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ldr q0, [x0]
-; CHECK-NEXT:    adrp x8, .LCPI163_0
-; CHECK-NEXT:    ldr q4, [x8, :lo12:.LCPI163_0]
-; CHECK-NEXT:    mov v1.16b, v0.16b
-; CHECK-NEXT:    mov v2.16b, v0.16b
-; CHECK-NEXT:    mov v3.16b, v0.16b
-; CHECK-NEXT:    str q4, [x0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: loaddup_str_v4fp128:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    ldr q0, [x0]
+; CHECK-SD-NEXT:    adrp x8, .LCPI163_0
+; CHECK-SD-NEXT:    ldr q4, [x8, :lo12:.LCPI163_0]
+; CHECK-SD-NEXT:    mov v1.16b, v0.16b
+; CHECK-SD-NEXT:    mov v2.16b, v0.16b
+; CHECK-SD-NEXT:    mov v3.16b, v0.16b
+; CHECK-SD-NEXT:    str q4, [x0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: loaddup_str_v4fp128:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    movi v4.2d, #0000000000000000
+; CHECK-GI-NEXT:    mov v1.16b, v0.16b
+; CHECK-GI-NEXT:    mov v2.16b, v0.16b
+; CHECK-GI-NEXT:    mov v3.16b, v0.16b
+; CHECK-GI-NEXT:    str q4, [x0]
+; CHECK-GI-NEXT:    ret
 entry:
   %a = load fp128, ptr %p
   %b = insertelement <4 x fp128> poison, fp128 %a, i64 0

diff  --git a/llvm/test/CodeGen/AArch64/fcvt_combine.ll b/llvm/test/CodeGen/AArch64/fcvt_combine.ll
index c190c63058fa9..d407f1a6e1cc9 100644
--- a/llvm/test/CodeGen/AArch64/fcvt_combine.ll
+++ b/llvm/test/CodeGen/AArch64/fcvt_combine.ll
@@ -469,6 +469,54 @@ define <4 x i32> @test_v4f16_i32(<4 x half> %in) {
   ret <4 x i32> %val
 }
 
+; Test conversion with NaN value.
+define <8 x i16> @test_v8f16_nan(<8 x half> %a) {
+; CHECK-NO16-SD-LABEL: test_v8f16_nan:
+; CHECK-NO16-SD:       // %bb.0: // %entry
+; CHECK-NO16-SD-NEXT:    movi v1.8h, #126, lsl #8
+; CHECK-NO16-SD-NEXT:    fcvtl v2.4s, v0.4h
+; CHECK-NO16-SD-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-SD-NEXT:    fcvtl v3.4s, v1.4h
+; CHECK-NO16-SD-NEXT:    fcvtl2 v1.4s, v1.8h
+; CHECK-NO16-SD-NEXT:    fmul v2.4s, v2.4s, v3.4s
+; CHECK-NO16-SD-NEXT:    fmul v0.4s, v0.4s, v1.4s
+; CHECK-NO16-SD-NEXT:    fcvtn v1.4h, v2.4s
+; CHECK-NO16-SD-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-SD-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-SD-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-SD-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-SD-NEXT:    fcvtzs v1.4s, v1.4s
+; CHECK-NO16-SD-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-SD-NEXT:    ret
+;
+; CHECK-FP16-LABEL: test_v8f16_nan:
+; CHECK-FP16:       // %bb.0: // %entry
+; CHECK-FP16-NEXT:    movi v1.8h, #126, lsl #8
+; CHECK-FP16-NEXT:    fmul v0.8h, v0.8h, v1.8h
+; CHECK-FP16-NEXT:    fcvtzs v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+;
+; CHECK-NO16-GI-LABEL: test_v8f16_nan:
+; CHECK-NO16-GI:       // %bb.0: // %entry
+; CHECK-NO16-GI-NEXT:    movi v1.4h, #126, lsl #8
+; CHECK-NO16-GI-NEXT:    fcvtl v2.4s, v0.4h
+; CHECK-NO16-GI-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-GI-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-GI-NEXT:    fmul v2.4s, v2.4s, v1.4s
+; CHECK-NO16-GI-NEXT:    fmul v0.4s, v0.4s, v1.4s
+; CHECK-NO16-GI-NEXT:    fcvtn v1.4h, v2.4s
+; CHECK-NO16-GI-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-GI-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-GI-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-GI-NEXT:    fcvtzs v1.4s, v1.4s
+; CHECK-NO16-GI-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-GI-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-GI-NEXT:    ret
+entry:
+  %fmul = fmul <8 x half> %a, splat (half 0xH7E00)
+  %fptosi = fptosi <8 x half> %fmul to <8 x i16>
+  ret <8 x i16> %fptosi
+}
 
 declare <2 x i32> @llvm.fptosi.sat.v2i32.v2f32(<2 x float>)
 declare <2 x i32> @llvm.fptoui.sat.v2i32.v2f32(<2 x float>)
@@ -543,12 +591,12 @@ define <2 x i32> @test4_sat(<2 x double> %d) {
 ; CHECK-NO16-GI-LABEL: test4_sat:
 ; CHECK-NO16-GI:       // %bb.0:
 ; CHECK-NO16-GI-NEXT:    fcvtzs v0.2d, v0.2d, #4
-; CHECK-NO16-GI-NEXT:    adrp x8, .LCPI21_1
-; CHECK-NO16-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI21_1]
-; CHECK-NO16-GI-NEXT:    adrp x8, .LCPI21_0
+; CHECK-NO16-GI-NEXT:    adrp x8, .LCPI22_1
+; CHECK-NO16-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI22_1]
+; CHECK-NO16-GI-NEXT:    adrp x8, .LCPI22_0
 ; CHECK-NO16-GI-NEXT:    cmgt v2.2d, v1.2d, v0.2d
 ; CHECK-NO16-GI-NEXT:    bif v0.16b, v1.16b, v2.16b
-; CHECK-NO16-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI21_0]
+; CHECK-NO16-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI22_0]
 ; CHECK-NO16-GI-NEXT:    cmgt v2.2d, v0.2d, v1.2d
 ; CHECK-NO16-GI-NEXT:    bif v0.16b, v1.16b, v2.16b
 ; CHECK-NO16-GI-NEXT:    xtn v0.2s, v0.2d
@@ -557,12 +605,12 @@ define <2 x i32> @test4_sat(<2 x double> %d) {
 ; CHECK-FP16-GI-LABEL: test4_sat:
 ; CHECK-FP16-GI:       // %bb.0:
 ; CHECK-FP16-GI-NEXT:    fcvtzs v0.2d, v0.2d, #4
-; CHECK-FP16-GI-NEXT:    adrp x8, .LCPI21_1
-; CHECK-FP16-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI21_1]
-; CHECK-FP16-GI-NEXT:    adrp x8, .LCPI21_0
+; CHECK-FP16-GI-NEXT:    adrp x8, .LCPI22_1
+; CHECK-FP16-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI22_1]
+; CHECK-FP16-GI-NEXT:    adrp x8, .LCPI22_0
 ; CHECK-FP16-GI-NEXT:    cmgt v2.2d, v1.2d, v0.2d
 ; CHECK-FP16-GI-NEXT:    bif v0.16b, v1.16b, v2.16b
-; CHECK-FP16-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI21_0]
+; CHECK-FP16-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI22_0]
 ; CHECK-FP16-GI-NEXT:    cmgt v2.2d, v0.2d, v1.2d
 ; CHECK-FP16-GI-NEXT:    bif v0.16b, v1.16b, v2.16b
 ; CHECK-FP16-GI-NEXT:    xtn v0.2s, v0.2d
@@ -730,16 +778,16 @@ define <2 x i32> @test9_sat(<2 x float> %f) {
 ;
 ; CHECK-NO16-GI-LABEL: test9_sat:
 ; CHECK-NO16-GI:       // %bb.0:
-; CHECK-NO16-GI-NEXT:    adrp x8, .LCPI27_0
-; CHECK-NO16-GI-NEXT:    ldr d1, [x8, :lo12:.LCPI27_0]
+; CHECK-NO16-GI-NEXT:    adrp x8, .LCPI28_0
+; CHECK-NO16-GI-NEXT:    ldr d1, [x8, :lo12:.LCPI28_0]
 ; CHECK-NO16-GI-NEXT:    fmul v0.2s, v0.2s, v1.2s
 ; CHECK-NO16-GI-NEXT:    fcvtzu v0.2s, v0.2s
 ; CHECK-NO16-GI-NEXT:    ret
 ;
 ; CHECK-FP16-GI-LABEL: test9_sat:
 ; CHECK-FP16-GI:       // %bb.0:
-; CHECK-FP16-GI-NEXT:    adrp x8, .LCPI27_0
-; CHECK-FP16-GI-NEXT:    ldr d1, [x8, :lo12:.LCPI27_0]
+; CHECK-FP16-GI-NEXT:    adrp x8, .LCPI28_0
+; CHECK-FP16-GI-NEXT:    ldr d1, [x8, :lo12:.LCPI28_0]
 ; CHECK-FP16-GI-NEXT:    fmul v0.2s, v0.2s, v1.2s
 ; CHECK-FP16-GI-NEXT:    fcvtzu v0.2s, v0.2s
 ; CHECK-FP16-GI-NEXT:    ret

diff  --git a/llvm/test/CodeGen/AArch64/fpimm.ll b/llvm/test/CodeGen/AArch64/fpimm.ll
index e2944243338f5..5dabf7ffda4dc 100644
--- a/llvm/test/CodeGen/AArch64/fpimm.ll
+++ b/llvm/test/CodeGen/AArch64/fpimm.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -mtriple=aarch64-linux-gnu                                                  -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=aarch64-apple-darwin -code-model=large                             -verify-machineinstrs < %s | FileCheck %s --check-prefixes=LARGE
 ; RUN: llc -mtriple=aarch64    -code-model=tiny                              -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=aarch64-linux-gnu -global-isel                           -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -global-isel                           -verify-machineinstrs < %s | FileCheck %s --check-prefixes=GI
 
 @varf32 = global float 0.0
 @varf64 = global double 0.0
@@ -15,8 +15,9 @@ define void @check_float() {
 ; CHECK-DAG: fmov {{s[0-9]+}}, #8.5
 
   %newval2 = fadd float %val, 128.0
-  store volatile float %newval2, ptr @varf32
 ; CHECK-DAG: movi [[REG:v[0-9s]+]].2s, #67, lsl #24
+; GI-DAG: fmov [[REG:s[0-9s]+]],  #8.5
+  store volatile float %newval2, ptr @varf32
 
 ; CHECK: ret
   ret void

diff  --git a/llvm/test/CodeGen/AArch64/fptoui-sat-scalar.ll b/llvm/test/CodeGen/AArch64/fptoui-sat-scalar.ll
index 0ad09d416ce68..7a5fe0f4222bc 100644
--- a/llvm/test/CodeGen/AArch64/fptoui-sat-scalar.ll
+++ b/llvm/test/CodeGen/AArch64/fptoui-sat-scalar.ll
@@ -790,9 +790,8 @@ define i32 @test_unsigned_f128_i32(fp128 %f) {
 ; CHECK-GI-NEXT:    .cfi_offset w19, -8
 ; CHECK-GI-NEXT:    .cfi_offset w20, -16
 ; CHECK-GI-NEXT:    .cfi_offset w30, -32
-; CHECK-GI-NEXT:    adrp x8, .LCPI30_1
+; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    str q0, [sp] // 16-byte Spill
-; CHECK-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI30_1]
 ; CHECK-GI-NEXT:    bl __gttf2
 ; CHECK-GI-NEXT:    ldr q0, [sp] // 16-byte Reload
 ; CHECK-GI-NEXT:    cmp w0, #0

diff  --git a/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll b/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
index ecca1165753bf..f044f5184ab32 100644
--- a/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
@@ -480,9 +480,8 @@ define <1 x i32> @test_unsigned_v1f128_v1i32(<1 x fp128> %f) {
 ; CHECK-GI-NEXT:    .cfi_offset w19, -8
 ; CHECK-GI-NEXT:    .cfi_offset w20, -16
 ; CHECK-GI-NEXT:    .cfi_offset w30, -32
-; CHECK-GI-NEXT:    adrp x8, .LCPI14_1
+; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    str q0, [sp] // 16-byte Spill
-; CHECK-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI14_1]
 ; CHECK-GI-NEXT:    bl __gttf2
 ; CHECK-GI-NEXT:    ldr q0, [sp] // 16-byte Reload
 ; CHECK-GI-NEXT:    cmp w0, #0
@@ -564,21 +563,18 @@ define <2 x i32> @test_unsigned_v2f128_v2i32(<2 x fp128> %f) {
 ;
 ; CHECK-GI-LABEL: test_unsigned_v2f128_v2i32:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    sub sp, sp, #96
-; CHECK-GI-NEXT:    str x30, [sp, #48] // 8-byte Spill
-; CHECK-GI-NEXT:    stp x22, x21, [sp, #64] // 16-byte Folded Spill
-; CHECK-GI-NEXT:    stp x20, x19, [sp, #80] // 16-byte Folded Spill
-; CHECK-GI-NEXT:    .cfi_def_cfa_offset 96
+; CHECK-GI-NEXT:    sub sp, sp, #80
+; CHECK-GI-NEXT:    str x30, [sp, #32] // 8-byte Spill
+; CHECK-GI-NEXT:    stp x22, x21, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT:    stp x20, x19, [sp, #64] // 16-byte Folded Spill
+; CHECK-GI-NEXT:    .cfi_def_cfa_offset 80
 ; CHECK-GI-NEXT:    .cfi_offset w19, -8
 ; CHECK-GI-NEXT:    .cfi_offset w20, -16
 ; CHECK-GI-NEXT:    .cfi_offset w21, -24
 ; CHECK-GI-NEXT:    .cfi_offset w22, -32
 ; CHECK-GI-NEXT:    .cfi_offset w30, -48
-; CHECK-GI-NEXT:    adrp x8, .LCPI15_1
-; CHECK-GI-NEXT:    str q0, [sp] // 16-byte Spill
-; CHECK-GI-NEXT:    ldr q2, [x8, :lo12:.LCPI15_1]
-; CHECK-GI-NEXT:    stp q2, q1, [sp, #16] // 32-byte Folded Spill
-; CHECK-GI-NEXT:    mov v1.16b, v2.16b
+; CHECK-GI-NEXT:    stp q0, q1, [sp] // 32-byte Folded Spill
+; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    bl __gttf2
 ; CHECK-GI-NEXT:    ldr q0, [sp] // 16-byte Reload
 ; CHECK-GI-NEXT:    cmp w0, #0
@@ -600,12 +596,12 @@ define <2 x i32> @test_unsigned_v2f128_v2i32(<2 x fp128> %f) {
 ; CHECK-GI-NEXT:    csel x8, x20, x21, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixunstfsi
-; CHECK-GI-NEXT:    ldp q1, q0, [sp, #16] // 32-byte Folded Reload
+; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-GI-NEXT:    ldr q0, [sp, #16] // 16-byte Reload
 ; CHECK-GI-NEXT:    mov w19, w0
 ; CHECK-GI-NEXT:    bl __gttf2
-; CHECK-GI-NEXT:    ldr q0, [sp, #32] // 16-byte Reload
+; CHECK-GI-NEXT:    ldp q1, q0, [sp] // 32-byte Folded Reload
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    ldr q1, [sp] // 16-byte Reload
 ; CHECK-GI-NEXT:    fmov x8, d0
 ; CHECK-GI-NEXT:    csel x20, x8, xzr, gt
 ; CHECK-GI-NEXT:    mov x8, v0.d[1]
@@ -620,12 +616,12 @@ define <2 x i32> @test_unsigned_v2f128_v2i32(<2 x fp128> %f) {
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixunstfsi
 ; CHECK-GI-NEXT:    fmov s0, w19
-; CHECK-GI-NEXT:    ldp x20, x19, [sp, #80] // 16-byte Folded Reload
-; CHECK-GI-NEXT:    ldp x22, x21, [sp, #64] // 16-byte Folded Reload
-; CHECK-GI-NEXT:    ldr x30, [sp, #48] // 8-byte Reload
+; CHECK-GI-NEXT:    ldp x20, x19, [sp, #64] // 16-byte Folded Reload
+; CHECK-GI-NEXT:    ldp x22, x21, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT:    ldr x30, [sp, #32] // 8-byte Reload
 ; CHECK-GI-NEXT:    mov v0.s[1], w0
 ; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 killed $q0
-; CHECK-GI-NEXT:    add sp, sp, #96
+; CHECK-GI-NEXT:    add sp, sp, #80
 ; CHECK-GI-NEXT:    ret
     %x = call <2 x i32> @llvm.fptoui.sat.v2f128.v2i32(<2 x fp128> %f)
     ret <2 x i32> %x
@@ -696,21 +692,20 @@ define <3 x i32> @test_unsigned_v3f128_v3i32(<3 x fp128> %f) {
 ;
 ; CHECK-GI-LABEL: test_unsigned_v3f128_v3i32:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    sub sp, sp, #112
-; CHECK-GI-NEXT:    stp x30, x23, [sp, #64] // 16-byte Folded Spill
-; CHECK-GI-NEXT:    stp x22, x21, [sp, #80] // 16-byte Folded Spill
-; CHECK-GI-NEXT:    stp x20, x19, [sp, #96] // 16-byte Folded Spill
-; CHECK-GI-NEXT:    .cfi_def_cfa_offset 112
+; CHECK-GI-NEXT:    sub sp, sp, #96
+; CHECK-GI-NEXT:    stp x30, x23, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT:    stp x22, x21, [sp, #64] // 16-byte Folded Spill
+; CHECK-GI-NEXT:    stp x20, x19, [sp, #80] // 16-byte Folded Spill
+; CHECK-GI-NEXT:    .cfi_def_cfa_offset 96
 ; CHECK-GI-NEXT:    .cfi_offset w19, -8
 ; CHECK-GI-NEXT:    .cfi_offset w20, -16
 ; CHECK-GI-NEXT:    .cfi_offset w21, -24
 ; CHECK-GI-NEXT:    .cfi_offset w22, -32
 ; CHECK-GI-NEXT:    .cfi_offset w23, -40
 ; CHECK-GI-NEXT:    .cfi_offset w30, -48
-; CHECK-GI-NEXT:    adrp x8, .LCPI16_1
 ; CHECK-GI-NEXT:    stp q1, q0, [sp] // 32-byte Folded Spill
-; CHECK-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI16_1]
-; CHECK-GI-NEXT:    stp q1, q2, [sp, #32] // 32-byte Folded Spill
+; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-GI-NEXT:    str q2, [sp, #32] // 16-byte Spill
 ; CHECK-GI-NEXT:    bl __gttf2
 ; CHECK-GI-NEXT:    ldr q0, [sp, #16] // 16-byte Reload
 ; CHECK-GI-NEXT:    cmp w0, #0
@@ -732,8 +727,8 @@ define <3 x i32> @test_unsigned_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-GI-NEXT:    csel x8, x20, x21, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixunstfsi
+; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    ldr q0, [sp] // 16-byte Reload
-; CHECK-GI-NEXT:    ldr q1, [sp, #32] // 16-byte Reload
 ; CHECK-GI-NEXT:    mov w19, w0
 ; CHECK-GI-NEXT:    bl __gttf2
 ; CHECK-GI-NEXT:    ldp q0, q1, [sp] // 32-byte Folded Reload
@@ -751,12 +746,12 @@ define <3 x i32> @test_unsigned_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-GI-NEXT:    csel x8, x22, x21, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixunstfsi
-; CHECK-GI-NEXT:    ldp q1, q0, [sp, #32] // 32-byte Folded Reload
+; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-GI-NEXT:    ldr q0, [sp, #32] // 16-byte Reload
 ; CHECK-GI-NEXT:    mov w20, w0
 ; CHECK-GI-NEXT:    bl __gttf2
-; CHECK-GI-NEXT:    ldr q0, [sp, #48] // 16-byte Reload
+; CHECK-GI-NEXT:    ldp q1, q0, [sp, #16] // 32-byte Folded Reload
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    ldr q1, [sp, #16] // 16-byte Reload
 ; CHECK-GI-NEXT:    fmov x8, d0
 ; CHECK-GI-NEXT:    csel x22, x8, xzr, gt
 ; CHECK-GI-NEXT:    mov x8, v0.d[1]
@@ -771,12 +766,12 @@ define <3 x i32> @test_unsigned_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixunstfsi
 ; CHECK-GI-NEXT:    fmov s0, w19
-; CHECK-GI-NEXT:    ldp x22, x21, [sp, #80] // 16-byte Folded Reload
-; CHECK-GI-NEXT:    ldp x30, x23, [sp, #64] // 16-byte Folded Reload
+; CHECK-GI-NEXT:    ldp x22, x21, [sp, #64] // 16-byte Folded Reload
+; CHECK-GI-NEXT:    ldp x30, x23, [sp, #48] // 16-byte Folded Reload
 ; CHECK-GI-NEXT:    mov v0.s[1], w20
-; CHECK-GI-NEXT:    ldp x20, x19, [sp, #96] // 16-byte Folded Reload
+; CHECK-GI-NEXT:    ldp x20, x19, [sp, #80] // 16-byte Folded Reload
 ; CHECK-GI-NEXT:    mov v0.s[2], w0
-; CHECK-GI-NEXT:    add sp, sp, #112
+; CHECK-GI-NEXT:    add sp, sp, #96
 ; CHECK-GI-NEXT:    ret
     %x = call <3 x i32> @llvm.fptoui.sat.v3f128.v3i32(<3 x fp128> %f)
     ret <3 x i32> %x
@@ -865,12 +860,12 @@ define <4 x i32> @test_unsigned_v4f128_v4i32(<4 x fp128> %f) {
 ;
 ; CHECK-GI-LABEL: test_unsigned_v4f128_v4i32:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    sub sp, sp, #144
-; CHECK-GI-NEXT:    str x30, [sp, #80] // 8-byte Spill
-; CHECK-GI-NEXT:    stp x24, x23, [sp, #96] // 16-byte Folded Spill
-; CHECK-GI-NEXT:    stp x22, x21, [sp, #112] // 16-byte Folded Spill
-; CHECK-GI-NEXT:    stp x20, x19, [sp, #128] // 16-byte Folded Spill
-; CHECK-GI-NEXT:    .cfi_def_cfa_offset 144
+; CHECK-GI-NEXT:    sub sp, sp, #128
+; CHECK-GI-NEXT:    str x30, [sp, #64] // 8-byte Spill
+; CHECK-GI-NEXT:    stp x24, x23, [sp, #80] // 16-byte Folded Spill
+; CHECK-GI-NEXT:    stp x22, x21, [sp, #96] // 16-byte Folded Spill
+; CHECK-GI-NEXT:    stp x20, x19, [sp, #112] // 16-byte Folded Spill
+; CHECK-GI-NEXT:    .cfi_def_cfa_offset 128
 ; CHECK-GI-NEXT:    .cfi_offset w19, -8
 ; CHECK-GI-NEXT:    .cfi_offset w20, -16
 ; CHECK-GI-NEXT:    .cfi_offset w21, -24
@@ -878,12 +873,9 @@ define <4 x i32> @test_unsigned_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-GI-NEXT:    .cfi_offset w23, -40
 ; CHECK-GI-NEXT:    .cfi_offset w24, -48
 ; CHECK-GI-NEXT:    .cfi_offset w30, -64
-; CHECK-GI-NEXT:    adrp x8, .LCPI17_1
 ; CHECK-GI-NEXT:    stp q1, q2, [sp] // 32-byte Folded Spill
-; CHECK-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI17_1]
-; CHECK-GI-NEXT:    str q0, [sp, #48] // 16-byte Spill
-; CHECK-GI-NEXT:    str q3, [sp, #32] // 16-byte Spill
-; CHECK-GI-NEXT:    str q1, [sp, #64] // 16-byte Spill
+; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-GI-NEXT:    stp q3, q0, [sp, #32] // 32-byte Folded Spill
 ; CHECK-GI-NEXT:    bl __gttf2
 ; CHECK-GI-NEXT:    ldr q0, [sp, #48] // 16-byte Reload
 ; CHECK-GI-NEXT:    cmp w0, #0
@@ -905,8 +897,8 @@ define <4 x i32> @test_unsigned_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-GI-NEXT:    csel x8, x20, x22, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixunstfsi
+; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    ldr q0, [sp] // 16-byte Reload
-; CHECK-GI-NEXT:    ldr q1, [sp, #64] // 16-byte Reload
 ; CHECK-GI-NEXT:    mov w19, w0
 ; CHECK-GI-NEXT:    bl __gttf2
 ; CHECK-GI-NEXT:    ldr q0, [sp] // 16-byte Reload
@@ -925,8 +917,8 @@ define <4 x i32> @test_unsigned_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-GI-NEXT:    csel x8, x21, x22, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixunstfsi
+; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    ldr q0, [sp, #16] // 16-byte Reload
-; CHECK-GI-NEXT:    ldr q1, [sp, #64] // 16-byte Reload
 ; CHECK-GI-NEXT:    mov w20, w0
 ; CHECK-GI-NEXT:    bl __gttf2
 ; CHECK-GI-NEXT:    ldr q0, [sp, #16] // 16-byte Reload
@@ -945,8 +937,8 @@ define <4 x i32> @test_unsigned_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-GI-NEXT:    csel x8, x23, x22, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixunstfsi
+; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    ldr q0, [sp, #32] // 16-byte Reload
-; CHECK-GI-NEXT:    ldr q1, [sp, #64] // 16-byte Reload
 ; CHECK-GI-NEXT:    mov w21, w0
 ; CHECK-GI-NEXT:    bl __gttf2
 ; CHECK-GI-NEXT:    ldp q0, q1, [sp, #32] // 32-byte Folded Reload
@@ -965,14 +957,14 @@ define <4 x i32> @test_unsigned_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixunstfsi
 ; CHECK-GI-NEXT:    fmov s0, w19
-; CHECK-GI-NEXT:    ldp x24, x23, [sp, #96] // 16-byte Folded Reload
-; CHECK-GI-NEXT:    ldr x30, [sp, #80] // 8-byte Reload
+; CHECK-GI-NEXT:    ldp x24, x23, [sp, #80] // 16-byte Folded Reload
+; CHECK-GI-NEXT:    ldr x30, [sp, #64] // 8-byte Reload
 ; CHECK-GI-NEXT:    mov v0.s[1], w20
-; CHECK-GI-NEXT:    ldp x20, x19, [sp, #128] // 16-byte Folded Reload
+; CHECK-GI-NEXT:    ldp x20, x19, [sp, #112] // 16-byte Folded Reload
 ; CHECK-GI-NEXT:    mov v0.s[2], w21
-; CHECK-GI-NEXT:    ldp x22, x21, [sp, #112] // 16-byte Folded Reload
+; CHECK-GI-NEXT:    ldp x22, x21, [sp, #96] // 16-byte Folded Reload
 ; CHECK-GI-NEXT:    mov v0.s[3], w0
-; CHECK-GI-NEXT:    add sp, sp, #144
+; CHECK-GI-NEXT:    add sp, sp, #128
 ; CHECK-GI-NEXT:    ret
     %x = call <4 x i32> @llvm.fptoui.sat.v4f128.v4i32(<4 x fp128> %f)
     ret <4 x i32> %x
@@ -4139,22 +4131,19 @@ define <2 x i64> @test_signed_v2f128_v2i64(<2 x fp128> %f) {
 ;
 ; CHECK-GI-LABEL: test_signed_v2f128_v2i64:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    sub sp, sp, #96
-; CHECK-GI-NEXT:    stp x30, x23, [sp, #48] // 16-byte Folded Spill
-; CHECK-GI-NEXT:    stp x22, x21, [sp, #64] // 16-byte Folded Spill
-; CHECK-GI-NEXT:    stp x20, x19, [sp, #80] // 16-byte Folded Spill
-; CHECK-GI-NEXT:    .cfi_def_cfa_offset 96
+; CHECK-GI-NEXT:    sub sp, sp, #80
+; CHECK-GI-NEXT:    stp x30, x23, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NEXT:    stp x22, x21, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT:    stp x20, x19, [sp, #64] // 16-byte Folded Spill
+; CHECK-GI-NEXT:    .cfi_def_cfa_offset 80
 ; CHECK-GI-NEXT:    .cfi_offset w19, -8
 ; CHECK-GI-NEXT:    .cfi_offset w20, -16
 ; CHECK-GI-NEXT:    .cfi_offset w21, -24
 ; CHECK-GI-NEXT:    .cfi_offset w22, -32
 ; CHECK-GI-NEXT:    .cfi_offset w23, -40
 ; CHECK-GI-NEXT:    .cfi_offset w30, -48
-; CHECK-GI-NEXT:    adrp x8, .LCPI86_1
-; CHECK-GI-NEXT:    str q0, [sp] // 16-byte Spill
-; CHECK-GI-NEXT:    ldr q2, [x8, :lo12:.LCPI86_1]
-; CHECK-GI-NEXT:    stp q2, q1, [sp, #16] // 32-byte Folded Spill
-; CHECK-GI-NEXT:    mov v1.16b, v2.16b
+; CHECK-GI-NEXT:    stp q0, q1, [sp] // 32-byte Folded Spill
+; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    bl __gttf2
 ; CHECK-GI-NEXT:    ldr q0, [sp] // 16-byte Reload
 ; CHECK-GI-NEXT:    cmp w0, #0
@@ -4176,12 +4165,12 @@ define <2 x i64> @test_signed_v2f128_v2i64(<2 x fp128> %f) {
 ; CHECK-GI-NEXT:    csel x8, x20, x22, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixunstfdi
-; CHECK-GI-NEXT:    ldp q1, q0, [sp, #16] // 32-byte Folded Reload
+; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-GI-NEXT:    ldr q0, [sp, #16] // 16-byte Reload
 ; CHECK-GI-NEXT:    mov x19, x0
 ; CHECK-GI-NEXT:    bl __gttf2
-; CHECK-GI-NEXT:    ldr q0, [sp, #32] // 16-byte Reload
+; CHECK-GI-NEXT:    ldp q1, q0, [sp] // 32-byte Folded Reload
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    ldr q1, [sp] // 16-byte Reload
 ; CHECK-GI-NEXT:    fmov x8, d0
 ; CHECK-GI-NEXT:    csel x20, x8, xzr, gt
 ; CHECK-GI-NEXT:    mov x8, v0.d[1]
@@ -4196,11 +4185,11 @@ define <2 x i64> @test_signed_v2f128_v2i64(<2 x fp128> %f) {
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixunstfdi
 ; CHECK-GI-NEXT:    fmov d0, x19
-; CHECK-GI-NEXT:    ldp x20, x19, [sp, #80] // 16-byte Folded Reload
-; CHECK-GI-NEXT:    ldp x22, x21, [sp, #64] // 16-byte Folded Reload
-; CHECK-GI-NEXT:    ldp x30, x23, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT:    ldp x20, x19, [sp, #64] // 16-byte Folded Reload
+; CHECK-GI-NEXT:    ldp x22, x21, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT:    ldp x30, x23, [sp, #32] // 16-byte Folded Reload
 ; CHECK-GI-NEXT:    mov v0.d[1], x0
-; CHECK-GI-NEXT:    add sp, sp, #96
+; CHECK-GI-NEXT:    add sp, sp, #80
 ; CHECK-GI-NEXT:    ret
     %x = call <2 x i64> @llvm.fptoui.sat.v2f128.v2i64(<2 x fp128> %f)
     ret <2 x i64> %x

diff  --git a/llvm/test/CodeGen/AArch64/frem-power2.ll b/llvm/test/CodeGen/AArch64/frem-power2.ll
index 2d2004e068060..548079dc5c4aa 100644
--- a/llvm/test/CodeGen/AArch64/frem-power2.ll
+++ b/llvm/test/CodeGen/AArch64/frem-power2.ll
@@ -552,19 +552,18 @@ define <4 x float> @frem1152921504606846976_absv(<4 x float> %x) {
 ; CHECK-GI-LABEL: frem1152921504606846976_absv:
 ; CHECK-GI:       // %bb.0: // %entry
 ; CHECK-GI-NEXT:    sub sp, sp, #96
-; CHECK-GI-NEXT:    stp d11, d10, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT:    str d10, [sp, #48] // 8-byte Spill
 ; CHECK-GI-NEXT:    stp d9, d8, [sp, #64] // 16-byte Folded Spill
-; CHECK-GI-NEXT:    str x30, [sp, #80] // 8-byte Spill
+; CHECK-GI-NEXT:    stp x30, x19, [sp, #80] // 16-byte Folded Spill
 ; CHECK-GI-NEXT:    .cfi_def_cfa_offset 96
+; CHECK-GI-NEXT:    .cfi_offset w19, -8
 ; CHECK-GI-NEXT:    .cfi_offset w30, -16
 ; CHECK-GI-NEXT:    .cfi_offset b8, -24
 ; CHECK-GI-NEXT:    .cfi_offset b9, -32
-; CHECK-GI-NEXT:    .cfi_offset b10, -40
-; CHECK-GI-NEXT:    .cfi_offset b11, -48
-; CHECK-GI-NEXT:    mov w8, #1568669696 // =0x5d800000
+; CHECK-GI-NEXT:    .cfi_offset b10, -48
 ; CHECK-GI-NEXT:    fabs v0.4s, v0.4s
-; CHECK-GI-NEXT:    fmov s11, w8
-; CHECK-GI-NEXT:    fmov s1, s11
+; CHECK-GI-NEXT:    mov w19, #1568669696 // =0x5d800000
+; CHECK-GI-NEXT:    fmov s1, w19
 ; CHECK-GI-NEXT:    mov s8, v0.s[1]
 ; CHECK-GI-NEXT:    mov s9, v0.s[2]
 ; CHECK-GI-NEXT:    mov s10, v0.s[3]
@@ -572,24 +571,24 @@ define <4 x float> @frem1152921504606846976_absv(<4 x float> %x) {
 ; CHECK-GI-NEXT:    bl fmodf
 ; CHECK-GI-NEXT:    // kill: def $s0 killed $s0 def $q0
 ; CHECK-GI-NEXT:    str q0, [sp, #32] // 16-byte Spill
-; CHECK-GI-NEXT:    fmov s1, s11
+; CHECK-GI-NEXT:    fmov s1, w19
 ; CHECK-GI-NEXT:    fmov s0, s8
 ; CHECK-GI-NEXT:    bl fmodf
 ; CHECK-GI-NEXT:    // kill: def $s0 killed $s0 def $q0
 ; CHECK-GI-NEXT:    str q0, [sp, #16] // 16-byte Spill
-; CHECK-GI-NEXT:    fmov s1, s11
+; CHECK-GI-NEXT:    fmov s1, w19
 ; CHECK-GI-NEXT:    fmov s0, s9
 ; CHECK-GI-NEXT:    bl fmodf
 ; CHECK-GI-NEXT:    // kill: def $s0 killed $s0 def $q0
 ; CHECK-GI-NEXT:    str q0, [sp] // 16-byte Spill
-; CHECK-GI-NEXT:    fmov s1, s11
+; CHECK-GI-NEXT:    fmov s1, w19
 ; CHECK-GI-NEXT:    fmov s0, s10
 ; CHECK-GI-NEXT:    bl fmodf
 ; CHECK-GI-NEXT:    ldp q2, q1, [sp, #16] // 32-byte Folded Reload
 ; CHECK-GI-NEXT:    // kill: def $s0 killed $s0 def $q0
-; CHECK-GI-NEXT:    ldr x30, [sp, #80] // 8-byte Reload
+; CHECK-GI-NEXT:    ldr d10, [sp, #48] // 8-byte Reload
+; CHECK-GI-NEXT:    ldp x30, x19, [sp, #80] // 16-byte Folded Reload
 ; CHECK-GI-NEXT:    ldp d9, d8, [sp, #64] // 16-byte Folded Reload
-; CHECK-GI-NEXT:    ldp d11, d10, [sp, #48] // 16-byte Folded Reload
 ; CHECK-GI-NEXT:    mov v1.s[1], v2.s[0]
 ; CHECK-GI-NEXT:    ldr q2, [sp] // 16-byte Reload
 ; CHECK-GI-NEXT:    mov v1.s[2], v2.s[0]

diff  --git a/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll b/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
index 9f5592c20277c..9f646c28ce74a 100644
--- a/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
@@ -2503,8 +2503,7 @@ define <4 x i32> @fcmal4xfloat(<4 x float> %A, <4 x float> %B) {
 ;
 ; CHECK-GI-LABEL: fcmal4xfloat:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    mov w8, #1 // =0x1
-; CHECK-GI-NEXT:    dup v0.2s, w8
+; CHECK-GI-NEXT:    movi v0.2s, #1
 ; CHECK-GI-NEXT:    mov v0.d[1], v0.d[0]
 ; CHECK-GI-NEXT:    shl v0.4s, v0.4s, #31
 ; CHECK-GI-NEXT:    cmlt v0.4s, v0.4s, #0

diff  --git a/llvm/test/CodeGen/AArch64/neon-extadd-extract.ll b/llvm/test/CodeGen/AArch64/neon-extadd-extract.ll
index 5753798e87512..b35511dd4ab69 100644
--- a/llvm/test/CodeGen/AArch64/neon-extadd-extract.ll
+++ b/llvm/test/CodeGen/AArch64/neon-extadd-extract.ll
@@ -786,7 +786,7 @@ define <2 x i8> @extract_scalable_vec() vscale_range(1,16) "target-features"="+s
 ; CHECK-GI-LABEL: extract_scalable_vec:
 ; CHECK-GI:       // %bb.0: // %entry
 ; CHECK-GI-NEXT:    mov x8, xzr
-; CHECK-GI-NEXT:    mov x9, #1 // =0x1
+; CHECK-GI-NEXT:    mov w9, #1 // =0x1
 ; CHECK-GI-NEXT:    ld1 { v0.b }[0], [x8]
 ; CHECK-GI-NEXT:    ldr b1, [x9]
 ; CHECK-GI-NEXT:    adrp x8, .LCPI36_0

diff  --git a/llvm/test/CodeGen/AArch64/rem-by-const.ll b/llvm/test/CodeGen/AArch64/rem-by-const.ll
index 171fa1e74ce54..2c1fb31828d93 100644
--- a/llvm/test/CodeGen/AArch64/rem-by-const.ll
+++ b/llvm/test/CodeGen/AArch64/rem-by-const.ll
@@ -925,7 +925,7 @@ define <4 x i8> @sv4i8_7(<4 x i8> %d, <4 x i8> %e) {
 ; CHECK-GI-NEXT:    mov v3.b[3], w8
 ; CHECK-GI-NEXT:    uzp1 v1.8b, v2.8b, v0.8b
 ; CHECK-GI-NEXT:    neg v2.8b, v3.8b
-; CHECK-GI-NEXT:    dup v3.4h, w9
+; CHECK-GI-NEXT:    movi v3.4h, #7
 ; CHECK-GI-NEXT:    sshl v1.8b, v1.8b, v2.8b
 ; CHECK-GI-NEXT:    neg v2.8b, v4.8b
 ; CHECK-GI-NEXT:    ushl v2.8b, v1.8b, v2.8b
@@ -976,10 +976,9 @@ define <4 x i8> @sv4i8_100(<4 x i8> %d, <4 x i8> %e) {
 ; CHECK-GI-NEXT:    mov v3.b[2], w8
 ; CHECK-GI-NEXT:    sshr v1.4h, v1.4h, #8
 ; CHECK-GI-NEXT:    mov v3.b[3], w8
-; CHECK-GI-NEXT:    mov w8, #100 // =0x64
 ; CHECK-GI-NEXT:    uzp1 v1.8b, v1.8b, v0.8b
 ; CHECK-GI-NEXT:    neg v2.8b, v3.8b
-; CHECK-GI-NEXT:    dup v3.4h, w8
+; CHECK-GI-NEXT:    movi v3.4h, #100
 ; CHECK-GI-NEXT:    sshl v1.8b, v1.8b, v2.8b
 ; CHECK-GI-NEXT:    neg v2.8b, v4.8b
 ; CHECK-GI-NEXT:    ushl v2.8b, v1.8b, v2.8b
@@ -1410,12 +1409,11 @@ define <4 x i8> @uv4i8_7(<4 x i8> %d, <4 x i8> %e) {
 ; CHECK-GI-NEXT:    ushl v2.8b, v2.8b, v3.8b
 ; CHECK-GI-NEXT:    ushll v2.8h, v2.8b, #0
 ; CHECK-GI-NEXT:    mov v4.b[3], w8
-; CHECK-GI-NEXT:    mov w8, #7 // =0x7
 ; CHECK-GI-NEXT:    usra v2.4h, v1.4h, #8
 ; CHECK-GI-NEXT:    uzp1 v1.8b, v2.8b, v0.8b
 ; CHECK-GI-NEXT:    neg v2.8b, v4.8b
 ; CHECK-GI-NEXT:    ushl v1.8b, v1.8b, v2.8b
-; CHECK-GI-NEXT:    dup v2.4h, w8
+; CHECK-GI-NEXT:    movi v2.4h, #7
 ; CHECK-GI-NEXT:    ushll v1.8h, v1.8b, #0
 ; CHECK-GI-NEXT:    mls v0.4h, v1.4h, v2.4h
 ; CHECK-GI-NEXT:    ret
@@ -1453,11 +1451,10 @@ define <4 x i8> @uv4i8_100(<4 x i8> %d, <4 x i8> %e) {
 ; CHECK-GI-NEXT:    mov v3.b[2], w8
 ; CHECK-GI-NEXT:    ushr v1.4h, v1.4h, #8
 ; CHECK-GI-NEXT:    mov v3.b[3], w8
-; CHECK-GI-NEXT:    mov w8, #100 // =0x64
 ; CHECK-GI-NEXT:    uzp1 v1.8b, v1.8b, v0.8b
 ; CHECK-GI-NEXT:    neg v2.8b, v3.8b
 ; CHECK-GI-NEXT:    ushl v1.8b, v1.8b, v2.8b
-; CHECK-GI-NEXT:    dup v2.4h, w8
+; CHECK-GI-NEXT:    movi v2.4h, #100
 ; CHECK-GI-NEXT:    ushll v1.8h, v1.8b, #0
 ; CHECK-GI-NEXT:    mls v0.4h, v1.4h, v2.4h
 ; CHECK-GI-NEXT:    ret
@@ -1591,10 +1588,9 @@ define <2 x i16> @sv2i16_7(<2 x i16> %d, <2 x i16> %e) {
 ; CHECK-GI-NEXT:    uzp1 v1.4h, v1.4h, v0.4h
 ; CHECK-GI-NEXT:    mov v3.h[1], w8
 ; CHECK-GI-NEXT:    neg v2.4h, v2.4h
-; CHECK-GI-NEXT:    mov w8, #7 // =0x7
 ; CHECK-GI-NEXT:    sshl v1.4h, v1.4h, v2.4h
 ; CHECK-GI-NEXT:    neg v2.4h, v3.4h
-; CHECK-GI-NEXT:    dup v3.2s, w8
+; CHECK-GI-NEXT:    movi v3.2s, #7
 ; CHECK-GI-NEXT:    ushl v2.4h, v1.4h, v2.4h
 ; CHECK-GI-NEXT:    ushll v1.4s, v1.4h, #0
 ; CHECK-GI-NEXT:    ushll v2.4s, v2.4h, #0
@@ -1640,10 +1636,9 @@ define <2 x i16> @sv2i16_100(<2 x i16> %d, <2 x i16> %e) {
 ; CHECK-GI-NEXT:    uzp1 v1.4h, v1.4h, v0.4h
 ; CHECK-GI-NEXT:    mov v3.h[1], w8
 ; CHECK-GI-NEXT:    neg v2.4h, v2.4h
-; CHECK-GI-NEXT:    mov w8, #100 // =0x64
 ; CHECK-GI-NEXT:    sshl v1.4h, v1.4h, v2.4h
 ; CHECK-GI-NEXT:    neg v2.4h, v3.4h
-; CHECK-GI-NEXT:    dup v3.2s, w8
+; CHECK-GI-NEXT:    movi v3.2s, #100
 ; CHECK-GI-NEXT:    ushl v2.4h, v1.4h, v2.4h
 ; CHECK-GI-NEXT:    ushll v1.4s, v1.4h, #0
 ; CHECK-GI-NEXT:    ushll v2.4s, v2.4h, #0
@@ -1933,12 +1928,11 @@ define <2 x i16> @uv2i16_7(<2 x i16> %d, <2 x i16> %e) {
 ; CHECK-GI-NEXT:    fmov s3, w8
 ; CHECK-GI-NEXT:    ushll v2.4s, v2.4h, #0
 ; CHECK-GI-NEXT:    mov v3.h[1], w8
-; CHECK-GI-NEXT:    mov w8, #7 // =0x7
 ; CHECK-GI-NEXT:    usra v2.2s, v1.2s, #16
 ; CHECK-GI-NEXT:    uzp1 v1.4h, v2.4h, v0.4h
 ; CHECK-GI-NEXT:    neg v2.4h, v3.4h
 ; CHECK-GI-NEXT:    ushl v1.4h, v1.4h, v2.4h
-; CHECK-GI-NEXT:    dup v2.2s, w8
+; CHECK-GI-NEXT:    movi v2.2s, #7
 ; CHECK-GI-NEXT:    ushll v1.4s, v1.4h, #0
 ; CHECK-GI-NEXT:    mls v0.2s, v1.2s, v2.2s
 ; CHECK-GI-NEXT:    ret
@@ -1978,12 +1972,11 @@ define <2 x i16> @uv2i16_100(<2 x i16> %d, <2 x i16> %e) {
 ; CHECK-GI-NEXT:    mul v1.2s, v1.2s, v2.2s
 ; CHECK-GI-NEXT:    fmov s2, w8
 ; CHECK-GI-NEXT:    mov v2.h[1], w8
-; CHECK-GI-NEXT:    mov w8, #100 // =0x64
 ; CHECK-GI-NEXT:    ushr v1.2s, v1.2s, #16
 ; CHECK-GI-NEXT:    uzp1 v1.4h, v1.4h, v0.4h
 ; CHECK-GI-NEXT:    neg v2.4h, v2.4h
 ; CHECK-GI-NEXT:    ushl v1.4h, v1.4h, v2.4h
-; CHECK-GI-NEXT:    dup v2.2s, w8
+; CHECK-GI-NEXT:    movi v2.2s, #100
 ; CHECK-GI-NEXT:    ushll v1.4s, v1.4h, #0
 ; CHECK-GI-NEXT:    mls v0.2s, v1.2s, v2.2s
 ; CHECK-GI-NEXT:    ret


        


More information about the llvm-commits mailing list