[llvm] fa15255 - [ARM] Convert floating point splats to integer

David Green via llvm-commits llvm-commits at lists.llvm.org
Wed May 13 07:25:31 PDT 2020


Author: David Green
Date: 2020-05-13T15:24:16+01:00
New Revision: fa15255d8af53126bbcb017f2fb6f9961e8574df

URL: https://github.com/llvm/llvm-project/commit/fa15255d8af53126bbcb017f2fb6f9961e8574df
DIFF: https://github.com/llvm/llvm-project/commit/fa15255d8af53126bbcb017f2fb6f9961e8574df.diff

LOG: [ARM] Convert floating point splats to integer

Under MVE a vdup will always take a GPR (integer) register, not a
floating point value. During DAG combine we convert the type via a
bitcast to an integer, in an attempt to fold the bitcast into other
instructions. This is OK, but it only works within a single basic
block. To do the same trick across a basic block boundary we need to
convert the type in CodeGenPrepare, before the splat is sunk into the
loop.
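
As an illustration (the function and value names are hypothetical, not
taken from the patch), the pattern in question looks roughly like this:
the splat is created in the entry block but only used inside the loop,
so a bitcast introduced during DAG combine is never visible from the
use:

  define void @splat_use_in_loop(<4 x float>* %p, float %x, i32 %n) {
  entry:
    %ins = insertelement <4 x float> undef, float %x, i32 0
    %splat = shufflevector <4 x float> %ins, <4 x float> undef, <4 x i32> zeroinitializer
    br label %loop

  loop:                                   ; the splat's only use is here
    %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
    %addr = getelementptr <4 x float>, <4 x float>* %p, i32 %i
    %v = load <4 x float>, <4 x float>* %addr
    %add = fadd <4 x float> %v, %splat
    store <4 x float> %add, <4 x float>* %addr
    %i.next = add i32 %i, 1
    %done = icmp eq i32 %i.next, %n
    br i1 %done, label %exit, label %loop

  exit:
    ret void
  }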

This patch adds a convertSplatType function to CodeGenPrepare to do
that, wrapping the splat in bitcasts to force the type to an integer.
The code in shouldSinkOperands is then adjusted to handle the extra
bitcasts.
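
For the sketch above, the IR produced by convertSplatType would look
roughly like the following (the shape of the transform, not verbatim
pass output). The splat is rebuilt as an integer splat, with one
bitcast on the scalar input and one on the vector result:

  entry:
    %bc1 = bitcast float %x to i32
    %ins = insertelement <4 x i32> undef, i32 %bc1, i32 0
    %splat = shufflevector <4 x i32> %ins, <4 x i32> undef, <4 x i32> zeroinitializer
    %bc2 = bitcast <4 x i32> %splat to <4 x float>
    br label %loop
    ; ... the fadd in the loop now takes %bc2 instead of the old splat

The adjusted shouldSinkOperands then sinks the shuffle along with the
trailing bitcast into the loop, so instruction selection sees the
integer splat next to its use and can fold it into a vdup (or a
VADD qd, qm, rm style instruction) reading the GPR directly.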

Differential Revision: https://reviews.llvm.org/D78728

Added: 
    

Modified: 
    llvm/include/llvm/CodeGen/TargetLowering.h
    llvm/lib/CodeGen/CodeGenPrepare.cpp
    llvm/lib/Target/ARM/ARMISelLowering.cpp
    llvm/lib/Target/ARM/ARMISelLowering.h
    llvm/test/CodeGen/Thumb2/mve-float16regloops.ll
    llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
    llvm/test/CodeGen/Thumb2/mve-fma-loops.ll
    llvm/test/CodeGen/Thumb2/mve-intrinsics/dup.ll
    llvm/test/CodeGen/Thumb2/mve-intrinsics/ternary.ll
    llvm/test/CodeGen/Thumb2/mve-intrinsics/vaddq.ll
    llvm/test/CodeGen/Thumb2/mve-intrinsics/vmulq.ll
    llvm/test/CodeGen/Thumb2/mve-intrinsics/vsubq.ll
    llvm/test/CodeGen/Thumb2/mve-postinc-lsr.ll
    llvm/test/CodeGen/Thumb2/mve-pred-threshold.ll
    llvm/test/CodeGen/Thumb2/mve-vldst4.ll

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index dedd151bc6da..58da3fa1c71b 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -2344,6 +2344,15 @@ class TargetLoweringBase {
     return false;
   }
 
+  /// Given a shuffle vector SVI representing a vector splat, return a new
+  /// scalar type of size equal to SVI's scalar type if the new type is more
+  /// profitable. Returns nullptr otherwise. For example under MVE float splats
+  /// are converted to integer to prevent the need to move from SPR to GPR
+  /// registers.
+  virtual Type* shouldConvertSplatType(ShuffleVectorInst* SVI) const {
+    return nullptr;
+  }
+
   /// Returns true if the opcode is a commutative binary operation.
   virtual bool isCommutativeBinOp(unsigned Opcode) const {
     // FIXME: This should get its info from the td file.

diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 881366c58986..60cc995cefdd 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -392,6 +392,8 @@ class TypePromotionTransaction;
     bool optimizeLoadExt(LoadInst *Load);
     bool optimizeShiftInst(BinaryOperator *BO);
     bool optimizeSelectInst(SelectInst *SI);
+    bool sinkShuffleVectorToShift(ShuffleVectorInst *SVI);
+    bool convertSplatType(ShuffleVectorInst *SVI);
     bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI);
     bool optimizeSwitchInst(SwitchInst *SI);
     bool optimizeExtractElementInst(Instruction *Inst);
@@ -6419,7 +6421,7 @@ bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
 /// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases
 /// it's often worth sinking a shufflevector splat down to its use so that
 /// codegen can spot all lanes are identical.
-bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
+bool CodeGenPrepare::sinkShuffleVectorToShift(ShuffleVectorInst *SVI) {
   BasicBlock *DefBB = SVI->getParent();
 
   // Only do this xform if variable vector shifts are particularly expensive.
@@ -6471,6 +6473,58 @@ bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
   return MadeChange;
 }
 
+/// Some targets only accept certain types for splat inputs. For example a VDUP
+/// in MVE takes a GPR (integer) register, and the instructions that
+/// incorporate a VDUP (such as a VADD qd, qm, rm) also require a GPR register.
+bool CodeGenPrepare::convertSplatType(ShuffleVectorInst *SVI) {
+  if (!match(SVI,
+             m_ShuffleVector(m_InsertElement(m_Undef(), m_Value(), m_ZeroInt()),
+                             m_Undef(), m_ZeroMask())))
+    return false;
+  Type *NewType = TLI->shouldConvertSplatType(SVI);
+  if (!NewType)
+    return false;
+
+  VectorType *SVIVecType = cast<VectorType>(SVI->getType());
+  Type *SVIType = SVIVecType->getScalarType();
+  assert(!NewType->isVectorTy() && "Expected a scalar type!");
+  assert(NewType->getScalarSizeInBits() == SVIType->getScalarSizeInBits() &&
+         "Expected a type of the same size!");
+  Type *NewVecType = VectorType::get(NewType, SVIVecType->getNumElements());
+
+  // Create a bitcast (shuffle (insert (bitcast(..))))
+  IRBuilder<> Builder(SVI->getContext());
+  Builder.SetInsertPoint(SVI);
+  Value *BC1 = Builder.CreateBitCast(
+      cast<Instruction>(SVI->getOperand(0))->getOperand(1), NewType);
+  Value *Insert = Builder.CreateInsertElement(UndefValue::get(NewVecType), BC1,
+                                              (uint64_t)0);
+  Value *Shuffle = Builder.CreateShuffleVector(
+      Insert, UndefValue::get(NewVecType), SVI->getShuffleMask());
+  Value *BC2 = Builder.CreateBitCast(Shuffle, SVIVecType);
+
+  SVI->replaceAllUsesWith(BC2);
+  RecursivelyDeleteTriviallyDeadInstructions(SVI);
+
+  // Also hoist the bitcast up to its operand if they are not in the same
+  // block.
+  if (auto *BCI = dyn_cast<Instruction>(BC1))
+    if (auto *Op = dyn_cast<Instruction>(BCI->getOperand(0)))
+      if (BCI->getParent() != Op->getParent() && !isa<PHINode>(Op) &&
+          !Op->isTerminator() && !Op->isEHPad())
+        BCI->moveAfter(Op);
+
+  return true;
+}
+
+bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
+  if (sinkShuffleVectorToShift(SVI))
+    return true;
+  if (convertSplatType(SVI))
+    return true;
+  return false;
+}
+
 bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) {
   // If the operands of I can be folded into a target instruction together with
   // I, duplicate and sink them.

diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index e8e6b6f900c7..2c6d124d4715 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -15746,32 +15746,52 @@ bool ARMTargetLowering::shouldSinkOperands(Instruction *I,
   };
 
   for (auto OpIdx : enumerate(I->operands())) {
-    Value *Op = OpIdx.value().get();
+    Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
     // Make sure we are not already sinking this operand
-    if (any_of(Ops, [&](Use *U) { return U->get() == Op; }))
+    if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
       continue;
+
+    Instruction *Shuffle = Op;
+    if (Shuffle->getOpcode() == Instruction::BitCast)
+      Shuffle = dyn_cast<Instruction>(Shuffle->getOperand(0));
     // We are looking for a splat that can be sunk.
-    if (!match(Op, m_ShuffleVector(
-                       m_InsertElement(m_Undef(), m_Value(), m_ZeroInt()),
-                       m_Undef(), m_ZeroMask())))
+    if (!Shuffle ||
+        !match(Shuffle, m_ShuffleVector(
+                            m_InsertElement(m_Undef(), m_Value(), m_ZeroInt()),
+                            m_Undef(), m_ZeroMask())))
       continue;
     if (!IsSinker(I, OpIdx.index()))
       continue;
 
-    Instruction *Shuffle = cast<Instruction>(Op);
     // All uses of the shuffle should be sunk to avoid duplicating it across gpr
     // and vector registers
-    for (Use &U : Shuffle->uses()) {
+    for (Use &U : Op->uses()) {
       Instruction *Insn = cast<Instruction>(U.getUser());
       if (!IsSinker(Insn, U.getOperandNo()))
         return false;
     }
+
     Ops.push_back(&Shuffle->getOperandUse(0));
+    if (Shuffle != Op)
+      Ops.push_back(&Op->getOperandUse(0));
     Ops.push_back(&OpIdx.value());
   }
   return true;
 }
 
+Type *ARMTargetLowering::shouldConvertSplatType(ShuffleVectorInst *SVI) const {
+  if (!Subtarget->hasMVEIntegerOps())
+    return nullptr;
+  Type *SVIType = SVI->getType();
+  Type *ScalarType = SVIType->getScalarType();
+
+  if (ScalarType->isFloatTy())
+    return Type::getInt32Ty(SVIType->getContext());
+  if (ScalarType->isHalfTy())
+    return Type::getInt16Ty(SVIType->getContext());
+  return nullptr;
+}
+
 bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
   EVT VT = ExtVal.getValueType();
 

diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 04995c4ffc54..5e9b077278a0 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -384,6 +384,7 @@ class VectorType;
     bool isZExtFree(SDValue Val, EVT VT2) const override;
     bool shouldSinkOperands(Instruction *I,
                             SmallVectorImpl<Use *> &Ops) const override;
+    Type* shouldConvertSplatType(ShuffleVectorInst* SVI) const override;
 
     bool isFNegFree(EVT VT) const override;
 

diff --git a/llvm/test/CodeGen/Thumb2/mve-float16regloops.ll b/llvm/test/CodeGen/Thumb2/mve-float16regloops.ll
index 17b0077c0e86..189c543ed79c 100644
--- a/llvm/test/CodeGen/Thumb2/mve-float16regloops.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-float16regloops.ll
@@ -7,8 +7,7 @@ define arm_aapcs_vfpcc void @test_fadd(half* noalias nocapture readonly %A, half
 ; CHECK-NEXT:    cmp r3, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    bxlt lr
-; CHECK-NEXT:    vldr.16 s0, [r1]
-; CHECK-NEXT:    vmov.f16 r1, s0
+; CHECK-NEXT:    ldrh r1, [r1]
 ; CHECK-NEXT:  .LBB0_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r0], #16
@@ -54,8 +53,7 @@ define arm_aapcs_vfpcc void @test_fadd_r(half* noalias nocapture readonly %A, ha
 ; CHECK-NEXT:    cmp r3, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    bxlt lr
-; CHECK-NEXT:    vldr.16 s0, [r1]
-; CHECK-NEXT:    vmov.f16 r1, s0
+; CHECK-NEXT:    ldrh r1, [r1]
 ; CHECK-NEXT:  .LBB1_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r0], #16
@@ -101,8 +99,7 @@ define arm_aapcs_vfpcc void @test_fmul(half* noalias nocapture readonly %A, half
 ; CHECK-NEXT:    cmp r3, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    bxlt lr
-; CHECK-NEXT:    vldr.16 s0, [r1]
-; CHECK-NEXT:    vmov.f16 r1, s0
+; CHECK-NEXT:    ldrh r1, [r1]
 ; CHECK-NEXT:  .LBB2_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r0], #16
@@ -148,8 +145,7 @@ define arm_aapcs_vfpcc void @test_fmul_r(half* noalias nocapture readonly %A, ha
 ; CHECK-NEXT:    cmp r3, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    bxlt lr
-; CHECK-NEXT:    vldr.16 s0, [r1]
-; CHECK-NEXT:    vmov.f16 r1, s0
+; CHECK-NEXT:    ldrh r1, [r1]
 ; CHECK-NEXT:  .LBB3_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r0], #16
@@ -195,8 +191,7 @@ define arm_aapcs_vfpcc void @test_fsub(half* noalias nocapture readonly %A, half
 ; CHECK-NEXT:    cmp r3, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    bxlt lr
-; CHECK-NEXT:    vldr.16 s0, [r1]
-; CHECK-NEXT:    vmov.f16 r1, s0
+; CHECK-NEXT:    ldrh r1, [r1]
 ; CHECK-NEXT:  .LBB4_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r0], #16
@@ -242,8 +237,7 @@ define arm_aapcs_vfpcc void @test_fsub_r(half* noalias nocapture readonly %A, ha
 ; CHECK-NEXT:    cmp r3, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    bxlt lr
-; CHECK-NEXT:    vldr.16 s0, [r1]
-; CHECK-NEXT:    vmov.f16 r1, s0
+; CHECK-NEXT:    ldrh r1, [r1]
 ; CHECK-NEXT:    vdup.16 q0, r1
 ; CHECK-NEXT:  .LBB5_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
@@ -292,8 +286,7 @@ define arm_aapcs_vfpcc void @test_fmas(half* noalias nocapture readonly %A, half
 ; CHECK-NEXT:    cmp.w r12, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    bxlt lr
-; CHECK-NEXT:    vldr.16 s0, [r2]
-; CHECK-NEXT:    vmov.f16 r2, s0
+; CHECK-NEXT:    ldrh r2, [r2]
 ; CHECK-NEXT:  .LBB6_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r0], #16
@@ -345,8 +338,7 @@ define arm_aapcs_vfpcc void @test_fmas_r(half* noalias nocapture readonly %A, ha
 ; CHECK-NEXT:    cmp.w r12, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    bxlt lr
-; CHECK-NEXT:    vldr.16 s0, [r2]
-; CHECK-NEXT:    vmov.f16 r2, s0
+; CHECK-NEXT:    ldrh r2, [r2]
 ; CHECK-NEXT:  .LBB7_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r0], #16
@@ -398,8 +390,7 @@ define arm_aapcs_vfpcc void @test_fma(half* noalias nocapture readonly %A, half*
 ; CHECK-NEXT:    cmp.w r12, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    bxlt lr
-; CHECK-NEXT:    vldr.16 s0, [r2]
-; CHECK-NEXT:    vmov.f16 r2, s0
+; CHECK-NEXT:    ldrh r2, [r2]
 ; CHECK-NEXT:  .LBB8_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r0], #16
@@ -451,8 +442,7 @@ define arm_aapcs_vfpcc void @test_fma_r(half* noalias nocapture readonly %A, hal
 ; CHECK-NEXT:    cmp.w r12, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    bxlt lr
-; CHECK-NEXT:    vldr.16 s0, [r2]
-; CHECK-NEXT:    vmov.f16 r2, s0
+; CHECK-NEXT:    ldrh r2, [r2]
 ; CHECK-NEXT:  .LBB9_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r0], #16
@@ -505,8 +495,7 @@ define arm_aapcs_vfpcc void @test_fmss(half* noalias nocapture readonly %A, half
 ; CHECK-NEXT:    cmp.w r12, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    bxlt lr
-; CHECK-NEXT:    vldr.16 s0, [r2]
-; CHECK-NEXT:    vmov.f16 r2, s0
+; CHECK-NEXT:    ldrh r2, [r2]
 ; CHECK-NEXT:    vdup.16 q0, r2
 ; CHECK-NEXT:    vneg.f16 q0, q0
 ; CHECK-NEXT:  .LBB10_1: @ %vector.body
@@ -561,8 +550,7 @@ define arm_aapcs_vfpcc void @test_fmss_r(half* noalias nocapture readonly %A, ha
 ; CHECK-NEXT:    cmp.w r12, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    bxlt lr
-; CHECK-NEXT:    vldr.16 s0, [r2]
-; CHECK-NEXT:    vmov.f16 r2, s0
+; CHECK-NEXT:    ldrh r2, [r2]
 ; CHECK-NEXT:    vdup.16 q0, r2
 ; CHECK-NEXT:  .LBB11_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
@@ -616,8 +604,7 @@ define arm_aapcs_vfpcc void @test_fms(half* noalias nocapture readonly %A, half*
 ; CHECK-NEXT:    cmp.w r12, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    bxlt lr
-; CHECK-NEXT:    vldr.16 s0, [r2]
-; CHECK-NEXT:    vmov.f16 r2, s0
+; CHECK-NEXT:    ldrh r2, [r2]
 ; CHECK-NEXT:  .LBB12_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r1], #16
@@ -670,8 +657,7 @@ define arm_aapcs_vfpcc void @test_fms_r(half* noalias nocapture readonly %A, hal
 ; CHECK-NEXT:    cmp.w r12, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    bxlt lr
-; CHECK-NEXT:    vldr.16 s0, [r2]
-; CHECK-NEXT:    vmov.f16 r2, s0
+; CHECK-NEXT:    ldrh r2, [r2]
 ; CHECK-NEXT:  .LBB13_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r1], #16
@@ -811,109 +797,99 @@ define void @arm_fir_f32_1_4_mve(%struct.arm_fir_instance_f32* nocapture readonl
 ; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
 ; CHECK-NEXT:    .pad #16
 ; CHECK-NEXT:    sub sp, #16
-; CHECK-NEXT:    ldrh r4, [r0]
-; CHECK-NEXT:    ldr r5, [r0, #4]
-; CHECK-NEXT:    subs r7, r4, #1
-; CHECK-NEXT:    cmp r7, #3
+; CHECK-NEXT:    ldrh.w r10, [r0]
+; CHECK-NEXT:    mov r5, r3
+; CHECK-NEXT:    ldr.w r12, [r0, #4]
+; CHECK-NEXT:    sub.w r6, r10, #1
+; CHECK-NEXT:    cmp r6, #3
 ; CHECK-NEXT:    bhi .LBB15_6
 ; CHECK-NEXT:  @ %bb.1: @ %if.then
-; CHECK-NEXT:    ldr r6, [r0, #8]
-; CHECK-NEXT:    add.w r11, r5, r7, lsl #1
-; CHECK-NEXT:    lsr.w lr, r3, #2
-; CHECK-NEXT:    vldr.16 s0, [r6, #6]
-; CHECK-NEXT:    vldr.16 s2, [r6, #4]
-; CHECK-NEXT:    vldr.16 s4, [r6, #2]
-; CHECK-NEXT:    vldr.16 s6, [r6]
+; CHECK-NEXT:    ldr r7, [r0, #8]
+; CHECK-NEXT:    add.w r4, r12, r6, lsl #1
+; CHECK-NEXT:    lsr.w lr, r5, #2
+; CHECK-NEXT:    ldrh r3, [r7, #6]
+; CHECK-NEXT:    ldrh.w r9, [r7, #4]
+; CHECK-NEXT:    ldrh.w r8, [r7, #2]
+; CHECK-NEXT:    ldrh r7, [r7]
 ; CHECK-NEXT:    wls lr, lr, .LBB15_5
 ; CHECK-NEXT:  @ %bb.2: @ %while.body.lr.ph
-; CHECK-NEXT:    strd r3, r4, [sp, #8] @ 8-byte Folded Spill
-; CHECK-NEXT:    vmov.f16 r10, s6
-; CHECK-NEXT:    vmov.f16 r12, s4
-; CHECK-NEXT:    bic r3, r3, #3
-; CHECK-NEXT:    vmov.f16 r4, s2
-; CHECK-NEXT:    str r3, [sp] @ 4-byte Spill
-; CHECK-NEXT:    vmov.f16 r8, s0
-; CHECK-NEXT:    add.w r3, r2, r3, lsl #1
-; CHECK-NEXT:    str r3, [sp, #4] @ 4-byte Spill
-; CHECK-NEXT:    movs r6, #0
-; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    strd r5, r10, [sp, #8] @ 8-byte Folded Spill
+; CHECK-NEXT:    bic r5, r5, #3
+; CHECK-NEXT:    str r0, [sp] @ 4-byte Spill
+; CHECK-NEXT:    mov.w r10, #0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    add.w r5, r2, r5, lsl #1
+; CHECK-NEXT:    str r5, [sp, #4] @ 4-byte Spill
 ; CHECK-NEXT:  .LBB15_3: @ %while.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    add.w r9, r1, r6
-; CHECK-NEXT:    add.w r7, r11, r6
-; CHECK-NEXT:    vldrw.u32 q2, [r9]
-; CHECK-NEXT:    vstrw.32 q2, [r7]
-; CHECK-NEXT:    adds r7, r3, r6
-; CHECK-NEXT:    vldrw.u32 q2, [r7]
-; CHECK-NEXT:    adds r5, r7, #2
-; CHECK-NEXT:    vldrw.u32 q3, [r5]
-; CHECK-NEXT:    adds r5, r7, #6
-; CHECK-NEXT:    vmul.f16 q2, q2, r10
-; CHECK-NEXT:    vfma.f16 q2, q3, r12
-; CHECK-NEXT:    vldrw.u32 q3, [r7, #4]
-; CHECK-NEXT:    vfma.f16 q2, q3, r4
-; CHECK-NEXT:    vldrw.u32 q3, [r5]
-; CHECK-NEXT:    adds r5, r2, r6
-; CHECK-NEXT:    adds r6, #8
-; CHECK-NEXT:    vfma.f16 q2, q3, r8
-; CHECK-NEXT:    vstrw.32 q2, [r5]
+; CHECK-NEXT:    add.w r11, r1, r10
+; CHECK-NEXT:    add.w r5, r4, r10
+; CHECK-NEXT:    vldrw.u32 q0, [r11]
+; CHECK-NEXT:    vstrw.32 q0, [r5]
+; CHECK-NEXT:    add.w r5, r12, r10
+; CHECK-NEXT:    vldrw.u32 q0, [r5]
+; CHECK-NEXT:    adds r6, r5, #2
+; CHECK-NEXT:    vldrw.u32 q1, [r6]
+; CHECK-NEXT:    vmul.f16 q0, q0, r7
+; CHECK-NEXT:    vfma.f16 q0, q1, r8
+; CHECK-NEXT:    vldrw.u32 q1, [r5, #4]
+; CHECK-NEXT:    adds r5, #6
+; CHECK-NEXT:    vfma.f16 q0, q1, r9
+; CHECK-NEXT:    vldrw.u32 q1, [r5]
+; CHECK-NEXT:    add.w r5, r2, r10
+; CHECK-NEXT:    add.w r10, r10, #8
+; CHECK-NEXT:    vfma.f16 q0, q1, r3
+; CHECK-NEXT:    vstrw.32 q0, [r5]
 ; CHECK-NEXT:    le lr, .LBB15_3
 ; CHECK-NEXT:  @ %bb.4: @ %while.end.loopexit
-; CHECK-NEXT:    ldr r2, [sp] @ 4-byte Reload
-; CHECK-NEXT:    add r4, sp, #4
-; CHECK-NEXT:    add r11, r6
-; CHECK-NEXT:    add.w r5, r3, r2, lsl #1
-; CHECK-NEXT:    add.w r1, r1, r2, lsl #1
-; CHECK-NEXT:    ldm r4, {r2, r3, r4} @ 12-byte Folded Reload
+; CHECK-NEXT:    add r4, r10
+; CHECK-NEXT:    add.w r12, r12, r0, lsl #1
+; CHECK-NEXT:    add.w r1, r1, r0, lsl #1
+; CHECK-NEXT:    ldm.w sp, {r0, r2, r5, r10} @ 16-byte Folded Reload
 ; CHECK-NEXT:  .LBB15_5: @ %while.end
-; CHECK-NEXT:    and r7, r3, #3
-; CHECK-NEXT:    vldrw.u32 q2, [r1]
-; CHECK-NEXT:    vctp.16 r7
+; CHECK-NEXT:    and r6, r5, #3
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    vctp.16 r6
 ; CHECK-NEXT:    vpst
-; CHECK-NEXT:    vstrht.16 q2, [r11]
-; CHECK-NEXT:    vldrw.u32 q2, [r5]
-; CHECK-NEXT:    vmov.f16 r1, s6
-; CHECK-NEXT:    adds r7, r5, #2
-; CHECK-NEXT:    vmul.f16 q2, q2, r1
-; CHECK-NEXT:    vmov.f16 r1, s4
-; CHECK-NEXT:    vldrw.u32 q1, [r7]
-; CHECK-NEXT:    adds r7, r5, #6
-; CHECK-NEXT:    vfma.f16 q2, q1, r1
-; CHECK-NEXT:    vldrw.u32 q1, [r5, #4]
-; CHECK-NEXT:    vmov.f16 r1, s2
-; CHECK-NEXT:    vfma.f16 q2, q1, r1
-; CHECK-NEXT:    vmov.f16 r1, s0
-; CHECK-NEXT:    vldrw.u32 q0, [r7]
-; CHECK-NEXT:    vfma.f16 q2, q0, r1
+; CHECK-NEXT:    vstrht.16 q0, [r4]
+; CHECK-NEXT:    vldrw.u32 q0, [r12]
+; CHECK-NEXT:    add.w r1, r12, #2
+; CHECK-NEXT:    vldrw.u32 q1, [r1]
+; CHECK-NEXT:    add.w r1, r12, #6
+; CHECK-NEXT:    vmul.f16 q0, q0, r7
+; CHECK-NEXT:    vfma.f16 q0, q1, r8
+; CHECK-NEXT:    vldrw.u32 q1, [r12, #4]
+; CHECK-NEXT:    vfma.f16 q0, q1, r9
+; CHECK-NEXT:    vldrw.u32 q1, [r1]
+; CHECK-NEXT:    vfma.f16 q0, q1, r3
 ; CHECK-NEXT:    vpst
-; CHECK-NEXT:    vstrht.16 q2, [r2]
-; CHECK-NEXT:    ldr r5, [r0, #4]
+; CHECK-NEXT:    vstrht.16 q0, [r2]
+; CHECK-NEXT:    ldr.w r12, [r0, #4]
 ; CHECK-NEXT:  .LBB15_6: @ %if.end
-; CHECK-NEXT:    add.w r0, r5, r3, lsl #1
-; CHECK-NEXT:    mov r2, r5
-; CHECK-NEXT:    lsr.w lr, r4, #2
+; CHECK-NEXT:    add.w r0, r12, r5, lsl #1
+; CHECK-NEXT:    lsr.w lr, r10, #2
 ; CHECK-NEXT:    wls lr, lr, .LBB15_10
 ; CHECK-NEXT:  @ %bb.7: @ %while.body51.preheader
-; CHECK-NEXT:    bic r7, r4, #3
-; CHECK-NEXT:    adds r1, r7, r3
-; CHECK-NEXT:    mov r3, r2
-; CHECK-NEXT:    add.w r1, r2, r1, lsl #1
+; CHECK-NEXT:    bic r2, r10, #3
+; CHECK-NEXT:    mov r3, r12
+; CHECK-NEXT:    adds r1, r2, r5
+; CHECK-NEXT:    add.w r1, r12, r1, lsl #1
 ; CHECK-NEXT:  .LBB15_8: @ %while.body51
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r0], #8
 ; CHECK-NEXT:    vstrb.8 q0, [r3], #8
 ; CHECK-NEXT:    le lr, .LBB15_8
 ; CHECK-NEXT:  @ %bb.9: @ %while.end55.loopexit
-; CHECK-NEXT:    add.w r2, r2, r7, lsl #1
+; CHECK-NEXT:    add.w r12, r12, r2, lsl #1
 ; CHECK-NEXT:    mov r0, r1
 ; CHECK-NEXT:  .LBB15_10: @ %while.end55
-; CHECK-NEXT:    ands r1, r4, #3
+; CHECK-NEXT:    ands r1, r10, #3
 ; CHECK-NEXT:    beq .LBB15_12
 ; CHECK-NEXT:  @ %bb.11: @ %if.then59
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
 ; CHECK-NEXT:    vctp.16 r1
 ; CHECK-NEXT:    vpst
-; CHECK-NEXT:    vstrht.16 q0, [r2]
+; CHECK-NEXT:    vstrht.16 q0, [r12]
 ; CHECK-NEXT:  .LBB15_12: @ %if.end61
 ; CHECK-NEXT:    add sp, #16
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}

diff --git a/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll b/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
index da082719f45e..111a5871a17b 100644
--- a/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
@@ -772,115 +772,98 @@ define void @arm_fir_f32_1_4_mve(%struct.arm_fir_instance_f32* nocapture readonl
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
 ; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-; CHECK-NEXT:    .pad #4
-; CHECK-NEXT:    sub sp, #4
-; CHECK-NEXT:    .vsave {d8, d9}
-; CHECK-NEXT:    vpush {d8, d9}
-; CHECK-NEXT:    .pad #24
-; CHECK-NEXT:    sub sp, #24
-; CHECK-NEXT:    ldrh r4, [r0]
-; CHECK-NEXT:    ldr.w r11, [r0, #4]
-; CHECK-NEXT:    subs r7, r4, #1
-; CHECK-NEXT:    cmp r7, #3
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, #16
+; CHECK-NEXT:    ldrh r5, [r0]
+; CHECK-NEXT:    mov r6, r3
+; CHECK-NEXT:    ldr.w r12, [r0, #4]
+; CHECK-NEXT:    sub.w lr, r5, #1
+; CHECK-NEXT:    cmp.w lr, #3
 ; CHECK-NEXT:    bhi .LBB15_6
 ; CHECK-NEXT:  @ %bb.1: @ %if.then
-; CHECK-NEXT:    ldr r6, [r0, #8]
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    add.w r0, r11, r7, lsl #2
-; CHECK-NEXT:    lsr.w lr, r3, #2
-; CHECK-NEXT:    vldr s0, [r6]
-; CHECK-NEXT:    vldr s2, [r6, #4]
-; CHECK-NEXT:    vldr s4, [r6, #8]
-; CHECK-NEXT:    vldr s6, [r6, #12]
+; CHECK-NEXT:    ldr r4, [r0, #8]
+; CHECK-NEXT:    ldr r3, [r4, #12]
+; CHECK-NEXT:    ldm.w r4, {r7, r8, r9}
+; CHECK-NEXT:    add.w r4, r12, lr, lsl #2
+; CHECK-NEXT:    lsr.w lr, r6, #2
 ; CHECK-NEXT:    wls lr, lr, .LBB15_5
 ; CHECK-NEXT:  @ %bb.2: @ %while.body.lr.ph
-; CHECK-NEXT:    strd r5, r3, [sp, #12] @ 8-byte Folded Spill
-; CHECK-NEXT:    vmov r7, s4
-; CHECK-NEXT:    str r4, [sp, #20] @ 4-byte Spill
-; CHECK-NEXT:    vmov r5, s6
-; CHECK-NEXT:    vmov r4, s2
-; CHECK-NEXT:    bic r3, r3, #3
-; CHECK-NEXT:    vmov r8, s0
-; CHECK-NEXT:    movs r6, #0
-; CHECK-NEXT:    str r3, [sp, #4] @ 4-byte Spill
-; CHECK-NEXT:    add.w r3, r2, r3, lsl #2
-; CHECK-NEXT:    str r3, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT:    strd r6, r5, [sp, #8] @ 8-byte Folded Spill
+; CHECK-NEXT:    bic r5, r6, #3
+; CHECK-NEXT:    str r0, [sp] @ 4-byte Spill
+; CHECK-NEXT:    mov.w r10, #0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    add.w r5, r2, r5, lsl #2
+; CHECK-NEXT:    str r5, [sp, #4] @ 4-byte Spill
 ; CHECK-NEXT:  .LBB15_3: @ %while.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    add.w r10, r1, r6
-; CHECK-NEXT:    adds r3, r0, r6
-; CHECK-NEXT:    vldrw.u32 q2, [r10]
-; CHECK-NEXT:    add.w r12, r11, r6
-; CHECK-NEXT:    add.w r9, r2, r6
-; CHECK-NEXT:    adds r6, #16
-; CHECK-NEXT:    vstrw.32 q2, [r3]
-; CHECK-NEXT:    vldrw.u32 q2, [r12]
-; CHECK-NEXT:    vldrw.u32 q3, [r12, #4]
-; CHECK-NEXT:    vldrw.u32 q4, [r12, #12]
-; CHECK-NEXT:    vmul.f32 q2, q2, r8
-; CHECK-NEXT:    vfma.f32 q2, q3, r4
-; CHECK-NEXT:    vldrw.u32 q3, [r12, #8]
-; CHECK-NEXT:    vfma.f32 q2, q3, r7
-; CHECK-NEXT:    vfma.f32 q2, q4, r5
-; CHECK-NEXT:    vstrw.32 q2, [r9]
+; CHECK-NEXT:    add.w r11, r1, r10
+; CHECK-NEXT:    add.w r5, r4, r10
+; CHECK-NEXT:    vldrw.u32 q0, [r11]
+; CHECK-NEXT:    add.w r6, r12, r10
+; CHECK-NEXT:    vstrw.32 q0, [r5]
+; CHECK-NEXT:    add.w r5, r2, r10
+; CHECK-NEXT:    vldrw.u32 q0, [r6]
+; CHECK-NEXT:    vldrw.u32 q1, [r6, #4]
+; CHECK-NEXT:    vldrw.u32 q2, [r6, #12]
+; CHECK-NEXT:    add.w r10, r10, #16
+; CHECK-NEXT:    vmul.f32 q0, q0, r7
+; CHECK-NEXT:    vfma.f32 q0, q1, r8
+; CHECK-NEXT:    vldrw.u32 q1, [r6, #8]
+; CHECK-NEXT:    vfma.f32 q0, q1, r9
+; CHECK-NEXT:    vfma.f32 q0, q2, r3
+; CHECK-NEXT:    vstrw.32 q0, [r5]
 ; CHECK-NEXT:    le lr, .LBB15_3
 ; CHECK-NEXT:  @ %bb.4: @ %while.end.loopexit
-; CHECK-NEXT:    ldr r2, [sp, #4] @ 4-byte Reload
-; CHECK-NEXT:    add r0, r6
-; CHECK-NEXT:    ldrd r3, r4, [sp, #16] @ 8-byte Folded Reload
-; CHECK-NEXT:    add.w r11, r11, r2, lsl #2
-; CHECK-NEXT:    add.w r1, r1, r2, lsl #2
-; CHECK-NEXT:    ldrd r2, r5, [sp, #8] @ 8-byte Folded Reload
+; CHECK-NEXT:    ldr r5, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT:    add r4, r10
+; CHECK-NEXT:    add.w r12, r12, r0, lsl #2
+; CHECK-NEXT:    add.w r1, r1, r0, lsl #2
+; CHECK-NEXT:    ldm.w sp, {r0, r2, r6} @ 12-byte Folded Reload
 ; CHECK-NEXT:  .LBB15_5: @ %while.end
-; CHECK-NEXT:    and r6, r3, #3
-; CHECK-NEXT:    vmov r12, s6
-; CHECK-NEXT:    vmov lr, s4
-; CHECK-NEXT:    vldrw.u32 q1, [r1]
-; CHECK-NEXT:    vctp.32 r6
+; CHECK-NEXT:    and lr, r6, #3
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    vctp.32 lr
 ; CHECK-NEXT:    vpst
-; CHECK-NEXT:    vstrwt.32 q1, [r0]
-; CHECK-NEXT:    vmov r9, s2
-; CHECK-NEXT:    vldrw.u32 q1, [r11, #4]
-; CHECK-NEXT:    vmov r7, s0
-; CHECK-NEXT:    vldrw.u32 q0, [r11]
+; CHECK-NEXT:    vstrwt.32 q0, [r4]
+; CHECK-NEXT:    vldrw.u32 q0, [r12]
+; CHECK-NEXT:    vldrw.u32 q1, [r12, #4]
 ; CHECK-NEXT:    vmul.f32 q0, q0, r7
+; CHECK-NEXT:    vfma.f32 q0, q1, r8
+; CHECK-NEXT:    vldrw.u32 q1, [r12, #8]
 ; CHECK-NEXT:    vfma.f32 q0, q1, r9
-; CHECK-NEXT:    vldrw.u32 q1, [r11, #8]
-; CHECK-NEXT:    vfma.f32 q0, q1, lr
-; CHECK-NEXT:    vldrw.u32 q1, [r11, #12]
-; CHECK-NEXT:    vfma.f32 q0, q1, r12
+; CHECK-NEXT:    vldrw.u32 q1, [r12, #12]
+; CHECK-NEXT:    vfma.f32 q0, q1, r3
 ; CHECK-NEXT:    vpst
 ; CHECK-NEXT:    vstrwt.32 q0, [r2]
-; CHECK-NEXT:    ldr.w r11, [r5, #4]
+; CHECK-NEXT:    ldr.w r12, [r0, #4]
 ; CHECK-NEXT:  .LBB15_6: @ %if.end
-; CHECK-NEXT:    add.w r0, r11, r3, lsl #2
-; CHECK-NEXT:    lsr.w lr, r4, #2
+; CHECK-NEXT:    add.w r0, r12, r6, lsl #2
+; CHECK-NEXT:    lsr.w lr, r5, #2
 ; CHECK-NEXT:    wls lr, lr, .LBB15_10
 ; CHECK-NEXT:  @ %bb.7: @ %while.body51.preheader
-; CHECK-NEXT:    bic r7, r4, #3
-; CHECK-NEXT:    adds r1, r7, r3
-; CHECK-NEXT:    mov r3, r11
-; CHECK-NEXT:    add.w r1, r11, r1, lsl #2
+; CHECK-NEXT:    bic r2, r5, #3
+; CHECK-NEXT:    mov r3, r12
+; CHECK-NEXT:    adds r1, r2, r6
+; CHECK-NEXT:    add.w r1, r12, r1, lsl #2
 ; CHECK-NEXT:  .LBB15_8: @ %while.body51
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r0], #16
 ; CHECK-NEXT:    vstrb.8 q0, [r3], #16
 ; CHECK-NEXT:    le lr, .LBB15_8
 ; CHECK-NEXT:  @ %bb.9: @ %while.end55.loopexit
-; CHECK-NEXT:    add.w r11, r11, r7, lsl #2
+; CHECK-NEXT:    add.w r12, r12, r2, lsl #2
 ; CHECK-NEXT:    mov r0, r1
 ; CHECK-NEXT:  .LBB15_10: @ %while.end55
-; CHECK-NEXT:    ands r1, r4, #3
+; CHECK-NEXT:    ands r1, r5, #3
 ; CHECK-NEXT:    beq .LBB15_12
 ; CHECK-NEXT:  @ %bb.11: @ %if.then59
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
 ; CHECK-NEXT:    vctp.32 r1
 ; CHECK-NEXT:    vpst
-; CHECK-NEXT:    vstrwt.32 q0, [r11]
+; CHECK-NEXT:    vstrwt.32 q0, [r12]
 ; CHECK-NEXT:  .LBB15_12: @ %if.end61
-; CHECK-NEXT:    add sp, #24
-; CHECK-NEXT:    vpop {d8, d9}
-; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    add sp, #16
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
   %pState1 = getelementptr inbounds %struct.arm_fir_instance_f32, %struct.arm_fir_instance_f32* %S, i32 0, i32 1

diff --git a/llvm/test/CodeGen/Thumb2/mve-fma-loops.ll b/llvm/test/CodeGen/Thumb2/mve-fma-loops.ll
index 4888f78c7dae..bd1141d4b1af 100644
--- a/llvm/test/CodeGen/Thumb2/mve-fma-loops.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-fma-loops.ll
@@ -239,9 +239,9 @@ define arm_aapcs_vfpcc void @fmss1(float* nocapture readonly %x, float* nocaptur
 ; CHECK-NEXT:    cmp r3, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    poplt {r7, pc}
-; CHECK-NEXT:    vneg.f32 s0, s0
-; CHECK-NEXT:    dlstp.32 lr, r3
 ; CHECK-NEXT:    vmov r12, s0
+; CHECK-NEXT:    dlstp.32 lr, r3
+; CHECK-NEXT:    eor r12, r12, #-2147483648
 ; CHECK-NEXT:  .LBB4_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r1], #16
@@ -479,9 +479,9 @@ define arm_aapcs_vfpcc void @fms1(float* nocapture readonly %x, float* nocapture
 ; CHECK-NEXT:    cmp r3, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    poplt {r7, pc}
-; CHECK-NEXT:    vneg.f32 s0, s0
-; CHECK-NEXT:    dlstp.32 lr, r3
 ; CHECK-NEXT:    vmov r12, s0
+; CHECK-NEXT:    dlstp.32 lr, r3
+; CHECK-NEXT:    eor r12, r12, #-2147483648
 ; CHECK-NEXT:  .LBB8_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r0], #16

diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/dup.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/dup.ll
index 530ef11f871a..a87848244af1 100644
--- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/dup.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/dup.ll
@@ -4,7 +4,7 @@
 define arm_aapcs_vfpcc <8 x half> @test_vdupq_n_f16(float %a.coerce) {
 ; CHECK-LABEL: test_vdupq_n_f16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.f16 r0, s0
+; CHECK-NEXT:    vmov r0, s0
 ; CHECK-NEXT:    vdup.16 q0, r0
 ; CHECK-NEXT:    bx lr
 entry:
@@ -97,7 +97,7 @@ entry:
 define arm_aapcs_vfpcc <8 x half> @test_vdupq_m_n_f16(<8 x half> %inactive, float %a.coerce, i16 zeroext %p) {
 ; CHECK-LABEL: test_vdupq_m_n_f16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.f16 r1, s4
+; CHECK-NEXT:    vmov r1, s4
 ; CHECK-NEXT:    vmsr p0, r0
 ; CHECK-NEXT:    vpst
 ; CHECK-NEXT:    vdupt.16 q0, r1

diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/ternary.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/ternary.ll
index 73c2707e7af7..b752aa28e3c3 100644
--- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/ternary.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/ternary.ll
@@ -24,7 +24,7 @@ entry:
 define arm_aapcs_vfpcc <8 x half> @test_vfmaq_n_f16(<8 x half> %a, <8 x half> %b, float %c.coerce) {
 ; CHECK-LABEL: test_vfmaq_n_f16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.f16 r0, s8
+; CHECK-NEXT:    vmov r0, s8
 ; CHECK-NEXT:    vfma.f16 q0, q1, r0
 ; CHECK-NEXT:    bx lr
 entry:
@@ -53,7 +53,7 @@ entry:
 define arm_aapcs_vfpcc <8 x half> @test_vfmasq_n_f16(<8 x half> %a, <8 x half> %b, float %c.coerce) {
 ; CHECK-LABEL: test_vfmasq_n_f16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.f16 r0, s8
+; CHECK-NEXT:    vmov r0, s8
 ; CHECK-NEXT:    vfmas.f16 q0, q1, r0
 ; CHECK-NEXT:    bx lr
 entry:
@@ -422,7 +422,7 @@ entry:
 define arm_aapcs_vfpcc <8 x half> @test_vfmaq_m_n_f16(<8 x half> %a, <8 x half> %b, float %c.coerce, i16 zeroext %p) {
 ; CHECK-LABEL: test_vfmaq_m_n_f16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.f16 r1, s8
+; CHECK-NEXT:    vmov r1, s8
 ; CHECK-NEXT:    vmsr p0, r0
 ; CHECK-NEXT:    vpst
 ; CHECK-NEXT:    vfmat.f16 q0, q1, r1
@@ -459,7 +459,7 @@ entry:
 define arm_aapcs_vfpcc <8 x half> @test_vfmasq_m_n_f16(<8 x half> %a, <8 x half> %b, float %c.coerce, i16 zeroext %p) {
 ; CHECK-LABEL: test_vfmasq_m_n_f16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.f16 r1, s8
+; CHECK-NEXT:    vmov r1, s8
 ; CHECK-NEXT:    vmsr p0, r0
 ; CHECK-NEXT:    vpst
 ; CHECK-NEXT:    vfmast.f16 q0, q1, r1

diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vaddq.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vaddq.ll
index 97bc14969a09..5850404418a7 100644
--- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vaddq.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vaddq.ll
@@ -106,7 +106,7 @@ entry:
 define arm_aapcs_vfpcc <8 x half> @test_vaddq_n_f16(<8 x half> %a, float %b.coerce) {
 ; CHECK-LABEL: test_vaddq_n_f16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.f16 r0, s4
+; CHECK-NEXT:    vmov r0, s4
 ; CHECK-NEXT:    vadd.f16 q0, q0, r0
 ; CHECK-NEXT:    bx lr
 entry:
@@ -171,7 +171,7 @@ entry:
 define arm_aapcs_vfpcc <8 x half> @test_vaddq_x_n_f16(<8 x half> %a, float %b.coerce, i16 zeroext %p) {
 ; CHECK-LABEL: test_vaddq_x_n_f16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.f16 r1, s4
+; CHECK-NEXT:    vmov r1, s4
 ; CHECK-NEXT:    vmsr p0, r0
 ; CHECK-NEXT:    vpst
 ; CHECK-NEXT:    vaddt.f16 q0, q0, r1

diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmulq.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmulq.ll
index 075e609479a6..387e032707b2 100644
--- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmulq.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vmulq.ll
@@ -269,7 +269,7 @@ entry:
 define arm_aapcs_vfpcc <8 x half> @test_vmulq_m_n_f16(<8 x half> %inactive, <8 x half> %a, float %b.coerce, i16 zeroext %p) {
 ; CHECK-LABEL: test_vmulq_m_n_f16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.f16 r1, s8
+; CHECK-NEXT:    vmov r1, s8
 ; CHECK-NEXT:    vmsr p0, r0
 ; CHECK-NEXT:    vpst
 ; CHECK-NEXT:    vmult.f16 q0, q1, r1

diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vsubq.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vsubq.ll
index e65a84a19c3b..d1722d498934 100644
--- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vsubq.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vsubq.ll
@@ -106,7 +106,7 @@ entry:
 define arm_aapcs_vfpcc <8 x half> @test_vsubq_n_f16(<8 x half> %a, float %b.coerce) {
 ; CHECK-LABEL: test_vsubq_n_f16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.f16 r0, s4
+; CHECK-NEXT:    vmov r0, s4
 ; CHECK-NEXT:    vsub.f16 q0, q0, r0
 ; CHECK-NEXT:    bx lr
 entry:
@@ -171,7 +171,7 @@ entry:
 define arm_aapcs_vfpcc <8 x half> @test_vsubq_x_n_f16(<8 x half> %a, float %b.coerce, i16 zeroext %p) {
 ; CHECK-LABEL: test_vsubq_x_n_f16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.f16 r1, s4
+; CHECK-NEXT:    vmov r1, s4
 ; CHECK-NEXT:    vmsr p0, r0
 ; CHECK-NEXT:    vpst
 ; CHECK-NEXT:    vsubt.f16 q0, q0, r1

diff --git a/llvm/test/CodeGen/Thumb2/mve-postinc-lsr.ll b/llvm/test/CodeGen/Thumb2/mve-postinc-lsr.ll
index 8f01326c002f..646124e0cf98 100644
--- a/llvm/test/CodeGen/Thumb2/mve-postinc-lsr.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-postinc-lsr.ll
@@ -1170,32 +1170,32 @@ define arm_aapcs_vfpcc void @_Z37_arm_radix4_butterfly_inverse_f32_mvePK21arm_cf
 ; CHECK-NEXT:    lsr.w lr, r0, #3
 ; CHECK-NEXT:    wls lr, lr, .LBB7_12
 ; CHECK-NEXT:  @ %bb.10:
-; CHECK-NEXT:    vldrw.u32 q3, [q1, #16]
 ; CHECK-NEXT:    vldr s0, [sp, #4] @ 4-byte Reload
 ; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vldrw.u32 q0, [q1, #16]
 ; CHECK-NEXT:  .LBB7_11: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vldrw.u32 q0, [q1, #24]
+; CHECK-NEXT:    vldrw.u32 q3, [q1, #24]
 ; CHECK-NEXT:    vldrw.u32 q4, [q1, #8]
-; CHECK-NEXT:    vadd.f32 q6, q2, q3
-; CHECK-NEXT:    vsub.f32 q2, q2, q3
-; CHECK-NEXT:    vadd.f32 q5, q4, q0
-; CHECK-NEXT:    vsub.f32 q0, q4, q0
-; CHECK-NEXT:    vsub.f32 q7, q6, q5
-; CHECK-NEXT:    vcadd.f32 q4, q2, q0, #270
+; CHECK-NEXT:    vsub.f32 q6, q2, q0
+; CHECK-NEXT:    vadd.f32 q0, q2, q0
+; CHECK-NEXT:    vsub.f32 q5, q4, q3
+; CHECK-NEXT:    vadd.f32 q3, q4, q3
+; CHECK-NEXT:    vcadd.f32 q7, q6, q5, #270
+; CHECK-NEXT:    vsub.f32 q2, q0, q3
+; CHECK-NEXT:    vmul.f32 q7, q7, r0
+; CHECK-NEXT:    vadd.f32 q3, q0, q3
 ; CHECK-NEXT:    vstrw.32 q7, [sp, #32] @ 16-byte Spill
-; CHECK-NEXT:    vcadd.f32 q7, q2, q0, #90
-; CHECK-NEXT:    vadd.f32 q0, q6, q5
+; CHECK-NEXT:    vcadd.f32 q7, q6, q5, #90
+; CHECK-NEXT:    vmul.f32 q4, q2, r0
 ; CHECK-NEXT:    vldrw.u32 q2, [q1, #64]!
-; CHECK-NEXT:    vmul.f32 q0, q0, r0
-; CHECK-NEXT:    vldrw.u32 q3, [q1, #16]
-; CHECK-NEXT:    vstrw.32 q0, [q1, #-64]
-; CHECK-NEXT:    vldrw.u32 q5, [sp, #32] @ 16-byte Reload
-; CHECK-NEXT:    vmul.f32 q0, q4, r0
-; CHECK-NEXT:    vmul.f32 q4, q7, r0
-; CHECK-NEXT:    vmul.f32 q5, q5, r0
-; CHECK-NEXT:    vstrw.32 q5, [q1, #-56]
-; CHECK-NEXT:    vstrw.32 q4, [q1, #-48]
-; CHECK-NEXT:    vstrw.32 q0, [q1, #-40]
+; CHECK-NEXT:    vmul.f32 q5, q7, r0
+; CHECK-NEXT:    vmul.f32 q3, q3, r0
+; CHECK-NEXT:    vldrw.u32 q0, [q1, #16]
+; CHECK-NEXT:    vstrw.32 q3, [q1, #-64]
+; CHECK-NEXT:    vstrw.32 q4, [q1, #-56]
+; CHECK-NEXT:    vstrw.32 q5, [q1, #-48]
+; CHECK-NEXT:    vldrw.u32 q3, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT:    vstrw.32 q3, [q1, #-40]
 ; CHECK-NEXT:    le lr, .LBB7_11
 ; CHECK-NEXT:  .LBB7_12:
 ; CHECK-NEXT:    add sp, #56

diff --git a/llvm/test/CodeGen/Thumb2/mve-pred-threshold.ll b/llvm/test/CodeGen/Thumb2/mve-pred-threshold.ll
index 0ba0e8a6c409..4c665bdbfde1 100644
--- a/llvm/test/CodeGen/Thumb2/mve-pred-threshold.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-pred-threshold.ll
@@ -177,20 +177,19 @@ define arm_aapcs_vfpcc void @thresh_f32(float* %data, i16 zeroext %N, float %T)
 ; CHECK-NEXT:    cmp r1, #0
 ; CHECK-NEXT:    it eq
 ; CHECK-NEXT:    popeq {r7, pc}
-; CHECK-NEXT:    vneg.f32 s2, s0
 ; CHECK-NEXT:    mvn r2, #3
 ; CHECK-NEXT:    add.w r1, r2, r1, lsl #2
 ; CHECK-NEXT:    movs r2, #1
 ; CHECK-NEXT:    add.w lr, r2, r1, lsr #2
-; CHECK-NEXT:    vmov r2, s0
-; CHECK-NEXT:    dls lr, lr
-; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    vmov r1, s0
 ; CHECK-NEXT:    vmov.i32 q0, #0x0
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:    eor r2, r1, #-2147483648
 ; CHECK-NEXT:  .LBB3_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
-; CHECK-NEXT:    vpte.f32 ge, q1, r2
-; CHECK-NEXT:    vcmpt.f32 le, q1, r1
+; CHECK-NEXT:    vpte.f32 ge, q1, r1
+; CHECK-NEXT:    vcmpt.f32 le, q1, r2
 ; CHECK-NEXT:    vstrwe.32 q0, [r0], #16
 ; CHECK-NEXT:    le lr, .LBB3_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
@@ -235,13 +234,13 @@ define arm_aapcs_vfpcc void @thresh_f16(half* %data, i16 zeroext %N, float %T.co
 ; CHECK-NEXT:    cmp r1, #0
 ; CHECK-NEXT:    it eq
 ; CHECK-NEXT:    popeq {r7, pc}
-; CHECK-NEXT:    mvn r2, #7
-; CHECK-NEXT:    add.w r1, r2, r1, lsl #3
-; CHECK-NEXT:    movs r2, #1
-; CHECK-NEXT:    vneg.f16 s2, s0
-; CHECK-NEXT:    add.w lr, r2, r1, lsr #3
-; CHECK-NEXT:    vmov.f16 r1, s2
-; CHECK-NEXT:    vmov.f16 r2, s0
+; CHECK-NEXT:    mvn r3, #7
+; CHECK-NEXT:    add.w r1, r3, r1, lsl #3
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vneg.f16 s0, s0
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    add.w lr, r3, r1, lsr #3
+; CHECK-NEXT:    vmov.f16 r1, s0
 ; CHECK-NEXT:    vmov.i32 q0, #0x0
 ; CHECK-NEXT:    dls lr, lr
 ; CHECK-NEXT:  .LBB4_1: @ %vector.body
@@ -466,20 +465,19 @@ define arm_aapcs_vfpcc void @thresh_rev_f32(float* %data, i16 zeroext %N, float
 ; CHECK-NEXT:    cmp r1, #0
 ; CHECK-NEXT:    it eq
 ; CHECK-NEXT:    popeq {r7, pc}
-; CHECK-NEXT:    vneg.f32 s2, s0
 ; CHECK-NEXT:    mvn r2, #3
 ; CHECK-NEXT:    add.w r1, r2, r1, lsl #2
 ; CHECK-NEXT:    movs r2, #1
 ; CHECK-NEXT:    add.w lr, r2, r1, lsr #2
-; CHECK-NEXT:    vmov r2, s0
-; CHECK-NEXT:    dls lr, lr
-; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    vmov r1, s0
 ; CHECK-NEXT:    vmov.i32 q0, #0x0
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:    eor r2, r1, #-2147483648
 ; CHECK-NEXT:  .LBB8_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
-; CHECK-NEXT:    vpte.f32 ge, q1, r2
-; CHECK-NEXT:    vcmpt.f32 le, q1, r1
+; CHECK-NEXT:    vpte.f32 ge, q1, r1
+; CHECK-NEXT:    vcmpt.f32 le, q1, r2
 ; CHECK-NEXT:    vstrwe.32 q0, [r0], #16
 ; CHECK-NEXT:    le lr, .LBB8_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
@@ -524,13 +522,13 @@ define arm_aapcs_vfpcc void @thresh_rev_f16(half* %data, i16 zeroext %N, float %
 ; CHECK-NEXT:    cmp r1, #0
 ; CHECK-NEXT:    it eq
 ; CHECK-NEXT:    popeq {r7, pc}
-; CHECK-NEXT:    mvn r2, #7
-; CHECK-NEXT:    add.w r1, r2, r1, lsl #3
-; CHECK-NEXT:    movs r2, #1
-; CHECK-NEXT:    vneg.f16 s2, s0
-; CHECK-NEXT:    add.w lr, r2, r1, lsr #3
-; CHECK-NEXT:    vmov.f16 r1, s2
-; CHECK-NEXT:    vmov.f16 r2, s0
+; CHECK-NEXT:    mvn r3, #7
+; CHECK-NEXT:    add.w r1, r3, r1, lsl #3
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vneg.f16 s0, s0
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    add.w lr, r3, r1, lsr #3
+; CHECK-NEXT:    vmov.f16 r1, s0
 ; CHECK-NEXT:    vmov.i32 q0, #0x0
 ; CHECK-NEXT:    dls lr, lr
 ; CHECK-NEXT:  .LBB9_1: @ %vector.body

diff --git a/llvm/test/CodeGen/Thumb2/mve-vldst4.ll b/llvm/test/CodeGen/Thumb2/mve-vldst4.ll
index 8fba6d97eaa6..27d5db41c151 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vldst4.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vldst4.ll
@@ -4,233 +4,235 @@
 define void @vldst4(half* nocapture readonly %pIn, half* nocapture %pOut, i32 %numRows, i32 %numCols, i32 %scale.coerce) #0 {
 ; CHECK-LABEL: vldst4:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .save {r4, lr}
-; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    .pad #88
-; CHECK-NEXT:    sub sp, #88
-; CHECK-NEXT:    muls r2, r3, r2
-; CHECK-NEXT:    movs r3, #0
-; CHECK-NEXT:    cmp.w r3, r2, lsr #2
+; CHECK-NEXT:    .pad #80
+; CHECK-NEXT:    sub sp, #80
+; CHECK-NEXT:    mul r12, r3, r2
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    cmp.w r2, r12, lsr #2
 ; CHECK-NEXT:    beq.w .LBB0_3
 ; CHECK-NEXT:  @ %bb.1: @ %vector.ph
-; CHECK-NEXT:    vldr.16 s0, [sp, #160]
 ; CHECK-NEXT:    mvn r3, #7
-; CHECK-NEXT:    and.w r2, r3, r2, lsr #2
+; CHECK-NEXT:    ldr r5, [sp, #160]
+; CHECK-NEXT:    and.w r3, r3, r12, lsr #2
+; CHECK-NEXT:    sub.w r12, r3, #8
 ; CHECK-NEXT:    movs r3, #1
-; CHECK-NEXT:    vmov.f16 r12, s0
-; CHECK-NEXT:    subs r2, #8
-; CHECK-NEXT:    add.w lr, r3, r2, lsr #3
+; CHECK-NEXT:    add.w lr, r3, r12, lsr #3
 ; CHECK-NEXT:    dls lr, lr
 ; CHECK-NEXT:  .LBB0_2: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vldrh.u16 q4, [r0, #32]
+; CHECK-NEXT:    vldrh.u16 q5, [r0, #32]
 ; CHECK-NEXT:    vldrh.u16 q3, [r0, #48]
 ; CHECK-NEXT:    vldrh.u16 q7, [r0], #64
-; CHECK-NEXT:    vmov r2, s17
-; CHECK-NEXT:    vmovx.f16 s8, s13
+; CHECK-NEXT:    vmov r2, s20
+; CHECK-NEXT:    vmovx.f16 s8, s12
 ; CHECK-NEXT:    vmov.16 q0[4], r2
-; CHECK-NEXT:    vmov r3, s19
+; CHECK-NEXT:    vmov r3, s22
 ; CHECK-NEXT:    vmov.16 q0[5], r3
-; CHECK-NEXT:    vmov r2, s13
+; CHECK-NEXT:    vmov r2, s12
 ; CHECK-NEXT:    vmov.16 q0[6], r2
-; CHECK-NEXT:    vmov r2, s15
-; CHECK-NEXT:    vmov r3, s29
-; CHECK-NEXT:    vldrh.u16 q5, [r0, #-48]
-; CHECK-NEXT:    vmov.16 q0[7], r2
-; CHECK-NEXT:    vmov r2, s31
-; CHECK-NEXT:    vmov.16 q1[0], r3
-; CHECK-NEXT:    vmov.16 q1[1], r2
-; CHECK-NEXT:    vmov r2, s21
+; CHECK-NEXT:    vmov r2, s28
+; CHECK-NEXT:    vldrh.u16 q6, [r0, #-48]
+; CHECK-NEXT:    vmov.16 q1[0], r2
+; CHECK-NEXT:    vmov r3, s30
+; CHECK-NEXT:    vmov.16 q1[1], r3
+; CHECK-NEXT:    vmov r2, s24
 ; CHECK-NEXT:    vmov.16 q1[2], r2
-; CHECK-NEXT:    vmov r2, s23
+; CHECK-NEXT:    vmov r2, s14
+; CHECK-NEXT:    vmov.16 q0[7], r2
+; CHECK-NEXT:    vmov r2, s26
 ; CHECK-NEXT:    vmov.16 q1[3], r2
 ; CHECK-NEXT:    vmov.f32 s6, s2
 ; CHECK-NEXT:    vmov.f32 s7, s3
-; CHECK-NEXT:    vmul.f16 q0, q1, r12
-; CHECK-NEXT:    vmovx.f16 s4, s21
-; CHECK-NEXT:    vmov q6, q0
-; CHECK-NEXT:    vstrw.32 q0, [sp, #32] @ 16-byte Spill
-; CHECK-NEXT:    vmovx.f16 s0, s31
-; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    vmul.f16 q0, q1, r5
+; CHECK-NEXT:    vmovx.f16 s4, s24
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #48] @ 16-byte Spill
+; CHECK-NEXT:    vmovx.f16 s0, s30
+; CHECK-NEXT:    vmov r3, s3
 ; CHECK-NEXT:    vmov r2, s0
-; CHECK-NEXT:    vmovx.f16 s0, s29
+; CHECK-NEXT:    vmovx.f16 s0, s28
 ; CHECK-NEXT:    vmov r4, s0
 ; CHECK-NEXT:    vmov.16 q0[0], r4
 ; CHECK-NEXT:    vmov.16 q0[1], r2
 ; CHECK-NEXT:    vmov r2, s4
-; CHECK-NEXT:    vmovx.f16 s4, s19
+; CHECK-NEXT:    vmovx.f16 s4, s22
 ; CHECK-NEXT:    vmov.16 q0[2], r2
 ; CHECK-NEXT:    vmov r2, s4
-; CHECK-NEXT:    vmovx.f16 s4, s17
+; CHECK-NEXT:    vmovx.f16 s4, s20
 ; CHECK-NEXT:    vmov r4, s4
 ; CHECK-NEXT:    vmov.16 q1[4], r4
 ; CHECK-NEXT:    vmov.16 q1[5], r2
 ; CHECK-NEXT:    vmov r2, s8
-; CHECK-NEXT:    vmovx.f16 s8, s15
+; CHECK-NEXT:    vmovx.f16 s8, s14
 ; CHECK-NEXT:    vmov.16 q1[6], r2
 ; CHECK-NEXT:    vmov r2, s8
-; CHECK-NEXT:    vmovx.f16 s8, s23
+; CHECK-NEXT:    vmovx.f16 s8, s26
 ; CHECK-NEXT:    vmov.16 q1[7], r2
 ; CHECK-NEXT:    vmov r2, s8
 ; CHECK-NEXT:    vmov.16 q0[3], r2
+; CHECK-NEXT:    vmovx.f16 s8, s13
 ; CHECK-NEXT:    vmov.f32 s2, s6
 ; CHECK-NEXT:    vmov.f32 s3, s7
-; CHECK-NEXT:    vmov.16 q1[2], r3
-; CHECK-NEXT:    vmul.f16 q0, q0, r12
-; CHECK-NEXT:    vmov r3, s18
-; CHECK-NEXT:    vmov r2, s1
-; CHECK-NEXT:    vmov q2, q0
+; CHECK-NEXT:    vmov.16 q1[0], r3
+; CHECK-NEXT:    vmul.f16 q0, q0, r5
+; CHECK-NEXT:    vmov r3, s23
+; CHECK-NEXT:    vmov r2, s3
 ; CHECK-NEXT:    vstrw.32 q0, [sp, #64] @ 16-byte Spill
-; CHECK-NEXT:    vmovx.f16 s0, s25
-; CHECK-NEXT:    vmov.16 q1[3], r2
-; CHECK-NEXT:    vmov r2, s0
-; CHECK-NEXT:    vmovx.f16 s0, s9
-; CHECK-NEXT:    vmov.16 q1[6], r2
+; CHECK-NEXT:    vmovx.f16 s0, s19
+; CHECK-NEXT:    vmov.16 q1[1], r2
 ; CHECK-NEXT:    vmov r2, s0
-; CHECK-NEXT:    vmovx.f16 s8, s12
-; CHECK-NEXT:    vmov.16 q1[7], r2
-; CHECK-NEXT:    vmov r2, s16
+; CHECK-NEXT:    vmov.16 q1[4], r2
+; CHECK-NEXT:    vmov r2, s21
 ; CHECK-NEXT:    vmov.16 q0[4], r2
-; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vmov r2, s13
 ; CHECK-NEXT:    vmov.16 q0[5], r3
-; CHECK-NEXT:    vmov r3, s28
-; CHECK-NEXT:    vstrw.32 q1, [sp, #48] @ 16-byte Spill
+; CHECK-NEXT:    vmov r3, s29
+; CHECK-NEXT:    vstrw.32 q1, [sp, #16] @ 16-byte Spill
 ; CHECK-NEXT:    vmov.16 q0[6], r2
-; CHECK-NEXT:    vmov r2, s30
+; CHECK-NEXT:    vmov r2, s31
 ; CHECK-NEXT:    vmov.16 q1[0], r3
 ; CHECK-NEXT:    vmov.16 q1[1], r2
-; CHECK-NEXT:    vmov r2, s20
+; CHECK-NEXT:    vmov r2, s25
 ; CHECK-NEXT:    vmov.16 q1[2], r2
-; CHECK-NEXT:    vmov r2, s14
+; CHECK-NEXT:    vmov r2, s15
 ; CHECK-NEXT:    vmov.16 q0[7], r2
-; CHECK-NEXT:    vmov r2, s22
+; CHECK-NEXT:    vmov r2, s27
 ; CHECK-NEXT:    vmov.16 q1[3], r2
 ; CHECK-NEXT:    vmov.f32 s6, s2
 ; CHECK-NEXT:    vmov.f32 s7, s3
-; CHECK-NEXT:    vmovx.f16 s0, s30
+; CHECK-NEXT:    vmovx.f16 s0, s31
 ; CHECK-NEXT:    vmov r2, s0
-; CHECK-NEXT:    vmovx.f16 s0, s28
+; CHECK-NEXT:    vmovx.f16 s0, s29
 ; CHECK-NEXT:    vmov r4, s0
-; CHECK-NEXT:    vmul.f16 q6, q1, r12
+; CHECK-NEXT:    vmul.f16 q4, q1, r5
 ; CHECK-NEXT:    vmov.16 q0[0], r4
-; CHECK-NEXT:    vmovx.f16 s4, s20
+; CHECK-NEXT:    vmovx.f16 s4, s25
 ; CHECK-NEXT:    vmov.16 q0[1], r2
 ; CHECK-NEXT:    vmov r2, s4
-; CHECK-NEXT:    vmovx.f16 s4, s18
+; CHECK-NEXT:    vmovx.f16 s4, s23
 ; CHECK-NEXT:    vmov.16 q0[2], r2
 ; CHECK-NEXT:    vmov r2, s4
-; CHECK-NEXT:    vmovx.f16 s4, s16
+; CHECK-NEXT:    vmovx.f16 s4, s21
 ; CHECK-NEXT:    vmov r4, s4
-; CHECK-NEXT:    vldrw.u32 q4, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT:    vstrw.32 q4, [sp, #32] @ 16-byte Spill
 ; CHECK-NEXT:    vmov.16 q1[4], r4
-; CHECK-NEXT:    vmov r3, s25
+; CHECK-NEXT:    vmov r3, s16
 ; CHECK-NEXT:    vmov.16 q1[5], r2
 ; CHECK-NEXT:    vmov r2, s8
-; CHECK-NEXT:    vmovx.f16 s8, s14
+; CHECK-NEXT:    vmovx.f16 s8, s15
 ; CHECK-NEXT:    vmov.16 q1[6], r2
 ; CHECK-NEXT:    vmov r2, s8
-; CHECK-NEXT:    vmovx.f16 s8, s22
+; CHECK-NEXT:    vmovx.f16 s8, s27
 ; CHECK-NEXT:    vmov.16 q1[7], r2
 ; CHECK-NEXT:    vmov r2, s8
 ; CHECK-NEXT:    vmov.16 q0[3], r2
-; CHECK-NEXT:    vmov.16 q5[0], r3
+; CHECK-NEXT:    vldrw.u32 q2, [sp, #48] @ 16-byte Reload
 ; CHECK-NEXT:    vmov.f32 s2, s6
 ; CHECK-NEXT:    vmov.f32 s3, s7
+; CHECK-NEXT:    vmov.16 q1[2], r3
+; CHECK-NEXT:    vmul.f16 q6, q0, r5
+; CHECK-NEXT:    vmovx.f16 s0, s16
+; CHECK-NEXT:    vmov r2, s24
+; CHECK-NEXT:    vmov.16 q1[3], r2
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmovx.f16 s0, s24
+; CHECK-NEXT:    vmov.16 q1[6], r2
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmovx.f16 s0, s8
+; CHECK-NEXT:    vmov.16 q1[7], r2
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    vstrw.32 q1, [sp] @ 16-byte Spill
 ; CHECK-NEXT:    vldrw.u32 q1, [sp, #64] @ 16-byte Reload
-; CHECK-NEXT:    vmul.f16 q2, q0, r12
-; CHECK-NEXT:    vmovx.f16 s0, s25
-; CHECK-NEXT:    vmov r2, s9
-; CHECK-NEXT:    vstrw.32 q2, [sp] @ 16-byte Spill
-; CHECK-NEXT:    vmov.16 q5[1], r2
+; CHECK-NEXT:    vmov.16 q5[0], r2
 ; CHECK-NEXT:    vmov r2, s0
-; CHECK-NEXT:    vmovx.f16 s0, s9
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmovx.f16 s0, s4
+; CHECK-NEXT:    vmov.16 q5[1], r3
+; CHECK-NEXT:    vmov r3, s25
 ; CHECK-NEXT:    vmov.16 q5[4], r2
 ; CHECK-NEXT:    vmov r2, s0
-; CHECK-NEXT:    vmovx.f16 s0, s19
 ; CHECK-NEXT:    vmov.16 q5[5], r2
-; CHECK-NEXT:    vmov r2, s19
+; CHECK-NEXT:    vmov r2, s17
 ; CHECK-NEXT:    vmov.16 q3[2], r2
-; CHECK-NEXT:    vmov r3, s7
+; CHECK-NEXT:    vmovx.f16 s0, s17
 ; CHECK-NEXT:    vmov.16 q3[3], r3
 ; CHECK-NEXT:    vmov r2, s0
-; CHECK-NEXT:    vmovx.f16 s0, s7
+; CHECK-NEXT:    vmovx.f16 s0, s25
 ; CHECK-NEXT:    vmov.16 q3[6], r2
 ; CHECK-NEXT:    vmov r2, s0
-; CHECK-NEXT:    vmovx.f16 s0, s27
+; CHECK-NEXT:    vmovx.f16 s0, s9
 ; CHECK-NEXT:    vmov.16 q3[7], r2
-; CHECK-NEXT:    vmov r2, s27
+; CHECK-NEXT:    vmov r2, s9
 ; CHECK-NEXT:    vmov.16 q7[0], r2
-; CHECK-NEXT:    vmov r3, s11
+; CHECK-NEXT:    vmov r3, s5
 ; CHECK-NEXT:    vmov.16 q7[1], r3
 ; CHECK-NEXT:    vmov r2, s0
-; CHECK-NEXT:    vmovx.f16 s0, s11
+; CHECK-NEXT:    vmovx.f16 s0, s5
 ; CHECK-NEXT:    vmov.16 q7[4], r2
 ; CHECK-NEXT:    vmov r2, s0
-; CHECK-NEXT:    vstrw.32 q3, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #32] @ 16-byte Reload
 ; CHECK-NEXT:    vmov.16 q7[5], r2
-; CHECK-NEXT:    vmov r2, s16
-; CHECK-NEXT:    vmovx.f16 s0, s16
-; CHECK-NEXT:    vmov.16 q3[2], r2
-; CHECK-NEXT:    vmov r3, s4
-; CHECK-NEXT:    vmovx.f16 s16, s18
-; CHECK-NEXT:    vmov.16 q3[3], r3
+; CHECK-NEXT:    vmov r3, s26
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmovx.f16 s0, s2
+; CHECK-NEXT:    vmov.16 q2[2], r2
+; CHECK-NEXT:    vmov q4, q1
+; CHECK-NEXT:    vldrw.u32 q1, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT:    vmov.16 q2[3], r3
 ; CHECK-NEXT:    vmov r2, s0
-; CHECK-NEXT:    vmovx.f16 s0, s4
-; CHECK-NEXT:    vmov.16 q3[6], r2
-; CHECK-NEXT:    vmov r2, s0
-; CHECK-NEXT:    vmovx.f16 s0, s24
-; CHECK-NEXT:    vmov.16 q3[7], r2
-; CHECK-NEXT:    vmov r2, s24
-; CHECK-NEXT:    vmov.16 q1[0], r2
-; CHECK-NEXT:    vmov r3, s8
-; CHECK-NEXT:    vmov.16 q1[1], r3
-; CHECK-NEXT:    vmov r2, s0
-; CHECK-NEXT:    vmovx.f16 s0, s8
-; CHECK-NEXT:    vmov.16 q1[4], r2
+; CHECK-NEXT:    vmovx.f16 s0, s26
+; CHECK-NEXT:    vmov.16 q2[6], r2
 ; CHECK-NEXT:    vmov r2, s0
-; CHECK-NEXT:    vmovx.f16 s24, s26
-; CHECK-NEXT:    vmov.16 q1[5], r2
-; CHECK-NEXT:    vmov r2, s26
-; CHECK-NEXT:    vmov r3, s10
+; CHECK-NEXT:    vmov.16 q2[7], r2
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    vmov r3, s18
 ; CHECK-NEXT:    vmov.16 q0[0], r2
+; CHECK-NEXT:    vmovx.f16 s4, s6
 ; CHECK-NEXT:    vmov.16 q0[1], r3
-; CHECK-NEXT:    vmov r2, s24
-; CHECK-NEXT:    vldrw.u32 q2, [sp, #64] @ 16-byte Reload
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vmovx.f16 s4, s18
+; CHECK-NEXT:    vldrw.u32 q4, [sp, #32] @ 16-byte Reload
 ; CHECK-NEXT:    vmov.16 q0[4], r2
-; CHECK-NEXT:    vmov r2, s18
-; CHECK-NEXT:    vmov.16 q6[2], r2
-; CHECK-NEXT:    vmov r3, s10
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vmov.16 q0[5], r2
+; CHECK-NEXT:    vmov r2, s19
+; CHECK-NEXT:    vmov r3, s27
+; CHECK-NEXT:    vmov.16 q1[2], r2
+; CHECK-NEXT:    vmovx.f16 s16, s19
+; CHECK-NEXT:    vmov.16 q1[3], r3
 ; CHECK-NEXT:    vmov r2, s16
-; CHECK-NEXT:    vmovx.f16 s16, s10
-; CHECK-NEXT:    vldrw.u32 q2, [sp] @ 16-byte Reload
-; CHECK-NEXT:    vmov.16 q6[3], r3
-; CHECK-NEXT:    vmov.16 q6[6], r2
+; CHECK-NEXT:    vmovx.f16 s16, s27
+; CHECK-NEXT:    vmov.16 q1[6], r2
 ; CHECK-NEXT:    vmov r2, s16
-; CHECK-NEXT:    vmovx.f16 s16, s10
-; CHECK-NEXT:    vmov.16 q6[7], r2
+; CHECK-NEXT:    vldrw.u32 q4, [sp, #64] @ 16-byte Reload
+; CHECK-NEXT:    vmov.16 q1[7], r2
+; CHECK-NEXT:    vmov.f32 s1, s9
+; CHECK-NEXT:    vldrw.u32 q6, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vmovx.f16 s16, s19
+; CHECK-NEXT:    vmov.f32 s3, s11
 ; CHECK-NEXT:    vmov r2, s16
-; CHECK-NEXT:    vmov.16 q0[5], r2
-; CHECK-NEXT:    vmov.f32 s5, s13
-; CHECK-NEXT:    vmov.f32 s1, s25
-; CHECK-NEXT:    vmov.f32 s3, s27
+; CHECK-NEXT:    vldrw.u32 q4, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s21, s25
 ; CHECK-NEXT:    vstrh.16 q0, [r1, #32]
-; CHECK-NEXT:    vldrw.u32 q0, [sp, #16] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s7, s15
-; CHECK-NEXT:    vmov.f32 s29, s1
-; CHECK-NEXT:    vmov.f32 s31, s3
-; CHECK-NEXT:    vldrw.u32 q0, [sp, #48] @ 16-byte Reload
-; CHECK-NEXT:    vstrh.16 q7, [r1, #48]
-; CHECK-NEXT:    vstrh.16 q1, [r1], #64
-; CHECK-NEXT:    vmov.f32 s21, s1
-; CHECK-NEXT:    vmov.f32 s23, s3
-; CHECK-NEXT:    vstrh.16 q5, [r1, #-48]
+; CHECK-NEXT:    vmov.16 q4[5], r2
+; CHECK-NEXT:    vmov.f32 s29, s13
+; CHECK-NEXT:    vmov q2, q4
+; CHECK-NEXT:    vmov.f32 s23, s27
+; CHECK-NEXT:    vmov.f32 s9, s5
+; CHECK-NEXT:    vmov.f32 s11, s7
+; CHECK-NEXT:    vstrh.16 q2, [r1, #48]
+; CHECK-NEXT:    vstrh.16 q5, [r1], #64
+; CHECK-NEXT:    vmov.f32 s31, s15
+; CHECK-NEXT:    vstrh.16 q7, [r1, #-48]
 ; CHECK-NEXT:    le lr, .LBB0_2
 ; CHECK-NEXT:  .LBB0_3: @ %while.end
-; CHECK-NEXT:    add sp, #88
+; CHECK-NEXT:    add sp, #80
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    pop {r4, pc}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
 entry:
   %tmp.0.extract.trunc = trunc i32 %scale.coerce to i16
   %l0 = bitcast i16 %tmp.0.extract.trunc to half

