[llvm] de3e65e - [ARM][LowOverheadLoops] Check loop liveouts

Sam Parker via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 19 04:59:22 PST 2020


Author: Sam Parker
Date: 2020-02-19T12:59:01Z
New Revision: de3e65e60c8a28c70df7fc23376a0683a004b30f

URL: https://github.com/llvm/llvm-project/commit/de3e65e60c8a28c70df7fc23376a0683a004b30f
DIFF: https://github.com/llvm/llvm-project/commit/de3e65e60c8a28c70df7fc23376a0683a004b30f.diff

LOG: [ARM][LowOverheadLoops] Check loop liveouts

Check that no Q-registers are live out of the loop unless the loop
instruction that defines them is predicated on the VCTP.

Differential Revision: https://reviews.llvm.org/D72713
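
For reference, a condensed sketch of the check described above. It is
equivalent to the ValidateLiveOuts routine added in the diff below and uses
only the helpers that appear there; the free-standing name
QRegLiveOutsArePredicated is hypothetical, and the usual ARM backend
headers/namespaces from ARMLowOverheadLoops.cpp are assumed.

  bool QRegLiveOutsArePredicated(MachineLoop &ML, ReachingDefAnalysis &RDA,
                                 const TargetRegisterInfo &TRI) {
    const TargetRegisterClass *QPRs = TRI.getRegClass(ARM::MQPRRegClassID);
    SmallVector<MachineBasicBlock *, 2> ExitBlocks;
    ML.getExitBlocks(ExitBlocks);
    for (MachineBasicBlock *Exit : ExitBlocks) {
      for (const auto &RegMask : Exit->liveins()) {
        // Scalars are unaffected by lane predication, so only Q-regs matter.
        if (!QPRs->contains(RegMask.PhysReg))
          continue;
        // Find the loop-body instruction that produces the live-out value.
        MachineInstr *Def =
            RDA.getLocalLiveOutMIDef(ML.getHeader(), RegMask.PhysReg);
        if (!Def)
          continue;
        // The defining instruction must carry a VPR predicate operand,
        // i.e. it must be predicated on the mask generated by the VCTP.
        int PIdx = llvm::findFirstVPTPredOperandIdx(*Def);
        if (PIdx == -1 || Def->getOperand(PIdx + 1).getReg() != ARM::VPR)
          return false;
      }
    }
    return true;
  }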

Added: 
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/extract-element.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/livereg-no-loop-def.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-vpsel-liveout.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/predicated-liveout.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-add-operand-liveout.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp16-reduce.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-liveout-lsr-shift.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-vctp-opcode-liveout.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-vctp-operand-liveout.mir

Modified: 
    llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-vector-reduce-mve-codegen.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/matrix.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/multiple-do-loops.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-tail-data-types.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-arith-codegen.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/wlstp.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp b/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
index fde9fa9f82b8..f9b7b39b8fb3 100644
--- a/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
+++ b/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
@@ -178,6 +178,7 @@ namespace {
     MachineLoop &ML;
     MachineLoopInfo &MLI;
     ReachingDefAnalysis &RDA;
+    const TargetRegisterInfo &TRI;
     MachineFunction *MF = nullptr;
     MachineInstr *InsertPt = nullptr;
     MachineInstr *Start = nullptr;
@@ -192,7 +193,8 @@ namespace {
     bool CannotTailPredicate = false;
 
     LowOverheadLoop(MachineLoop &ML, MachineLoopInfo &MLI,
-                    ReachingDefAnalysis &RDA) : ML(ML), MLI(MLI), RDA(RDA) {
+                    ReachingDefAnalysis &RDA, const TargetRegisterInfo &TRI)
+      : ML(ML), MLI(MLI), RDA(RDA), TRI(TRI) {
       MF = ML.getHeader()->getParent();
     }
 
@@ -212,8 +214,15 @@ namespace {
              !CannotTailPredicate && ML.getNumBlocks() == 1;
     }
 
+    // Check that the predication in the loop will be equivalent once we
+    // perform the conversion. Also ensure that we can provide the number
+    // of elements to the loop start instruction.
     bool ValidateTailPredicate(MachineInstr *StartInsertPt);
 
+    // Check that any values available outside of the loop will be the same
+    // after tail predication conversion.
+    bool ValidateLiveOuts() const;
+
     // Is it safe to define LR with DLS/WLS?
     // LR can be defined if it is the operand to start, because it's the same
     // value, or if it's going to be equivalent to the operand to Start.
@@ -373,6 +382,9 @@ bool LowOverheadLoop::ValidateTailPredicate(MachineInstr *StartInsertPt) {
     }
   }
 
+  if (!ValidateLiveOuts())
+    return false;
+
   // For tail predication, we need to provide the number of elements, instead
   // of the iteration count, to the loop start instruction. The number of
   // elements is provided to the vctp instruction, so we need to check that
@@ -496,6 +508,41 @@ bool LowOverheadLoop::ValidateTailPredicate(MachineInstr *StartInsertPt) {
   return true;
 }
 
+bool LowOverheadLoop::ValidateLiveOuts() const {
+  // Collect Q-regs that are live in the exit blocks. We don't collect scalars
+  // because they won't be affected by lane predication.
+  const TargetRegisterClass *QPRs = TRI.getRegClass(ARM::MQPRRegClassID);
+  SmallSet<Register, 2> LiveOuts;
+  SmallVector<MachineBasicBlock*, 2> ExitBlocks;
+  ML.getExitBlocks(ExitBlocks);
+  for (auto *MBB : ExitBlocks)
+    for (const MachineBasicBlock::RegisterMaskPair &RegMask : MBB->liveins())
+      if (QPRs->contains(RegMask.PhysReg))
+        LiveOuts.insert(RegMask.PhysReg);
+
+  // Collect the instructions in the loop body that define the live-out values.
+  SmallPtrSet<MachineInstr*, 2> LiveMIs;
+  MachineBasicBlock *MBB = ML.getHeader();
+  for (auto Reg : LiveOuts)
+    if (auto *MI = RDA.getLocalLiveOutMIDef(MBB, Reg))
+      LiveMIs.insert(MI);
+
+  LLVM_DEBUG(dbgs() << "ARM Loops: Found loop live-outs:\n";
+             for (auto *MI : LiveMIs)
+               dbgs() << " - " << *MI);
+  // We've already validated that any VPT predication within the loop will be
+  // equivalent when we perform the predication transformation; so we know that
+  // any VPT predicated instruction is predicated upon VCTP. Any live-out
+  // instruction needs to be predicated, so check this here.
+  for (auto *MI : LiveMIs) {
+    int PIdx = llvm::findFirstVPTPredOperandIdx(*MI);
+    if (PIdx == -1 || MI->getOperand(PIdx+1).getReg() != ARM::VPR)
+      return false;
+  }
+
+  return true;
+}
+
 void LowOverheadLoop::CheckLegality(ARMBasicBlockUtils *BBUtils) {
   if (Revert)
     return;
@@ -619,7 +666,8 @@ bool LowOverheadLoop::ValidateMVEInst(MachineInstr* MI) {
     return false;
   }
 
-  // Ensure that all memory operations are predicated.
+  // If the instruction is already explicitly predicated, then the conversion
+  // will be fine, but ensure that all memory operations are predicated.
   return !IsUse && MI->mayLoadOrStore() ? false : true;
 }
 
@@ -682,7 +730,7 @@ bool ARMLowOverheadLoops::ProcessLoop(MachineLoop *ML) {
     return nullptr;
   };
 
-  LowOverheadLoop LoLoop(*ML, *MLI, *RDA);
+  LowOverheadLoop LoLoop(*ML, *MLI, *RDA, *TRI);
   // Search the preheader for the start intrinsic.
   // FIXME: I don't see why we shouldn't be supporting multiple predecessors
   // with potentially multiple set.loop.iterations, so we need to enable this.

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-vector-reduce-mve-codegen.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-vector-reduce-mve-codegen.ll
index f31217217d3a..ca6dfe186e05 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-vector-reduce-mve-codegen.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-vector-reduce-mve-codegen.ll
@@ -213,22 +213,27 @@ define dso_local i32 @and_mul_reduce_add(i32* noalias nocapture readonly %a, i32
 ; CHECK-NEXT:    vmov.i32 q1, #0x0
 ; CHECK-NEXT:    bic r4, r4, #3
 ; CHECK-NEXT:    subs r5, r4, #4
+; CHECK-NEXT:    movs r4, #1
+; CHECK-NEXT:    add.w lr, r4, r5, lsr #2
 ; CHECK-NEXT:    lsrs r4, r5, #2
 ; CHECK-NEXT:    sub.w r4, r12, r4, lsl #2
-; CHECK-NEXT:    dlstp.32 lr, r12
+; CHECK-NEXT:    dls lr, lr
 ; CHECK-NEXT:  .LBB2_2: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vctp.32 r12
 ; CHECK-NEXT:    vmov q0, q1
-; CHECK-NEXT:    vldrw.u32 q1, [r1], #16
-; CHECK-NEXT:    vldrw.u32 q2, [r0], #16
-; CHECK-NEXT:    vsub.i32 q1, q2, q1
-; CHECK-NEXT:    vcmp.i32 eq, q1, zr
 ; CHECK-NEXT:    vpstt
+; CHECK-NEXT:    vldrwt.u32 q1, [r1], #16
+; CHECK-NEXT:    vldrwt.u32 q2, [r0], #16
+; CHECK-NEXT:    sub.w r12, r12, #4
+; CHECK-NEXT:    vsub.i32 q1, q2, q1
+; CHECK-NEXT:    vpsttt
+; CHECK-NEXT:    vcmpt.i32 eq, q1, zr
 ; CHECK-NEXT:    vldrwt.u32 q1, [r3], #16
 ; CHECK-NEXT:    vldrwt.u32 q2, [r2], #16
 ; CHECK-NEXT:    vmul.i32 q1, q2, q1
 ; CHECK-NEXT:    vadd.i32 q1, q1, q0
-; CHECK-NEXT:    letp lr, .LBB2_2
+; CHECK-NEXT:    le lr, .LBB2_2
 ; CHECK-NEXT:  @ %bb.3: @ %middle.block
 ; CHECK-NEXT:    vctp.32 r4
 ; CHECK-NEXT:    vpsel q0, q1, q0

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/extract-element.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/extract-element.mir
new file mode 100644
index 000000000000..963207325433
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/extract-element.mir
@@ -0,0 +1,186 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - --verify-machineinstrs | FileCheck %s
+
+# Test that the scalar register that aliases a Q reg prevents the tail
+# predication.
+
+--- |
+  define dso_local i32 @no_vpsel_liveout(i16* nocapture readonly %a, i16* nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
+  entry:
+    %cmp9 = icmp eq i32 %N, 0
+    %tmp = add i32 %N, 3
+    %tmp1 = lshr i32 %tmp, 2
+    %tmp2 = shl nuw i32 %tmp1, 2
+    %tmp3 = add i32 %tmp2, -4
+    %tmp4 = lshr i32 %tmp3, 2
+    %tmp5 = add nuw nsw i32 %tmp4, 1
+    br i1 %cmp9, label %for.cond.cleanup, label %vector.ph
+
+  vector.ph:                                        ; preds = %entry
+    call void @llvm.set.loop.iterations.i32(i32 %tmp5)
+    br label %vector.body
+
+  vector.body:                                      ; preds = %vector.body, %vector.ph
+    %lsr.iv1 = phi i32 [ %lsr.iv.next, %vector.body ], [ %tmp5, %vector.ph ]
+    %lsr.iv18 = phi i16* [ %scevgep19, %vector.body ], [ %b, %vector.ph ]
+    %lsr.iv = phi i16* [ %scevgep, %vector.body ], [ %a, %vector.ph ]
+    %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %tmp13, %vector.body ]
+    %tmp7 = phi i32 [ %N, %vector.ph ], [ %tmp9, %vector.body ]
+    %lsr.iv17 = bitcast i16* %lsr.iv to <4 x i16>*
+    %lsr.iv1820 = bitcast i16* %lsr.iv18 to <4 x i16>*
+    %tmp8 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %tmp7)
+    %tmp9 = sub i32 %tmp7, 4
+    %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv17, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
+    %tmp10 = sext <4 x i16> %wide.masked.load to <4 x i32>
+    %wide.masked.load14 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv1820, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
+    %tmp11 = sext <4 x i16> %wide.masked.load14 to <4 x i32>
+    %tmp12 = mul nsw <4 x i32> %tmp11, %tmp10
+    %tmp13 = add <4 x i32> %tmp12, %vec.phi
+    %scevgep = getelementptr i16, i16* %lsr.iv, i32 4
+    %scevgep19 = getelementptr i16, i16* %lsr.iv18, i32 4
+    %tmp14 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv1, i32 1)
+    %tmp15 = icmp ne i32 %tmp14, 0
+    %lsr.iv.next = add nsw i32 %lsr.iv1, -1
+    br i1 %tmp15, label %vector.body, label %middle.block
+
+  middle.block:                                     ; preds = %vector.body
+    %tmp16 = extractelement <4 x i32> %tmp13, i32 3
+    br label %for.cond.cleanup
+
+  for.cond.cleanup:                                 ; preds = %middle.block, %entry
+    %res.0.lcssa = phi i32 [ 0, %entry ], [ %tmp16, %middle.block ]
+    ret i32 %res.0.lcssa
+  }
+  declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)
+  declare void @llvm.set.loop.iterations.i32(i32)
+  declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
+  declare <4 x i1> @llvm.arm.mve.vctp32(i32)
+
+...
+---
+name:            no_vpsel_liveout
+alignment:       2
+exposesReturnsTwice: false
+legalized:       false
+regBankSelected: false
+selected:        false
+failedISel:      false
+tracksRegLiveness: true
+hasWinCFI:       false
+registers:       []
+liveins:
+  - { reg: '$r0', virtual-reg: '' }
+  - { reg: '$r1', virtual-reg: '' }
+  - { reg: '$r2', virtual-reg: '' }
+frameInfo:
+  isFrameAddressTaken: false
+  isReturnAddressTaken: false
+  hasStackMap:     false
+  hasPatchPoint:   false
+  stackSize:       8
+  offsetAdjustment: 0
+  maxAlignment:    4
+  adjustsStack:    false
+  hasCalls:        false
+  stackProtector:  ''
+  maxCallFrameSize: 0
+  cvBytesOfCalleeSavedRegisters: 0
+  hasOpaqueSPAdjustment: false
+  hasVAStart:      false
+  hasMustTailInVarArgFunc: false
+  localFrameSize:  0
+  savePoint:       ''
+  restorePoint:    ''
+fixedStack:      []
+stack:
+  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+callSites:       []
+constants:       []
+machineFunctionInfo: {}
+body:             |
+  ; CHECK-LABEL: name: no_vpsel_liveout
+  ; CHECK: bb.0.entry:
+  ; CHECK:   successors: %bb.1(0x80000000)
+  ; CHECK:   liveins: $lr, $r0, $r1, $r2, $r7
+  ; CHECK:   tCMPi8 renamable $r2, 0, 14, $noreg, implicit-def $cpsr
+  ; CHECK:   t2IT 0, 4, implicit-def $itstate
+  ; CHECK:   renamable $r0 = tMOVi8 $noreg, 0, 0, $cpsr, implicit killed $r0, implicit $itstate
+  ; CHECK:   tBX_RET 0, killed $cpsr, implicit $r0, implicit killed $itstate
+  ; CHECK:   frame-setup tPUSH 14, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+  ; CHECK:   frame-setup CFI_INSTRUCTION def_cfa_offset 8
+  ; CHECK:   frame-setup CFI_INSTRUCTION offset $lr, -4
+  ; CHECK:   frame-setup CFI_INSTRUCTION offset $r7, -8
+  ; CHECK:   renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14, $noreg
+  ; CHECK:   renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q0
+  ; CHECK:   renamable $r3 = t2BICri killed renamable $r3, 3, 14, $noreg, $noreg
+  ; CHECK:   renamable $r12 = t2SUBri killed renamable $r3, 4, 14, $noreg, $noreg
+  ; CHECK:   renamable $r3, dead $cpsr = tMOVi8 1, 14, $noreg
+  ; CHECK:   renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14, $noreg, $noreg
+  ; CHECK:   dead $lr = t2DLS renamable $r12
+  ; CHECK:   $r3 = tMOVr killed $r12, 14, $noreg
+  ; CHECK: bb.1.vector.body:
+  ; CHECK:   successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+  ; CHECK:   liveins: $q0, $r0, $r1, $r2, $r3
+  ; CHECK:   renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg
+  ; CHECK:   MVE_VPST 4, implicit $vpr
+  ; CHECK:   renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr :: (load 8 from %ir.lsr.iv17, align 2)
+  ; CHECK:   renamable $r1, renamable $q2 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, killed renamable $vpr :: (load 8 from %ir.lsr.iv1820, align 2)
+  ; CHECK:   $lr = tMOVr $r3, 14, $noreg
+  ; CHECK:   renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, undef renamable $q1
+  ; CHECK:   renamable $r3, dead $cpsr = nsw tSUBi8 killed $r3, 1, 14, $noreg
+  ; CHECK:   renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
+  ; CHECK:   dead $lr = t2LEUpdate killed renamable $lr, %bb.1
+  ; CHECK: bb.2.middle.block:
+  ; CHECK:   liveins: $q0
+  ; CHECK:   $r0 = VMOVRS killed $s3, 14, $noreg, implicit killed $q0
+  ; CHECK:   tPOP_RET 14, $noreg, def $r7, def $pc, implicit killed $r0
+  bb.0.entry:
+    successors: %bb.1(0x80000000)
+    liveins: $r0, $r1, $r2, $lr, $r7
+
+    tCMPi8 renamable $r2, 0, 14, $noreg, implicit-def $cpsr
+    t2IT 0, 4, implicit-def $itstate
+    renamable $r0 = tMOVi8 $noreg, 0, 0, $cpsr, implicit killed $r0, implicit $itstate
+    tBX_RET 0, killed $cpsr, implicit $r0, implicit killed $itstate
+    frame-setup tPUSH 14, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+    frame-setup CFI_INSTRUCTION def_cfa_offset 8
+    frame-setup CFI_INSTRUCTION offset $lr, -4
+    frame-setup CFI_INSTRUCTION offset $r7, -8
+    renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14, $noreg
+    renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q0
+    renamable $r3 = t2BICri killed renamable $r3, 3, 14, $noreg, $noreg
+    renamable $r12 = t2SUBri killed renamable $r3, 4, 14, $noreg, $noreg
+    renamable $r3, dead $cpsr = tMOVi8 1, 14, $noreg
+    renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14, $noreg, $noreg
+    t2DoLoopStart renamable $r12
+    $r3 = tMOVr killed $r12, 14, $noreg
+
+  bb.1.vector.body:
+    successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+    liveins: $q0, $r0, $r1, $r2, $r3
+
+    renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg
+    MVE_VPST 4, implicit $vpr
+    renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr :: (load 8 from %ir.lsr.iv17, align 2)
+    renamable $r1, renamable $q2 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, killed renamable $vpr :: (load 8 from %ir.lsr.iv1820, align 2)
+    $lr = tMOVr $r3, 14, $noreg
+    renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, undef renamable $q1
+    renamable $r3, dead $cpsr = nsw tSUBi8 killed $r3, 1, 14, $noreg
+    renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14, $noreg
+    renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
+    renamable $lr = t2LoopDec killed renamable $lr, 1
+    t2LoopEnd killed renamable $lr, %bb.1, implicit-def dead $cpsr
+    tB %bb.2, 14, $noreg
+
+  bb.2.middle.block:
+    liveins: $q0
+
+    $r0 = VMOVRS killed $s3, 14, $noreg, implicit $q0
+    tPOP_RET 14, $noreg, def $r7, def $pc, implicit killed $r0
+
+...

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/livereg-no-loop-def.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/livereg-no-loop-def.mir
new file mode 100644
index 000000000000..395e24f8b558
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/livereg-no-loop-def.mir
@@ -0,0 +1,168 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - --verify-machineinstrs | FileCheck %s
+
+--- |
+  define dso_local <4 x i32> @exit_liveout(i16* nocapture readonly %a, i16* nocapture readonly %b, i32* %c, i32 %N, <4 x i32> %pass) {
+  entry:
+    %cmp9 = icmp eq i32 %N, 0
+    %tmp = add i32 %N, 3
+    %tmp1 = lshr i32 %tmp, 2
+    %tmp2 = shl nuw i32 %tmp1, 2
+    %tmp3 = add i32 %tmp2, -4
+    %tmp4 = lshr i32 %tmp3, 2
+    %tmp5 = add nuw nsw i32 %tmp4, 1
+    br i1 %cmp9, label %exit, label %vector.ph
+
+  vector.ph:                                        ; preds = %entry
+    call void @llvm.set.loop.iterations.i32(i32 %tmp5)
+    br label %vector.body
+
+  vector.body:                                      ; preds = %vector.body, %vector.ph
+    %lsr.iv1 = phi i32 [ %lsr.iv.next, %vector.body ], [ %tmp5, %vector.ph ]
+    %lsr.iv18 = phi i16* [ %scevgep19, %vector.body ], [ %b, %vector.ph ]
+    %lsr.iv20 = phi i32* [ %scevgep20, %vector.body ], [ %c, %vector.ph ]
+    %lsr.iv = phi i16* [ %scevgep, %vector.body ], [ %a, %vector.ph ]
+    %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %tmp13, %vector.body ]
+    %tmp7 = phi i32 [ %N, %vector.ph ], [ %tmp9, %vector.body ]
+    %lsr.iv17 = bitcast i16* %lsr.iv to <4 x i16>*
+    %lsr.store = bitcast i32* %lsr.iv20 to <4 x i32>*
+    %lsr.iv1820 = bitcast i16* %lsr.iv18 to <4 x i16>*
+    %tmp8 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %tmp7)
+    %tmp9 = sub i32 %tmp7, 4
+    %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv17, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
+    %tmp10 = sext <4 x i16> %wide.masked.load to <4 x i32>
+    %wide.masked.load14 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv1820, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
+    %tmp11 = sext <4 x i16> %wide.masked.load14 to <4 x i32>
+    %tmp12 = mul nsw <4 x i32> %tmp11, %tmp10
+    %tmp13 = add <4 x i32> %tmp12, %vec.phi
+    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %tmp13, <4 x i32>* %lsr.store, i32 4, <4 x i1> %tmp8)
+    %scevgep = getelementptr i16, i16* %lsr.iv, i32 4
+    %scevgep19 = getelementptr i16, i16* %lsr.iv18, i32 4
+    %scevgep20 = getelementptr i32, i32* %lsr.iv20, i32 4
+    %tmp14 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv1, i32 1)
+    %tmp15 = icmp ne i32 %tmp14, 0
+    %lsr.iv.next = add nsw i32 %lsr.iv1, -1
+    br i1 %tmp15, label %vector.body, label %exit
+
+  exit:                                             ; preds = %vector.body, %entry
+    ret <4 x i32> %pass
+  }
+
+  declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)
+  declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
+  declare void @llvm.set.loop.iterations.i32(i32)
+  declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
+  declare <4 x i1> @llvm.arm.mve.vctp32(i32)
+
+...
+---
+name:            exit_liveout
+alignment:       2
+tracksRegLiveness: true
+registers:       []
+liveins:
+  - { reg: '$r0', virtual-reg: '' }
+  - { reg: '$r1', virtual-reg: '' }
+  - { reg: '$r2', virtual-reg: '' }
+  - { reg: '$r3', virtual-reg: '' }
+frameInfo:
+  stackSize:       8
+  offsetAdjustment: 0
+  maxAlignment:    8
+fixedStack:
+  - { id: 0, type: default, offset: 0, size: 16, alignment: 8, stack-id: default,
+      isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+stack:
+  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+callSites:       []
+constants:       []
+machineFunctionInfo: {}
+body:             |
+  ; CHECK-LABEL: name: exit_liveout
+  ; CHECK: bb.0.entry:
+  ; CHECK:   successors: %bb.3(0x30000000), %bb.1(0x50000000)
+  ; CHECK:   liveins: $lr, $r0, $r1, $r2, $r3, $r4
+  ; CHECK:   frame-setup tPUSH 14, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+  ; CHECK:   frame-setup CFI_INSTRUCTION def_cfa_offset 8
+  ; CHECK:   frame-setup CFI_INSTRUCTION offset $lr, -4
+  ; CHECK:   frame-setup CFI_INSTRUCTION offset $r4, -8
+  ; CHECK:   renamable $r12 = t2ADDri $sp, 8, 14, $noreg, $noreg
+  ; CHECK:   renamable $q0 = MVE_VLDRWU32 killed renamable $r12, 0, 0, $noreg :: (load 16 from %fixed-stack.0, align 8)
+  ; CHECK:   tCBZ $r3, %bb.3
+  ; CHECK: bb.1.vector.ph:
+  ; CHECK:   successors: %bb.2(0x80000000)
+  ; CHECK:   liveins: $q0, $r0, $r1, $r2, $r3
+  ; CHECK:   renamable $q1 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q1
+  ; CHECK:   $lr = MVE_DLSTP_32 killed renamable $r3
+  ; CHECK: bb.2.vector.body:
+  ; CHECK:   successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+  ; CHECK:   liveins: $lr, $q0, $q1, $r0, $r1, $r2
+  ; CHECK:   renamable $r0, renamable $q2 = MVE_VLDRHS32_post killed renamable $r0, 8, 0, $noreg :: (load 8 from %ir.lsr.iv17, align 2)
+  ; CHECK:   renamable $r1, renamable $q3 = MVE_VLDRHS32_post killed renamable $r1, 8, 0, $noreg :: (load 8 from %ir.lsr.iv1820, align 2)
+  ; CHECK:   renamable $q2 = nsw MVE_VMULi32 killed renamable $q3, killed renamable $q2, 0, $noreg, undef renamable $q2
+  ; CHECK:   renamable $q1 = MVE_VADDi32 killed renamable $q2, killed renamable $q1, 0, $noreg, undef renamable $q1
+  ; CHECK:   renamable $r2 = MVE_VSTRWU32_post renamable $q1, killed renamable $r2, 16, 0, killed $noreg :: (store 16 into %ir.lsr.store, align 4)
+  ; CHECK:   $lr = MVE_LETP killed renamable $lr, %bb.2
+  ; CHECK: bb.3.exit:
+  ; CHECK:   liveins: $q0
+  ; CHECK:   renamable $r0, renamable $r1 = VMOVRRD renamable $d0, 14, $noreg
+  ; CHECK:   renamable $r2, renamable $r3 = VMOVRRD killed renamable $d1, 14, $noreg, implicit killed $q0
+  ; CHECK:   tPOP_RET 14, $noreg, def $r4, def $pc, implicit killed $r0, implicit killed $r1, implicit killed $r2, implicit killed $r3
+  bb.0.entry:
+    successors: %bb.3(0x30000000), %bb.1(0x50000000)
+    liveins: $r0, $r1, $r2, $r3, $r4, $lr
+
+    frame-setup tPUSH 14, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+    frame-setup CFI_INSTRUCTION def_cfa_offset 8
+    frame-setup CFI_INSTRUCTION offset $lr, -4
+    frame-setup CFI_INSTRUCTION offset $r4, -8
+    renamable $r12 = t2ADDri $sp, 8, 14, $noreg, $noreg
+    renamable $q0 = MVE_VLDRWU32 killed renamable $r12, 0, 0, $noreg :: (load 16 from %fixed-stack.0, align 8)
+    tCBZ $r3, %bb.3
+
+  bb.1.vector.ph:
+    successors: %bb.2(0x80000000)
+    liveins: $q0, $r0, $r1, $r2, $r3
+
+    renamable $r12 = t2ADDri renamable $r3, 3, 14, $noreg, $noreg
+    renamable $lr = t2MOVi 1, 14, $noreg, $noreg
+    renamable $r12 = t2BICri killed renamable $r12, 3, 14, $noreg, $noreg
+    renamable $q1 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q1
+    renamable $r12 = t2SUBri killed renamable $r12, 4, 14, $noreg, $noreg
+    renamable $r4 = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14, $noreg, $noreg
+    t2DoLoopStart renamable $r4
+    $r12 = tMOVr killed $r4, 14, $noreg
+
+  bb.2.vector.body:
+    successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+    liveins: $q0, $q1, $r0, $r1, $r2, $r3, $r12
+
+    renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg
+    MVE_VPST 4, implicit $vpr
+    renamable $r0, renamable $q2 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr :: (load 8 from %ir.lsr.iv17, align 2)
+    renamable $r1, renamable $q3 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, renamable $vpr :: (load 8 from %ir.lsr.iv1820, align 2)
+    $lr = tMOVr $r12, 14, $noreg
+    renamable $q2 = nsw MVE_VMULi32 killed renamable $q3, killed renamable $q2, 0, $noreg, undef renamable $q2
+    renamable $r12 = nsw t2SUBri killed $r12, 1, 14, $noreg, $noreg
+    renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14, $noreg
+    renamable $q1 = MVE_VADDi32 killed renamable $q2, killed renamable $q1, 0, $noreg, undef renamable $q1
+    renamable $lr = t2LoopDec killed renamable $lr, 1
+    MVE_VPST 8, implicit $vpr
+    renamable $r2 = MVE_VSTRWU32_post renamable $q1, killed renamable $r2, 16, 1, killed renamable $vpr :: (store 16 into %ir.lsr.store, align 4)
+    t2LoopEnd killed renamable $lr, %bb.2, implicit-def dead $cpsr
+    tB %bb.3, 14, $noreg
+
+  bb.3.exit:
+    liveins: $q0
+
+    renamable $r0, renamable $r1 = VMOVRRD renamable $d0, 14, $noreg
+    renamable $r2, renamable $r3 = VMOVRRD killed renamable $d1, 14, $noreg, implicit $q0
+    tPOP_RET 14, $noreg, def $r4, def $pc, implicit killed $r0, implicit killed $r1, implicit killed $r2, implicit killed $r3
+
+...

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/matrix.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/matrix.mir
index 69e43c730625..a26c338a08a3 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/matrix.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/matrix.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - | FileCheck %s
+# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - --verify-machineinstrs | FileCheck %s
 
 # A decent sized test to handle a matrix, with scalar and vector low-overhead loops.
 
@@ -270,9 +270,7 @@ body:             |
   ; CHECK:   renamable $r0 = t2BICri killed renamable $r0, 3, 14, $noreg, $noreg
   ; CHECK:   renamable $r3 = t2LSLri $r10, 1, 14, $noreg, $noreg
   ; CHECK:   renamable $r1, dead $cpsr = tSUBi3 killed renamable $r0, 4, 14, $noreg
-  ; CHECK:   renamable $r0, dead $cpsr = tMOVi8 1, 14, $noreg
   ; CHECK:   renamable $q0 = MVE_VDUP32 renamable $r7, 0, $noreg, undef renamable $q0
-  ; CHECK:   renamable $r0 = nuw nsw t2ADDrs killed renamable $r0, renamable $r1, 19, 14, $noreg, $noreg
   ; CHECK:   renamable $r1, dead $cpsr = tLSRri killed renamable $r1, 2, 14, $noreg
   ; CHECK:   renamable $r9 = t2SUBrs $r10, killed renamable $r1, 18, 14, $noreg, $noreg
   ; CHECK: bb.5.for.cond4.preheader.us:
@@ -284,16 +282,19 @@ body:             |
   ; CHECK:   renamable $q1 = MVE_VMOV_to_lane_32 killed renamable $q1, killed renamable $r1, 0, 14, $noreg
   ; CHECK:   $r6 = tMOVr $r5, 14, $noreg
   ; CHECK:   $r1 = tMOVr $r8, 14, $noreg
-  ; CHECK:   $lr = MVE_DLSTP_32 killed renamable $r2
+  ; CHECK:   $lr = t2DLS renamable $r0
   ; CHECK: bb.6.vector.body:
   ; CHECK:   successors: %bb.6(0x7c000000), %bb.7(0x04000000)
-  ; CHECK:   liveins: $lr, $q0, $q1, $r0, $r1, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10, $r12
+  ; CHECK:   liveins: $lr, $q0, $q1, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10, $r12
+  ; CHECK:   renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg
   ; CHECK:   $q2 = MVE_VORR killed $q1, killed $q1, 0, $noreg, undef $q2
-  ; CHECK:   renamable $r6, renamable $q1 = MVE_VLDRHS32_post killed renamable $r6, 8, 0, $noreg :: (load 8 from %ir.lsr.iv1012, align 2)
-  ; CHECK:   renamable $r1, renamable $q3 = MVE_VLDRHS32_post killed renamable $r1, 8, 0, killed $noreg :: (load 8 from %ir.lsr.iv46, align 2)
+  ; CHECK:   MVE_VPST 4, implicit $vpr
+  ; CHECK:   renamable $r6, renamable $q1 = MVE_VLDRHS32_post killed renamable $r6, 8, 1, renamable $vpr :: (load 8 from %ir.lsr.iv1012, align 2)
+  ; CHECK:   renamable $r1, renamable $q3 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, killed renamable $vpr :: (load 8 from %ir.lsr.iv46, align 2)
+  ; CHECK:   renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14, $noreg
   ; CHECK:   renamable $q1 = nsw MVE_VMULi32 killed renamable $q3, killed renamable $q1, 0, $noreg, undef renamable $q1
   ; CHECK:   renamable $q1 = MVE_VADDi32 killed renamable $q1, renamable $q2, 0, $noreg, undef renamable $q1
-  ; CHECK:   $lr = MVE_LETP killed renamable $lr, %bb.6
+  ; CHECK:   $lr = t2LEUpdate killed renamable $lr, %bb.6
   ; CHECK: bb.7.middle.block:
   ; CHECK:   successors: %bb.8(0x04000000), %bb.5(0x7c000000)
   ; CHECK:   liveins: $q0, $q1, $q2, $r0, $r3, $r4, $r5, $r7, $r8, $r9, $r10, $r12

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/multiple-do-loops.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/multiple-do-loops.mir
index dc3ed4f36a18..d181fe849d4a 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/multiple-do-loops.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/multiple-do-loops.mir
@@ -563,7 +563,6 @@ body:             |
   ; CHECK:   early-clobber $sp = frame-setup t2STR_PRE killed $r8, $sp, -4, 14, $noreg
   ; CHECK:   frame-setup CFI_INSTRUCTION offset $r8, -24
   ; CHECK:   renamable $r6, dead $cpsr = tMOVi8 0, 14, $noreg
-  ; CHECK:   dead renamable $r12 = t2MOVi 1, 14, $noreg, $noreg
   ; CHECK:   t2CMPrs killed renamable $r6, renamable $r3, 11, 14, $noreg, implicit-def $cpsr
   ; CHECK:   tBcc %bb.3, 0, killed $cpsr
   ; CHECK: bb.1.vector.ph:
@@ -801,7 +800,6 @@ body:             |
   ; CHECK:   successors: %bb.6(0x30000000), %bb.4(0x50000000)
   ; CHECK:   liveins: $r0, $r1, $r2, $r3
   ; CHECK:   renamable $r6, dead $cpsr = tMOVi8 0, 14, $noreg
-  ; CHECK:   dead renamable $r8 = t2MOVi 1, 14, $noreg, $noreg
   ; CHECK:   t2CMPrs killed renamable $r6, renamable $r3, 11, 14, $noreg, implicit-def $cpsr
   ; CHECK:   tBcc %bb.6, 0, killed $cpsr
   ; CHECK: bb.4.vector.ph66:

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-tail-data-types.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-tail-data-types.ll
index 0751d4980f39..0224c2f9ac08 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-tail-data-types.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-tail-data-types.ll
@@ -13,15 +13,20 @@ define arm_aapcs_vfpcc i32 @test_acc_scalar_char(i8 zeroext %a, i8* nocapture re
 ; CHECK-NEXT:    vmov.i32 q0, #0x0
 ; CHECK-NEXT:    bic r3, r3, #3
 ; CHECK-NEXT:    sub.w r12, r3, #4
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    add.w lr, r3, r12, lsr #2
 ; CHECK-NEXT:    lsr.w r3, r12, #2
 ; CHECK-NEXT:    sub.w r3, r2, r3, lsl #2
-; CHECK-NEXT:    dlstp.32 lr, r2
+; CHECK-NEXT:    dls lr, lr
 ; CHECK-NEXT:  .LBB0_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vctp.32 r2
+; CHECK-NEXT:    subs r2, #4
 ; CHECK-NEXT:    vmov q1, q0
-; CHECK-NEXT:    vldrb.u32 q2, [r1], #4
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vldrbt.u32 q2, [r1], #4
 ; CHECK-NEXT:    vmla.u32 q0, q2, r0
-; CHECK-NEXT:    letp lr, .LBB0_1
+; CHECK-NEXT:    le lr, .LBB0_1
 ; CHECK-NEXT:  @ %bb.2: @ %middle.block
 ; CHECK-NEXT:    vctp.32 r3
 ; CHECK-NEXT:    vpsel q0, q0, q1
@@ -81,15 +86,20 @@ define arm_aapcs_vfpcc i32 @test_acc_scalar_short(i16 signext %a, i16* nocapture
 ; CHECK-NEXT:    vmov.i32 q0, #0x0
 ; CHECK-NEXT:    bic r3, r3, #3
 ; CHECK-NEXT:    sub.w r12, r3, #4
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    add.w lr, r3, r12, lsr #2
 ; CHECK-NEXT:    lsr.w r3, r12, #2
 ; CHECK-NEXT:    sub.w r3, r2, r3, lsl #2
-; CHECK-NEXT:    dlstp.32 lr, r2
+; CHECK-NEXT:    dls lr, lr
 ; CHECK-NEXT:  .LBB1_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vctp.32 r2
+; CHECK-NEXT:    subs r2, #4
 ; CHECK-NEXT:    vmov q1, q0
-; CHECK-NEXT:    vldrh.s32 q2, [r1], #8
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vldrht.s32 q2, [r1], #8
 ; CHECK-NEXT:    vmla.u32 q0, q2, r0
-; CHECK-NEXT:    letp lr, .LBB1_1
+; CHECK-NEXT:    le lr, .LBB1_1
 ; CHECK-NEXT:  @ %bb.2: @ %middle.block
 ; CHECK-NEXT:    vctp.32 r3
 ; CHECK-NEXT:    vpsel q0, q0, q1
@@ -149,15 +159,20 @@ define arm_aapcs_vfpcc i32 @test_acc_scalar_uchar(i8 zeroext %a, i8* nocapture r
 ; CHECK-NEXT:    vmov.i32 q0, #0x0
 ; CHECK-NEXT:    bic r3, r3, #3
 ; CHECK-NEXT:    sub.w r12, r3, #4
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    add.w lr, r3, r12, lsr #2
 ; CHECK-NEXT:    lsr.w r3, r12, #2
 ; CHECK-NEXT:    sub.w r3, r2, r3, lsl #2
-; CHECK-NEXT:    dlstp.32 lr, r2
+; CHECK-NEXT:    dls lr, lr
 ; CHECK-NEXT:  .LBB2_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vctp.32 r2
+; CHECK-NEXT:    subs r2, #4
 ; CHECK-NEXT:    vmov q1, q0
-; CHECK-NEXT:    vldrb.u32 q2, [r1], #4
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vldrbt.u32 q2, [r1], #4
 ; CHECK-NEXT:    vmla.u32 q0, q2, r0
-; CHECK-NEXT:    letp lr, .LBB2_1
+; CHECK-NEXT:    le lr, .LBB2_1
 ; CHECK-NEXT:  @ %bb.2: @ %middle.block
 ; CHECK-NEXT:    vctp.32 r3
 ; CHECK-NEXT:    vpsel q0, q0, q1
@@ -217,15 +232,20 @@ define arm_aapcs_vfpcc i32 @test_acc_scalar_ushort(i16 signext %a, i16* nocaptur
 ; CHECK-NEXT:    vmov.i32 q0, #0x0
 ; CHECK-NEXT:    bic r3, r3, #3
 ; CHECK-NEXT:    sub.w r12, r3, #4
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    add.w lr, r3, r12, lsr #2
 ; CHECK-NEXT:    lsr.w r3, r12, #2
 ; CHECK-NEXT:    sub.w r3, r2, r3, lsl #2
-; CHECK-NEXT:    dlstp.32 lr, r2
+; CHECK-NEXT:    dls lr, lr
 ; CHECK-NEXT:  .LBB3_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vctp.32 r2
+; CHECK-NEXT:    subs r2, #4
 ; CHECK-NEXT:    vmov q1, q0
-; CHECK-NEXT:    vldrh.u32 q2, [r1], #8
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vldrht.u32 q2, [r1], #8
 ; CHECK-NEXT:    vmla.u32 q0, q2, r0
-; CHECK-NEXT:    letp lr, .LBB3_1
+; CHECK-NEXT:    le lr, .LBB3_1
 ; CHECK-NEXT:  @ %bb.2: @ %middle.block
 ; CHECK-NEXT:    vctp.32 r3
 ; CHECK-NEXT:    vpsel q0, q0, q1
@@ -285,15 +305,20 @@ define arm_aapcs_vfpcc i32 @test_acc_scalar_int(i32 %a, i32* nocapture readonly
 ; CHECK-NEXT:    vmov.i32 q0, #0x0
 ; CHECK-NEXT:    bic r3, r3, #3
 ; CHECK-NEXT:    sub.w r12, r3, #4
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    add.w lr, r3, r12, lsr #2
 ; CHECK-NEXT:    lsr.w r3, r12, #2
 ; CHECK-NEXT:    sub.w r3, r2, r3, lsl #2
-; CHECK-NEXT:    dlstp.32 lr, r2
+; CHECK-NEXT:    dls lr, lr
 ; CHECK-NEXT:  .LBB4_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vctp.32 r2
+; CHECK-NEXT:    subs r2, #4
 ; CHECK-NEXT:    vmov q1, q0
-; CHECK-NEXT:    vldrw.u32 q2, [r1], #16
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vldrwt.u32 q2, [r1], #16
 ; CHECK-NEXT:    vmla.u32 q0, q2, r0
-; CHECK-NEXT:    letp lr, .LBB4_1
+; CHECK-NEXT:    le lr, .LBB4_1
 ; CHECK-NEXT:  @ %bb.2: @ %middle.block
 ; CHECK-NEXT:    vctp.32 r3
 ; CHECK-NEXT:    vpsel q0, q0, q1

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-vpsel-liveout.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-vpsel-liveout.mir
new file mode 100644
index 000000000000..f8d752d80446
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-vpsel-liveout.mir
@@ -0,0 +1,183 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - | FileCheck %s
+--- |
+  define dso_local i32 @no_vpsel_liveout(i16* nocapture readonly %a, i16* nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
+  entry:
+    %cmp9 = icmp eq i32 %N, 0
+    %tmp = add i32 %N, 3
+    %tmp1 = lshr i32 %tmp, 2
+    %tmp2 = shl nuw i32 %tmp1, 2
+    %tmp3 = add i32 %tmp2, -4
+    %tmp4 = lshr i32 %tmp3, 2
+    %tmp5 = add nuw nsw i32 %tmp4, 1
+    br i1 %cmp9, label %for.cond.cleanup, label %vector.ph
+
+  vector.ph:                                        ; preds = %entry
+    call void @llvm.set.loop.iterations.i32(i32 %tmp5)
+    br label %vector.body
+
+  vector.body:                                      ; preds = %vector.body, %vector.ph
+    %lsr.iv1 = phi i32 [ %lsr.iv.next, %vector.body ], [ %tmp5, %vector.ph ]
+    %lsr.iv18 = phi i16* [ %scevgep19, %vector.body ], [ %b, %vector.ph ]
+    %lsr.iv = phi i16* [ %scevgep, %vector.body ], [ %a, %vector.ph ]
+    %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %tmp13, %vector.body ]
+    %tmp7 = phi i32 [ %N, %vector.ph ], [ %tmp9, %vector.body ]
+    %lsr.iv17 = bitcast i16* %lsr.iv to <4 x i16>*
+    %lsr.iv1820 = bitcast i16* %lsr.iv18 to <4 x i16>*
+    %tmp8 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %tmp7)
+    %tmp9 = sub i32 %tmp7, 4
+    %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv17, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
+    %tmp10 = sext <4 x i16> %wide.masked.load to <4 x i32>
+    %wide.masked.load14 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv1820, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
+    %tmp11 = sext <4 x i16> %wide.masked.load14 to <4 x i32>
+    %tmp12 = mul nsw <4 x i32> %tmp11, %tmp10
+    %tmp13 = add <4 x i32> %tmp12, %vec.phi
+    %scevgep = getelementptr i16, i16* %lsr.iv, i32 4
+    %scevgep19 = getelementptr i16, i16* %lsr.iv18, i32 4
+    %tmp14 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv1, i32 1)
+    %tmp15 = icmp ne i32 %tmp14, 0
+    %lsr.iv.next = add nsw i32 %lsr.iv1, -1
+    br i1 %tmp15, label %vector.body, label %middle.block
+
+  middle.block:                                     ; preds = %vector.body
+    %tmp16 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %tmp13)
+    br label %for.cond.cleanup
+
+  for.cond.cleanup:                                 ; preds = %middle.block, %entry
+    %res.0.lcssa = phi i32 [ 0, %entry ], [ %tmp16, %middle.block ]
+    ret i32 %res.0.lcssa
+  }
+  declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>) #1
+  declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>) #2
+  declare void @llvm.set.loop.iterations.i32(i32) #3
+  declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #3
+  declare <4 x i1> @llvm.arm.mve.vctp32(i32) #4
+
+...
+---
+name:            no_vpsel_liveout
+alignment:       2
+exposesReturnsTwice: false
+legalized:       false
+regBankSelected: false
+selected:        false
+failedISel:      false
+tracksRegLiveness: true
+hasWinCFI:       false
+registers:       []
+liveins:
+  - { reg: '$r0', virtual-reg: '' }
+  - { reg: '$r1', virtual-reg: '' }
+  - { reg: '$r2', virtual-reg: '' }
+frameInfo:
+  isFrameAddressTaken: false
+  isReturnAddressTaken: false
+  hasStackMap:     false
+  hasPatchPoint:   false
+  stackSize:       8
+  offsetAdjustment: 0
+  maxAlignment:    4
+  adjustsStack:    false
+  hasCalls:        false
+  stackProtector:  ''
+  maxCallFrameSize: 0
+  cvBytesOfCalleeSavedRegisters: 0
+  hasOpaqueSPAdjustment: false
+  hasVAStart:      false
+  hasMustTailInVarArgFunc: false
+  localFrameSize:  0
+  savePoint:       ''
+  restorePoint:    ''
+fixedStack:      []
+stack:
+  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+callSites:       []
+constants:       []
+machineFunctionInfo: {}
+body:             |
+  ; CHECK-LABEL: name: no_vpsel_liveout
+  ; CHECK: bb.0.entry:
+  ; CHECK:   successors: %bb.1(0x80000000)
+  ; CHECK:   liveins: $lr, $r0, $r1, $r2, $r7
+  ; CHECK:   tCMPi8 renamable $r2, 0, 14, $noreg, implicit-def $cpsr
+  ; CHECK:   t2IT 0, 4, implicit-def $itstate
+  ; CHECK:   renamable $r0 = tMOVi8 $noreg, 0, 0, $cpsr, implicit killed $r0, implicit $itstate
+  ; CHECK:   tBX_RET 0, killed $cpsr, implicit $r0, implicit killed $itstate
+  ; CHECK:   frame-setup tPUSH 14, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+  ; CHECK:   frame-setup CFI_INSTRUCTION def_cfa_offset 8
+  ; CHECK:   frame-setup CFI_INSTRUCTION offset $lr, -4
+  ; CHECK:   frame-setup CFI_INSTRUCTION offset $r7, -8
+  ; CHECK:   renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14, $noreg
+  ; CHECK:   renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q0
+  ; CHECK:   renamable $r3 = t2BICri killed renamable $r3, 3, 14, $noreg, $noreg
+  ; CHECK:   renamable $r12 = t2SUBri killed renamable $r3, 4, 14, $noreg, $noreg
+  ; CHECK:   renamable $r3, dead $cpsr = tMOVi8 1, 14, $noreg
+  ; CHECK:   renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14, $noreg, $noreg
+  ; CHECK:   dead $lr = t2DLS renamable $r12
+  ; CHECK:   $r3 = tMOVr killed $r12, 14, $noreg
+  ; CHECK: bb.1.vector.body:
+  ; CHECK:   successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+  ; CHECK:   liveins: $q0, $r0, $r1, $r2, $r3
+  ; CHECK:   renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg
+  ; CHECK:   MVE_VPST 4, implicit $vpr
+  ; CHECK:   renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr :: (load 8 from %ir.lsr.iv17, align 2)
+  ; CHECK:   renamable $r1, renamable $q2 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, killed renamable $vpr :: (load 8 from %ir.lsr.iv1820, align 2)
+  ; CHECK:   $lr = tMOVr $r3, 14, $noreg
+  ; CHECK:   renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, undef renamable $q1
+  ; CHECK:   renamable $r3, dead $cpsr = nsw tSUBi8 killed $r3, 1, 14, $noreg
+  ; CHECK:   renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
+  ; CHECK:   dead $lr = t2LEUpdate killed renamable $lr, %bb.1
+  ; CHECK: bb.2.middle.block:
+  ; CHECK:   liveins: $q0
+  ; CHECK:   renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg
+  ; CHECK:   tPOP_RET 14, $noreg, def $r7, def $pc, implicit killed $r0
+  bb.0.entry:
+    successors: %bb.1(0x80000000)
+    liveins: $r0, $r1, $r2, $lr, $r7
+
+    tCMPi8 renamable $r2, 0, 14, $noreg, implicit-def $cpsr
+    t2IT 0, 4, implicit-def $itstate
+    renamable $r0 = tMOVi8 $noreg, 0, 0, $cpsr, implicit killed $r0, implicit $itstate
+    tBX_RET 0, killed $cpsr, implicit $r0, implicit killed $itstate
+    frame-setup tPUSH 14, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+    frame-setup CFI_INSTRUCTION def_cfa_offset 8
+    frame-setup CFI_INSTRUCTION offset $lr, -4
+    frame-setup CFI_INSTRUCTION offset $r7, -8
+    renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14, $noreg
+    renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q0
+    renamable $r3 = t2BICri killed renamable $r3, 3, 14, $noreg, $noreg
+    renamable $r12 = t2SUBri killed renamable $r3, 4, 14, $noreg, $noreg
+    renamable $r3, dead $cpsr = tMOVi8 1, 14, $noreg
+    renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14, $noreg, $noreg
+    t2DoLoopStart renamable $r12
+    $r3 = tMOVr killed $r12, 14, $noreg
+
+  bb.1.vector.body:
+    successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+    liveins: $q0, $r0, $r1, $r2, $r3
+
+    renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg
+    MVE_VPST 4, implicit $vpr
+    renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr :: (load 8 from %ir.lsr.iv17, align 2)
+    renamable $r1, renamable $q2 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, killed renamable $vpr :: (load 8 from %ir.lsr.iv1820, align 2)
+    $lr = tMOVr $r3, 14, $noreg
+    renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, undef renamable $q1
+    renamable $r3, dead $cpsr = nsw tSUBi8 killed $r3, 1, 14, $noreg
+    renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14, $noreg
+    renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
+    renamable $lr = t2LoopDec killed renamable $lr, 1
+    t2LoopEnd killed renamable $lr, %bb.1, implicit-def dead $cpsr
+    tB %bb.2, 14, $noreg
+
+  bb.2.middle.block:
+    liveins: $q0
+
+    renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg
+    tPOP_RET 14, $noreg, def $r7, def $pc, implicit killed $r0
+
+...

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/predicated-liveout.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/predicated-liveout.mir
new file mode 100644
index 000000000000..621687062f6d
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/predicated-liveout.mir
@@ -0,0 +1,158 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s --verify-machineinstrs -o - | FileCheck %s
+
+--- |
+  define i16 @predicated_livout(i8* %input_1_vect, i8* %input_2_vect, i32 %block_size) #0 {
+  entry:
+    %rnd.up = add i32 %block_size, 7
+    %div = lshr i32 %rnd.up, 3
+    %0 = call i1 @llvm.test.set.loop.iterations.i32(i32 %div)
+    br i1 %0, label %for.body.preheader, label %for.cond.cleanup
+
+  for.body.preheader:                               ; preds = %entry
+    br label %for.body
+
+  for.body:                                         ; preds = %for.body.preheader, %for.body
+    %lsr.iv = phi i32 [ 0, %for.body.preheader ], [ %lsr.iv.next, %for.body ]
+    %input_1_vect.addr.052 = phi i8* [ %add.ptr, %for.body ], [ %input_1_vect, %for.body.preheader ]
+    %input_2_vect.addr.051 = phi i8* [ %add.ptr14, %for.body ], [ %input_2_vect, %for.body.preheader ]
+    %num_elements.049 = phi i32 [ %sub, %for.body ], [ %block_size, %for.body.preheader ]
+    %acc = phi <8 x i16> [ %acc.next, %for.body ], [ zeroinitializer, %for.body.preheader ]
+    %input_2_cast = bitcast i8* %input_2_vect.addr.051 to <8 x i8>*
+    %input_1_cast = bitcast i8* %input_1_vect.addr.052 to <8 x i8>*
+    %pred = tail call <8 x i1> @llvm.arm.mve.vctp16(i32 %num_elements.049)
+    %load.1 = tail call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %input_1_cast, i32 1, <8 x i1> %pred, <8 x i8> undef)
+    %zext.load.1 = zext <8 x i8> %load.1 to <8 x i16>
+    %load.2 = tail call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %input_2_cast, i32 1, <8 x i1> %pred, <8 x i8> undef)
+    %zext.load.2 = zext <8 x i8> %load.2 to <8 x i16>
+    %add = tail call <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16> %zext.load.1, <8 x i16> %zext.load.2, <8 x i1> %pred, <8 x i16> undef)
+    %acc.next = tail call <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16> %add, <8 x i16> %acc, <8 x i1> %pred, <8 x i16> undef)
+    %add.ptr = getelementptr inbounds i8, i8* %input_1_vect.addr.052, i32 8
+    %add.ptr14 = getelementptr inbounds i8, i8* %input_2_vect.addr.051, i32 8
+    %sub = add i32 %num_elements.049, -8
+    %iv.next = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv, i32 1)
+    %cmp = icmp ne i32 %iv.next, 0
+    %lsr.iv.next = add i32 %lsr.iv, -1
+    br i1 %cmp, label %for.body, label %middle.block
+
+  middle.block:                                     ; preds = %for.body
+    %reduce = tail call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %acc.next)
+    ret i16 %reduce
+
+  for.cond.cleanup:                                 ; preds = %entry
+    ret i16 0
+  }
+
+  declare <8 x i1> @llvm.arm.mve.vctp16(i32) #1
+  declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32 immarg, <8 x i1>, <8 x i8>) #2
+  declare i1 @llvm.test.set.loop.iterations.i32(i32) #3
+  declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #3
+  declare i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16>) #4
+  declare <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, <8 x i1>, <8 x i16>) #1
+
+...
+---
+name:            predicated_livout
+alignment:       2
+tracksRegLiveness: true
+registers:       []
+liveins:
+  - { reg: '$r0', virtual-reg: '' }
+  - { reg: '$r1', virtual-reg: '' }
+  - { reg: '$r2', virtual-reg: '' }
+frameInfo:
+  stackSize:       8
+  offsetAdjustment: 0
+  maxAlignment:    4
+fixedStack:      []
+stack:
+  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+callSites:       []
+constants:       []
+machineFunctionInfo: {}
+body:             |
+  ; CHECK-LABEL: name: predicated_livout
+  ; CHECK: bb.0.entry:
+  ; CHECK:   successors: %bb.1(0x40000000), %bb.4(0x40000000)
+  ; CHECK:   liveins: $lr, $r0, $r1, $r2, $r7
+  ; CHECK:   frame-setup tPUSH 14, $noreg, $r7, killed $lr, implicit-def $sp, implicit $sp
+  ; CHECK:   frame-setup CFI_INSTRUCTION def_cfa_offset 8
+  ; CHECK:   frame-setup CFI_INSTRUCTION offset $lr, -4
+  ; CHECK:   frame-setup CFI_INSTRUCTION offset $r7, -8
+  ; CHECK:   $lr = MVE_WLSTP_16 killed renamable $r2, %bb.4
+  ; CHECK: bb.1.for.body.preheader:
+  ; CHECK:   successors: %bb.2(0x80000000)
+  ; CHECK:   liveins: $lr, $r0, $r1
+  ; CHECK:   renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q0
+  ; CHECK:   renamable $r3, dead $cpsr = tMOVi8 0, 14, $noreg
+  ; CHECK: bb.2.for.body:
+  ; CHECK:   successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+  ; CHECK:   liveins: $lr, $q0, $r0, $r1, $r3
+  ; CHECK:   renamable $r3, dead $cpsr = tSUBi8 killed $r3, 1, 14, $noreg
+  ; CHECK:   renamable $r1, renamable $q1 = MVE_VLDRBU16_post killed renamable $r1, 8, 0, $noreg :: (load 8 from %ir.input_2_cast, align 1)
+  ; CHECK:   renamable $r0, renamable $q2 = MVE_VLDRBU16_post killed renamable $r0, 8, 0, $noreg :: (load 8 from %ir.input_1_cast, align 1)
+  ; CHECK:   renamable $q1 = MVE_VADDi16 killed renamable $q2, killed renamable $q1, 0, $noreg, undef renamable $q1
+  ; CHECK:   renamable $q0 = MVE_VADDi16 killed renamable $q1, killed renamable $q0, 0, killed $noreg, undef renamable $q0
+  ; CHECK:   $lr = MVE_LETP killed renamable $lr, %bb.2
+  ; CHECK: bb.3.middle.block:
+  ; CHECK:   liveins: $q0
+  ; CHECK:   renamable $r0 = MVE_VADDVu16no_acc killed renamable $q0, 0, $noreg
+  ; CHECK:   tPOP_RET 14, $noreg, def $r7, def $pc, implicit killed $r0
+  ; CHECK: bb.4.for.cond.cleanup:
+  ; CHECK:   liveins: $lr, $r7
+  ; CHECK:   $r0, dead $cpsr = tMOVi8 0, 14, $noreg
+  ; CHECK:   tBX_RET 14, $noreg, implicit killed $r0
+  bb.0.entry:
+    successors: %bb.1(0x40000000), %bb.4(0x40000000)
+    liveins: $r0, $r1, $r2, $lr, $r7
+
+    frame-setup tPUSH 14, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+    frame-setup CFI_INSTRUCTION def_cfa_offset 8
+    frame-setup CFI_INSTRUCTION offset $lr, -4
+    frame-setup CFI_INSTRUCTION offset $r7, -8
+    renamable $r3, dead $cpsr = tADDi3 renamable $r2, 7, 14, $noreg
+    renamable $lr = t2LSRri killed renamable $r3, 3, 14, $noreg, $noreg
+    t2WhileLoopStart killed renamable $lr, %bb.4, implicit-def dead $cpsr
+    tB %bb.1, 14, $noreg
+
+  bb.1.for.body.preheader:
+    successors: %bb.2(0x80000000)
+    liveins: $r0, $r1, $r2, $lr
+
+    renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q0
+    renamable $r3, dead $cpsr = tMOVi8 0, 14, $noreg
+
+  bb.2.for.body:
+    successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+    liveins: $q0, $r0, $r1, $r2, $r3, $lr
+
+    renamable $vpr = MVE_VCTP16 renamable $r2, 0, $noreg
+    renamable $r3, dead $cpsr = tSUBi8 killed $r3, 1, 14, $noreg
+    renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 8, 14, $noreg
+    renamable $lr = t2LoopDec killed renamable $lr, 1
+    MVE_VPST 1, implicit $vpr
+    renamable $r1, renamable $q1 = MVE_VLDRBU16_post killed renamable $r1, 8, 1, renamable $vpr :: (load 8 from %ir.input_2_cast, align 1)
+    renamable $r0, renamable $q2 = MVE_VLDRBU16_post killed renamable $r0, 8, 1, renamable $vpr :: (load 8 from %ir.input_1_cast, align 1)
+    renamable $q1 = MVE_VADDi16 killed renamable $q2, killed renamable $q1, 1, renamable $vpr, undef renamable $q1
+    renamable $q0 = MVE_VADDi16 killed renamable $q1, killed renamable $q0, 1, killed renamable $vpr, undef renamable $q0
+    t2LoopEnd killed renamable $lr, %bb.2, implicit-def dead $cpsr
+    tB %bb.3, 14, $noreg
+
+  bb.3.middle.block:
+    liveins: $q0
+
+    renamable $r0 = MVE_VADDVu16no_acc killed renamable $q0, 0, $noreg
+    tPOP_RET 14, $noreg, def $r7, def $pc, implicit killed $r0
+
+  bb.4.for.cond.cleanup:
+    liveins: $lr, $r7
+
+    $r0, dead $cpsr = tMOVi8 0, 14, $noreg
+    tBX_RET 14, $noreg, implicit killed $r0
+
+...

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-add-operand-liveout.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-add-operand-liveout.mir
new file mode 100644
index 000000000000..a053293e1b01
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-add-operand-liveout.mir
@@ -0,0 +1,199 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - | FileCheck %s
+
+# This example is actually equivalent as there's a sub in the loop, which is
+# then used by the add in the exit - making the vctp operands equivalent.
+
+--- |
+  define dso_local i32 @wrong_vctp_liveout(i16* nocapture readonly %a, i16* nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
+  entry:
+    %cmp9 = icmp eq i32 %N, 0
+    %0 = add i32 %N, 3
+    %1 = lshr i32 %0, 2
+    %2 = shl nuw i32 %1, 2
+    %3 = add i32 %2, -4
+    %4 = lshr i32 %3, 2
+    %5 = add nuw nsw i32 %4, 1
+    br i1 %cmp9, label %for.cond.cleanup, label %vector.ph
+
+  vector.ph:                                        ; preds = %entry
+    call void @llvm.set.loop.iterations.i32(i32 %5)
+    br label %vector.body
+
+  vector.body:                                      ; preds = %vector.body, %vector.ph
+    %lsr.iv1 = phi i32 [ %lsr.iv.next, %vector.body ], [ %5, %vector.ph ]
+    %lsr.iv18 = phi i16* [ %scevgep19, %vector.body ], [ %b, %vector.ph ]
+    %lsr.iv = phi i16* [ %scevgep, %vector.body ], [ %a, %vector.ph ]
+    %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %12, %vector.body ]
+    %6 = phi i32 [ %N, %vector.ph ], [ %8, %vector.body ]
+    %lsr.iv17 = bitcast i16* %lsr.iv to <4 x i16>*
+    %lsr.iv1820 = bitcast i16* %lsr.iv18 to <4 x i16>*
+    %7 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %6)
+    %8 = sub i32 %6, 4
+    %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv17, i32 2, <4 x i1> %7, <4 x i16> undef)
+    %9 = sext <4 x i16> %wide.masked.load to <4 x i32>
+    %wide.masked.load14 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv1820, i32 2, <4 x i1> %7, <4 x i16> undef)
+    %10 = sext <4 x i16> %wide.masked.load14 to <4 x i32>
+    %11 = mul nsw <4 x i32> %10, %9
+    %12 = add <4 x i32> %11, %vec.phi
+    %scevgep = getelementptr i16, i16* %lsr.iv, i32 4
+    %scevgep19 = getelementptr i16, i16* %lsr.iv18, i32 4
+    %13 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv1, i32 1)
+    %14 = icmp ne i32 %13, 0
+    %lsr.iv.next = add nsw i32 %lsr.iv1, -1
+    br i1 %14, label %vector.body, label %middle.block
+
+  middle.block:                                     ; preds = %vector.body
+    %15 = add i32 %8, 4
+    %16 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %15)
+    %17 = select <4 x i1> %16, <4 x i32> %12, <4 x i32> %vec.phi
+    %18 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %17)
+    br label %for.cond.cleanup
+
+  for.cond.cleanup:                                 ; preds = %middle.block, %entry
+    %res.0.lcssa = phi i32 [ 0, %entry ], [ %18, %middle.block ]
+    ret i32 %res.0.lcssa
+  }
+  declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)
+  declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
+  declare void @llvm.set.loop.iterations.i32(i32)
+  declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
+  declare <4 x i1> @llvm.arm.mve.vctp32(i32)
+
+...
+---
+name:            wrong_vctp_liveout
+alignment:       2
+exposesReturnsTwice: false
+legalized:       false
+regBankSelected: false
+selected:        false
+failedISel:      false
+tracksRegLiveness: true
+hasWinCFI:       false
+registers:       []
+liveins:
+  - { reg: '$r0', virtual-reg: '' }
+  - { reg: '$r1', virtual-reg: '' }
+  - { reg: '$r2', virtual-reg: '' }
+frameInfo:
+  isFrameAddressTaken: false
+  isReturnAddressTaken: false
+  hasStackMap:     false
+  hasPatchPoint:   false
+  stackSize:       8
+  offsetAdjustment: 0
+  maxAlignment:    4
+  adjustsStack:    false
+  hasCalls:        false
+  stackProtector:  ''
+  maxCallFrameSize: 0
+  cvBytesOfCalleeSavedRegisters: 0
+  hasOpaqueSPAdjustment: false
+  hasVAStart:      false
+  hasMustTailInVarArgFunc: false
+  localFrameSize:  0
+  savePoint:       ''
+  restorePoint:    ''
+fixedStack:      []
+stack:
+  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+callSites:       []
+constants:       []
+machineFunctionInfo: {}
+body:             |
+  ; CHECK-LABEL: name: wrong_vctp_liveout
+  ; CHECK: bb.0.entry:
+  ; CHECK:   successors: %bb.1(0x80000000)
+  ; CHECK:   liveins: $lr, $r0, $r1, $r2, $r7
+  ; CHECK:   tCMPi8 renamable $r2, 0, 14, $noreg, implicit-def $cpsr
+  ; CHECK:   t2IT 0, 4, implicit-def $itstate
+  ; CHECK:   renamable $r0 = tMOVi8 $noreg, 0, 0, $cpsr, implicit killed $r0, implicit $itstate
+  ; CHECK:   tBX_RET 0, killed $cpsr, implicit $r0, implicit killed $itstate
+  ; CHECK:   frame-setup tPUSH 14, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+  ; CHECK:   frame-setup CFI_INSTRUCTION def_cfa_offset 8
+  ; CHECK:   frame-setup CFI_INSTRUCTION offset $lr, -4
+  ; CHECK:   frame-setup CFI_INSTRUCTION offset $r7, -8
+  ; CHECK:   renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14, $noreg
+  ; CHECK:   renamable $q1 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q1
+  ; CHECK:   renamable $r3 = t2BICri killed renamable $r3, 3, 14, $noreg, $noreg
+  ; CHECK:   renamable $r12 = t2SUBri killed renamable $r3, 4, 14, $noreg, $noreg
+  ; CHECK:   renamable $r3, dead $cpsr = tMOVi8 1, 14, $noreg
+  ; CHECK:   renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14, $noreg, $noreg
+  ; CHECK:   dead $lr = t2DLS renamable $r12
+  ; CHECK:   $r3 = tMOVr killed $r12, 14, $noreg
+  ; CHECK: bb.1.vector.body:
+  ; CHECK:   successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+  ; CHECK:   liveins: $q1, $r0, $r1, $r2, $r3
+  ; CHECK:   renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg
+  ; CHECK:   $q0 = MVE_VORR killed $q1, killed $q1, 0, $noreg, undef $q0
+  ; CHECK:   MVE_VPST 4, implicit $vpr
+  ; CHECK:   renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr :: (load 8 from %ir.lsr.iv17, align 2)
+  ; CHECK:   renamable $r1, renamable $q2 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, killed renamable $vpr :: (load 8 from %ir.lsr.iv1820, align 2)
+  ; CHECK:   $lr = tMOVr $r3, 14, $noreg
+  ; CHECK:   renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, undef renamable $q1
+  ; CHECK:   renamable $r3, dead $cpsr = nsw tSUBi8 killed $r3, 1, 14, $noreg
+  ; CHECK:   renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14, $noreg
+  ; CHECK:   renamable $q1 = MVE_VADDi32 killed renamable $q1, renamable $q0, 0, $noreg, undef renamable $q1
+  ; CHECK:   dead $lr = t2LEUpdate killed renamable $lr, %bb.1
+  ; CHECK: bb.2.middle.block:
+  ; CHECK:   liveins: $q0, $q1, $r2
+  ; CHECK:   renamable $r0, dead $cpsr = tADDi3 killed renamable $r2, 4, 14, $noreg
+  ; CHECK:   renamable $vpr = MVE_VCTP32 killed renamable $r0, 0, $noreg
+  ; CHECK:   renamable $q0 = MVE_VPSEL killed renamable $q1, killed renamable $q0, 0, killed renamable $vpr
+  ; CHECK:   renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg
+  ; CHECK:   tPOP_RET 14, $noreg, def $r7, def $pc, implicit killed $r0
+  bb.0.entry:
+    successors: %bb.1(0x80000000)
+    liveins: $r0, $r1, $r2, $lr, $r7
+
+    tCMPi8 renamable $r2, 0, 14, $noreg, implicit-def $cpsr
+    t2IT 0, 4, implicit-def $itstate
+    renamable $r0 = tMOVi8 $noreg, 0, 0, $cpsr, implicit killed $r0, implicit $itstate
+    tBX_RET 0, killed $cpsr, implicit $r0, implicit killed $itstate
+    frame-setup tPUSH 14, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+    frame-setup CFI_INSTRUCTION def_cfa_offset 8
+    frame-setup CFI_INSTRUCTION offset $lr, -4
+    frame-setup CFI_INSTRUCTION offset $r7, -8
+    renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14, $noreg
+    renamable $q1 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q1
+    renamable $r3 = t2BICri killed renamable $r3, 3, 14, $noreg, $noreg
+    renamable $r12 = t2SUBri killed renamable $r3, 4, 14, $noreg, $noreg
+    renamable $r3, dead $cpsr = tMOVi8 1, 14, $noreg
+    renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14, $noreg, $noreg
+    t2DoLoopStart renamable $r12
+    $r3 = tMOVr killed $r12, 14, $noreg
+
+  bb.1.vector.body:
+    successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+    liveins: $q1, $r0, $r1, $r2, $r3
+
+    renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg
+    $q0 = MVE_VORR killed $q1, $q1, 0, $noreg, undef $q0
+    MVE_VPST 4, implicit $vpr
+    renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr :: (load 8 from %ir.lsr.iv17, align 2)
+    renamable $r1, renamable $q2 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, killed renamable $vpr :: (load 8 from %ir.lsr.iv1820, align 2)
+    $lr = tMOVr $r3, 14, $noreg
+    renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, undef renamable $q1
+    renamable $r3, dead $cpsr = nsw tSUBi8 killed $r3, 1, 14, $noreg
+    renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14, $noreg
+    renamable $q1 = MVE_VADDi32 killed renamable $q1, renamable $q0, 0, $noreg, undef renamable $q1
+    renamable $lr = t2LoopDec killed renamable $lr, 1
+    t2LoopEnd killed renamable $lr, %bb.1, implicit-def dead $cpsr
+    tB %bb.2, 14, $noreg
+
+  bb.2.middle.block:
+    liveins: $q0, $q1, $r2
+
+    renamable $r0, dead $cpsr = tADDi3 killed renamable $r2, 4, 14, $noreg
+    renamable $vpr = MVE_VCTP32 killed renamable $r0, 0, $noreg
+    renamable $q0 = MVE_VPSEL killed renamable $q1, killed renamable $q0, 0, killed renamable $vpr
+    renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg
+    tPOP_RET 14, $noreg, def $r7, def $pc, implicit killed $r0
+
+...

diff  --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp16-reduce.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp16-reduce.mir
new file mode 100644
index 000000000000..5ed730e5313a
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp16-reduce.mir
@@ -0,0 +1,215 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - --verify-machineinstrs | FileCheck %s
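+
+# Both q0 and q1 are live out of the loop, and the accumulator copy (vorr) is
+# not predicated on the vctp, so the CHECK lines expect the loop to be left as
+# a dls/le loop with the vctp16/vpsel reduction remaining in the middle block.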
+
+--- |
+  define dso_local arm_aapcs_vfpcc signext i16 @wrong_liveout_shift(i8* nocapture readonly %b, i8* nocapture readonly %c, i32 %N) {
+  entry:
+    %cmp11 = icmp eq i32 %N, 0
+    %0 = add i32 %N, 7
+    %1 = lshr i32 %0, 3
+    %2 = shl nuw i32 %1, 3
+    %3 = add i32 %2, -8
+    %4 = lshr i32 %3, 3
+    %5 = add nuw nsw i32 %4, 1
+    br i1 %cmp11, label %for.cond.cleanup, label %vector.ph
+
+  vector.ph:                                        ; preds = %entry
+    call void @llvm.set.loop.iterations.i32(i32 %5)
+    %6 = shl i32 %4, 3
+    %7 = sub i32 %N, %6
+    br label %vector.body
+
+  vector.body:                                      ; preds = %vector.body, %vector.ph
+    %lsr.iv20 = phi i8* [ %scevgep21, %vector.body ], [ %c, %vector.ph ]
+    %lsr.iv = phi i8* [ %scevgep, %vector.body ], [ %b, %vector.ph ]
+    %vec.phi = phi <8 x i16> [ <i16 32767, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, %vector.ph ], [ %15, %vector.body ]
+    %8 = phi i32 [ %5, %vector.ph ], [ %16, %vector.body ]
+    %9 = phi i32 [ %N, %vector.ph ], [ %11, %vector.body ]
+    %lsr.iv2022 = bitcast i8* %lsr.iv20 to <8 x i8>*
+    %lsr.iv19 = bitcast i8* %lsr.iv to <8 x i8>*
+    %10 = call <8 x i1> @llvm.arm.mve.vctp16(i32 %9)
+    %11 = sub i32 %9, 8
+    %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %lsr.iv19, i32 1, <8 x i1> %10, <8 x i8> undef)
+    %12 = zext <8 x i8> %wide.masked.load to <8 x i16>
+    %wide.masked.load16 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %lsr.iv2022, i32 1, <8 x i1> %10, <8 x i8> undef)
+    %13 = zext <8 x i8> %wide.masked.load16 to <8 x i16>
+    %14 = mul nuw <8 x i16> %13, %12
+    %15 = sub <8 x i16> %vec.phi, %14
+    %scevgep = getelementptr i8, i8* %lsr.iv, i32 8
+    %scevgep21 = getelementptr i8, i8* %lsr.iv20, i32 8
+    %16 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %8, i32 1)
+    %17 = icmp ne i32 %16, 0
+    br i1 %17, label %vector.body, label %middle.block
+
+  middle.block:                                     ; preds = %vector.body
+    %vec.phi.lcssa = phi <8 x i16> [ %vec.phi, %vector.body ]
+    %.lcssa = phi <8 x i16> [ %15, %vector.body ]
+    %18 = call <8 x i1> @llvm.arm.mve.vctp16(i32 %7)
+    %19 = select <8 x i1> %18, <8 x i16> %.lcssa, <8 x i16> %vec.phi.lcssa
+    %20 = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %19)
+    br label %for.cond.cleanup
+
+  for.cond.cleanup:                                 ; preds = %middle.block, %entry
+    %a.0.lcssa = phi i16 [ 32767, %entry ], [ %20, %middle.block ]
+    ret i16 %a.0.lcssa
+  }
+  declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32 immarg, <8 x i1>, <8 x i8>)
+  declare i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16>)
+  declare void @llvm.set.loop.iterations.i32(i32)
+  declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
+  declare <8 x i1> @llvm.arm.mve.vctp16(i32)
+
+...
+---
+name:            wrong_liveout_shift
+alignment:       16
+exposesReturnsTwice: false
+legalized:       false
+regBankSelected: false
+selected:        false
+failedISel:      false
+tracksRegLiveness: true
+hasWinCFI:       false
+registers:       []
+liveins:
+  - { reg: '$r0', virtual-reg: '' }
+  - { reg: '$r1', virtual-reg: '' }
+  - { reg: '$r2', virtual-reg: '' }
+frameInfo:
+  isFrameAddressTaken: false
+  isReturnAddressTaken: false
+  hasStackMap:     false
+  hasPatchPoint:   false
+  stackSize:       8
+  offsetAdjustment: 0
+  maxAlignment:    4
+  adjustsStack:    false
+  hasCalls:        false
+  stackProtector:  ''
+  maxCallFrameSize: 0
+  cvBytesOfCalleeSavedRegisters: 0
+  hasOpaqueSPAdjustment: false
+  hasVAStart:      false
+  hasMustTailInVarArgFunc: false
+  localFrameSize:  0
+  savePoint:       ''
+  restorePoint:    ''
+fixedStack:      []
+stack:
+  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: true,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+callSites:       []
+constants:
+  - id:              0
+    value:           '<8 x i16> <i16 32767, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>'
+    alignment:       16
+    isTargetSpecific: false
+machineFunctionInfo: {}
+body:             |
+  ; CHECK-LABEL: name: wrong_liveout_shift
+  ; CHECK: bb.0.entry:
+  ; CHECK:   successors: %bb.1(0x80000000)
+  ; CHECK:   liveins: $lr, $r0, $r1, $r2
+  ; CHECK:   tCMPi8 renamable $r2, 0, 14, $noreg, implicit-def $cpsr
+  ; CHECK:   t2IT 0, 2, implicit-def $itstate
+  ; CHECK:   renamable $r0 = t2MOVi16 32767, 0, $cpsr, implicit killed $r0, implicit $itstate
+  ; CHECK:   renamable $r0 = tSXTH killed renamable $r0, 0, $cpsr, implicit killed $r0, implicit $itstate
+  ; CHECK:   tBX_RET 0, killed $cpsr, implicit $r0, implicit killed $itstate
+  ; CHECK:   frame-setup tPUSH 14, $noreg, killed $lr, implicit-def $sp, implicit $sp
+  ; CHECK:   frame-setup CFI_INSTRUCTION def_cfa_offset 8
+  ; CHECK:   frame-setup CFI_INSTRUCTION offset $lr, -4
+  ; CHECK:   frame-setup CFI_INSTRUCTION offset $r7, -8
+  ; CHECK:   dead $r7 = frame-setup tMOVr $sp, 14, $noreg
+  ; CHECK:   frame-setup CFI_INSTRUCTION def_cfa_register $r7
+  ; CHECK:   renamable $r3, dead $cpsr = tADDi3 renamable $r2, 7, 14, $noreg
+  ; CHECK:   renamable $r3 = t2BICri killed renamable $r3, 7, 14, $noreg, $noreg
+  ; CHECK:   renamable $r12 = t2SUBri killed renamable $r3, 8, 14, $noreg, $noreg
+  ; CHECK:   renamable $r3, dead $cpsr = tMOVi8 1, 14, $noreg
+  ; CHECK:   renamable $lr = nuw nsw t2ADDrs killed renamable $r3, renamable $r12, 27, 14, $noreg, $noreg
+  ; CHECK:   renamable $r3 = tLEApcrel %const.0, 14, $noreg
+  ; CHECK:   renamable $r12 = t2LSRri killed renamable $r12, 3, 14, $noreg, $noreg
+  ; CHECK:   renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg :: (load 16 from constant-pool)
+  ; CHECK:   renamable $r3 = t2SUBrs renamable $r2, killed renamable $r12, 26, 14, $noreg, $noreg
+  ; CHECK:   $lr = t2DLS killed renamable $lr
+  ; CHECK: bb.1.vector.body:
+  ; CHECK:   successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+  ; CHECK:   liveins: $lr, $q0, $r0, $r1, $r2, $r3
+  ; CHECK:   renamable $vpr = MVE_VCTP16 renamable $r2, 0, $noreg
+  ; CHECK:   $q1 = MVE_VORR killed $q0, killed $q0, 0, $noreg, undef $q1
+  ; CHECK:   MVE_VPST 4, implicit $vpr
+  ; CHECK:   renamable $r0, renamable $q0 = MVE_VLDRBU16_post killed renamable $r0, 8, 1, renamable $vpr :: (load 8 from %ir.lsr.iv19, align 1)
+  ; CHECK:   renamable $r1, renamable $q2 = MVE_VLDRBU16_post killed renamable $r1, 8, 1, killed renamable $vpr :: (load 8 from %ir.lsr.iv2022, align 1)
+  ; CHECK:   renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 8, 14, $noreg
+  ; CHECK:   renamable $q0 = nuw MVE_VMULi16 killed renamable $q2, killed renamable $q0, 0, $noreg, undef renamable $q0
+  ; CHECK:   renamable $q0 = MVE_VSUBi16 renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
+  ; CHECK:   $lr = t2LEUpdate killed renamable $lr, %bb.1
+  ; CHECK: bb.2.middle.block:
+  ; CHECK:   liveins: $q0, $q1, $r3
+  ; CHECK:   renamable $vpr = MVE_VCTP16 killed renamable $r3, 0, $noreg
+  ; CHECK:   renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, killed renamable $vpr
+  ; CHECK:   renamable $r0 = MVE_VADDVu16no_acc killed renamable $q0, 0, $noreg
+  ; CHECK:   $sp = t2LDMIA_UPD $sp, 14, $noreg, def $r7, def $lr
+  ; CHECK:   renamable $r0 = tSXTH killed renamable $r0, 14, $noreg
+  ; CHECK:   tBX_RET 14, $noreg, implicit killed $r0
+  ; CHECK: bb.3 (align 16):
+  ; CHECK:   CONSTPOOL_ENTRY 0, %const.0, 16
+  bb.0.entry:
+    successors: %bb.1(0x80000000)
+    liveins: $r0, $r1, $r2, $lr
+
+    tCMPi8 renamable $r2, 0, 14, $noreg, implicit-def $cpsr
+    t2IT 0, 2, implicit-def $itstate
+    renamable $r0 = t2MOVi16 32767, 0, $cpsr, implicit killed $r0, implicit $itstate
+    renamable $r0 = tSXTH killed renamable $r0, 0, $cpsr, implicit killed $r0, implicit $itstate
+    tBX_RET 0, killed $cpsr, implicit $r0, implicit killed $itstate
+    frame-setup tPUSH 14, $noreg, killed $lr, implicit-def $sp, implicit $sp
+    frame-setup CFI_INSTRUCTION def_cfa_offset 8
+    frame-setup CFI_INSTRUCTION offset $lr, -4
+    frame-setup CFI_INSTRUCTION offset $r7, -8
+    $r7 = frame-setup tMOVr $sp, 14, $noreg
+    frame-setup CFI_INSTRUCTION def_cfa_register $r7
+    renamable $r3, dead $cpsr = tADDi3 renamable $r2, 7, 14, $noreg
+    renamable $r3 = t2BICri killed renamable $r3, 7, 14, $noreg, $noreg
+    renamable $r12 = t2SUBri killed renamable $r3, 8, 14, $noreg, $noreg
+    renamable $r3, dead $cpsr = tMOVi8 1, 14, $noreg
+    renamable $lr = nuw nsw t2ADDrs killed renamable $r3, renamable $r12, 27, 14, $noreg, $noreg
+    renamable $r3 = tLEApcrel %const.0, 14, $noreg
+    renamable $r12 = t2LSRri killed renamable $r12, 3, 14, $noreg, $noreg
+    renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg :: (load 16 from constant-pool)
+    renamable $r3 = t2SUBrs renamable $r2, killed renamable $r12, 26, 14, $noreg, $noreg
+    t2DoLoopStart renamable $lr
+
+  bb.1.vector.body:
+    successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+    liveins: $lr, $q0, $r0, $r1, $r2, $r3
+
+    renamable $vpr = MVE_VCTP16 renamable $r2, 0, $noreg
+    $q1 = MVE_VORR killed $q0, $q0, 0, $noreg, undef $q1
+    MVE_VPST 4, implicit $vpr
+    renamable $r0, renamable $q0 = MVE_VLDRBU16_post killed renamable $r0, 8, 1, renamable $vpr :: (load 8 from %ir.lsr.iv19, align 1)
+    renamable $r1, renamable $q2 = MVE_VLDRBU16_post killed renamable $r1, 8, 1, killed renamable $vpr :: (load 8 from %ir.lsr.iv2022, align 1)
+    renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 8, 14, $noreg
+    renamable $q0 = nuw MVE_VMULi16 killed renamable $q2, killed renamable $q0, 0, $noreg, undef renamable $q0
+    renamable $lr = t2LoopDec killed renamable $lr, 1
+    renamable $q0 = MVE_VSUBi16 renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
+    t2LoopEnd renamable $lr, %bb.1, implicit-def dead $cpsr
+    tB %bb.2, 14, $noreg
+
+  bb.2.middle.block:
+    liveins: $q0, $q1, $r3
+
+    renamable $vpr = MVE_VCTP16 killed renamable $r3, 0, $noreg
+    renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, killed renamable $vpr
+    renamable $r0 = MVE_VADDVu16no_acc killed renamable $q0, 0, $noreg
+    $sp = t2LDMIA_UPD $sp, 14, $noreg, def $r7, def $lr
+    renamable $r0 = tSXTH killed renamable $r0, 14, $noreg
+    tBX_RET 14, $noreg, implicit killed $r0
+
+  bb.3 (align 16):
+    CONSTPOOL_ENTRY 0, %const.0, 16
+
+...

diff  --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-arith-codegen.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-arith-codegen.ll
index e8d72028594f..e2f68d32c2f5 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-arith-codegen.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-arith-codegen.ll
@@ -13,17 +13,22 @@ define dso_local i32 @mul_reduce_add(i32* noalias nocapture readonly %a, i32* no
 ; CHECK-NEXT:    vmov.i32 q0, #0x0
 ; CHECK-NEXT:    bic r3, r3, #3
 ; CHECK-NEXT:    sub.w r12, r3, #4
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    add.w lr, r3, r12, lsr #2
 ; CHECK-NEXT:    lsr.w r3, r12, #2
 ; CHECK-NEXT:    sub.w r3, r2, r3, lsl #2
-; CHECK-NEXT:    dlstp.32 lr, r2
+; CHECK-NEXT:    dls lr, lr
 ; CHECK-NEXT:  .LBB0_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vctp.32 r2
 ; CHECK-NEXT:    vmov q1, q0
-; CHECK-NEXT:    vldrw.u32 q0, [r0], #16
-; CHECK-NEXT:    vldrw.u32 q2, [r1], #16
+; CHECK-NEXT:    vpstt
+; CHECK-NEXT:    vldrwt.u32 q0, [r0], #16
+; CHECK-NEXT:    vldrwt.u32 q2, [r1], #16
+; CHECK-NEXT:    subs r2, #4
 ; CHECK-NEXT:    vmul.i32 q0, q2, q0
 ; CHECK-NEXT:    vadd.i32 q0, q0, q1
-; CHECK-NEXT:    letp lr, .LBB0_1
+; CHECK-NEXT:    le lr, .LBB0_1
 ; CHECK-NEXT:  @ %bb.2: @ %middle.block
 ; CHECK-NEXT:    vctp.32 r3
 ; CHECK-NEXT:    vpsel q0, q0, q1
@@ -79,18 +84,23 @@ define dso_local i32 @mul_reduce_add_const(i32* noalias nocapture readonly %a, i
 ; CHECK-NEXT:    bxeq lr
 ; CHECK-NEXT:    push {r7, lr}
 ; CHECK-NEXT:    adds r1, r2, #3
+; CHECK-NEXT:    movs r3, #1
 ; CHECK-NEXT:    bic r1, r1, #3
 ; CHECK-NEXT:    vmov.i32 q0, #0x0
 ; CHECK-NEXT:    subs r1, #4
+; CHECK-NEXT:    add.w lr, r3, r1, lsr #2
 ; CHECK-NEXT:    lsrs r1, r1, #2
 ; CHECK-NEXT:    sub.w r1, r2, r1, lsl #2
-; CHECK-NEXT:    dlstp.32 lr, r2
+; CHECK-NEXT:    dls lr, lr
 ; CHECK-NEXT:  .LBB1_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vctp.32 r2
 ; CHECK-NEXT:    vmov q1, q0
-; CHECK-NEXT:    vldrw.u32 q0, [r0], #16
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vldrwt.u32 q0, [r0], #16
+; CHECK-NEXT:    subs r2, #4
 ; CHECK-NEXT:    vadd.i32 q0, q0, q1
-; CHECK-NEXT:    letp lr, .LBB1_1
+; CHECK-NEXT:    le lr, .LBB1_1
 ; CHECK-NEXT:  @ %bb.2: @ %middle.block
 ; CHECK-NEXT:    vctp.32 r1
 ; CHECK-NEXT:    vpsel q0, q0, q1
@@ -142,18 +152,23 @@ define dso_local i32 @add_reduce_add_const(i32* noalias nocapture readonly %a, i
 ; CHECK-NEXT:    bxeq lr
 ; CHECK-NEXT:    push {r7, lr}
 ; CHECK-NEXT:    adds r1, r2, #3
+; CHECK-NEXT:    movs r3, #1
 ; CHECK-NEXT:    bic r1, r1, #3
 ; CHECK-NEXT:    vmov.i32 q0, #0x0
 ; CHECK-NEXT:    subs r1, #4
+; CHECK-NEXT:    add.w lr, r3, r1, lsr #2
 ; CHECK-NEXT:    lsrs r1, r1, #2
 ; CHECK-NEXT:    sub.w r1, r2, r1, lsl #2
-; CHECK-NEXT:    dlstp.32 lr, r2
+; CHECK-NEXT:    dls lr, lr
 ; CHECK-NEXT:  .LBB2_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vctp.32 r2
 ; CHECK-NEXT:    vmov q1, q0
-; CHECK-NEXT:    vldrw.u32 q0, [r0], #16
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vldrwt.u32 q0, [r0], #16
+; CHECK-NEXT:    subs r2, #4
 ; CHECK-NEXT:    vadd.i32 q0, q0, q1
-; CHECK-NEXT:    letp lr, .LBB2_1
+; CHECK-NEXT:    le lr, .LBB2_1
 ; CHECK-NEXT:  @ %bb.2: @ %middle.block
 ; CHECK-NEXT:    vctp.32 r1
 ; CHECK-NEXT:    vpsel q0, q0, q1

diff  --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wlstp.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wlstp.mir
index 76d440d3df3b..7efd5efc39f6 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wlstp.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wlstp.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - | FileCheck %s
+# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - --verify-machineinstrs | FileCheck %s
 
 --- |
   define dso_local arm_aapcs_vfpcc void @test_wlstp8(i8* noalias nocapture %a, i8* noalias nocapture readonly %b, i8* noalias nocapture readonly %c, i32 %N) {
@@ -425,8 +425,13 @@ body:             |
   ; CHECK:   frame-setup CFI_INSTRUCTION def_cfa_offset 8
   ; CHECK:   frame-setup CFI_INSTRUCTION offset $lr, -4
   ; CHECK:   frame-setup CFI_INSTRUCTION offset $r7, -8
+  ; CHECK:   renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14, $noreg
+  ; CHECK:   renamable $r3 = t2BICri killed renamable $r3, 3, 14, $noreg, $noreg
+  ; CHECK:   renamable $r12 = t2SUBri killed renamable $r3, 4, 14, $noreg, $noreg
+  ; CHECK:   renamable $r3, dead $cpsr = tMOVi8 1, 14, $noreg
+  ; CHECK:   renamable $lr = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14, $noreg, $noreg
   ; CHECK:   renamable $r12 = t2MOVi 0, 14, $noreg, $noreg
-  ; CHECK:   $lr = MVE_WLSTP_32 $r2, %bb.1
+  ; CHECK:   $lr = t2WLS killed renamable $lr, %bb.1
   ; CHECK:   tB %bb.4, 14, $noreg
   ; CHECK: bb.1.vector.ph:
   ; CHECK:   successors: %bb.2(0x80000000)
@@ -436,15 +441,18 @@ body:             |
   ; CHECK:   successors: %bb.3(0x04000000), %bb.2(0x7c000000)
   ; CHECK:   liveins: $lr, $q1, $r0, $r1, $r2
   ; CHECK:   $q0 = MVE_VORR killed $q1, killed $q1, 0, $noreg, undef $q0
-  ; CHECK:   renamable $q1 = MVE_VLDRWU32 renamable $r0, 0, 0, $noreg :: (load 16 from %ir.lsr.iv24, align 4)
-  ; CHECK:   renamable $q2 = MVE_VLDRWU32 renamable $r1, 0, 0, killed $noreg :: (load 16 from %ir.lsr.iv1, align 4)
+  ; CHECK:   renamable $vpr = MVE_VCTP32 $r2, 0, $noreg
+  ; CHECK:   MVE_VPST 4, implicit $vpr
+  ; CHECK:   renamable $q1 = MVE_VLDRWU32 renamable $r0, 0, 1, renamable $vpr :: (load 16 from %ir.lsr.iv24, align 4)
+  ; CHECK:   renamable $q2 = MVE_VLDRWU32 renamable $r1, 0, 1, renamable $vpr :: (load 16 from %ir.lsr.iv1, align 4)
   ; CHECK:   $r3 = tMOVr $r2, 14, $noreg
   ; CHECK:   renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, undef renamable $q1
   ; CHECK:   renamable $r0, dead $cpsr = tADDi8 killed renamable $r0, 16, 14, $noreg
   ; CHECK:   renamable $r1, dead $cpsr = tADDi8 killed renamable $r1, 16, 14, $noreg
   ; CHECK:   renamable $r2, dead $cpsr = tSUBi8 killed $r2, 4, 14, $noreg
-  ; CHECK:   renamable $q1 = nsw MVE_VADDi32 killed renamable $q1, renamable $q0, 0, $noreg, undef renamable $q1
-  ; CHECK:   $lr = MVE_LETP killed renamable $lr, %bb.2
+  ; CHECK:   MVE_VPST 8, implicit $vpr
+  ; CHECK:   renamable $q1 = nsw MVE_VADDi32 killed renamable $q1, renamable $q0, 0, killed renamable $vpr, undef renamable $q1
+  ; CHECK:   $lr = t2LEUpdate killed renamable $lr, %bb.2
   ; CHECK: bb.3.middle.block:
   ; CHECK:   successors: %bb.4(0x80000000)
   ; CHECK:   liveins: $q0, $q1, $r3
@@ -486,13 +494,14 @@ body:             |
     renamable $vpr = MVE_VCTP32 $r2, 0, $noreg
     MVE_VPST 4, implicit $vpr
     renamable $q1 = MVE_VLDRWU32 renamable $r0, 0, 1, renamable $vpr :: (load 16 from %ir.lsr.iv24, align 4)
-    renamable $q2 = MVE_VLDRWU32 renamable $r1, 0, 1, killed renamable $vpr :: (load 16 from %ir.lsr.iv1, align 4)
+    renamable $q2 = MVE_VLDRWU32 renamable $r1, 0, 1, renamable $vpr :: (load 16 from %ir.lsr.iv1, align 4)
     $r3 = tMOVr $r2, 14, $noreg
     renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, undef renamable $q1
     renamable $r0, dead $cpsr = tADDi8 killed renamable $r0, 16, 14, $noreg
     renamable $r1, dead $cpsr = tADDi8 killed renamable $r1, 16, 14, $noreg
     renamable $r2, dead $cpsr = tSUBi8 killed $r2, 4, 14, $noreg
-    renamable $q1 = nsw MVE_VADDi32 killed renamable $q1, renamable $q0, 0, $noreg, undef renamable $q1
+    MVE_VPST 8, implicit $vpr
+    renamable $q1 = nsw MVE_VADDi32 killed renamable $q1, renamable $q0, 0, renamable $vpr, undef renamable $q1
     renamable $lr = t2LoopDec killed renamable $lr, 1
     t2LoopEnd renamable $lr, %bb.2, implicit-def dead $cpsr
     tB %bb.3, 14, $noreg

diff  --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-liveout-lsr-shift.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-liveout-lsr-shift.mir
new file mode 100644
index 000000000000..3ef6e117f1f1
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-liveout-lsr-shift.mir
@@ -0,0 +1,214 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - --verify-machineinstrs | FileCheck %s
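+
+# The residual element count for the middle block is computed with a
+# mismatched shift (lshr 2 feeding a shl 3), so the vctp16 operand in the exit
+# block is not equivalent to the one used inside the loop. The CHECK lines
+# expect the loop to be left as a dls/le loop.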
+
+--- |
+  define dso_local arm_aapcs_vfpcc signext i16 @wrong_liveout_shift(i8* nocapture readonly %b, i8* nocapture readonly %c, i32 %N) {
+  entry:
+    %cmp11 = icmp eq i32 %N, 0
+    %0 = add i32 %N, 7
+    %1 = lshr i32 %0, 3
+    %2 = shl nuw i32 %1, 3
+    %3 = add i32 %2, -8
+    %4 = lshr i32 %3, 2
+    %5 = add nuw nsw i32 %4, 1
+    br i1 %cmp11, label %for.cond.cleanup, label %vector.ph
+
+  vector.ph:                                        ; preds = %entry
+    call void @llvm.set.loop.iterations.i32(i32 %5)
+    %6 = shl i32 %4, 3
+    %7 = sub i32 %N, %6
+    br label %vector.body
+
+  vector.body:                                      ; preds = %vector.body, %vector.ph
+    %lsr.iv20 = phi i8* [ %scevgep21, %vector.body ], [ %c, %vector.ph ]
+    %lsr.iv = phi i8* [ %scevgep, %vector.body ], [ %b, %vector.ph ]
+    %vec.phi = phi <8 x i16> [ <i16 32767, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, %vector.ph ], [ %15, %vector.body ]
+    %8 = phi i32 [ %5, %vector.ph ], [ %16, %vector.body ]
+    %9 = phi i32 [ %N, %vector.ph ], [ %11, %vector.body ]
+    %lsr.iv2022 = bitcast i8* %lsr.iv20 to <8 x i8>*
+    %lsr.iv19 = bitcast i8* %lsr.iv to <8 x i8>*
+    %10 = call <8 x i1> @llvm.arm.mve.vctp16(i32 %9)
+    %11 = sub i32 %9, 8
+    %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %lsr.iv19, i32 1, <8 x i1> %10, <8 x i8> undef)
+    %12 = zext <8 x i8> %wide.masked.load to <8 x i16>
+    %wide.masked.load16 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %lsr.iv2022, i32 1, <8 x i1> %10, <8 x i8> undef)
+    %13 = zext <8 x i8> %wide.masked.load16 to <8 x i16>
+    %14 = mul nuw <8 x i16> %13, %12
+    %15 = sub <8 x i16> %vec.phi, %14
+    %scevgep = getelementptr i8, i8* %lsr.iv, i32 8
+    %scevgep21 = getelementptr i8, i8* %lsr.iv20, i32 8
+    %16 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %8, i32 1)
+    %17 = icmp ne i32 %16, 0
+    br i1 %17, label %vector.body, label %middle.block
+
+  middle.block:                                     ; preds = %vector.body
+    %vec.phi.lcssa = phi <8 x i16> [ %vec.phi, %vector.body ]
+    %.lcssa = phi <8 x i16> [ %15, %vector.body ]
+    %18 = call <8 x i1> @llvm.arm.mve.vctp16(i32 %7)
+    %19 = select <8 x i1> %18, <8 x i16> %.lcssa, <8 x i16> %vec.phi.lcssa
+    %20 = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %19)
+    br label %for.cond.cleanup
+
+  for.cond.cleanup:                                 ; preds = %middle.block, %entry
+    %a.0.lcssa = phi i16 [ 32767, %entry ], [ %20, %middle.block ]
+    ret i16 %a.0.lcssa
+  }
+  declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32 immarg, <8 x i1>, <8 x i8>)
+  declare i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16>)
+  declare void @llvm.set.loop.iterations.i32(i32)
+  declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
+  declare <8 x i1> @llvm.arm.mve.vctp16(i32)
+
+...
+---
+name:            wrong_liveout_shift
+alignment:       16
+exposesReturnsTwice: false
+legalized:       false
+regBankSelected: false
+selected:        false
+failedISel:      false
+tracksRegLiveness: true
+hasWinCFI:       false
+registers:       []
+liveins:
+  - { reg: '$r0', virtual-reg: '' }
+  - { reg: '$r1', virtual-reg: '' }
+  - { reg: '$r2', virtual-reg: '' }
+frameInfo:
+  isFrameAddressTaken: false
+  isReturnAddressTaken: false
+  hasStackMap:     false
+  hasPatchPoint:   false
+  stackSize:       8
+  offsetAdjustment: 0
+  maxAlignment:    4
+  adjustsStack:    false
+  hasCalls:        false
+  stackProtector:  ''
+  maxCallFrameSize: 0
+  cvBytesOfCalleeSavedRegisters: 0
+  hasOpaqueSPAdjustment: false
+  hasVAStart:      false
+  hasMustTailInVarArgFunc: false
+  localFrameSize:  0
+  savePoint:       ''
+  restorePoint:    ''
+fixedStack:      []
+stack:
+  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: true,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+callSites:       []
+constants:
+  - id:              0
+    value:           '<8 x i16> <i16 32767, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>'
+    alignment:       16
+    isTargetSpecific: false
+machineFunctionInfo: {}
+body:             |
+  ; CHECK-LABEL: name: wrong_liveout_shift
+  ; CHECK: bb.0.entry:
+  ; CHECK:   successors: %bb.1(0x80000000)
+  ; CHECK:   liveins: $lr, $r0, $r1, $r2
+  ; CHECK:   tCMPi8 renamable $r2, 0, 14, $noreg, implicit-def $cpsr
+  ; CHECK:   t2IT 0, 2, implicit-def $itstate
+  ; CHECK:   renamable $r0 = t2MOVi16 32767, 0, $cpsr, implicit killed $r0, implicit $itstate
+  ; CHECK:   renamable $r0 = tSXTH killed renamable $r0, 0, $cpsr, implicit killed $r0, implicit $itstate
+  ; CHECK:   tBX_RET 0, killed $cpsr, implicit $r0, implicit killed $itstate
+  ; CHECK:   frame-setup tPUSH 14, $noreg, killed $lr, implicit-def $sp, implicit $sp
+  ; CHECK:   frame-setup CFI_INSTRUCTION def_cfa_offset 8
+  ; CHECK:   frame-setup CFI_INSTRUCTION offset $lr, -4
+  ; CHECK:   frame-setup CFI_INSTRUCTION offset $r7, -8
+  ; CHECK:   dead $r7 = frame-setup tMOVr $sp, 14, $noreg
+  ; CHECK:   frame-setup CFI_INSTRUCTION def_cfa_register $r7
+  ; CHECK:   renamable $r3, dead $cpsr = tADDi3 renamable $r2, 7, 14, $noreg
+  ; CHECK:   renamable $r3 = t2BICri killed renamable $r3, 7, 14, $noreg, $noreg
+  ; CHECK:   renamable $r12 = t2SUBri killed renamable $r3, 8, 14, $noreg, $noreg
+  ; CHECK:   renamable $r3, dead $cpsr = tMOVi8 1, 14, $noreg
+  ; CHECK:   renamable $lr = nuw nsw t2ADDrs killed renamable $r3, renamable $r12, 27, 14, $noreg, $noreg
+  ; CHECK:   renamable $r3 = tLEApcrel %const.0, 14, $noreg
+  ; CHECK:   renamable $r12 = t2LSRri killed renamable $r12, 2, 14, $noreg, $noreg
+  ; CHECK:   renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg :: (load 16 from constant-pool)
+  ; CHECK:   renamable $r3 = t2SUBrs renamable $r2, killed renamable $r12, 26, 14, $noreg, $noreg
+  ; CHECK:   $lr = t2DLS killed renamable $lr
+  ; CHECK: bb.1.vector.body:
+  ; CHECK:   successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+  ; CHECK:   liveins: $lr, $q0, $r0, $r1, $r2, $r3
+  ; CHECK:   renamable $vpr = MVE_VCTP16 renamable $r2, 0, $noreg
+  ; CHECK:   $q1 = MVE_VORR killed $q0, killed $q0, 0, $noreg, undef $q1
+  ; CHECK:   MVE_VPST 4, implicit $vpr
+  ; CHECK:   renamable $r0, renamable $q0 = MVE_VLDRBU16_post killed renamable $r0, 8, 1, renamable $vpr :: (load 8 from %ir.lsr.iv19, align 1)
+  ; CHECK:   renamable $r1, renamable $q2 = MVE_VLDRBU16_post killed renamable $r1, 8, 1, killed renamable $vpr :: (load 8 from %ir.lsr.iv2022, align 1)
+  ; CHECK:   renamable $q0 = nuw MVE_VMULi16 killed renamable $q2, killed renamable $q0, 0, $noreg, undef renamable $q0
+  ; CHECK:   renamable $q0 = MVE_VSUBi16 renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
+  ; CHECK:   $lr = t2LEUpdate killed renamable $lr, %bb.1
+  ; CHECK: bb.2.middle.block:
+  ; CHECK:   liveins: $q0, $q1, $r3
+  ; CHECK:   renamable $vpr = MVE_VCTP16 killed renamable $r3, 0, $noreg
+  ; CHECK:   renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, killed renamable $vpr
+  ; CHECK:   renamable $r0 = MVE_VADDVu16no_acc killed renamable $q0, 0, $noreg
+  ; CHECK:   $sp = t2LDMIA_UPD $sp, 14, $noreg, def $r7, def $lr
+  ; CHECK:   renamable $r0 = tSXTH killed renamable $r0, 14, $noreg
+  ; CHECK:   tBX_RET 14, $noreg, implicit killed $r0
+  ; CHECK: bb.3 (align 16):
+  ; CHECK:   CONSTPOOL_ENTRY 0, %const.0, 16
+  bb.0.entry:
+    successors: %bb.1(0x80000000)
+    liveins: $r0, $r1, $r2, $lr
+
+    tCMPi8 renamable $r2, 0, 14, $noreg, implicit-def $cpsr
+    t2IT 0, 2, implicit-def $itstate
+    renamable $r0 = t2MOVi16 32767, 0, $cpsr, implicit killed $r0, implicit $itstate
+    renamable $r0 = tSXTH killed renamable $r0, 0, $cpsr, implicit killed $r0, implicit $itstate
+    tBX_RET 0, killed $cpsr, implicit $r0, implicit killed $itstate
+    frame-setup tPUSH 14, $noreg, killed $lr, implicit-def $sp, implicit $sp
+    frame-setup CFI_INSTRUCTION def_cfa_offset 8
+    frame-setup CFI_INSTRUCTION offset $lr, -4
+    frame-setup CFI_INSTRUCTION offset $r7, -8
+    $r7 = frame-setup tMOVr $sp, 14, $noreg
+    frame-setup CFI_INSTRUCTION def_cfa_register $r7
+    renamable $r3, dead $cpsr = tADDi3 renamable $r2, 7, 14, $noreg
+    renamable $r3 = t2BICri killed renamable $r3, 7, 14, $noreg, $noreg
+    renamable $r12 = t2SUBri killed renamable $r3, 8, 14, $noreg, $noreg
+    renamable $r3, dead $cpsr = tMOVi8 1, 14, $noreg
+    renamable $lr = nuw nsw t2ADDrs killed renamable $r3, renamable $r12, 27, 14, $noreg, $noreg
+    renamable $r3 = tLEApcrel %const.0, 14, $noreg
+    renamable $r12 = t2LSRri killed renamable $r12, 2, 14, $noreg, $noreg
+    renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg :: (load 16 from constant-pool)
+    renamable $r3 = t2SUBrs renamable $r2, killed renamable $r12, 26, 14, $noreg, $noreg
+    t2DoLoopStart renamable $lr
+
+  bb.1.vector.body:
+    successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+    liveins: $lr, $q0, $r0, $r1, $r2, $r3
+
+    renamable $vpr = MVE_VCTP16 renamable $r2, 0, $noreg
+    $q1 = MVE_VORR killed $q0, $q0, 0, $noreg, undef $q1
+    MVE_VPST 4, implicit $vpr
+    renamable $r0, renamable $q0 = MVE_VLDRBU16_post killed renamable $r0, 8, 1, renamable $vpr :: (load 8 from %ir.lsr.iv19, align 1)
+    renamable $r1, renamable $q2 = MVE_VLDRBU16_post killed renamable $r1, 8, 1, killed renamable $vpr :: (load 8 from %ir.lsr.iv2022, align 1)
+    renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 8, 14, $noreg
+    renamable $q0 = nuw MVE_VMULi16 killed renamable $q2, killed renamable $q0, 0, $noreg, undef renamable $q0
+    renamable $lr = t2LoopDec killed renamable $lr, 1
+    renamable $q0 = MVE_VSUBi16 renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
+    t2LoopEnd renamable $lr, %bb.1, implicit-def dead $cpsr
+    tB %bb.2, 14, $noreg
+
+  bb.2.middle.block:
+    liveins: $q0, $q1, $r3
+
+    renamable $vpr = MVE_VCTP16 killed renamable $r3, 0, $noreg
+    renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, killed renamable $vpr
+    renamable $r0 = MVE_VADDVu16no_acc killed renamable $q0, 0, $noreg
+    $sp = t2LDMIA_UPD $sp, 14, $noreg, def $r7, def $lr
+    renamable $r0 = tSXTH killed renamable $r0, 14, $noreg
+    tBX_RET 14, $noreg, implicit killed $r0
+
+  bb.3 (align 16):
+    CONSTPOOL_ENTRY 0, %const.0, 16
+
+...

diff  --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-vctp-opcode-liveout.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-vctp-opcode-liveout.mir
new file mode 100644
index 000000000000..8254c406bbd1
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-vctp-opcode-liveout.mir
@@ -0,0 +1,210 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - | FileCheck %s
+
+# I think this should be equivalent, but the calculation in the middle block
+# is too complex to process for now.
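+# Instead of a vctp, the exit predicate is rebuilt with a vdup/vcmp of
+# splatted values, which the liveout check does not recognise, so the CHECK
+# lines expect the loop to remain a dls/le loop.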
+
+--- |
+  define dso_local i32 @wrong_vctp_liveout(i16* nocapture readonly %a, i16* nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
+  entry:
+    %cmp9 = icmp eq i32 %N, 0
+    %tmp = add i32 %N, 3
+    %tmp1 = lshr i32 %tmp, 2
+    %tmp2 = shl nuw i32 %tmp1, 2
+    %tmp3 = add i32 %tmp2, -4
+    %tmp4 = lshr i32 %tmp3, 2
+    %tmp5 = add nuw nsw i32 %tmp4, 1
+    br i1 %cmp9, label %for.cond.cleanup, label %vector.ph
+
+  vector.ph:                                        ; preds = %entry
+    call void @llvm.set.loop.iterations.i32(i32 %tmp5)
+    br label %vector.body
+
+  vector.body:                                      ; preds = %vector.body, %vector.ph
+    %lsr.iv1 = phi i32 [ %lsr.iv.next, %vector.body ], [ %tmp5, %vector.ph ]
+    %lsr.iv18 = phi i16* [ %scevgep19, %vector.body ], [ %b, %vector.ph ]
+    %lsr.iv = phi i16* [ %scevgep, %vector.body ], [ %a, %vector.ph ]
+    %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %tmp13, %vector.body ]
+    %tmp7 = phi i32 [ %N, %vector.ph ], [ %tmp9, %vector.body ]
+    %lsr.iv17 = bitcast i16* %lsr.iv to <4 x i16>*
+    %lsr.iv1820 = bitcast i16* %lsr.iv18 to <4 x i16>*
+    %tmp8 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %tmp7)
+    %tmp9 = sub i32 %tmp7, 4
+    %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv17, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
+    %tmp10 = sext <4 x i16> %wide.masked.load to <4 x i32>
+    %wide.masked.load14 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv1820, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
+    %tmp11 = sext <4 x i16> %wide.masked.load14 to <4 x i32>
+    %tmp12 = mul nsw <4 x i32> %tmp11, %tmp10
+    %tmp13 = add <4 x i32> %tmp12, %vec.phi
+    %scevgep = getelementptr i16, i16* %lsr.iv, i32 4
+    %scevgep19 = getelementptr i16, i16* %lsr.iv18, i32 4
+    %tmp14 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv1, i32 1)
+    %tmp15 = icmp ne i32 %tmp14, 0
+    %lsr.iv.next = add nsw i32 %lsr.iv1, -1
+    br i1 %tmp15, label %vector.body, label %middle.block
+
+  middle.block:                                     ; preds = %vector.body
+    %0 = add i32 %tmp9, 4
+    %insert.idx = insertelement <4 x i32> undef, i32 %0, i32 0
+    %idx.splat = shufflevector <4 x i32> %insert.idx, <4 x i32> undef, <4 x i32> zeroinitializer
+    %n.minusone = add i32 %N, -1
+    %insert.n = insertelement <4 x i32> undef, i32 %n.minusone, i32 0
+    %n.splat = shufflevector <4 x i32> %insert.n, <4 x i32> undef, <4 x i32> zeroinitializer
+    %tmp16 = icmp ult <4 x i32> %idx.splat, %n.splat
+    %tmp17 = select <4 x i1> %tmp16, <4 x i32> %tmp13, <4 x i32> %vec.phi
+    %tmp18 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %tmp17)
+    br label %for.cond.cleanup
+
+  for.cond.cleanup:                                 ; preds = %middle.block, %entry
+    %res.0.lcssa = phi i32 [ 0, %entry ], [ %tmp18, %middle.block ]
+    ret i32 %res.0.lcssa
+  }
+  declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>) #1
+  declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>) #2
+  declare void @llvm.set.loop.iterations.i32(i32) #3
+  declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #3
+  declare <4 x i1> @llvm.arm.mve.vctp32(i32) #4
+
+...
+---
+name:            wrong_vctp_liveout
+alignment:       2
+exposesReturnsTwice: false
+legalized:       false
+regBankSelected: false
+selected:        false
+failedISel:      false
+tracksRegLiveness: true
+hasWinCFI:       false
+registers:       []
+liveins:
+  - { reg: '$r0', virtual-reg: '' }
+  - { reg: '$r1', virtual-reg: '' }
+  - { reg: '$r2', virtual-reg: '' }
+frameInfo:
+  isFrameAddressTaken: false
+  isReturnAddressTaken: false
+  hasStackMap:     false
+  hasPatchPoint:   false
+  stackSize:       8
+  offsetAdjustment: 0
+  maxAlignment:    4
+  adjustsStack:    false
+  hasCalls:        false
+  stackProtector:  ''
+  maxCallFrameSize: 0
+  cvBytesOfCalleeSavedRegisters: 0
+  hasOpaqueSPAdjustment: false
+  hasVAStart:      false
+  hasMustTailInVarArgFunc: false
+  localFrameSize:  0
+  savePoint:       ''
+  restorePoint:    ''
+fixedStack:      []
+stack:
+  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+callSites:       []
+constants:       []
+machineFunctionInfo: {}
+body:             |
+  ; CHECK-LABEL: name: wrong_vctp_liveout
+  ; CHECK: bb.0.entry:
+  ; CHECK:   successors: %bb.1(0x80000000)
+  ; CHECK:   liveins: $lr, $r0, $r1, $r2, $r7
+  ; CHECK:   tCMPi8 renamable $r2, 0, 14, $noreg, implicit-def $cpsr
+  ; CHECK:   t2IT 0, 4, implicit-def $itstate
+  ; CHECK:   renamable $r0 = tMOVi8 $noreg, 0, 0, $cpsr, implicit killed $r0, implicit $itstate
+  ; CHECK:   tBX_RET 0, killed $cpsr, implicit $r0, implicit killed $itstate
+  ; CHECK:   frame-setup tPUSH 14, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+  ; CHECK:   frame-setup CFI_INSTRUCTION def_cfa_offset 8
+  ; CHECK:   frame-setup CFI_INSTRUCTION offset $lr, -4
+  ; CHECK:   frame-setup CFI_INSTRUCTION offset $r7, -8
+  ; CHECK:   renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14, $noreg
+  ; CHECK:   renamable $q1 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q1
+  ; CHECK:   renamable $r3 = t2BICri killed renamable $r3, 3, 14, $noreg, $noreg
+  ; CHECK:   renamable $r12 = t2SUBri killed renamable $r3, 4, 14, $noreg, $noreg
+  ; CHECK:   renamable $r3, dead $cpsr = tMOVi8 1, 14, $noreg
+  ; CHECK:   renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14, $noreg, $noreg
+  ; CHECK:   dead $lr = t2DLS renamable $r3
+  ; CHECK:   $r12 = tMOVr killed $r3, 14, $noreg
+  ; CHECK:   $r3 = tMOVr $r2, 14, $noreg
+  ; CHECK: bb.1.vector.body:
+  ; CHECK:   successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+  ; CHECK:   liveins: $q1, $r0, $r1, $r2, $r3, $r12
+  ; CHECK:   renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg
+  ; CHECK:   $q0 = MVE_VORR killed $q1, killed $q1, 0, $noreg, undef $q0
+  ; CHECK:   MVE_VPST 4, implicit $vpr
+  ; CHECK:   renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr :: (load 8 from %ir.lsr.iv17, align 2)
+  ; CHECK:   renamable $r1, renamable $q2 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, killed renamable $vpr :: (load 8 from %ir.lsr.iv1820, align 2)
+  ; CHECK:   $lr = tMOVr $r12, 14, $noreg
+  ; CHECK:   renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, undef renamable $q1
+  ; CHECK:   renamable $r12 = nsw t2SUBri killed $r12, 1, 14, $noreg, $noreg
+  ; CHECK:   renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14, $noreg
+  ; CHECK:   renamable $q1 = MVE_VADDi32 killed renamable $q1, renamable $q0, 0, $noreg, undef renamable $q1
+  ; CHECK:   dead $lr = t2LEUpdate killed renamable $lr, %bb.1
+  ; CHECK: bb.2.middle.block:
+  ; CHECK:   liveins: $q0, $q1, $r2, $r3
+  ; CHECK:   renamable $r0, dead $cpsr = tSUBi3 killed renamable $r2, 1, 14, $noreg
+  ; CHECK:   renamable $q2 = MVE_VDUP32 killed renamable $r0, 0, $noreg, undef renamable $q2
+  ; CHECK:   renamable $r0, dead $cpsr = tADDi3 killed renamable $r3, 4, 14, $noreg
+  ; CHECK:   renamable $vpr = MVE_VCMPu32r killed renamable $q2, killed renamable $r0, 8, 0, $noreg
+  ; CHECK:   renamable $q0 = MVE_VPSEL killed renamable $q1, killed renamable $q0, 0, killed renamable $vpr
+  ; CHECK:   renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg
+  ; CHECK:   tPOP_RET 14, $noreg, def $r7, def $pc, implicit killed $r0
+  bb.0.entry:
+    successors: %bb.1(0x80000000)
+    liveins: $r0, $r1, $r2, $lr, $r7
+
+    tCMPi8 renamable $r2, 0, 14, $noreg, implicit-def $cpsr
+    t2IT 0, 4, implicit-def $itstate
+    renamable $r0 = tMOVi8 $noreg, 0, 0, $cpsr, implicit killed $r0, implicit $itstate
+    tBX_RET 0, killed $cpsr, implicit $r0, implicit killed $itstate
+    frame-setup tPUSH 14, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+    frame-setup CFI_INSTRUCTION def_cfa_offset 8
+    frame-setup CFI_INSTRUCTION offset $lr, -4
+    frame-setup CFI_INSTRUCTION offset $r7, -8
+    renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14, $noreg
+    renamable $q1 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q1
+    renamable $r3 = t2BICri killed renamable $r3, 3, 14, $noreg, $noreg
+    renamable $r12 = t2SUBri killed renamable $r3, 4, 14, $noreg, $noreg
+    renamable $r3, dead $cpsr = tMOVi8 1, 14, $noreg
+    renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14, $noreg, $noreg
+    t2DoLoopStart renamable $r3
+    $r12 = tMOVr killed $r3, 14, $noreg
+    $r3 = tMOVr $r2, 14, $noreg
+
+  bb.1.vector.body:
+    successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+    liveins: $q1, $r0, $r1, $r2, $r3, $r12
+
+    renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg
+    $q0 = MVE_VORR killed $q1, $q1, 0, $noreg, undef $q0
+    MVE_VPST 4, implicit $vpr
+    renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr :: (load 8 from %ir.lsr.iv17, align 2)
+    renamable $r1, renamable $q2 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, killed renamable $vpr :: (load 8 from %ir.lsr.iv1820, align 2)
+    $lr = tMOVr $r12, 14, $noreg
+    renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, undef renamable $q1
+    renamable $r12 = nsw t2SUBri killed $r12, 1, 14, $noreg, $noreg
+    renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14, $noreg
+    renamable $q1 = MVE_VADDi32 killed renamable $q1, renamable $q0, 0, $noreg, undef renamable $q1
+    renamable $lr = t2LoopDec killed renamable $lr, 1
+    t2LoopEnd killed renamable $lr, %bb.1, implicit-def dead $cpsr
+    tB %bb.2, 14, $noreg
+
+  bb.2.middle.block:
+    liveins: $q0, $q1, $r2, $r3
+
+    renamable $r0, dead $cpsr = tSUBi3 killed renamable $r2, 1, 14, $noreg
+    renamable $q2 = MVE_VDUP32 killed renamable $r0, 0, $noreg, undef renamable $q2
+    renamable $r0, dead $cpsr = tADDi3 killed renamable $r3, 4, 14, $noreg
+    renamable $vpr = MVE_VCMPu32r killed renamable $q2, killed renamable $r0, 8, 0, $noreg
+    renamable $q0 = MVE_VPSEL killed renamable $q1, killed renamable $q0, 0, killed renamable $vpr
+    renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg
+    tPOP_RET 14, $noreg, def $r7, def $pc, implicit killed $r0
+
+...

diff  --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-vctp-operand-liveout.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-vctp-operand-liveout.mir
new file mode 100644
index 000000000000..affe2d2ee758
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-vctp-operand-liveout.mir
@@ -0,0 +1,194 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - | FileCheck %s
+
+# The VCTP uses r2, which is redefined in the loop.
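+# As a result, the vctp32 in the middle block sees the decremented value of r2
+# rather than the element count used by the vctp inside the loop, so the CHECK
+# lines expect the loop to be left as a dls/le loop.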
+
+--- |
+  define dso_local i32 @wrong_vctp_liveout(i16* nocapture readonly %a, i16* nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
+  entry:
+    %cmp9 = icmp eq i32 %N, 0
+    %0 = add i32 %N, 3
+    %1 = lshr i32 %0, 2
+    %2 = shl nuw i32 %1, 2
+    %3 = add i32 %2, -4
+    %4 = lshr i32 %3, 2
+    %5 = add nuw nsw i32 %4, 1
+    br i1 %cmp9, label %for.cond.cleanup, label %vector.ph
+
+  vector.ph:                                        ; preds = %entry
+    call void @llvm.set.loop.iterations.i32(i32 %5)
+    br label %vector.body
+
+  vector.body:                                      ; preds = %vector.body, %vector.ph
+    %lsr.iv1 = phi i32 [ %lsr.iv.next, %vector.body ], [ %5, %vector.ph ]
+    %lsr.iv18 = phi i16* [ %scevgep19, %vector.body ], [ %b, %vector.ph ]
+    %lsr.iv = phi i16* [ %scevgep, %vector.body ], [ %a, %vector.ph ]
+    %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %12, %vector.body ]
+    %6 = phi i32 [ %N, %vector.ph ], [ %8, %vector.body ]
+    %lsr.iv17 = bitcast i16* %lsr.iv to <4 x i16>*
+    %lsr.iv1820 = bitcast i16* %lsr.iv18 to <4 x i16>*
+    %7 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %6)
+    %8 = sub i32 %6, 4
+    %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv17, i32 2, <4 x i1> %7, <4 x i16> undef)
+    %9 = sext <4 x i16> %wide.masked.load to <4 x i32>
+    %wide.masked.load14 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv1820, i32 2, <4 x i1> %7, <4 x i16> undef)
+    %10 = sext <4 x i16> %wide.masked.load14 to <4 x i32>
+    %11 = mul nsw <4 x i32> %10, %9
+    %12 = add <4 x i32> %11, %vec.phi
+    %scevgep = getelementptr i16, i16* %lsr.iv, i32 4
+    %scevgep19 = getelementptr i16, i16* %lsr.iv18, i32 4
+    %13 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv1, i32 1)
+    %14 = icmp ne i32 %13, 0
+    %lsr.iv.next = add nsw i32 %lsr.iv1, -1
+    br i1 %14, label %vector.body, label %middle.block
+
+  middle.block:                                     ; preds = %vector.body
+    %15 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %8)
+    %16 = select <4 x i1> %15, <4 x i32> %12, <4 x i32> %vec.phi
+    %17 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %16)
+    br label %for.cond.cleanup
+
+  for.cond.cleanup:                                 ; preds = %middle.block, %entry
+    %res.0.lcssa = phi i32 [ 0, %entry ], [ %17, %middle.block ]
+    ret i32 %res.0.lcssa
+  }
+  declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>) #1
+  declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>) #2
+  declare void @llvm.set.loop.iterations.i32(i32) #3
+  declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #3
+  declare <4 x i1> @llvm.arm.mve.vctp32(i32) #4
+...
+---
+name:            wrong_vctp_liveout
+alignment:       2
+exposesReturnsTwice: false
+legalized:       false
+regBankSelected: false
+selected:        false
+failedISel:      false
+tracksRegLiveness: true
+hasWinCFI:       false
+registers:       []
+liveins:
+  - { reg: '$r0', virtual-reg: '' }
+  - { reg: '$r1', virtual-reg: '' }
+  - { reg: '$r2', virtual-reg: '' }
+frameInfo:
+  isFrameAddressTaken: false
+  isReturnAddressTaken: false
+  hasStackMap:     false
+  hasPatchPoint:   false
+  stackSize:       8
+  offsetAdjustment: 0
+  maxAlignment:    4
+  adjustsStack:    false
+  hasCalls:        false
+  stackProtector:  ''
+  maxCallFrameSize: 0
+  cvBytesOfCalleeSavedRegisters: 0
+  hasOpaqueSPAdjustment: false
+  hasVAStart:      false
+  hasMustTailInVarArgFunc: false
+  localFrameSize:  0
+  savePoint:       ''
+  restorePoint:    ''
+fixedStack:      []
+stack:
+  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+callSites:       []
+constants:       []
+machineFunctionInfo: {}
+body:             |
+  ; CHECK-LABEL: name: wrong_vctp_liveout
+  ; CHECK: bb.0.entry:
+  ; CHECK:   successors: %bb.1(0x80000000)
+  ; CHECK:   liveins: $lr, $r0, $r1, $r2, $r7
+  ; CHECK:   tCMPi8 renamable $r2, 0, 14, $noreg, implicit-def $cpsr
+  ; CHECK:   t2IT 0, 4, implicit-def $itstate
+  ; CHECK:   renamable $r0 = tMOVi8 $noreg, 0, 0, $cpsr, implicit killed $r0, implicit $itstate
+  ; CHECK:   tBX_RET 0, killed $cpsr, implicit $r0, implicit killed $itstate
+  ; CHECK:   frame-setup tPUSH 14, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+  ; CHECK:   frame-setup CFI_INSTRUCTION def_cfa_offset 8
+  ; CHECK:   frame-setup CFI_INSTRUCTION offset $lr, -4
+  ; CHECK:   frame-setup CFI_INSTRUCTION offset $r7, -8
+  ; CHECK:   renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14, $noreg
+  ; CHECK:   renamable $q1 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q1
+  ; CHECK:   renamable $r3 = t2BICri killed renamable $r3, 3, 14, $noreg, $noreg
+  ; CHECK:   renamable $r12 = t2SUBri killed renamable $r3, 4, 14, $noreg, $noreg
+  ; CHECK:   renamable $r3, dead $cpsr = tMOVi8 1, 14, $noreg
+  ; CHECK:   renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14, $noreg, $noreg
+  ; CHECK:   dead $lr = t2DLS renamable $r12
+  ; CHECK:   $r3 = tMOVr killed $r12, 14, $noreg
+  ; CHECK: bb.1.vector.body:
+  ; CHECK:   successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+  ; CHECK:   liveins: $q1, $r0, $r1, $r2, $r3
+  ; CHECK:   renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg
+  ; CHECK:   $q0 = MVE_VORR killed $q1, killed $q1, 0, $noreg, undef $q0
+  ; CHECK:   MVE_VPST 4, implicit $vpr
+  ; CHECK:   renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr :: (load 8 from %ir.lsr.iv17, align 2)
+  ; CHECK:   renamable $r1, renamable $q2 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, killed renamable $vpr :: (load 8 from %ir.lsr.iv1820, align 2)
+  ; CHECK:   $lr = tMOVr $r3, 14, $noreg
+  ; CHECK:   renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, undef renamable $q1
+  ; CHECK:   renamable $r3, dead $cpsr = nsw tSUBi8 killed $r3, 1, 14, $noreg
+  ; CHECK:   renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14, $noreg
+  ; CHECK:   renamable $q1 = MVE_VADDi32 killed renamable $q1, renamable $q0, 0, $noreg, undef renamable $q1
+  ; CHECK:   dead $lr = t2LEUpdate killed renamable $lr, %bb.1
+  ; CHECK: bb.2.middle.block:
+  ; CHECK:   liveins: $q0, $q1, $r2
+  ; CHECK:   renamable $vpr = MVE_VCTP32 killed renamable $r2, 0, $noreg
+  ; CHECK:   renamable $q0 = MVE_VPSEL killed renamable $q1, killed renamable $q0, 0, killed renamable $vpr
+  ; CHECK:   renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg
+  ; CHECK:   tPOP_RET 14, $noreg, def $r7, def $pc, implicit killed $r0
+  bb.0.entry:
+    successors: %bb.1(0x80000000)
+    liveins: $r0, $r1, $r2, $lr, $r7
+
+    tCMPi8 renamable $r2, 0, 14, $noreg, implicit-def $cpsr
+    t2IT 0, 4, implicit-def $itstate
+    renamable $r0 = tMOVi8 $noreg, 0, 0, $cpsr, implicit killed $r0, implicit $itstate
+    tBX_RET 0, killed $cpsr, implicit $r0, implicit killed $itstate
+    frame-setup tPUSH 14, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+    frame-setup CFI_INSTRUCTION def_cfa_offset 8
+    frame-setup CFI_INSTRUCTION offset $lr, -4
+    frame-setup CFI_INSTRUCTION offset $r7, -8
+    renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14, $noreg
+    renamable $q1 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q1
+    renamable $r3 = t2BICri killed renamable $r3, 3, 14, $noreg, $noreg
+    renamable $r12 = t2SUBri killed renamable $r3, 4, 14, $noreg, $noreg
+    renamable $r3, dead $cpsr = tMOVi8 1, 14, $noreg
+    renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14, $noreg, $noreg
+    t2DoLoopStart renamable $r12
+    $r3 = tMOVr killed $r12, 14, $noreg
+
+  bb.1.vector.body:
+    successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+    liveins: $q1, $r0, $r1, $r2, $r3
+
+    renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg
+    $q0 = MVE_VORR killed $q1, $q1, 0, $noreg, undef $q0
+    MVE_VPST 4, implicit $vpr
+    renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr :: (load 8 from %ir.lsr.iv17, align 2)
+    renamable $r1, renamable $q2 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, killed renamable $vpr :: (load 8 from %ir.lsr.iv1820, align 2)
+    $lr = tMOVr $r3, 14, $noreg
+    renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, undef renamable $q1
+    renamable $r3, dead $cpsr = nsw tSUBi8 killed $r3, 1, 14, $noreg
+    renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14, $noreg
+    renamable $q1 = MVE_VADDi32 killed renamable $q1, renamable $q0, 0, $noreg, undef renamable $q1
+    renamable $lr = t2LoopDec killed renamable $lr, 1
+    t2LoopEnd killed renamable $lr, %bb.1, implicit-def dead $cpsr
+    tB %bb.2, 14, $noreg
+
+  bb.2.middle.block:
+    liveins: $q0, $q1, $r2
+
+    renamable $vpr = MVE_VCTP32 killed renamable $r2, 0, $noreg
+    renamable $q0 = MVE_VPSEL killed renamable $q1, killed renamable $q0, 0, killed renamable $vpr
+    renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg
+    tPOP_RET 14, $noreg, def $r7, def $pc, implicit killed $r0
+
+...

More information about the llvm-commits mailing list