[llvm] [AArch64] Restore Z-registers before P-registers (PR #79623)

via llvm-commits <llvm-commits at lists.llvm.org>
Fri Feb 9 06:50:29 PST 2024


https://github.com/CarolineConcatto updated https://github.com/llvm/llvm-project/pull/79623

From 73803bfdf553cda3b61e1a0855703855321c72ec Mon Sep 17 00:00:00 2001
From: Caroline Concatto <caroline.concatto at arm.com>
Date: Fri, 26 Jan 2024 13:06:25 +0000
Subject: [PATCH 1/3] [NFC] Remove reverse restore from epilogue for SVE registers

The reverse order for SVE register restores in the epilogue was originally
added for performance, but later work improved SVE frame restore on its
own, and the scheduler may in any case reorder the restores, undoing the
reversal.
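
For reference, here is a minimal sketch of the restore loop this patch
leaves behind (not the actual LLVM code: RegPairInfo and EmitMI below are
simplified stand-ins for the real pair descriptor and the MIB-building
lambda in AArch64FrameLowering.cpp):

#include <vector>

struct RegPairInfo {
  bool Scalable = false;                 // simplified pair descriptor
  bool isScalable() const { return Scalable; }
};

// Stand-in for the EmitMI lambda that builds the actual reload
// (LDR_PXI for predicate registers, LDR_ZXI for Z registers).
static void EmitMI(const RegPairInfo &RPI) { (void)RPI; }

// With the dedicated reverse pass and the isScalable() skips removed,
// SVE callee-saves are reloaded by the same forward walk over RegPairs
// as every other pair, so their order follows the save order rather
// than its reverse.
static void restoreCalleeSaves(const std::vector<RegPairInfo> &RegPairs) {
  for (const RegPairInfo &RPI : RegPairs)
    EmitMI(RPI);
}

As the updated CHECK lines show, the restores now follow the forward
RegPairs order; in most of the tests this places the Z-register reloads
before the P-register reloads, each group in ascending register order.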
---
 .../Target/AArch64/AArch64FrameLowering.cpp   |   9 -
 llvm/test/CodeGen/AArch64/active_lane_mask.ll |   2 +-
 .../framelayout-sve-calleesaves-fix.mir       |   2 +-
 llvm/test/CodeGen/AArch64/framelayout-sve.mir |  42 +-
 .../sme-streaming-compatible-interface.ll     | 104 +-
 .../AArch64/sme-streaming-interface.ll        | 104 +-
 .../CodeGen/AArch64/sme2-intrinsics-ld1.ll    | 990 +++++++++---------
 .../CodeGen/AArch64/sme2-intrinsics-ldnt1.ll  | 990 +++++++++---------
 .../test/CodeGen/AArch64/stack-probing-sve.ll |  80 +-
 llvm/test/CodeGen/AArch64/sve-alloca.ll       |  52 +-
 .../sve-breakdown-scalable-vectortype.ll      |  84 +-
 .../AArch64/sve-calling-convention-mixed.ll   | 104 +-
 llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll   |  12 +-
 llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll   |  12 +-
 llvm/test/CodeGen/AArch64/sve-pred-arith.ll   |  16 +-
 llvm/test/CodeGen/AArch64/sve-tailcall.ll     | 108 +-
 llvm/test/CodeGen/AArch64/sve-trunc.ll        |   4 +-
 llvm/test/CodeGen/AArch64/unwind-preserved.ll | 216 ++--
 18 files changed, 1461 insertions(+), 1470 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index cffd414221c30c..74e94e85d1eb9d 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -3186,11 +3186,6 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters(
     return MIB->getIterator();
   };
 
-  // SVE objects are always restored in reverse order.
-  for (const RegPairInfo &RPI : reverse(RegPairs))
-    if (RPI.isScalable())
-      EmitMI(RPI);
-
   if (homogeneousPrologEpilog(MF, &MBB)) {
     auto MIB = BuildMI(MBB, MBBI, DL, TII.get(AArch64::HOM_Epilog))
                    .setMIFlag(MachineInstr::FrameDestroy);
@@ -3204,8 +3199,6 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters(
   if (ReverseCSRRestoreSeq) {
     MachineBasicBlock::iterator First = MBB.end();
     for (const RegPairInfo &RPI : reverse(RegPairs)) {
-      if (RPI.isScalable())
-        continue;
       MachineBasicBlock::iterator It = EmitMI(RPI);
       if (First == MBB.end())
         First = It;
@@ -3214,8 +3207,6 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters(
       MBB.splice(MBBI, &MBB, First);
   } else {
     for (const RegPairInfo &RPI : RegPairs) {
-      if (RPI.isScalable())
-        continue;
       (void)EmitMI(RPI);
     }
   }
diff --git a/llvm/test/CodeGen/AArch64/active_lane_mask.ll b/llvm/test/CodeGen/AArch64/active_lane_mask.ll
index a65c5d66677946..15a61910fb1009 100644
--- a/llvm/test/CodeGen/AArch64/active_lane_mask.ll
+++ b/llvm/test/CodeGen/AArch64/active_lane_mask.ll
@@ -191,8 +191,8 @@ define <vscale x 32 x i1> @lane_mask_nxv32i1_i32(i32 %index, i32 %TC) {
 ; CHECK-NEXT:    uzp1 p3.h, p3.h, p4.h
 ; CHECK-NEXT:    cmphi p0.s, p0/z, z25.s, z1.s
 ; CHECK-NEXT:    uzp1 p4.h, p5.h, p6.h
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    uzp1 p2.h, p2.h, p0.h
 ; CHECK-NEXT:    uzp1 p0.b, p1.b, p3.b
 ; CHECK-NEXT:    uzp1 p1.b, p4.b, p2.b
diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir b/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir
index 3dba21d59b4087..aed31450736191 100644
--- a/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir
+++ b/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir
@@ -19,8 +19,8 @@
   ; CHECK-NEXT:    // implicit-def: $p4
   ; CHECK-NEXT:    addvl sp, sp, #1
   ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
-  ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
   ; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
+  ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
   ; CHECK-NEXT:    addvl sp, sp, #2
   ; CHECK-NEXT:    .cfi_def_cfa wsp, 16
   ; CHECK-NEXT:    .cfi_restore z8
diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve.mir b/llvm/test/CodeGen/AArch64/framelayout-sve.mir
index 213d7919e4a727..8bfd12067d77c9 100644
--- a/llvm/test/CodeGen/AArch64/framelayout-sve.mir
+++ b/llvm/test/CodeGen/AArch64/framelayout-sve.mir
@@ -710,9 +710,9 @@ body:             |
 
 # CHECK:      $sp = frame-destroy ADDXri $sp, 32, 0
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK:      $p6 = frame-destroy LDR_PXI $sp, 5
-# CHECK:      $p5 = frame-destroy LDR_PXI $sp, 6
 # CHECK:      $p4 = frame-destroy LDR_PXI $sp, 7
+# CHECK:      $p5 = frame-destroy LDR_PXI $sp, 6
+# CHECK:      $p6 = frame-destroy LDR_PXI $sp, 5
 # CHECK:      $sp = frame-destroy ADDVL_XXI $sp, 1
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
 # CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.4)
@@ -772,9 +772,9 @@ body:             |
 
 # CHECK:      $sp  = frame-destroy ADDXri $sp, 32, 0
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK-NEXT: $z10 = frame-destroy LDR_ZXI $sp, 0
-# CHECK-NEXT: $z9  = frame-destroy LDR_ZXI $sp, 1
 # CHECK-NEXT: $z8  = frame-destroy LDR_ZXI $sp, 2
+# CHECK-NEXT: $z9  = frame-destroy LDR_ZXI $sp, 1
+# CHECK-NEXT: $z10 = frame-destroy LDR_ZXI $sp, 0
 # CHECK-NEXT: $sp  = frame-destroy ADDVL_XXI $sp, 3
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z8
@@ -873,14 +873,14 @@ body:             |
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22
 # CHECK:      $sp = frame-destroy ADDVL_XXI $sp, 1
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK:      $p15 = frame-destroy LDR_PXI $sp, 4
-# CHECK:      $p14 = frame-destroy LDR_PXI $sp, 5
-# CHECK:      $p5 = frame-destroy LDR_PXI $sp, 14
-# CHECK:      $p4 = frame-destroy LDR_PXI $sp, 15
-# CHECK:      $z23 = frame-destroy LDR_ZXI $sp, 2
-# CHECK:      $z22 = frame-destroy LDR_ZXI $sp, 3
-# CHECK:      $z9 = frame-destroy LDR_ZXI $sp, 16
 # CHECK:      $z8 = frame-destroy LDR_ZXI $sp, 17
+# CHECK:      $z9 = frame-destroy LDR_ZXI $sp, 16
+# CHECK:      $z22 = frame-destroy LDR_ZXI $sp, 3
+# CHECK:      $z23 = frame-destroy LDR_ZXI $sp, 2
+# CHECK:      $p4 = frame-destroy LDR_PXI $sp, 15
+# CHECK:      $p5 = frame-destroy LDR_PXI $sp, 14
+# CHECK:      $p14 = frame-destroy LDR_PXI $sp, 5
+# CHECK:      $p15 = frame-destroy LDR_PXI $sp, 4
 # CHECK:      $sp = frame-destroy ADDVL_XXI $sp, 18
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 32
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z8
@@ -1037,14 +1037,14 @@ body:             |
 # CHECK-NEXT: $sp = frame-setup ANDXri killed $[[TMP]]
 
 # CHECK:      $sp = frame-destroy ADDVL_XXI $fp, -18
-# CHECK-NEXT: $p15 = frame-destroy LDR_PXI $sp, 4
-# CHECK-NEXT: $p14 = frame-destroy LDR_PXI $sp, 5
-# CHECK:      $p5 = frame-destroy LDR_PXI $sp, 14
-# CHECK-NEXT: $p4 = frame-destroy LDR_PXI $sp, 15
+# CHECK:      $z8 = frame-destroy LDR_ZXI $sp, 17
+# CHECK-NEXT: $z9 = frame-destroy LDR_ZXI $sp, 16
+# CHECK:      $z22 = frame-destroy LDR_ZXI $sp, 3
 # CHECK-NEXT: $z23 = frame-destroy LDR_ZXI $sp, 2
-# CHECK-NEXT: $z22 = frame-destroy LDR_ZXI $sp, 3
-# CHECK:      $z9 = frame-destroy LDR_ZXI $sp, 16
-# CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 17
+# CHECK-NEXT: $p4 = frame-destroy LDR_PXI $sp, 15
+# CHECK-NEXT: $p5 = frame-destroy LDR_PXI $sp, 14
+# CHECK:      $p14 = frame-destroy LDR_PXI $sp, 5
+# CHECK-NEXT: $p15 = frame-destroy LDR_PXI $sp, 4
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z8
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z9
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z10
@@ -1198,10 +1198,10 @@ body:             |
 
 # CHECK:      $sp = frame-destroy ADDVL_XXI $sp, 7
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK-NEXT: $p15 = frame-destroy LDR_PXI $sp, 6
-# CHECK-NEXT: $p4 = frame-destroy LDR_PXI $sp, 7
-# CHECK-NEXT: $z23 = frame-destroy LDR_ZXI $sp, 1
 # CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 2
+# CHECK-NEXT: $z23 = frame-destroy LDR_ZXI $sp, 1
+# CHECK-NEXT: $p4 = frame-destroy LDR_PXI $sp, 7
+# CHECK-NEXT: $p15 = frame-destroy LDR_PXI $sp, 6
 # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 3
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z8
diff --git a/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll b/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll
index 5d0c9127d3ebb2..23acc57159907b 100644
--- a/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll
+++ b/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll
@@ -219,34 +219,34 @@ define <vscale x 2 x double> @streaming_compatible_with_scalable_vectors(<vscale
 ; CHECK-NEXT:    ldr z1, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    fadd z0.d, z1.d, z0.d
 ; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #18
 ; CHECK-NEXT:    ldp x30, x19, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x29, [sp], #32 // 8-byte Folded Reload
@@ -311,34 +311,34 @@ define <vscale x 2 x i1> @streaming_compatible_with_predicate_vectors(<vscale x
 ; CHECK-NEXT:    ldr p1, [sp, #6, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    and p0.b, p1/z, p1.b, p0.b
 ; CHECK-NEXT:    addvl sp, sp, #1
-; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #18
 ; CHECK-NEXT:    ldp x30, x19, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x29, [sp], #32 // 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/sme-streaming-interface.ll b/llvm/test/CodeGen/AArch64/sme-streaming-interface.ll
index dd7d6470ad7b08..efb904b9bb333e 100644
--- a/llvm/test/CodeGen/AArch64/sme-streaming-interface.ll
+++ b/llvm/test/CodeGen/AArch64/sme-streaming-interface.ll
@@ -187,34 +187,34 @@ define <vscale x 4 x i32> @smstart_clobber_sve(<vscale x 4 x i32> %x) nounwind {
 ; CHECK-NEXT:    smstop sm
 ; CHECK-NEXT:    ldr z0, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #1
-; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #18
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -267,34 +267,34 @@ define <vscale x 4 x i32> @smstart_clobber_sve_duplicate(<vscale x 4 x i32> %x)
 ; CHECK-NEXT:    smstop sm
 ; CHECK-NEXT:    ldr z0, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #1
-; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #18
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll
index b7119fc0825673..f575d65490e362 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll
@@ -77,23 +77,23 @@ define <vscale x 32 x i8> @ld1_x2_i8_z0_z8(<vscale x 16 x i8> %unused, <vscale x
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    mov z1.d, z8.d
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -129,22 +129,22 @@ define <vscale x 32 x i8> @ld1_x2_i8_z0_z8(<vscale x 16 x i8> %unused, <vscale x
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -232,23 +232,23 @@ define <vscale x 32 x i8> @ld1_x2_i8_z0_z8_scalar(<vscale x 16 x i8> %unused, <v
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    mov z1.d, z8.d
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -284,22 +284,22 @@ define <vscale x 32 x i8> @ld1_x2_i8_z0_z8_scalar(<vscale x 16 x i8> %unused, <v
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -388,23 +388,23 @@ define <vscale x 16 x i16> @ld1_x2_i16_z0_z8(<vscale x 8 x i16> %unused, <vscale
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    mov z1.d, z8.d
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -440,22 +440,22 @@ define <vscale x 16 x i16> @ld1_x2_i16_z0_z8(<vscale x 8 x i16> %unused, <vscale
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -543,23 +543,23 @@ define <vscale x 16 x i16> @ld1_x2_i16_z0_z8_scalar(<vscale x 8 x i16> %unused,
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    mov z1.d, z8.d
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -595,22 +595,22 @@ define <vscale x 16 x i16> @ld1_x2_i16_z0_z8_scalar(<vscale x 8 x i16> %unused,
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -699,23 +699,23 @@ define <vscale x 8 x i32> @ld1_x2_i32_z0_z8(<vscale x 4 x i32> %unused, <vscale
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    mov z1.d, z8.d
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -751,22 +751,22 @@ define <vscale x 8 x i32> @ld1_x2_i32_z0_z8(<vscale x 4 x i32> %unused, <vscale
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -854,23 +854,23 @@ define <vscale x 8 x i32> @ld1_x2_i32_z0_z8_scalar(<vscale x 4 x i32> %unused, <
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    mov z1.d, z8.d
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -906,22 +906,22 @@ define <vscale x 8 x i32> @ld1_x2_i32_z0_z8_scalar(<vscale x 4 x i32> %unused, <
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -1010,23 +1010,23 @@ define <vscale x 4 x i64> @ld1_x2_i64_z0_z8(<vscale x 2 x i64> %unused, <vscale
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    mov z1.d, z8.d
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -1062,22 +1062,22 @@ define <vscale x 4 x i64> @ld1_x2_i64_z0_z8(<vscale x 2 x i64> %unused, <vscale
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -1165,23 +1165,23 @@ define <vscale x 4 x i64> @ld1_x2_i64_z0_z8_scalar(<vscale x 2 x i64> %unused, <
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    mov z1.d, z8.d
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -1204,35 +1204,35 @@ define <vscale x 4 x i64> @ld1_x2_i64_z0_z8_scalar(<vscale x 2 x i64> %unused, <
 ; CONTIGUOUS-NEXT:    str z13, [sp, #11, mul vl] // 16-byte Folded Spill
 ; CONTIGUOUS-NEXT:    str z12, [sp, #12, mul vl] // 16-byte Folded Spill
 ; CONTIGUOUS-NEXT:    str z11, [sp, #13, mul vl] // 16-byte Folded Spill
-; CONTIGUOUS-NEXT:    str z10, [sp, #14, mul vl] // 16-byte Folded Spill
-; CONTIGUOUS-NEXT:    str z9, [sp, #15, mul vl] // 16-byte Folded Spill
-; CONTIGUOUS-NEXT:    addvl sp, sp, #-2
-; CONTIGUOUS-NEXT:    mov p8.b, p0.b
-; CONTIGUOUS-NEXT:    ld1d { z0.d, z1.d }, pn8/z, [x0, x1, lsl #3]
-; CONTIGUOUS-NEXT:    str z0, [sp]
-; CONTIGUOUS-NEXT:    str z1, [sp, #1, mul vl]
-; CONTIGUOUS-NEXT:    //APP
-; CONTIGUOUS-NEXT:    nop
-; CONTIGUOUS-NEXT:    //NO_APP
-; CONTIGUOUS-NEXT:    ldr z0, [sp]
-; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
-; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    str z10, [sp, #14, mul vl] // 16-byte Folded Spill
+; CONTIGUOUS-NEXT:    str z9, [sp, #15, mul vl] // 16-byte Folded Spill
+; CONTIGUOUS-NEXT:    addvl sp, sp, #-2
+; CONTIGUOUS-NEXT:    mov p8.b, p0.b
+; CONTIGUOUS-NEXT:    ld1d { z0.d, z1.d }, pn8/z, [x0, x1, lsl #3]
+; CONTIGUOUS-NEXT:    str z0, [sp]
+; CONTIGUOUS-NEXT:    str z1, [sp, #1, mul vl]
+; CONTIGUOUS-NEXT:    //APP
+; CONTIGUOUS-NEXT:    nop
+; CONTIGUOUS-NEXT:    //NO_APP
+; CONTIGUOUS-NEXT:    ldr z0, [sp]
+; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
+; CONTIGUOUS-NEXT:    addvl sp, sp, #2
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -1323,24 +1323,24 @@ define <vscale x 64 x i8> @ld1_x4_i8_z0_z4_z8_z12(<vscale x 16 x i8> %unused, <v
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1380,21 +1380,21 @@ define <vscale x 64 x i8> @ld1_x4_i8_z0_z4_z8_z12(<vscale x 16 x i8> %unused, <v
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -1488,24 +1488,24 @@ define <vscale x 64 x i8> @ld1_x4_i8_z0_z4_z8_z12_scalar(<vscale x 16 x i8> %unu
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1545,21 +1545,21 @@ define <vscale x 64 x i8> @ld1_x4_i8_z0_z4_z8_z12_scalar(<vscale x 16 x i8> %unu
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -1654,24 +1654,24 @@ define <vscale x 32 x i16> @ld1_x4_i16_z0_z4_z8_z12(<vscale x 8 x i16> %unused,
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1711,21 +1711,21 @@ define <vscale x 32 x i16> @ld1_x4_i16_z0_z4_z8_z12(<vscale x 8 x i16> %unused,
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -1820,24 +1820,24 @@ define <vscale x 32 x i16> @ld1_x4_i16_z0_z4_z8_z12_scalar(<vscale x 8 x i16> %u
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1867,31 +1867,31 @@ define <vscale x 32 x i16> @ld1_x4_i16_z0_z4_z8_z12_scalar(<vscale x 8 x i16> %u
 ; CONTIGUOUS-NEXT:    ld1h { z0.h - z3.h }, pn8/z, [x0, x1, lsl #1]
 ; CONTIGUOUS-NEXT:    str z0, [sp]
 ; CONTIGUOUS-NEXT:    str z1, [sp, #1, mul vl]
-; CONTIGUOUS-NEXT:    str z2, [sp, #2, mul vl]
-; CONTIGUOUS-NEXT:    str z3, [sp, #3, mul vl]
-; CONTIGUOUS-NEXT:    //APP
-; CONTIGUOUS-NEXT:    nop
-; CONTIGUOUS-NEXT:    //NO_APP
-; CONTIGUOUS-NEXT:    ldr z0, [sp]
-; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
-; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
-; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
-; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    str z2, [sp, #2, mul vl]
+; CONTIGUOUS-NEXT:    str z3, [sp, #3, mul vl]
+; CONTIGUOUS-NEXT:    //APP
+; CONTIGUOUS-NEXT:    nop
+; CONTIGUOUS-NEXT:    //NO_APP
+; CONTIGUOUS-NEXT:    ldr z0, [sp]
+; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
+; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
+; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
+; CONTIGUOUS-NEXT:    addvl sp, sp, #4
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -1986,24 +1986,24 @@ define <vscale x 16 x i32> @ld1_x4_i32_z0_z4_z8_z12(<vscale x 4 x i32> %unused,
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -2043,21 +2043,21 @@ define <vscale x 16 x i32> @ld1_x4_i32_z0_z4_z8_z12(<vscale x 4 x i32> %unused,
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -2152,24 +2152,24 @@ define <vscale x 16 x i32> @ld1_x4_i32_z0_z4_z8_z12_scalar(<vscale x 4 x i32> %u
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -2209,21 +2209,21 @@ define <vscale x 16 x i32> @ld1_x4_i32_z0_z4_z8_z12_scalar(<vscale x 4 x i32> %u
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -2318,24 +2318,24 @@ define <vscale x 8 x i64> @ld1_x4_i64_z0_z4_z8_z12(<vscale x 2 x i64> %unused, <
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -2375,21 +2375,21 @@ define <vscale x 8 x i64> @ld1_x4_i64_z0_z4_z8_z12(<vscale x 2 x i64> %unused, <
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -2484,24 +2484,24 @@ define <vscale x 8 x i64> @ld1_x4_i64_z0_z4_z8_z12_scalar(<vscale x 2 x i64> %un
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -2541,21 +2541,21 @@ define <vscale x 8 x i64> @ld1_x4_i64_z0_z4_z8_z12_scalar(<vscale x 2 x i64> %un
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll
index 1fb251a4f628e9..0c42b8ba17c60c 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll
@@ -30,23 +30,23 @@ define <vscale x 32 x i8> @ldnt1_x2_i8_z0_z8(<vscale x 16 x i8> %unused, <vscale
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    mov z1.d, z8.d
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -82,22 +82,22 @@ define <vscale x 32 x i8> @ldnt1_x2_i8_z0_z8(<vscale x 16 x i8> %unused, <vscale
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -138,23 +138,23 @@ define <vscale x 32 x i8> @ldnt1_x2_i8_z0_z8_scalar(<vscale x 16 x i8> %unused,
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    mov z1.d, z8.d
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -190,22 +190,22 @@ define <vscale x 32 x i8> @ldnt1_x2_i8_z0_z8_scalar(<vscale x 16 x i8> %unused,
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -247,23 +247,23 @@ define <vscale x 16 x i16> @ldnt1_x2_i16_z0_z8(<vscale x 8 x i16> %unused, <vsca
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    mov z1.d, z8.d
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -299,22 +299,22 @@ define <vscale x 16 x i16> @ldnt1_x2_i16_z0_z8(<vscale x 8 x i16> %unused, <vsca
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -355,23 +355,23 @@ define <vscale x 16 x i16> @ldnt1_x2_i16_z0_z8_scalar(<vscale x 8 x i16> %unused
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    mov z1.d, z8.d
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -407,22 +407,22 @@ define <vscale x 16 x i16> @ldnt1_x2_i16_z0_z8_scalar(<vscale x 8 x i16> %unused
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -464,23 +464,23 @@ define <vscale x 8 x i32> @ldnt1_x2_i32_z0_z8(<vscale x 4 x i32> %unused, <vscal
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    mov z1.d, z8.d
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -516,22 +516,22 @@ define <vscale x 8 x i32> @ldnt1_x2_i32_z0_z8(<vscale x 4 x i32> %unused, <vscal
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -572,23 +572,23 @@ define <vscale x 8 x i32> @ldnt1_x2_i32_z0_z8_scalar(<vscale x 4 x i32> %unused,
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    mov z1.d, z8.d
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -624,22 +624,22 @@ define <vscale x 8 x i32> @ldnt1_x2_i32_z0_z8_scalar(<vscale x 4 x i32> %unused,
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -681,23 +681,23 @@ define <vscale x 4 x i64> @ldnt1_x2_i64_z0_z8(<vscale x 2 x i64> %unused, <vscal
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    mov z1.d, z8.d
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -733,22 +733,22 @@ define <vscale x 4 x i64> @ldnt1_x2_i64_z0_z8(<vscale x 2 x i64> %unused, <vscal
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -789,23 +789,23 @@ define <vscale x 4 x i64> @ldnt1_x2_i64_z0_z8_scalar(<vscale x 2 x i64> %unused,
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    mov z1.d, z8.d
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -828,35 +828,35 @@ define <vscale x 4 x i64> @ldnt1_x2_i64_z0_z8_scalar(<vscale x 2 x i64> %unused,
 ; CONTIGUOUS-NEXT:    str z13, [sp, #11, mul vl] // 16-byte Folded Spill
 ; CONTIGUOUS-NEXT:    str z12, [sp, #12, mul vl] // 16-byte Folded Spill
 ; CONTIGUOUS-NEXT:    str z11, [sp, #13, mul vl] // 16-byte Folded Spill
-; CONTIGUOUS-NEXT:    str z10, [sp, #14, mul vl] // 16-byte Folded Spill
-; CONTIGUOUS-NEXT:    str z9, [sp, #15, mul vl] // 16-byte Folded Spill
-; CONTIGUOUS-NEXT:    addvl sp, sp, #-2
-; CONTIGUOUS-NEXT:    mov p8.b, p0.b
-; CONTIGUOUS-NEXT:    ldnt1d { z0.d, z1.d }, pn8/z, [x0, x1, lsl #3]
-; CONTIGUOUS-NEXT:    str z0, [sp]
-; CONTIGUOUS-NEXT:    str z1, [sp, #1, mul vl]
-; CONTIGUOUS-NEXT:    //APP
-; CONTIGUOUS-NEXT:    nop
-; CONTIGUOUS-NEXT:    //NO_APP
-; CONTIGUOUS-NEXT:    ldr z0, [sp]
-; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
-; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    str z10, [sp, #14, mul vl] // 16-byte Folded Spill
+; CONTIGUOUS-NEXT:    str z9, [sp, #15, mul vl] // 16-byte Folded Spill
+; CONTIGUOUS-NEXT:    addvl sp, sp, #-2
+; CONTIGUOUS-NEXT:    mov p8.b, p0.b
+; CONTIGUOUS-NEXT:    ldnt1d { z0.d, z1.d }, pn8/z, [x0, x1, lsl #3]
+; CONTIGUOUS-NEXT:    str z0, [sp]
+; CONTIGUOUS-NEXT:    str z1, [sp, #1, mul vl]
+; CONTIGUOUS-NEXT:    //APP
+; CONTIGUOUS-NEXT:    nop
+; CONTIGUOUS-NEXT:    //NO_APP
+; CONTIGUOUS-NEXT:    ldr z0, [sp]
+; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
+; CONTIGUOUS-NEXT:    addvl sp, sp, #2
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -898,24 +898,24 @@ define <vscale x 64 x i8> @ldnt1_x4_i8_z0_z4_z8_z12(<vscale x 16 x i8> %unused,
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -955,21 +955,21 @@ define <vscale x 64 x i8> @ldnt1_x4_i8_z0_z4_z8_z12(<vscale x 16 x i8> %unused,
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -1014,24 +1014,24 @@ define <vscale x 64 x i8> @ldnt1_x4_i8_z0_z4_z8_z12_scalar(<vscale x 16 x i8> %u
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1071,21 +1071,21 @@ define <vscale x 64 x i8> @ldnt1_x4_i8_z0_z4_z8_z12_scalar(<vscale x 16 x i8> %u
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -1131,24 +1131,24 @@ define <vscale x 32 x i16> @ldnt1_x4_i16_z0_z4_z8_z12(<vscale x 8 x i16> %unused
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1188,21 +1188,21 @@ define <vscale x 32 x i16> @ldnt1_x4_i16_z0_z4_z8_z12(<vscale x 8 x i16> %unused
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -1247,24 +1247,24 @@ define <vscale x 32 x i16> @ldnt1_x4_i16_z0_z4_z8_z12_scalar(<vscale x 8 x i16>
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1294,31 +1294,31 @@ define <vscale x 32 x i16> @ldnt1_x4_i16_z0_z4_z8_z12_scalar(<vscale x 8 x i16>
 ; CONTIGUOUS-NEXT:    ldnt1h { z0.h - z3.h }, pn8/z, [x0, x1, lsl #1]
 ; CONTIGUOUS-NEXT:    str z0, [sp]
 ; CONTIGUOUS-NEXT:    str z1, [sp, #1, mul vl]
-; CONTIGUOUS-NEXT:    str z2, [sp, #2, mul vl]
-; CONTIGUOUS-NEXT:    str z3, [sp, #3, mul vl]
-; CONTIGUOUS-NEXT:    //APP
-; CONTIGUOUS-NEXT:    nop
-; CONTIGUOUS-NEXT:    //NO_APP
-; CONTIGUOUS-NEXT:    ldr z0, [sp]
-; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
-; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
-; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
-; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    str z2, [sp, #2, mul vl]
+; CONTIGUOUS-NEXT:    str z3, [sp, #3, mul vl]
+; CONTIGUOUS-NEXT:    //APP
+; CONTIGUOUS-NEXT:    nop
+; CONTIGUOUS-NEXT:    //NO_APP
+; CONTIGUOUS-NEXT:    ldr z0, [sp]
+; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
+; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
+; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
+; CONTIGUOUS-NEXT:    addvl sp, sp, #4
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -1364,24 +1364,24 @@ define <vscale x 16 x i32> @ldnt1_x4_i32_z0_z4_z8_z12(<vscale x 4 x i32> %unused
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1421,21 +1421,21 @@ define <vscale x 16 x i32> @ldnt1_x4_i32_z0_z4_z8_z12(<vscale x 4 x i32> %unused
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -1480,24 +1480,24 @@ define <vscale x 16 x i32> @ldnt1_x4_i32_z0_z4_z8_z12_scalar(<vscale x 4 x i32>
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1537,21 +1537,21 @@ define <vscale x 16 x i32> @ldnt1_x4_i32_z0_z4_z8_z12_scalar(<vscale x 4 x i32>
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -1597,24 +1597,24 @@ define <vscale x 8 x i64> @ldnt1_x4_i64_z0_z4_z8_z12(<vscale x 2 x i64> %unused,
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1654,21 +1654,21 @@ define <vscale x 8 x i64> @ldnt1_x4_i64_z0_z4_z8_z12(<vscale x 2 x i64> %unused,
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
@@ -1713,24 +1713,24 @@ define <vscale x 8 x i64> @ldnt1_x4_i64_z0_z4_z8_z12_scalar(<vscale x 2 x i64> %
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1770,21 +1770,21 @@ define <vscale x 8 x i64> @ldnt1_x4_i64_z0_z4_z8_z12_scalar(<vscale x 2 x i64> %
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/stack-probing-sve.ll b/llvm/test/CodeGen/AArch64/stack-probing-sve.ll
index 03a9220ebfddc6..9f76cbb8912b44 100644
--- a/llvm/test/CodeGen/AArch64/stack-probing-sve.ll
+++ b/llvm/test/CodeGen/AArch64/stack-probing-sve.ll
@@ -191,10 +191,10 @@ define void @sve_4v_csr(<vscale x 4 x float> %a) #0 {
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
 ; CHECK-NEXT:    //APP
 ; CHECK-NEXT:    //NO_APP
-; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #4
 ; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    .cfi_restore z8
@@ -245,22 +245,22 @@ define void @sve_16v_csr(<vscale x 4 x float> %a) #0 {
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
 ; CHECK-NEXT:    //APP
 ; CHECK-NEXT:    //NO_APP
-; CHECK-NEXT:    ldr z23, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z23, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #16
 ; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    .cfi_restore z8
@@ -317,10 +317,10 @@ define void @sve_4p_csr(<vscale x 4 x float> %a) #0 {
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    //APP
 ; CHECK-NEXT:    //NO_APP
-; CHECK-NEXT:    ldr p11, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p10, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p9, [sp, #6, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p9, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p10, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p11, [sp, #4, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -380,23 +380,23 @@ define void @sve_16v_1p_csr(<vscale x 4 x float> %a) #0 {
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
 ; CHECK-NEXT:    //APP
 ; CHECK-NEXT:    //NO_APP
-; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #17
 ; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    .cfi_restore z8
@@ -697,10 +697,10 @@ define void @sve_unprobed_area(<vscale x 4 x float> %a, i32 %n) #0 {
 ; CHECK-NEXT:    //NO_APP
 ; CHECK-NEXT:    addvl sp, sp, #4
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
-; CHECK-NEXT:    ldr p9, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr p9, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #4
 ; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    .cfi_restore z8
diff --git a/llvm/test/CodeGen/AArch64/sve-alloca.ll b/llvm/test/CodeGen/AArch64/sve-alloca.ll
index 209c6198fe575e..d2087f0de07440 100644
--- a/llvm/test/CodeGen/AArch64/sve-alloca.ll
+++ b/llvm/test/CodeGen/AArch64/sve-alloca.ll
@@ -66,34 +66,34 @@ define void @foo(<vscale x 4 x i64> %dst, i1 %cond) {
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    bl bar
 ; CHECK-NEXT:    addvl sp, x29, #-18
-; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    mov sp, x29
 ; CHECK-NEXT:    ldp x28, x19, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldp x29, x30, [sp], #32 // 16-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/sve-breakdown-scalable-vectortype.ll b/llvm/test/CodeGen/AArch64/sve-breakdown-scalable-vectortype.ll
index 5b69b68552a4d7..f2247ac11608e9 100644
--- a/llvm/test/CodeGen/AArch64/sve-breakdown-scalable-vectortype.ll
+++ b/llvm/test/CodeGen/AArch64/sve-breakdown-scalable-vectortype.ll
@@ -25,8 +25,8 @@ define <vscale x 32 x i8> @wide_32i8(i1 %b, <vscale x 16 x i8> %legal, <vscale x
 ; CHECK-NEXT:  .LBB0_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z9.d
 ; CHECK-NEXT:    mov z1.d, z8.d
-; CHECK-NEXT:    ldr z9, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -53,8 +53,8 @@ define <vscale x 16 x i16> @wide_16i16(i1 %b, <vscale x 16 x i8> %legal, <vscale
 ; CHECK-NEXT:  .LBB1_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z9.d
 ; CHECK-NEXT:    mov z1.d, z8.d
-; CHECK-NEXT:    ldr z9, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -81,8 +81,8 @@ define <vscale x 8 x i32> @wide_8i32(i1 %b, <vscale x 16 x i8> %legal, <vscale x
 ; CHECK-NEXT:  .LBB2_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z9.d
 ; CHECK-NEXT:    mov z1.d, z8.d
-; CHECK-NEXT:    ldr z9, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -109,8 +109,8 @@ define <vscale x 4 x i64> @wide_4i64(i1 %b, <vscale x 16 x i8> %legal, <vscale x
 ; CHECK-NEXT:  .LBB3_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z9.d
 ; CHECK-NEXT:    mov z1.d, z8.d
-; CHECK-NEXT:    ldr z9, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -137,8 +137,8 @@ define <vscale x 16 x half> @wide_16f16(i1 %b, <vscale x 16 x i8> %legal, <vscal
 ; CHECK-NEXT:  .LBB4_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z9.d
 ; CHECK-NEXT:    mov z1.d, z8.d
-; CHECK-NEXT:    ldr z9, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -165,8 +165,8 @@ define <vscale x 8 x float> @wide_8f32(i1 %b, <vscale x 16 x i8> %legal, <vscale
 ; CHECK-NEXT:  .LBB5_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z9.d
 ; CHECK-NEXT:    mov z1.d, z8.d
-; CHECK-NEXT:    ldr z9, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -193,8 +193,8 @@ define <vscale x 4 x double> @wide_4f64(i1 %b, <vscale x 16 x i8> %legal, <vscal
 ; CHECK-NEXT:  .LBB6_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z9.d
 ; CHECK-NEXT:    mov z1.d, z8.d
-; CHECK-NEXT:    ldr z9, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -227,10 +227,10 @@ define <vscale x 48 x i8> @wide_48i8(i1 %b, <vscale x 16 x i8> %legal, <vscale x
 ; CHECK-NEXT:  .LBB7_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z10.d
 ; CHECK-NEXT:    mov z1.d, z9.d
-; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    mov z2.d, z8.d
 ; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    mov z2.d, z8.d
 ; CHECK-NEXT:    ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -259,10 +259,10 @@ define <vscale x 24 x i16> @wide_24i16(i1 %b, <vscale x 16 x i8> %legal, <vscale
 ; CHECK-NEXT:  .LBB8_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z10.d
 ; CHECK-NEXT:    mov z1.d, z9.d
-; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    mov z2.d, z8.d
 ; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    mov z2.d, z8.d
 ; CHECK-NEXT:    ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -291,10 +291,10 @@ define <vscale x 12 x i32> @wide_12i32(i1 %b, <vscale x 16 x i8> %legal, <vscale
 ; CHECK-NEXT:  .LBB9_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z10.d
 ; CHECK-NEXT:    mov z1.d, z9.d
-; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    mov z2.d, z8.d
 ; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    mov z2.d, z8.d
 ; CHECK-NEXT:    ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -323,10 +323,10 @@ define <vscale x 6 x i64> @wide_6i64(i1 %b, <vscale x 16 x i8> %legal, <vscale x
 ; CHECK-NEXT:  .LBB10_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z10.d
 ; CHECK-NEXT:    mov z1.d, z9.d
-; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    mov z2.d, z8.d
 ; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    mov z2.d, z8.d
 ; CHECK-NEXT:    ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -355,10 +355,10 @@ define <vscale x 24 x half> @wide_24f16(i1 %b, <vscale x 16 x i8> %legal, <vscal
 ; CHECK-NEXT:  .LBB11_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z10.d
 ; CHECK-NEXT:    mov z1.d, z9.d
-; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    mov z2.d, z8.d
 ; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    mov z2.d, z8.d
 ; CHECK-NEXT:    ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -387,10 +387,10 @@ define <vscale x 12 x float> @wide_12f32(i1 %b, <vscale x 16 x i8> %legal, <vsca
 ; CHECK-NEXT:  .LBB12_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z10.d
 ; CHECK-NEXT:    mov z1.d, z9.d
-; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    mov z2.d, z8.d
 ; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    mov z2.d, z8.d
 ; CHECK-NEXT:    ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -419,10 +419,10 @@ define <vscale x 6 x double> @wide_6f64(i1 %b, <vscale x 16 x i8> %legal, <vscal
 ; CHECK-NEXT:  .LBB13_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z10.d
 ; CHECK-NEXT:    mov z1.d, z9.d
-; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    mov z2.d, z8.d
 ; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    mov z2.d, z8.d
 ; CHECK-NEXT:    ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -457,12 +457,12 @@ define <vscale x 64 x i8> @wide_64i8(i1 %b, <vscale x 16 x i8> %legal, <vscale x
 ; CHECK-NEXT:  .LBB14_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z11.d
 ; CHECK-NEXT:    mov z1.d, z10.d
-; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov z2.d, z9.d
 ; CHECK-NEXT:    mov z3.d, z8.d
-; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #4
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -493,12 +493,12 @@ define <vscale x 32 x i16> @wide_32i16(i1 %b, <vscale x 16 x i8> %legal, <vscale
 ; CHECK-NEXT:  .LBB15_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z11.d
 ; CHECK-NEXT:    mov z1.d, z10.d
-; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov z2.d, z9.d
 ; CHECK-NEXT:    mov z3.d, z8.d
-; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #4
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -529,12 +529,12 @@ define <vscale x 16 x i32> @wide_16i32(i1 %b, <vscale x 16 x i8> %legal, <vscale
 ; CHECK-NEXT:  .LBB16_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z11.d
 ; CHECK-NEXT:    mov z1.d, z10.d
-; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov z2.d, z9.d
 ; CHECK-NEXT:    mov z3.d, z8.d
-; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #4
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -565,12 +565,12 @@ define <vscale x 8 x i64> @wide_8i64(i1 %b, <vscale x 16 x i8> %legal, <vscale x
 ; CHECK-NEXT:  .LBB17_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z11.d
 ; CHECK-NEXT:    mov z1.d, z10.d
-; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov z2.d, z9.d
 ; CHECK-NEXT:    mov z3.d, z8.d
-; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #4
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -601,12 +601,12 @@ define <vscale x 32 x half> @wide_32f16(i1 %b, <vscale x 16 x i8> %legal, <vscal
 ; CHECK-NEXT:  .LBB18_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z11.d
 ; CHECK-NEXT:    mov z1.d, z10.d
-; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov z2.d, z9.d
 ; CHECK-NEXT:    mov z3.d, z8.d
-; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #4
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -637,12 +637,12 @@ define <vscale x 16 x float> @wide_16f32(i1 %b, <vscale x 16 x i8> %legal, <vsca
 ; CHECK-NEXT:  .LBB19_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z11.d
 ; CHECK-NEXT:    mov z1.d, z10.d
-; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov z2.d, z9.d
 ; CHECK-NEXT:    mov z3.d, z8.d
-; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #4
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -673,12 +673,12 @@ define <vscale x 8 x double> @wide_8f64(i1 %b, <vscale x 16 x i8> %legal, <vscal
 ; CHECK-NEXT:  .LBB20_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z11.d
 ; CHECK-NEXT:    mov z1.d, z10.d
-; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov z2.d, z9.d
 ; CHECK-NEXT:    mov z3.d, z8.d
-; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #4
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
index 783878fe738068..4e3b88bccae521 100644
--- a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
+++ b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
@@ -567,34 +567,34 @@ define <vscale x 4 x float> @sve_caller_non_sve_callee_high_range(<vscale x 4 x
 ; CHECK-NEXT:    bl non_sve_callee_high_range
 ; CHECK-NEXT:    ldr z0, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #3
-; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #18
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -659,34 +659,34 @@ define <vscale x 4 x float> @sve_ret_caller_non_sve_callee_high_range()  {
 ; CHECK-NEXT:    fmov s7, #7.00000000
 ; CHECK-NEXT:    bl non_sve_callee_high_range
 ; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #18
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll b/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll
index 43113df07c079f..50d38e1777951b 100644
--- a/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll
@@ -360,11 +360,11 @@ define <vscale x 8 x i32> @test_signed_v8f64_v8i32(<vscale x 8 x double> %f) {
 ; CHECK-NEXT:    fcmuo p0.d, p0/z, z2.d, z2.d
 ; CHECK-NEXT:    sel z0.d, p5, z26.d, z6.d
 ; CHECK-NEXT:    sel z1.d, p6, z26.d, z5.d
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    sel z2.d, p7, z26.d, z7.d
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    sel z3.d, p1, z26.d, z4.d
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    mov z0.d, p2/m, #0 // =0x0
 ; CHECK-NEXT:    mov z1.d, p3/m, #0 // =0x0
 ; CHECK-NEXT:    mov z2.d, p4/m, #0 // =0x0
@@ -469,11 +469,11 @@ define <vscale x 8 x i16> @test_signed_v8f64_v8i16(<vscale x 8 x double> %f) {
 ; CHECK-NEXT:    fcmuo p0.d, p0/z, z0.d, z0.d
 ; CHECK-NEXT:    sel z2.d, p5, z25.d, z5.d
 ; CHECK-NEXT:    sel z0.d, p6, z25.d, z6.d
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    sel z1.d, p7, z25.d, z7.d
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    sel z3.d, p1, z25.d, z24.d
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    mov z2.d, p2/m, #0 // =0x0
 ; CHECK-NEXT:    mov z0.d, p3/m, #0 // =0x0
 ; CHECK-NEXT:    mov z1.d, p4/m, #0 // =0x0
diff --git a/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll b/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll
index c9e06fd9f3414e..51ea661840dfa4 100644
--- a/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll
@@ -268,11 +268,11 @@ define <vscale x 8 x i32> @test_signed_v8f64_v8i32(<vscale x 8 x double> %f) {
 ; CHECK-NEXT:    mov z7.d, p3/m, #0 // =0x0
 ; CHECK-NEXT:    mov z24.d, p4/m, #0 // =0x0
 ; CHECK-NEXT:    sel z1.d, p5, z0.d, z5.d
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    sel z2.d, p6, z0.d, z6.d
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    sel z3.d, p1, z0.d, z7.d
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    sel z4.d, p0, z0.d, z24.d
 ; CHECK-NEXT:    uzp1 z0.s, z2.s, z1.s
 ; CHECK-NEXT:    uzp1 z1.s, z4.s, z3.s
@@ -351,11 +351,11 @@ define <vscale x 8 x i16> @test_signed_v8f64_v8i16(<vscale x 8 x double> %f) {
 ; CHECK-NEXT:    mov z7.d, p3/m, #0 // =0x0
 ; CHECK-NEXT:    mov z24.d, p4/m, #0 // =0x0
 ; CHECK-NEXT:    sel z0.d, p5, z2.d, z5.d
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    sel z1.d, p6, z2.d, z6.d
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    sel z3.d, p1, z2.d, z7.d
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    sel z2.d, p0, z2.d, z24.d
 ; CHECK-NEXT:    uzp1 z0.s, z1.s, z0.s
 ; CHECK-NEXT:    uzp1 z1.s, z2.s, z3.s
diff --git a/llvm/test/CodeGen/AArch64/sve-pred-arith.ll b/llvm/test/CodeGen/AArch64/sve-pred-arith.ll
index 4d46ac5ecbaa95..536d03cce31825 100644
--- a/llvm/test/CodeGen/AArch64/sve-pred-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-arith.ll
@@ -66,13 +66,13 @@ define aarch64_sve_vector_pcs <vscale x 64 x i1> @add_nxv64i1(<vscale x 64 x i1>
 ; CHECK-NEXT:    ldr p8, [x3]
 ; CHECK-NEXT:    eor p0.b, p4/z, p0.b, p5.b
 ; CHECK-NEXT:    eor p1.b, p4/z, p1.b, p6.b
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    eor p2.b, p4/z, p2.b, p7.b
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    eor p3.b, p4/z, p3.b, p8.b
-; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -150,13 +150,13 @@ define aarch64_sve_vector_pcs <vscale x 64 x i1> @sub_nxv64i1(<vscale x 64 x i1>
 ; CHECK-NEXT:    ldr p8, [x3]
 ; CHECK-NEXT:    eor p0.b, p4/z, p0.b, p5.b
 ; CHECK-NEXT:    eor p1.b, p4/z, p1.b, p6.b
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    eor p2.b, p4/z, p2.b, p7.b
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    eor p3.b, p4/z, p3.b, p8.b
-; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/sve-tailcall.ll b/llvm/test/CodeGen/AArch64/sve-tailcall.ll
index f32c80d392b633..9857931eff3d66 100644
--- a/llvm/test/CodeGen/AArch64/sve-tailcall.ll
+++ b/llvm/test/CodeGen/AArch64/sve-tailcall.ll
@@ -13,8 +13,8 @@ define <vscale x 4 x i32> @sve_caller_sve_callee() nounwind {
 ; CHECK-NEXT:    str z9, [sp, #1, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    //APP
 ; CHECK-NEXT:    //NO_APP
-; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    b sve_callee
@@ -32,8 +32,8 @@ define <vscale x 4 x i32> @sve_caller_sve_callee_fastcc() nounwind {
 ; CHECK-NEXT:    str z9, [sp, #1, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    //APP
 ; CHECK-NEXT:    //NO_APP
-; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    b sve_callee
@@ -83,34 +83,34 @@ define i32 @sve_caller_non_sve_callee(<vscale x 4 x i32> %arg) nounwind {
 ; CHECK-NEXT:    //APP
 ; CHECK-NEXT:    //NO_APP
 ; CHECK-NEXT:    bl non_sve_callee
-; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #18
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -158,34 +158,34 @@ define i32 @sve_caller_non_sve_callee_fastcc(<vscale x 4 x i32> %arg) nounwind {
 ; CHECK-NEXT:    //APP
 ; CHECK-NEXT:    //NO_APP
 ; CHECK-NEXT:    bl non_sve_callee
-; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #18
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-trunc.ll b/llvm/test/CodeGen/AArch64/sve-trunc.ll
index dfa4a6148b86e9..214e79ed033555 100644
--- a/llvm/test/CodeGen/AArch64/sve-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-trunc.ll
@@ -136,9 +136,9 @@ define <vscale x 16 x i1> @trunc_i64toi1_split3(<vscale x 16 x i64> %in) {
 ; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
 ; CHECK-NEXT:    uzp1 p3.s, p4.s, p3.s
 ; CHECK-NEXT:    uzp1 p4.s, p6.s, p5.s
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    uzp1 p0.s, p0.s, p2.s
 ; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    uzp1 p0.s, p0.s, p2.s
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    uzp1 p1.h, p3.h, p1.h
 ; CHECK-NEXT:    uzp1 p0.h, p0.h, p4.h
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/unwind-preserved.ll b/llvm/test/CodeGen/AArch64/unwind-preserved.ll
index f3c4d217e6fcaa..415865b5293b76 100644
--- a/llvm/test/CodeGen/AArch64/unwind-preserved.ll
+++ b/llvm/test/CodeGen/AArch64/unwind-preserved.ll
@@ -63,34 +63,34 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
 ; CHECK-NEXT:    ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
-; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #18
 ; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    .cfi_restore z8
@@ -112,34 +112,34 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
 ; CHECK-NEXT:    ldr z0, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
-; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #18
 ; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    .cfi_restore z8
@@ -215,34 +215,34 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
 ; GISEL-NEXT:    ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload
 ; GISEL-NEXT:    addvl sp, sp, #2
 ; GISEL-NEXT:    .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
-; GISEL-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
 ; GISEL-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
 ; GISEL-NEXT:    addvl sp, sp, #18
 ; GISEL-NEXT:    .cfi_def_cfa wsp, 16
 ; GISEL-NEXT:    .cfi_restore z8
@@ -264,34 +264,34 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
 ; GISEL-NEXT:    ldr z0, [sp] // 16-byte Folded Reload
 ; GISEL-NEXT:    addvl sp, sp, #2
 ; GISEL-NEXT:    .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
-; GISEL-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
 ; GISEL-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
 ; GISEL-NEXT:    addvl sp, sp, #18
 ; GISEL-NEXT:    .cfi_def_cfa wsp, 16
 ; GISEL-NEXT:    .cfi_restore z8

>From 3a4ed507d66acddb1f7d0f61079f15f4e8048071 Mon Sep 17 00:00:00 2001
From: Caroline Concatto <caroline.concatto at arm.com>
Date: Wed, 31 Jan 2024 14:37:44 +0000
Subject: [PATCH 2/3] Sort the RegPairs by offset so Z and P registers are
 restored in ascending order

---
 .../Target/AArch64/AArch64FrameLowering.cpp   |  14 +
 llvm/test/CodeGen/AArch64/active_lane_mask.ll |   2 +-
 llvm/test/CodeGen/AArch64/framelayout-sve.mir |  38 +-
 .../sme-streaming-compatible-interface.ll     | 104 +-
 .../AArch64/sme-streaming-interface.ll        | 104 +-
 .../CodeGen/AArch64/sme2-intrinsics-ld1.ll    | 912 +++++++++---------
 .../CodeGen/AArch64/sme2-intrinsics-ldnt1.ll  | 912 +++++++++---------
 .../test/CodeGen/AArch64/stack-probing-sve.ll |  76 +-
 llvm/test/CodeGen/AArch64/sve-alloca.ll       |  52 +-
 .../sve-breakdown-scalable-vectortype.ll      |  84 +-
 .../AArch64/sve-calling-convention-mixed.ll   | 104 +-
 llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll   |  12 +-
 llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll   |  12 +-
 llvm/test/CodeGen/AArch64/sve-pred-arith.ll   |  16 +-
 llvm/test/CodeGen/AArch64/sve-tailcall.ll     | 108 +--
 llvm/test/CodeGen/AArch64/sve-trunc.ll        |   4 +-
 llvm/test/CodeGen/AArch64/unwind-preserved.ll | 208 ++--
 17 files changed, 1388 insertions(+), 1374 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 74e94e85d1eb9d..9370cbbc6ca21d 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -3196,9 +3196,21 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters(
     return true;
   }
 
+  SmallVector<RegPairInfo, 8> RegPairsScalable = RegPairs;
+  llvm::stable_sort(
+      RegPairsScalable, [](const RegPairInfo &A, const RegPairInfo &B) {
+        return !(A.Type == RegPairInfo::PPR && B.Type == RegPairInfo::ZPR);
+      });
+
+  for (const RegPairInfo &RPI : RegPairsScalable)
+    if (RPI.isScalable())
+      EmitMI(RPI);
+
   if (ReverseCSRRestoreSeq) {
     MachineBasicBlock::iterator First = MBB.end();
     for (const RegPairInfo &RPI : reverse(RegPairs)) {
+      if (RPI.isScalable())
+        continue;
       MachineBasicBlock::iterator It = EmitMI(RPI);
       if (First == MBB.end())
         First = It;
@@ -3207,6 +3219,8 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters(
       MBB.splice(MBBI, &MBB, First);
   } else {
     for (const RegPairInfo &RPI : RegPairs) {
+      if (RPI.isScalable())
+        continue;
       (void)EmitMI(RPI);
     }
   }
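
[Editorial sketch, not part of the patch.] For readers skimming the hunk
above: the intent is that scalable callee-saves are reloaded first, with all
Z-registers ahead of all P-registers and each group in ascending slot order,
while the existing ReverseCSRRestoreSeq/forward loops now handle only the
non-scalable pairs. Below is a minimal, self-contained C++ sketch of that
reordering. The PairKind/Pair names are hypothetical, std::stable_partition
stands in for the stable_sort above, and the pairs are assumed to already
carry ascending slots within each class, as the prologue lays them out:

#include <algorithm>
#include <cstdio>
#include <iterator>
#include <vector>

enum class PairKind { GPR, ZPR, PPR };

struct Pair {
  PairKind Kind;
  int Slot; // spill-slot index; ascending within each class
  bool isScalable() const { return Kind != PairKind::GPR; }
};

int main() {
  // A made-up CSR list mixing predicate and vector slots.
  std::vector<Pair> Pairs = {{PairKind::PPR, 4},  {PairKind::PPR, 5},
                             {PairKind::ZPR, 2},  {PairKind::ZPR, 3},
                             {PairKind::ZPR, 16}, {PairKind::ZPR, 17},
                             {PairKind::GPR, 0}};

  // Emit scalable reloads first, with every ZPR ahead of every PPR.
  std::vector<Pair> Scalable;
  std::copy_if(Pairs.begin(), Pairs.end(), std::back_inserter(Scalable),
               [](const Pair &P) { return P.isScalable(); });
  std::stable_partition(Scalable.begin(), Scalable.end(),
                        [](const Pair &P) { return P.Kind == PairKind::ZPR; });

  for (const Pair &P : Scalable)
    std::printf("ldr %c-reg, [sp, #%d, mul vl]\n",
                P.Kind == PairKind::ZPR ? 'z' : 'p', P.Slot);
  // Prints z slots 2, 3, 16, 17, then p slots 4, 5: Z-registers before
  // P-registers, each group in ascending slot order.
  return 0;
}

The stable partition keeps each class's ascending-slot order intact, which is
what lets the updated epilogues below issue the z reloads at slots 2..17
followed by the p reloads at slots 4..15.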
diff --git a/llvm/test/CodeGen/AArch64/active_lane_mask.ll b/llvm/test/CodeGen/AArch64/active_lane_mask.ll
index 15a61910fb1009..a65c5d66677946 100644
--- a/llvm/test/CodeGen/AArch64/active_lane_mask.ll
+++ b/llvm/test/CodeGen/AArch64/active_lane_mask.ll
@@ -191,8 +191,8 @@ define <vscale x 32 x i1> @lane_mask_nxv32i1_i32(i32 %index, i32 %TC) {
 ; CHECK-NEXT:    uzp1 p3.h, p3.h, p4.h
 ; CHECK-NEXT:    cmphi p0.s, p0/z, z25.s, z1.s
 ; CHECK-NEXT:    uzp1 p4.h, p5.h, p6.h
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    uzp1 p2.h, p2.h, p0.h
 ; CHECK-NEXT:    uzp1 p0.b, p1.b, p3.b
 ; CHECK-NEXT:    uzp1 p1.b, p4.b, p2.b
diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve.mir b/llvm/test/CodeGen/AArch64/framelayout-sve.mir
index 8bfd12067d77c9..f7920e595e44ba 100644
--- a/llvm/test/CodeGen/AArch64/framelayout-sve.mir
+++ b/llvm/test/CodeGen/AArch64/framelayout-sve.mir
@@ -710,9 +710,9 @@ body:             |
 
 # CHECK:      $sp = frame-destroy ADDXri $sp, 32, 0
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK:      $p4 = frame-destroy LDR_PXI $sp, 7
-# CHECK:      $p5 = frame-destroy LDR_PXI $sp, 6
 # CHECK:      $p6 = frame-destroy LDR_PXI $sp, 5
+# CHECK:      $p5 = frame-destroy LDR_PXI $sp, 6
+# CHECK:      $p4 = frame-destroy LDR_PXI $sp, 7
 # CHECK:      $sp = frame-destroy ADDVL_XXI $sp, 1
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
 # CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.4)
@@ -772,9 +772,9 @@ body:             |
 
 # CHECK:      $sp  = frame-destroy ADDXri $sp, 32, 0
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK-NEXT: $z8  = frame-destroy LDR_ZXI $sp, 2
+# CHECK-NEXT: $z10  = frame-destroy LDR_ZXI $sp, 0
 # CHECK-NEXT: $z9  = frame-destroy LDR_ZXI $sp, 1
-# CHECK-NEXT: $z10 = frame-destroy LDR_ZXI $sp, 0
+# CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 2
 # CHECK-NEXT: $sp  = frame-destroy ADDVL_XXI $sp, 3
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z8
@@ -873,14 +873,14 @@ body:             |
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22
 # CHECK:      $sp = frame-destroy ADDVL_XXI $sp, 1
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK:      $z8 = frame-destroy LDR_ZXI $sp, 17
-# CHECK:      $z9 = frame-destroy LDR_ZXI $sp, 16
-# CHECK:      $z22 = frame-destroy LDR_ZXI $sp, 3
 # CHECK:      $z23 = frame-destroy LDR_ZXI $sp, 2
-# CHECK:      $p4 = frame-destroy LDR_PXI $sp, 15
-# CHECK:      $p5 = frame-destroy LDR_PXI $sp, 14
-# CHECK:      $p14 = frame-destroy LDR_PXI $sp, 5
+# CHECK:      $z22 = frame-destroy LDR_ZXI $sp, 3
+# CHECK:      $z9 = frame-destroy LDR_ZXI $sp, 16
+# CHECK:      $z8 = frame-destroy LDR_ZXI $sp, 17
 # CHECK:      $p15 = frame-destroy LDR_PXI $sp, 4
+# CHECK:      $p14 = frame-destroy LDR_PXI $sp, 5
+# CHECK:      $p5 = frame-destroy LDR_PXI $sp, 14
+# CHECK:      $p4 = frame-destroy LDR_PXI $sp, 15
 # CHECK:      $sp = frame-destroy ADDVL_XXI $sp, 18
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 32
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z8
@@ -1037,14 +1037,14 @@ body:             |
 # CHECK-NEXT: $sp = frame-setup ANDXri killed $[[TMP]]
 
 # CHECK:      $sp = frame-destroy ADDVL_XXI $fp, -18
-# CHECK:      $z8 = frame-destroy LDR_ZXI $sp, 17
-# CHECK-NEXT: $z9 = frame-destroy LDR_ZXI $sp, 16
-# CHECK:      $z22 = frame-destroy LDR_ZXI $sp, 3
-# CHECK-NEXT: $z23 = frame-destroy LDR_ZXI $sp, 2
-# CHECK-NEXT: $p4 = frame-destroy LDR_PXI $sp, 15
-# CHECK-NEXT: $p5 = frame-destroy LDR_PXI $sp, 14
-# CHECK:      $p14 = frame-destroy LDR_PXI $sp, 5
+# CHECK:      $z23 = frame-destroy LDR_ZXI $sp, 2
+# CHECK-NEXT: $z22 = frame-destroy LDR_ZXI $sp, 3
+# CHECK:      $z9 = frame-destroy LDR_ZXI $sp, 16
+# CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 17
 # CHECK-NEXT: $p15 = frame-destroy LDR_PXI $sp, 4
+# CHECK-NEXT: $p14 = frame-destroy LDR_PXI $sp, 5
+# CHECK:      $p5 = frame-destroy LDR_PXI $sp, 14
+# CHECK-NEXT: $p4 = frame-destroy LDR_PXI $sp, 15
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z8
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z9
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z10
@@ -1198,10 +1198,10 @@ body:             |
 
 # CHECK:      $sp = frame-destroy ADDVL_XXI $sp, 7
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 2
 # CHECK-NEXT: $z23 = frame-destroy LDR_ZXI $sp, 1
-# CHECK-NEXT: $p4 = frame-destroy LDR_PXI $sp, 7
+# CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 2
 # CHECK-NEXT: $p15 = frame-destroy LDR_PXI $sp, 6
+# CHECK-NEXT: $p4 = frame-destroy LDR_PXI $sp, 7
 # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 3
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z8
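
[Editorial note, not part of the patch.] The z and p immediates in these
expectations overlap numerically (z at slots 2..17, p at slots 4..15) but
address disjoint bytes: SVE fill/spill scales the "#imm, mul vl" operand by
the full vector length VL for z-registers and by the predicate length VL/8
for p-registers. A quick standalone check of that claim (my own sketch):

#include <cassert>
#include <cstdio>

int main() {
  for (unsigned VL = 16; VL <= 256; VL *= 2) { // vector length in bytes
    unsigned PL = VL / 8;                      // predicate length in bytes
    // p4..p15 occupy slots 4..15; z8..z23 occupy slots 2..17 (CHECK lines).
    unsigned PredEnd = (15 + 1) * PL; // one past the last predicate byte
    unsigned ZBegin = 2 * VL;         // first z-register byte
    assert(PredEnd <= ZBegin && "predicate area must sit below the z area");
    std::printf("VL=%3u: p area ends at %u, z area starts at %u\n", VL,
                PredEnd, ZBegin);
  }
  return 0;
}

At every legal VL the predicate area ends exactly where the z area begins
(both at 2*VL), and the "2-byte" vs "16-byte Folded Reload" comments match
PL and VL at the minimum 128-bit vector length.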
diff --git a/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll b/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll
index 23acc57159907b..af5a6e1704f927 100644
--- a/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll
+++ b/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll
@@ -219,34 +219,34 @@ define <vscale x 2 x double> @streaming_compatible_with_scalable_vectors(<vscale
 ; CHECK-NEXT:    ldr z1, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    fadd z0.d, z1.d, z0.d
 ; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #18
 ; CHECK-NEXT:    ldp x30, x19, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x29, [sp], #32 // 8-byte Folded Reload
@@ -311,34 +311,34 @@ define <vscale x 2 x i1> @streaming_compatible_with_predicate_vectors(<vscale x
 ; CHECK-NEXT:    ldr p1, [sp, #6, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    and p0.b, p1/z, p1.b, p0.b
 ; CHECK-NEXT:    addvl sp, sp, #1
-; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #18
 ; CHECK-NEXT:    ldp x30, x19, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x29, [sp], #32 // 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/sme-streaming-interface.ll b/llvm/test/CodeGen/AArch64/sme-streaming-interface.ll
index efb904b9bb333e..5699d3e33a7add 100644
--- a/llvm/test/CodeGen/AArch64/sme-streaming-interface.ll
+++ b/llvm/test/CodeGen/AArch64/sme-streaming-interface.ll
@@ -187,34 +187,34 @@ define <vscale x 4 x i32> @smstart_clobber_sve(<vscale x 4 x i32> %x) nounwind {
 ; CHECK-NEXT:    smstop sm
 ; CHECK-NEXT:    ldr z0, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #1
-; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #18
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -267,34 +267,34 @@ define <vscale x 4 x i32> @smstart_clobber_sve_duplicate(<vscale x 4 x i32> %x)
 ; CHECK-NEXT:    smstop sm
 ; CHECK-NEXT:    ldr z0, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #1
-; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #18
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll
index f575d65490e362..ea7808d73093e6 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll
@@ -77,23 +77,23 @@ define <vscale x 32 x i8> @ld1_x2_i8_z0_z8(<vscale x 16 x i8> %unused, <vscale x
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -129,21 +129,21 @@ define <vscale x 32 x i8> @ld1_x2_i8_z0_z8(<vscale x 16 x i8> %unused, <vscale x
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -232,23 +232,23 @@ define <vscale x 32 x i8> @ld1_x2_i8_z0_z8_scalar(<vscale x 16 x i8> %unused, <v
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -284,21 +284,21 @@ define <vscale x 32 x i8> @ld1_x2_i8_z0_z8_scalar(<vscale x 16 x i8> %unused, <v
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -388,23 +388,23 @@ define <vscale x 16 x i16> @ld1_x2_i16_z0_z8(<vscale x 8 x i16> %unused, <vscale
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -440,21 +440,21 @@ define <vscale x 16 x i16> @ld1_x2_i16_z0_z8(<vscale x 8 x i16> %unused, <vscale
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -543,23 +543,23 @@ define <vscale x 16 x i16> @ld1_x2_i16_z0_z8_scalar(<vscale x 8 x i16> %unused,
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -595,21 +595,21 @@ define <vscale x 16 x i16> @ld1_x2_i16_z0_z8_scalar(<vscale x 8 x i16> %unused,
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -699,23 +699,23 @@ define <vscale x 8 x i32> @ld1_x2_i32_z0_z8(<vscale x 4 x i32> %unused, <vscale
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -751,21 +751,21 @@ define <vscale x 8 x i32> @ld1_x2_i32_z0_z8(<vscale x 4 x i32> %unused, <vscale
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -854,23 +854,23 @@ define <vscale x 8 x i32> @ld1_x2_i32_z0_z8_scalar(<vscale x 4 x i32> %unused, <
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -906,21 +906,21 @@ define <vscale x 8 x i32> @ld1_x2_i32_z0_z8_scalar(<vscale x 4 x i32> %unused, <
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1010,23 +1010,23 @@ define <vscale x 4 x i64> @ld1_x2_i64_z0_z8(<vscale x 2 x i64> %unused, <vscale
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -1062,21 +1062,21 @@ define <vscale x 4 x i64> @ld1_x2_i64_z0_z8(<vscale x 2 x i64> %unused, <vscale
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1165,23 +1165,23 @@ define <vscale x 4 x i64> @ld1_x2_i64_z0_z8_scalar(<vscale x 2 x i64> %unused, <
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -1217,21 +1217,21 @@ define <vscale x 4 x i64> @ld1_x2_i64_z0_z8_scalar(<vscale x 2 x i64> %unused, <
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1323,24 +1323,24 @@ define <vscale x 64 x i8> @ld1_x4_i8_z0_z4_z8_z12(<vscale x 16 x i8> %unused, <v
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1380,20 +1380,20 @@ define <vscale x 64 x i8> @ld1_x4_i8_z0_z4_z8_z12(<vscale x 16 x i8> %unused, <v
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1488,24 +1488,24 @@ define <vscale x 64 x i8> @ld1_x4_i8_z0_z4_z8_z12_scalar(<vscale x 16 x i8> %unu
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1545,20 +1545,20 @@ define <vscale x 64 x i8> @ld1_x4_i8_z0_z4_z8_z12_scalar(<vscale x 16 x i8> %unu
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1654,24 +1654,24 @@ define <vscale x 32 x i16> @ld1_x4_i16_z0_z4_z8_z12(<vscale x 8 x i16> %unused,
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1711,20 +1711,20 @@ define <vscale x 32 x i16> @ld1_x4_i16_z0_z4_z8_z12(<vscale x 8 x i16> %unused,
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1820,24 +1820,24 @@ define <vscale x 32 x i16> @ld1_x4_i16_z0_z4_z8_z12_scalar(<vscale x 8 x i16> %u
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1877,20 +1877,20 @@ define <vscale x 32 x i16> @ld1_x4_i16_z0_z4_z8_z12_scalar(<vscale x 8 x i16> %u
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1986,24 +1986,24 @@ define <vscale x 16 x i32> @ld1_x4_i32_z0_z4_z8_z12(<vscale x 4 x i32> %unused,
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -2043,20 +2043,20 @@ define <vscale x 16 x i32> @ld1_x4_i32_z0_z4_z8_z12(<vscale x 4 x i32> %unused,
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -2152,24 +2152,24 @@ define <vscale x 16 x i32> @ld1_x4_i32_z0_z4_z8_z12_scalar(<vscale x 4 x i32> %u
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -2209,20 +2209,20 @@ define <vscale x 16 x i32> @ld1_x4_i32_z0_z4_z8_z12_scalar(<vscale x 4 x i32> %u
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -2318,24 +2318,24 @@ define <vscale x 8 x i64> @ld1_x4_i64_z0_z4_z8_z12(<vscale x 2 x i64> %unused, <
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -2375,20 +2375,20 @@ define <vscale x 8 x i64> @ld1_x4_i64_z0_z4_z8_z12(<vscale x 2 x i64> %unused, <
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -2484,24 +2484,24 @@ define <vscale x 8 x i64> @ld1_x4_i64_z0_z4_z8_z12_scalar(<vscale x 2 x i64> %un
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -2541,20 +2541,20 @@ define <vscale x 8 x i64> @ld1_x4_i64_z0_z4_z8_z12_scalar(<vscale x 2 x i64> %un
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll
index 0c42b8ba17c60c..7e2d28fbf79828 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll
@@ -30,23 +30,23 @@ define <vscale x 32 x i8> @ldnt1_x2_i8_z0_z8(<vscale x 16 x i8> %unused, <vscale
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -82,21 +82,21 @@ define <vscale x 32 x i8> @ldnt1_x2_i8_z0_z8(<vscale x 16 x i8> %unused, <vscale
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -138,23 +138,23 @@ define <vscale x 32 x i8> @ldnt1_x2_i8_z0_z8_scalar(<vscale x 16 x i8> %unused,
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -190,21 +190,21 @@ define <vscale x 32 x i8> @ldnt1_x2_i8_z0_z8_scalar(<vscale x 16 x i8> %unused,
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -247,23 +247,23 @@ define <vscale x 16 x i16> @ldnt1_x2_i16_z0_z8(<vscale x 8 x i16> %unused, <vsca
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -299,21 +299,21 @@ define <vscale x 16 x i16> @ldnt1_x2_i16_z0_z8(<vscale x 8 x i16> %unused, <vsca
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -355,23 +355,23 @@ define <vscale x 16 x i16> @ldnt1_x2_i16_z0_z8_scalar(<vscale x 8 x i16> %unused
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -407,21 +407,21 @@ define <vscale x 16 x i16> @ldnt1_x2_i16_z0_z8_scalar(<vscale x 8 x i16> %unused
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -464,23 +464,23 @@ define <vscale x 8 x i32> @ldnt1_x2_i32_z0_z8(<vscale x 4 x i32> %unused, <vscal
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -516,21 +516,21 @@ define <vscale x 8 x i32> @ldnt1_x2_i32_z0_z8(<vscale x 4 x i32> %unused, <vscal
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -572,23 +572,23 @@ define <vscale x 8 x i32> @ldnt1_x2_i32_z0_z8_scalar(<vscale x 4 x i32> %unused,
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -624,21 +624,21 @@ define <vscale x 8 x i32> @ldnt1_x2_i32_z0_z8_scalar(<vscale x 4 x i32> %unused,
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -681,23 +681,23 @@ define <vscale x 4 x i64> @ldnt1_x2_i64_z0_z8(<vscale x 2 x i64> %unused, <vscal
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -733,21 +733,21 @@ define <vscale x 4 x i64> @ldnt1_x2_i64_z0_z8(<vscale x 2 x i64> %unused, <vscal
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -789,23 +789,23 @@ define <vscale x 4 x i64> @ldnt1_x2_i64_z0_z8_scalar(<vscale x 2 x i64> %unused,
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z8.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; STRIDED-NEXT:    ret
@@ -841,21 +841,21 @@ define <vscale x 4 x i64> @ldnt1_x2_i64_z0_z8_scalar(<vscale x 2 x i64> %unused,
 ; CONTIGUOUS-NEXT:    ldr z0, [sp]
 ; CONTIGUOUS-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #2
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #16
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -898,24 +898,24 @@ define <vscale x 64 x i8> @ldnt1_x4_i8_z0_z4_z8_z12(<vscale x 16 x i8> %unused,
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -955,20 +955,20 @@ define <vscale x 64 x i8> @ldnt1_x4_i8_z0_z4_z8_z12(<vscale x 16 x i8> %unused,
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1014,24 +1014,24 @@ define <vscale x 64 x i8> @ldnt1_x4_i8_z0_z4_z8_z12_scalar(<vscale x 16 x i8> %u
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1071,20 +1071,20 @@ define <vscale x 64 x i8> @ldnt1_x4_i8_z0_z4_z8_z12_scalar(<vscale x 16 x i8> %u
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1131,24 +1131,24 @@ define <vscale x 32 x i16> @ldnt1_x4_i16_z0_z4_z8_z12(<vscale x 8 x i16> %unused
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1188,20 +1188,20 @@ define <vscale x 32 x i16> @ldnt1_x4_i16_z0_z4_z8_z12(<vscale x 8 x i16> %unused
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1247,24 +1247,24 @@ define <vscale x 32 x i16> @ldnt1_x4_i16_z0_z4_z8_z12_scalar(<vscale x 8 x i16>
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1304,20 +1304,20 @@ define <vscale x 32 x i16> @ldnt1_x4_i16_z0_z4_z8_z12_scalar(<vscale x 8 x i16>
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1364,24 +1364,24 @@ define <vscale x 16 x i32> @ldnt1_x4_i32_z0_z4_z8_z12(<vscale x 4 x i32> %unused
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1421,20 +1421,20 @@ define <vscale x 16 x i32> @ldnt1_x4_i32_z0_z4_z8_z12(<vscale x 4 x i32> %unused
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1480,24 +1480,24 @@ define <vscale x 16 x i32> @ldnt1_x4_i32_z0_z4_z8_z12_scalar(<vscale x 4 x i32>
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1537,20 +1537,20 @@ define <vscale x 16 x i32> @ldnt1_x4_i32_z0_z4_z8_z12_scalar(<vscale x 4 x i32>
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1597,24 +1597,24 @@ define <vscale x 8 x i64> @ldnt1_x4_i64_z0_z4_z8_z12(<vscale x 2 x i64> %unused,
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1654,20 +1654,20 @@ define <vscale x 8 x i64> @ldnt1_x4_i64_z0_z4_z8_z12(<vscale x 2 x i64> %unused,
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1713,24 +1713,24 @@ define <vscale x 8 x i64> @ldnt1_x4_i64_z0_z4_z8_z12_scalar(<vscale x 2 x i64> %
 ; STRIDED-NEXT:    nop
 ; STRIDED-NEXT:    //NO_APP
 ; STRIDED-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z2.d, z8.d
 ; STRIDED-NEXT:    mov z3.d, z12.d
-; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
-; STRIDED-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; STRIDED-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; STRIDED-NEXT:    mov z1.d, z4.d
 ; STRIDED-NEXT:    addvl sp, sp, #17
 ; STRIDED-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1770,20 +1770,20 @@ define <vscale x 8 x i64> @ldnt1_x4_i64_z0_z4_z8_z12_scalar(<vscale x 2 x i64> %
 ; CONTIGUOUS-NEXT:    ldr z2, [sp, #2, mul vl]
 ; CONTIGUOUS-NEXT:    ldr z3, [sp, #3, mul vl]
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #4
-; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
 ; CONTIGUOUS-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CONTIGUOUS-NEXT:    addvl sp, sp, #15
 ; CONTIGUOUS-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/stack-probing-sve.ll b/llvm/test/CodeGen/AArch64/stack-probing-sve.ll
index 9f76cbb8912b44..06946d1be8067f 100644
--- a/llvm/test/CodeGen/AArch64/stack-probing-sve.ll
+++ b/llvm/test/CodeGen/AArch64/stack-probing-sve.ll
@@ -191,10 +191,10 @@ define void @sve_4v_csr(<vscale x 4 x float> %a) #0 {
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
 ; CHECK-NEXT:    //APP
 ; CHECK-NEXT:    //NO_APP
-; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #4
 ; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    .cfi_restore z8
@@ -245,22 +245,22 @@ define void @sve_16v_csr(<vscale x 4 x float> %a) #0 {
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
 ; CHECK-NEXT:    //APP
 ; CHECK-NEXT:    //NO_APP
-; CHECK-NEXT:    ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z23, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #16
 ; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    .cfi_restore z8
@@ -317,10 +317,10 @@ define void @sve_4p_csr(<vscale x 4 x float> %a) #0 {
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    //APP
 ; CHECK-NEXT:    //NO_APP
-; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p9, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p10, [sp, #5, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    ldr p11, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p10, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p9, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -380,22 +380,22 @@ define void @sve_16v_1p_csr(<vscale x 4 x float> %a) #0 {
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
 ; CHECK-NEXT:    //APP
 ; CHECK-NEXT:    //NO_APP
-; CHECK-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #17
 ; CHECK-NEXT:    .cfi_def_cfa wsp, 16
@@ -697,9 +697,9 @@ define void @sve_unprobed_area(<vscale x 4 x float> %a, i32 %n) #0 {
 ; CHECK-NEXT:    //NO_APP
 ; CHECK-NEXT:    addvl sp, sp, #4
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
-; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p9, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #4
 ; CHECK-NEXT:    .cfi_def_cfa wsp, 16
diff --git a/llvm/test/CodeGen/AArch64/sve-alloca.ll b/llvm/test/CodeGen/AArch64/sve-alloca.ll
index d2087f0de07440..565311d8e3f8fe 100644
--- a/llvm/test/CodeGen/AArch64/sve-alloca.ll
+++ b/llvm/test/CodeGen/AArch64/sve-alloca.ll
@@ -66,34 +66,34 @@ define void @foo(<vscale x 4 x i64> %dst, i1 %cond) {
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    bl bar
 ; CHECK-NEXT:    addvl sp, x29, #-18
-; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    mov sp, x29
 ; CHECK-NEXT:    ldp x28, x19, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldp x29, x30, [sp], #32 // 16-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/sve-breakdown-scalable-vectortype.ll b/llvm/test/CodeGen/AArch64/sve-breakdown-scalable-vectortype.ll
index f2247ac11608e9..5b69b68552a4d7 100644
--- a/llvm/test/CodeGen/AArch64/sve-breakdown-scalable-vectortype.ll
+++ b/llvm/test/CodeGen/AArch64/sve-breakdown-scalable-vectortype.ll
@@ -25,8 +25,8 @@ define <vscale x 32 x i8> @wide_32i8(i1 %b, <vscale x 16 x i8> %legal, <vscale x
 ; CHECK-NEXT:  .LBB0_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z9.d
 ; CHECK-NEXT:    mov z1.d, z8.d
-; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z9, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -53,8 +53,8 @@ define <vscale x 16 x i16> @wide_16i16(i1 %b, <vscale x 16 x i8> %legal, <vscale
 ; CHECK-NEXT:  .LBB1_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z9.d
 ; CHECK-NEXT:    mov z1.d, z8.d
-; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z9, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -81,8 +81,8 @@ define <vscale x 8 x i32> @wide_8i32(i1 %b, <vscale x 16 x i8> %legal, <vscale x
 ; CHECK-NEXT:  .LBB2_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z9.d
 ; CHECK-NEXT:    mov z1.d, z8.d
-; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z9, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -109,8 +109,8 @@ define <vscale x 4 x i64> @wide_4i64(i1 %b, <vscale x 16 x i8> %legal, <vscale x
 ; CHECK-NEXT:  .LBB3_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z9.d
 ; CHECK-NEXT:    mov z1.d, z8.d
-; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z9, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -137,8 +137,8 @@ define <vscale x 16 x half> @wide_16f16(i1 %b, <vscale x 16 x i8> %legal, <vscal
 ; CHECK-NEXT:  .LBB4_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z9.d
 ; CHECK-NEXT:    mov z1.d, z8.d
-; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z9, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -165,8 +165,8 @@ define <vscale x 8 x float> @wide_8f32(i1 %b, <vscale x 16 x i8> %legal, <vscale
 ; CHECK-NEXT:  .LBB5_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z9.d
 ; CHECK-NEXT:    mov z1.d, z8.d
-; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z9, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -193,8 +193,8 @@ define <vscale x 4 x double> @wide_4f64(i1 %b, <vscale x 16 x i8> %legal, <vscal
 ; CHECK-NEXT:  .LBB6_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z9.d
 ; CHECK-NEXT:    mov z1.d, z8.d
-; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z9, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -227,10 +227,10 @@ define <vscale x 48 x i8> @wide_48i8(i1 %b, <vscale x 16 x i8> %legal, <vscale x
 ; CHECK-NEXT:  .LBB7_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z10.d
 ; CHECK-NEXT:    mov z1.d, z9.d
-; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov z2.d, z8.d
+; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -259,10 +259,10 @@ define <vscale x 24 x i16> @wide_24i16(i1 %b, <vscale x 16 x i8> %legal, <vscale
 ; CHECK-NEXT:  .LBB8_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z10.d
 ; CHECK-NEXT:    mov z1.d, z9.d
-; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov z2.d, z8.d
+; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -291,10 +291,10 @@ define <vscale x 12 x i32> @wide_12i32(i1 %b, <vscale x 16 x i8> %legal, <vscale
 ; CHECK-NEXT:  .LBB9_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z10.d
 ; CHECK-NEXT:    mov z1.d, z9.d
-; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov z2.d, z8.d
+; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -323,10 +323,10 @@ define <vscale x 6 x i64> @wide_6i64(i1 %b, <vscale x 16 x i8> %legal, <vscale x
 ; CHECK-NEXT:  .LBB10_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z10.d
 ; CHECK-NEXT:    mov z1.d, z9.d
-; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov z2.d, z8.d
+; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -355,10 +355,10 @@ define <vscale x 24 x half> @wide_24f16(i1 %b, <vscale x 16 x i8> %legal, <vscal
 ; CHECK-NEXT:  .LBB11_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z10.d
 ; CHECK-NEXT:    mov z1.d, z9.d
-; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov z2.d, z8.d
+; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -387,10 +387,10 @@ define <vscale x 12 x float> @wide_12f32(i1 %b, <vscale x 16 x i8> %legal, <vsca
 ; CHECK-NEXT:  .LBB12_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z10.d
 ; CHECK-NEXT:    mov z1.d, z9.d
-; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov z2.d, z8.d
+; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -419,10 +419,10 @@ define <vscale x 6 x double> @wide_6f64(i1 %b, <vscale x 16 x i8> %legal, <vscal
 ; CHECK-NEXT:  .LBB13_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z10.d
 ; CHECK-NEXT:    mov z1.d, z9.d
-; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov z2.d, z8.d
+; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -457,12 +457,12 @@ define <vscale x 64 x i8> @wide_64i8(i1 %b, <vscale x 16 x i8> %legal, <vscale x
 ; CHECK-NEXT:  .LBB14_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z11.d
 ; CHECK-NEXT:    mov z1.d, z10.d
-; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov z2.d, z9.d
 ; CHECK-NEXT:    mov z3.d, z8.d
-; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #4
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -493,12 +493,12 @@ define <vscale x 32 x i16> @wide_32i16(i1 %b, <vscale x 16 x i8> %legal, <vscale
 ; CHECK-NEXT:  .LBB15_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z11.d
 ; CHECK-NEXT:    mov z1.d, z10.d
-; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov z2.d, z9.d
 ; CHECK-NEXT:    mov z3.d, z8.d
-; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #4
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -529,12 +529,12 @@ define <vscale x 16 x i32> @wide_16i32(i1 %b, <vscale x 16 x i8> %legal, <vscale
 ; CHECK-NEXT:  .LBB16_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z11.d
 ; CHECK-NEXT:    mov z1.d, z10.d
-; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov z2.d, z9.d
 ; CHECK-NEXT:    mov z3.d, z8.d
-; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #4
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -565,12 +565,12 @@ define <vscale x 8 x i64> @wide_8i64(i1 %b, <vscale x 16 x i8> %legal, <vscale x
 ; CHECK-NEXT:  .LBB17_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z11.d
 ; CHECK-NEXT:    mov z1.d, z10.d
-; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov z2.d, z9.d
 ; CHECK-NEXT:    mov z3.d, z8.d
-; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #4
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -601,12 +601,12 @@ define <vscale x 32 x half> @wide_32f16(i1 %b, <vscale x 16 x i8> %legal, <vscal
 ; CHECK-NEXT:  .LBB18_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z11.d
 ; CHECK-NEXT:    mov z1.d, z10.d
-; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov z2.d, z9.d
 ; CHECK-NEXT:    mov z3.d, z8.d
-; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #4
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -637,12 +637,12 @@ define <vscale x 16 x float> @wide_16f32(i1 %b, <vscale x 16 x i8> %legal, <vsca
 ; CHECK-NEXT:  .LBB19_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z11.d
 ; CHECK-NEXT:    mov z1.d, z10.d
-; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov z2.d, z9.d
 ; CHECK-NEXT:    mov z3.d, z8.d
-; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #4
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -673,12 +673,12 @@ define <vscale x 8 x double> @wide_8f64(i1 %b, <vscale x 16 x i8> %legal, <vscal
 ; CHECK-NEXT:  .LBB20_2: // %common.ret
 ; CHECK-NEXT:    mov z0.d, z11.d
 ; CHECK-NEXT:    mov z1.d, z10.d
-; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov z2.d, z9.d
 ; CHECK-NEXT:    mov z3.d, z8.d
-; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #4
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
index 4e3b88bccae521..c621c33b5d288b 100644
--- a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
+++ b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
@@ -567,34 +567,34 @@ define <vscale x 4 x float> @sve_caller_non_sve_callee_high_range(<vscale x 4 x
 ; CHECK-NEXT:    bl non_sve_callee_high_range
 ; CHECK-NEXT:    ldr z0, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #3
-; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #18
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -659,34 +659,34 @@ define <vscale x 4 x float> @sve_ret_caller_non_sve_callee_high_range()  {
 ; CHECK-NEXT:    fmov s7, #7.00000000
 ; CHECK-NEXT:    bl non_sve_callee_high_range
 ; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #18
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll b/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll
index 50d38e1777951b..43113df07c079f 100644
--- a/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll
@@ -360,11 +360,11 @@ define <vscale x 8 x i32> @test_signed_v8f64_v8i32(<vscale x 8 x double> %f) {
 ; CHECK-NEXT:    fcmuo p0.d, p0/z, z2.d, z2.d
 ; CHECK-NEXT:    sel z0.d, p5, z26.d, z6.d
 ; CHECK-NEXT:    sel z1.d, p6, z26.d, z5.d
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z2.d, p7, z26.d, z7.d
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z3.d, p1, z26.d, z4.d
+; CHECK-NEXT:    sel z2.d, p7, z26.d, z7.d
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    sel z3.d, p1, z26.d, z4.d
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    mov z0.d, p2/m, #0 // =0x0
 ; CHECK-NEXT:    mov z1.d, p3/m, #0 // =0x0
 ; CHECK-NEXT:    mov z2.d, p4/m, #0 // =0x0
@@ -469,11 +469,11 @@ define <vscale x 8 x i16> @test_signed_v8f64_v8i16(<vscale x 8 x double> %f) {
 ; CHECK-NEXT:    fcmuo p0.d, p0/z, z0.d, z0.d
 ; CHECK-NEXT:    sel z2.d, p5, z25.d, z5.d
 ; CHECK-NEXT:    sel z0.d, p6, z25.d, z6.d
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z1.d, p7, z25.d, z7.d
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z3.d, p1, z25.d, z24.d
+; CHECK-NEXT:    sel z1.d, p7, z25.d, z7.d
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    sel z3.d, p1, z25.d, z24.d
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    mov z2.d, p2/m, #0 // =0x0
 ; CHECK-NEXT:    mov z0.d, p3/m, #0 // =0x0
 ; CHECK-NEXT:    mov z1.d, p4/m, #0 // =0x0
diff --git a/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll b/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll
index 51ea661840dfa4..c9e06fd9f3414e 100644
--- a/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll
@@ -268,11 +268,11 @@ define <vscale x 8 x i32> @test_signed_v8f64_v8i32(<vscale x 8 x double> %f) {
 ; CHECK-NEXT:    mov z7.d, p3/m, #0 // =0x0
 ; CHECK-NEXT:    mov z24.d, p4/m, #0 // =0x0
 ; CHECK-NEXT:    sel z1.d, p5, z0.d, z5.d
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z2.d, p6, z0.d, z6.d
 ; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z3.d, p1, z0.d, z7.d
+; CHECK-NEXT:    sel z2.d, p6, z0.d, z6.d
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    sel z3.d, p1, z0.d, z7.d
+; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    sel z4.d, p0, z0.d, z24.d
 ; CHECK-NEXT:    uzp1 z0.s, z2.s, z1.s
 ; CHECK-NEXT:    uzp1 z1.s, z4.s, z3.s
@@ -351,11 +351,11 @@ define <vscale x 8 x i16> @test_signed_v8f64_v8i16(<vscale x 8 x double> %f) {
 ; CHECK-NEXT:    mov z7.d, p3/m, #0 // =0x0
 ; CHECK-NEXT:    mov z24.d, p4/m, #0 // =0x0
 ; CHECK-NEXT:    sel z0.d, p5, z2.d, z5.d
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z1.d, p6, z2.d, z6.d
 ; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z3.d, p1, z2.d, z7.d
+; CHECK-NEXT:    sel z1.d, p6, z2.d, z6.d
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    sel z3.d, p1, z2.d, z7.d
+; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    sel z2.d, p0, z2.d, z24.d
 ; CHECK-NEXT:    uzp1 z0.s, z1.s, z0.s
 ; CHECK-NEXT:    uzp1 z1.s, z2.s, z3.s
diff --git a/llvm/test/CodeGen/AArch64/sve-pred-arith.ll b/llvm/test/CodeGen/AArch64/sve-pred-arith.ll
index 536d03cce31825..4d46ac5ecbaa95 100644
--- a/llvm/test/CodeGen/AArch64/sve-pred-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-arith.ll
@@ -66,13 +66,13 @@ define aarch64_sve_vector_pcs <vscale x 64 x i1> @add_nxv64i1(<vscale x 64 x i1>
 ; CHECK-NEXT:    ldr p8, [x3]
 ; CHECK-NEXT:    eor p0.b, p4/z, p0.b, p5.b
 ; CHECK-NEXT:    eor p1.b, p4/z, p1.b, p6.b
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    eor p2.b, p4/z, p2.b, p7.b
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    eor p3.b, p4/z, p3.b, p8.b
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    eor p2.b, p4/z, p2.b, p7.b
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    eor p3.b, p4/z, p3.b, p8.b
 ; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -150,13 +150,13 @@ define aarch64_sve_vector_pcs <vscale x 64 x i1> @sub_nxv64i1(<vscale x 64 x i1>
 ; CHECK-NEXT:    ldr p8, [x3]
 ; CHECK-NEXT:    eor p0.b, p4/z, p0.b, p5.b
 ; CHECK-NEXT:    eor p1.b, p4/z, p1.b, p6.b
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    eor p2.b, p4/z, p2.b, p7.b
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    eor p3.b, p4/z, p3.b, p8.b
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    eor p2.b, p4/z, p2.b, p7.b
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    eor p3.b, p4/z, p3.b, p8.b
 ; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/sve-tailcall.ll b/llvm/test/CodeGen/AArch64/sve-tailcall.ll
index 9857931eff3d66..4ddf007768fd2c 100644
--- a/llvm/test/CodeGen/AArch64/sve-tailcall.ll
+++ b/llvm/test/CodeGen/AArch64/sve-tailcall.ll
@@ -13,8 +13,8 @@ define <vscale x 4 x i32> @sve_caller_sve_callee() nounwind {
 ; CHECK-NEXT:    str z9, [sp, #1, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    //APP
 ; CHECK-NEXT:    //NO_APP
-; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    b sve_callee
@@ -32,8 +32,8 @@ define <vscale x 4 x i32> @sve_caller_sve_callee_fastcc() nounwind {
 ; CHECK-NEXT:    str z9, [sp, #1, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    //APP
 ; CHECK-NEXT:    //NO_APP
-; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z10, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    b sve_callee
@@ -83,34 +83,34 @@ define i32 @sve_caller_non_sve_callee(<vscale x 4 x i32> %arg) nounwind {
 ; CHECK-NEXT:    //APP
 ; CHECK-NEXT:    //NO_APP
 ; CHECK-NEXT:    bl non_sve_callee
-; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #18
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -158,34 +158,34 @@ define i32 @sve_caller_non_sve_callee_fastcc(<vscale x 4 x i32> %arg) nounwind {
 ; CHECK-NEXT:    //APP
 ; CHECK-NEXT:    //NO_APP
 ; CHECK-NEXT:    bl non_sve_callee
-; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #18
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-trunc.ll b/llvm/test/CodeGen/AArch64/sve-trunc.ll
index 214e79ed033555..dfa4a6148b86e9 100644
--- a/llvm/test/CodeGen/AArch64/sve-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-trunc.ll
@@ -136,9 +136,9 @@ define <vscale x 16 x i1> @trunc_i64toi1_split3(<vscale x 16 x i64> %in) {
 ; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
 ; CHECK-NEXT:    uzp1 p3.s, p4.s, p3.s
 ; CHECK-NEXT:    uzp1 p4.s, p6.s, p5.s
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    uzp1 p0.s, p0.s, p2.s
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    uzp1 p0.s, p0.s, p2.s
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    uzp1 p1.h, p3.h, p1.h
 ; CHECK-NEXT:    uzp1 p0.h, p0.h, p4.h
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/unwind-preserved.ll b/llvm/test/CodeGen/AArch64/unwind-preserved.ll
index 415865b5293b76..822be14faaeb1f 100644
--- a/llvm/test/CodeGen/AArch64/unwind-preserved.ll
+++ b/llvm/test/CodeGen/AArch64/unwind-preserved.ll
@@ -63,34 +63,34 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
 ; CHECK-NEXT:    ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
-; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #18
 ; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    .cfi_restore z8
@@ -112,34 +112,34 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
 ; CHECK-NEXT:    ldr z0, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
-; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #18
 ; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    .cfi_restore z8
@@ -215,34 +215,34 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
 ; GISEL-NEXT:    ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload
 ; GISEL-NEXT:    addvl sp, sp, #2
 ; GISEL-NEXT:    .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
-; GISEL-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
 ; GISEL-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
 ; GISEL-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
 ; GISEL-NEXT:    addvl sp, sp, #18
 ; GISEL-NEXT:    .cfi_def_cfa wsp, 16
 ; GISEL-NEXT:    .cfi_restore z8
@@ -264,34 +264,34 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
 ; GISEL-NEXT:    ldr z0, [sp] // 16-byte Folded Reload
 ; GISEL-NEXT:    addvl sp, sp, #2
 ; GISEL-NEXT:    .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
-; GISEL-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
 ; GISEL-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; GISEL-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
 ; GISEL-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
 ; GISEL-NEXT:    addvl sp, sp, #18
 ; GISEL-NEXT:    .cfi_def_cfa wsp, 16
 ; GISEL-NEXT:    .cfi_restore z8

>From f93078f0c2b4bbd14960b6a445d07bdc5160804d Mon Sep 17 00:00:00 2001
From: Caroline Concatto <caroline.concatto at arm.com>
Date: Wed, 7 Feb 2024 12:05:16 +0000
Subject: [PATCH 3/3] Use std functions, as they seem more stable than llvm's,
 for Predicate

---
 .../Target/AArch64/AArch64FrameLowering.cpp   | 28 +++++++++++--------
 1 file changed, 16 insertions(+), 12 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 9370cbbc6ca21d..b06a63c8916df8 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -3196,21 +3196,27 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters(
     return true;
   }
 
-  SmallVector<RegPairInfo, 8> RegPairsScalable = RegPairs;
-  llvm::stable_sort(
-      RegPairsScalable, [](const RegPairInfo &A, const RegPairInfo &B) {
-        return !(A.Type == RegPairInfo::PPR && B.Type == RegPairInfo::ZPR);
+  // For performance reasons, restore the SVE registers in increasing order
+  auto PPRBegin =
+      std::find_if(RegPairs.begin(), RegPairs.end(), [](const RegPairInfo &c) {
+        return c.Type == RegPairInfo::PPR;
       });
-
-  for (const RegPairInfo &RPI : RegPairsScalable)
-    if (RPI.isScalable())
-      EmitMI(RPI);
+  auto PPREnd = std::find_if(
+      RegPairs.rbegin(), RegPairs.rend(),
+      [](const RegPairInfo &c) { return c.Type == RegPairInfo::PPR; });
+  std::reverse(PPRBegin, PPREnd.base());
+  auto ZPRBegin =
+      std::find_if(RegPairs.begin(), RegPairs.end(), [](const RegPairInfo &c) {
+        return c.Type == RegPairInfo::ZPR;
+      });
+  auto ZPREnd = std::find_if(
+      RegPairs.rbegin(), RegPairs.rend(),
+      [](const RegPairInfo &c) { return c.Type == RegPairInfo::ZPR; });
+  std::reverse(ZPRBegin, ZPREnd.base());
 
   if (ReverseCSRRestoreSeq) {
     MachineBasicBlock::iterator First = MBB.end();
     for (const RegPairInfo &RPI : reverse(RegPairs)) {
-      if (RPI.isScalable())
-        continue;
       MachineBasicBlock::iterator It = EmitMI(RPI);
       if (First == MBB.end())
         First = It;
@@ -3219,8 +3225,6 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters(
       MBB.splice(MBBI, &MBB, First);
   } else {
     for (const RegPairInfo &RPI : RegPairs) {
-      if (RPI.isScalable())
-        continue;
       (void)EmitMI(RPI);
     }
   }
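
For reference, here is a small standalone sketch of the find_if/reverse
idiom the patch uses. The Pair struct, reverseRun helper, and the offsets
below are illustrative stand-ins for RegPairInfo, not the LLVM sources,
and, unlike the snippet above, the sketch guards the case where no register
of the requested kind was saved (std::reverse over an invalid range is
undefined behavior).

#include <algorithm>
#include <cstdio>
#include <vector>

struct Pair {
  enum Kind { GPR, PPR, ZPR } Type;
  int Offset; // stack offset in "mul vl" units, as in the ldr lines above
};

// Reverse the contiguous run of entries of kind K so that a later
// front-to-back walk of Pairs emits them in increasing-offset order.
// Assumes, as the patch does, that entries of one kind are contiguous.
static void reverseRun(std::vector<Pair> &Pairs, Pair::Kind K) {
  auto Begin = std::find_if(Pairs.begin(), Pairs.end(),
                            [K](const Pair &P) { return P.Type == K; });
  if (Begin == Pairs.end())
    return; // no registers of this kind were saved; nothing to reverse
  auto RLast = std::find_if(Pairs.rbegin(), Pairs.rend(),
                            [K](const Pair &P) { return P.Type == K; });
  // RLast.base() points one past the last matching element.
  std::reverse(Begin, RLast.base());
}

int main() {
  // The save order lists higher offsets first; restores want them increasing.
  std::vector<Pair> Pairs = {{Pair::ZPR, 17}, {Pair::ZPR, 3}, {Pair::ZPR, 2},
                             {Pair::PPR, 15}, {Pair::PPR, 5}, {Pair::PPR, 4}};
  reverseRun(Pairs, Pair::ZPR);
  reverseRun(Pairs, Pair::PPR);
  for (const Pair &P : Pairs) // prints: z@2 z@3 z@17 p@4 p@5 p@15
    std::printf("%c@%d ", P.Type == Pair::ZPR ? 'z' : 'p', P.Offset);
  std::puts("");
}

Running this reorders each kind's run in place, matching the restore
sequences in the updated CHECK lines above: Z-registers from the lowest
mul-vl offset upward, followed by the predicate registers.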



More information about the llvm-commits mailing list