[llvm] [AArch64][SVE] Remove pseudo from LD1_IMM (PR #73631)

David Green via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 28 02:13:30 PST 2023


https://github.com/davemgreen created https://github.com/llvm/llvm-project/pull/73631

The LD1 immediate offset instructions have both a pseudo and a real instruction, mostly because they share a tablegen class with the FFR version of the instructions. As far as I can tell the pseudo for the non-FFR versions does not serve any useful purpose, so we can rejig the classes to only define the pseudo for the FFR instructions, similar to the existing sve_mem_cld_ss instructions.

The end result is that we no longer have a SideEffects flag on the LD1_IMM instructions whilst scheduling them, and we have a few fewer pseudo instructions, which is usually a good thing.
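
In short, the new layout looks like this (a trimmed sketch of the tablegen in the patch below; the InstAlias bodies are elided):

  // Normal contiguous load: only the real instruction, no pseudo.
  multiclass sve_mem_cld_si<bits<4> dtype, string asm, RegisterOperand listty,
                            ZPRRegOp zprty> {
    def "" : sve_mem_cld_si_base<dtype, 0, asm, listty>;
    // ... InstAliases ...
  }

  // The non-faulting (FFR) loads keep the pseudo, as the implicit FFR use has
  // no previous definition that early machine code passes can see.
  multiclass sve_mem_cldnf_si<bits<4> dtype, string asm, RegisterOperand listty,
                              ZPRRegOp zprty> {
    def _REAL : sve_mem_cld_si_base<dtype, 1, asm, listty>;
    // ... InstAliases ...
    let hasSideEffects = 1, hasNoSchedulingInfo = 1, mayLoad = 1 in
    def "" : Pseudo<(outs listty:$Zt),
                    (ins PPR3bAny:$Pg, GPR64sp:$Rn, simm4s1:$imm4), []>,
             PseudoInstExpansion<(!cast<Instruction>(NAME # _REAL) listty:$Zt,
                                  PPR3bAny:$Pg, GPR64sp:$Rn, simm4s1:$imm4)>;
  }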

From 126258f63c622a81a88a9ea132f8a74dda9a8e32 Mon Sep 17 00:00:00 2001
From: David Green <david.green at arm.com>
Date: Tue, 28 Nov 2023 10:10:11 +0000
Subject: [PATCH] [AArch64][SVE] Remove pseudo from LD1_IMM

The LD1 immediate offset instructions have both a pseudo and a real
instruction, mostly because they share a tablegen class with the FFR version
of the instructions. As far as I can tell the pseudo for the non-FFR versions
does not serve any useful purpose, so we can rejig the classes to only define
the pseudo for the FFR instructions, similar to the existing sve_mem_cld_ss
instructions.

The end result is that we no longer have a SideEffects flag on the LD1_IMM
instructions whilst scheduling them, and we have a few fewer pseudo
instructions, which is usually a good thing.
---
 llvm/lib/Target/AArch64/AArch64SchedA510.td   |   8 +-
 .../Target/AArch64/AArch64SchedNeoverseN2.td  |   8 +-
 .../Target/AArch64/AArch64SchedNeoverseV1.td  |   8 +-
 .../Target/AArch64/AArch64SchedNeoverseV2.td  |   8 +-
 llvm/lib/Target/AArch64/SVEInstrFormats.td    |  42 +++--
 .../alloca-load-store-scalable-array.ll       |  36 ++--
 .../alloca-load-store-scalable-struct.ll      |   8 +-
 ...rleaving-reductions-predicated-scalable.ll |  18 +-
 ...plex-deinterleaving-reductions-scalable.ll |  32 ++--
 .../insert-subvector-res-legalization.ll      |  60 +++----
 .../AArch64/sve-calling-convention-mixed.ll   | 122 ++++++-------
 .../AArch64/sve-fixed-length-concat.ll        |  56 +++---
 .../AArch64/sve-fixed-length-fcopysign.ll     | 100 +++++------
 .../AArch64/sve-fixed-length-fp-arith.ll      | 150 ++++++++--------
 .../AArch64/sve-fixed-length-fp-compares.ll   |  24 +--
 .../AArch64/sve-fixed-length-fp-fma.ll        |  45 ++---
 .../AArch64/sve-fixed-length-fp-minmax.ll     | 108 +++++++-----
 .../AArch64/sve-fixed-length-fp-reduce.ll     |  18 +-
 .../AArch64/sve-fixed-length-fp-vselect.ll    |  36 ++--
 .../sve-fixed-length-frame-offests-crash.ll   |  32 ++--
 .../AArch64/sve-fixed-length-int-arith.ll     | 120 ++++++-------
 .../AArch64/sve-fixed-length-int-compares.ll  |  32 ++--
 .../AArch64/sve-fixed-length-int-div.ll       |  24 +--
 .../AArch64/sve-fixed-length-int-log.ll       |  96 +++++-----
 .../AArch64/sve-fixed-length-int-minmax.ll    | 144 ++++++++-------
 .../AArch64/sve-fixed-length-int-mulh.ll      |  72 ++++----
 .../AArch64/sve-fixed-length-int-rem.ll       |  60 ++++---
 .../AArch64/sve-fixed-length-int-shifts.ll    | 108 +++++++-----
 .../AArch64/sve-fixed-length-int-vselect.ll   |  48 ++---
 .../AArch64/sve-fixed-length-masked-gather.ll |   2 +-
 .../AArch64/sve-fixed-length-masked-loads.ll  |  68 +++----
 .../sve-fixed-length-masked-scatter.ll        |  72 ++++----
 .../AArch64/sve-fixed-length-masked-stores.ll |  10 +-
 .../sve-fixed-length-permute-zip-uzp-trn.ll   |  64 +++----
 .../AArch64/sve-fixed-length-subvector.ll     |   4 +-
 .../sve-fixed-length-vector-shuffle-tbl.ll    |   6 +-
 .../sve-fixed-length-vector-shuffle.ll        | 166 +++++++++---------
 .../test/CodeGen/AArch64/sve-fptrunc-store.ll |  16 +-
 .../AArch64/sve-gather-scatter-dag-combine.ll |  36 ++--
 .../test/CodeGen/AArch64/sve-insert-vector.ll |  24 +--
 llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll  |   4 +-
 .../CodeGen/AArch64/sve-masked-ldst-sext.ll   |  28 +--
 .../CodeGen/AArch64/sve-masked-ldst-zext.ll   |  28 +--
 ...e-streaming-mode-fixed-length-ext-loads.ll |  26 +--
 ...e-streaming-mode-fixed-length-fcopysign.ll |  52 +++---
 ...aming-mode-fixed-length-fp-extend-trunc.ll |  40 ++---
 llvm/test/CodeGen/AArch64/sve-vscale-attr.ll  |  16 +-
 .../AArch64/sve2-fixed-length-fcopysign.ll    |  94 +++++-----
 48 files changed, 1226 insertions(+), 1153 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64SchedA510.td b/llvm/lib/Target/AArch64/AArch64SchedA510.td
index 1afbc5d9102ca96..1b66d6bb8fbd443 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedA510.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedA510.td
@@ -1168,10 +1168,10 @@ def : InstRW<[CortexA510Write<3, CortexA510UnitLd>], (instrs LDR_ZXI)>;
 def : InstRW<[CortexA510Write<3, CortexA510UnitLdSt>], (instrs LDR_PXI)>;
 
 // Contiguous load, scalar + imm
-def : InstRW<[CortexA510Write<3, CortexA510UnitLd>], (instregex "^LD1[BHWD]_IMM_REAL$",
-                                           "^LD1S?B_[HSD]_IMM_REAL$",
-                                           "^LD1S?H_[SD]_IMM_REAL$",
-                                           "^LD1S?W_D_IMM_REAL$" )>;
+def : InstRW<[CortexA510Write<3, CortexA510UnitLd>], (instregex "^LD1[BHWD]_IMM$",
+                                           "^LD1S?B_[HSD]_IMM$",
+                                           "^LD1S?H_[SD]_IMM$",
+                                           "^LD1S?W_D_IMM$" )>;
 // Contiguous load, scalar + scalar
 def : InstRW<[CortexA510Write<3, CortexA510UnitLd>], (instregex "^LD1[BHWD]$",
                                              "^LD1S?B_[HSD]$",
diff --git a/llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td b/llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td
index 517d0da7f47f428..503de3bee2b8678 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td
@@ -2082,10 +2082,10 @@ def : InstRW<[N2Write_6cyc_1L], (instrs LDR_ZXI)>;
 def : InstRW<[N2Write_6cyc_1L_1M], (instrs LDR_PXI)>;
 
 // Contiguous load, scalar + imm
-def : InstRW<[N2Write_6cyc_1L], (instregex "^LD1[BHWD]_IMM_REAL$",
-                                           "^LD1S?B_[HSD]_IMM_REAL$",
-                                           "^LD1S?H_[SD]_IMM_REAL$",
-                                           "^LD1S?W_D_IMM_REAL$" )>;
+def : InstRW<[N2Write_6cyc_1L], (instregex "^LD1[BHWD]_IMM$",
+                                           "^LD1S?B_[HSD]_IMM$",
+                                           "^LD1S?H_[SD]_IMM$",
+                                           "^LD1S?W_D_IMM$" )>;
 // Contiguous load, scalar + scalar
 def : InstRW<[N2Write_6cyc_1L01], (instregex "^LD1[BHWD]$",
                                              "^LD1S?B_[HSD]$",
diff --git a/llvm/lib/Target/AArch64/AArch64SchedNeoverseV1.td b/llvm/lib/Target/AArch64/AArch64SchedNeoverseV1.td
index 5c155c936da9fea..726be1a547b9519 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedNeoverseV1.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedNeoverseV1.td
@@ -1687,10 +1687,10 @@ def : InstRW<[V1Write_6c_1L_1M], (instrs LDR_PXI)>;
 // Contiguous load, scalar + scalar
 // Contiguous load broadcast, scalar + imm
 // Contiguous load broadcast, scalar + scalar
-def : InstRW<[V1Write_6c_1L01], (instregex "^LD1[BHWD]_IMM_REAL$",
-                                           "^LD1S?B_[HSD]_IMM_REAL$",
-                                           "^LD1S?H_[SD]_IMM_REAL$",
-                                           "^LD1S?W_D_IMM_REAL$",
+def : InstRW<[V1Write_6c_1L01], (instregex "^LD1[BHWD]_IMM$",
+                                           "^LD1S?B_[HSD]_IMM$",
+                                           "^LD1S?H_[SD]_IMM$",
+                                           "^LD1S?W_D_IMM$",
                                            "^LD1[BWD]$",
                                            "^LD1S?B_[HSD]$",
                                            "^LD1S?W_D$",
diff --git a/llvm/lib/Target/AArch64/AArch64SchedNeoverseV2.td b/llvm/lib/Target/AArch64/AArch64SchedNeoverseV2.td
index eca7700d5ff6ae9..3367d5d0cd315ff 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedNeoverseV2.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedNeoverseV2.td
@@ -2608,10 +2608,10 @@ def : InstRW<[V2Write_6cyc_1L], (instrs LDR_ZXI)>;
 def : InstRW<[V2Write_6cyc_1L_1M], (instrs LDR_PXI)>;
 
 // Contiguous load, scalar + imm
-def : InstRW<[V2Write_6cyc_1L], (instregex "^LD1[BHWD]_IMM_REAL$",
-                                           "^LD1S?B_[HSD]_IMM_REAL$",
-                                           "^LD1S?H_[SD]_IMM_REAL$",
-                                           "^LD1S?W_D_IMM_REAL$" )>;
+def : InstRW<[V2Write_6cyc_1L], (instregex "^LD1[BHWD]_IMM$",
+                                           "^LD1S?B_[HSD]_IMM$",
+                                           "^LD1S?H_[SD]_IMM$",
+                                           "^LD1S?W_D_IMM$" )>;
 // Contiguous load, scalar + scalar
 def : InstRW<[V2Write_6cyc_1L], (instregex "^LD1[BHWD]$",
                                            "^LD1S?B_[HSD]$",
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index e765926d8a6355e..c0894e9c70680a4 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -7318,29 +7318,18 @@ class sve_mem_cld_si_base<bits<4> dtype, bit nf, string asm,
   let mayLoad = 1;
 }
 
-multiclass sve_mem_cld_si_base<bits<4> dtype, bit nf, string asm,
-                               RegisterOperand listty, ZPRRegOp zprty> {
-  def _REAL : sve_mem_cld_si_base<dtype, nf, asm, listty>;
+multiclass sve_mem_cld_si<bits<4> dtype, string asm, RegisterOperand listty,
+                          ZPRRegOp zprty> {
+  def "" : sve_mem_cld_si_base<dtype, 0, asm, listty>;
 
   def : InstAlias<asm # "\t$Zt, $Pg/z, [$Rn]",
-                  (!cast<Instruction>(NAME # _REAL) zprty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, 0), 0>;
+                  (!cast<Instruction>(NAME) zprty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, 0), 0>;
   def : InstAlias<asm # "\t$Zt, $Pg/z, [$Rn, $imm4, mul vl]",
-                  (!cast<Instruction>(NAME # _REAL) zprty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, simm4s1:$imm4), 0>;
+                  (!cast<Instruction>(NAME) zprty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, simm4s1:$imm4), 0>;
   def : InstAlias<asm # "\t$Zt, $Pg/z, [$Rn]",
-                  (!cast<Instruction>(NAME # _REAL) listty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, 0), 1>;
-
-  // We need a layer of indirection because early machine code passes balk at
-  // physical register (i.e. FFR) uses that have no previous definition.
-  let hasSideEffects = 1, hasNoSchedulingInfo = 1, mayLoad = 1 in {
-  def "" : Pseudo<(outs listty:$Zt), (ins PPR3bAny:$Pg, GPR64sp:$Rn, simm4s1:$imm4), []>,
-           PseudoInstExpansion<(!cast<Instruction>(NAME # _REAL) listty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, simm4s1:$imm4)>;
-  }
+                  (!cast<Instruction>(NAME) listty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, 0), 1>;
 }
 
-multiclass sve_mem_cld_si<bits<4> dtype, string asm, RegisterOperand listty,
-                          ZPRRegOp zprty>
-: sve_mem_cld_si_base<dtype, 0, asm, listty, zprty>;
-
 class sve_mem_cldnt_si_base<bits<2> msz, string asm, RegisterOperand VecList>
 : I<(outs VecList:$Zt), (ins PPR3bAny:$Pg, GPR64sp:$Rn, simm4s1:$imm4),
   asm, "\t$Zt, $Pg/z, [$Rn, $imm4, mul vl]",
@@ -7559,8 +7548,23 @@ multiclass sve_mem_cldff_ss<bits<4> dtype, string asm, RegisterOperand listty,
 }
 
 multiclass sve_mem_cldnf_si<bits<4> dtype, string asm, RegisterOperand listty,
-                            ZPRRegOp zprty>
-: sve_mem_cld_si_base<dtype, 1, asm, listty, zprty>;
+                            ZPRRegOp zprty> {
+  def _REAL : sve_mem_cld_si_base<dtype, 1, asm, listty>;
+
+  def : InstAlias<asm # "\t$Zt, $Pg/z, [$Rn]",
+                  (!cast<Instruction>(NAME # _REAL) zprty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, 0), 0>;
+  def : InstAlias<asm # "\t$Zt, $Pg/z, [$Rn, $imm4, mul vl]",
+                  (!cast<Instruction>(NAME # _REAL) zprty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, simm4s1:$imm4), 0>;
+  def : InstAlias<asm # "\t$Zt, $Pg/z, [$Rn]",
+                  (!cast<Instruction>(NAME # _REAL) listty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, 0), 1>;
+
+  // We need a layer of indirection because early machine code passes balk at
+  // physical register (i.e. FFR) uses that have no previous definition.
+  let hasSideEffects = 1, hasNoSchedulingInfo = 1, mayLoad = 1 in {
+  def "" : Pseudo<(outs listty:$Zt), (ins PPR3bAny:$Pg, GPR64sp:$Rn, simm4s1:$imm4), []>,
+           PseudoInstExpansion<(!cast<Instruction>(NAME # _REAL) listty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, simm4s1:$imm4)>;
+  }
+}
 
 class sve_mem_eld_si<bits<2> sz, bits<3> nregs, RegisterOperand VecList,
                      string asm, Operand immtype>
diff --git a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll
index 49bdaf0fcde9418..110f0ef7f4a5500 100644
--- a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll
+++ b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll
@@ -14,12 +14,12 @@ define void @array_1D(ptr %addr) #0 {
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x0, #2, mul vl]
-; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x0]
-; CHECK-NEXT:    st1d { z2.d }, p0, [sp]
+; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
 ; CHECK-NEXT:    st1d { z1.d }, p0, [sp, #2, mul vl]
-; CHECK-NEXT:    st1d { z0.d }, p0, [sp, #1, mul vl]
+; CHECK-NEXT:    st1d { z2.d }, p0, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -57,10 +57,10 @@ define void @array_1D_insert(ptr %addr, %my_subtype %elt) #0 {
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x0, #2, mul vl]
-; CHECK-NEXT:    st1d { z2.d }, p0, [sp, #2, mul vl]
-; CHECK-NEXT:    st1d { z1.d }, p0, [sp]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x0]
+; CHECK-NEXT:    st1d { z1.d }, p0, [sp, #2, mul vl]
+; CHECK-NEXT:    st1d { z2.d }, p0, [sp]
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -81,18 +81,18 @@ define void @array_2D(ptr %addr) #0 {
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 48 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, #1, mul vl]
-; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x0, #2, mul vl]
-; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x0, #5, mul vl]
+; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ld1d { z3.d }, p0/z, [x0, #4, mul vl]
-; CHECK-NEXT:    ld1d { z4.d }, p0/z, [x0, #5, mul vl]
-; CHECK-NEXT:    ld1d { z5.d }, p0/z, [x0]
-; CHECK-NEXT:    st1d { z5.d }, p0, [sp]
-; CHECK-NEXT:    st1d { z4.d }, p0, [sp, #5, mul vl]
+; CHECK-NEXT:    ld1d { z4.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT:    ld1d { z5.d }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
+; CHECK-NEXT:    st1d { z1.d }, p0, [sp, #5, mul vl]
 ; CHECK-NEXT:    st1d { z3.d }, p0, [sp, #4, mul vl]
-; CHECK-NEXT:    st1d { z2.d }, p0, [sp, #3, mul vl]
-; CHECK-NEXT:    st1d { z1.d }, p0, [sp, #2, mul vl]
-; CHECK-NEXT:    st1d { z0.d }, p0, [sp, #1, mul vl]
+; CHECK-NEXT:    st1d { z5.d }, p0, [sp, #3, mul vl]
+; CHECK-NEXT:    st1d { z4.d }, p0, [sp, #2, mul vl]
+; CHECK-NEXT:    st1d { z2.d }, p0, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #6
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll
index cc0f441d0aaae4e..f03a6f018d34d0c 100644
--- a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll
+++ b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll
@@ -13,12 +13,12 @@ define void @test(ptr %addr) #0 {
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x0, #2, mul vl]
-; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x0]
-; CHECK-NEXT:    st1d { z2.d }, p0, [sp]
+; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
 ; CHECK-NEXT:    st1d { z1.d }, p0, [sp, #2, mul vl]
-; CHECK-NEXT:    st1d { z0.d }, p0, [sp, #1, mul vl]
+; CHECK-NEXT:    st1d { z2.d }, p0, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll
index 01fd2b1113b000b..467c3c254fc2d38 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll
@@ -36,13 +36,13 @@ define %"class.std::complex" @complex_mul_v2f64(ptr %a, ptr %b) {
 ; CHECK-NEXT:    add x8, x8, x11
 ; CHECK-NEXT:    add x12, x12, x10
 ; CHECK-NEXT:    ld1d { z2.d }, p3/z, [x13, #1, mul vl]
-; CHECK-NEXT:    ld1d { z3.d }, p2/z, [x13]
 ; CHECK-NEXT:    ld1d { z4.d }, p3/z, [x14, #1, mul vl]
+; CHECK-NEXT:    ld1d { z3.d }, p2/z, [x13]
 ; CHECK-NEXT:    ld1d { z5.d }, p2/z, [x14]
-; CHECK-NEXT:    fcmla z6.d, p0/m, z5.d, z3.d, #0
 ; CHECK-NEXT:    fcmla z7.d, p0/m, z4.d, z2.d, #0
-; CHECK-NEXT:    fcmla z6.d, p0/m, z5.d, z3.d, #90
+; CHECK-NEXT:    fcmla z6.d, p0/m, z5.d, z3.d, #0
 ; CHECK-NEXT:    fcmla z7.d, p0/m, z4.d, z2.d, #90
+; CHECK-NEXT:    fcmla z6.d, p0/m, z5.d, z3.d, #90
 ; CHECK-NEXT:    mov z0.d, p3/m, z7.d
 ; CHECK-NEXT:    mov z1.d, p2/m, z6.d
 ; CHECK-NEXT:    b.mi .LBB0_1
@@ -138,13 +138,13 @@ define %"class.std::complex" @complex_mul_predicated_v2f64(ptr %a, ptr %b, ptr %
 ; CHECK-NEXT:    zip2 p2.d, p1.d, p1.d
 ; CHECK-NEXT:    zip1 p1.d, p1.d, p1.d
 ; CHECK-NEXT:    ld1d { z2.d }, p2/z, [x13, #1, mul vl]
-; CHECK-NEXT:    ld1d { z3.d }, p1/z, [x13]
 ; CHECK-NEXT:    ld1d { z4.d }, p2/z, [x14, #1, mul vl]
+; CHECK-NEXT:    ld1d { z3.d }, p1/z, [x13]
 ; CHECK-NEXT:    ld1d { z5.d }, p1/z, [x14]
-; CHECK-NEXT:    fcmla z6.d, p0/m, z5.d, z3.d, #0
 ; CHECK-NEXT:    fcmla z7.d, p0/m, z4.d, z2.d, #0
-; CHECK-NEXT:    fcmla z6.d, p0/m, z5.d, z3.d, #90
+; CHECK-NEXT:    fcmla z6.d, p0/m, z5.d, z3.d, #0
 ; CHECK-NEXT:    fcmla z7.d, p0/m, z4.d, z2.d, #90
+; CHECK-NEXT:    fcmla z6.d, p0/m, z5.d, z3.d, #90
 ; CHECK-NEXT:    mov z0.d, p2/m, z7.d
 ; CHECK-NEXT:    mov z1.d, p1/m, z6.d
 ; CHECK-NEXT:    b.ne .LBB1_1
@@ -241,13 +241,13 @@ define %"class.std::complex" @complex_mul_predicated_x2_v2f64(ptr %a, ptr %b, pt
 ; CHECK-NEXT:    zip1 p2.d, p1.d, p1.d
 ; CHECK-NEXT:    whilelo p1.d, x9, x10
 ; CHECK-NEXT:    ld1d { z2.d }, p3/z, [x13, #1, mul vl]
-; CHECK-NEXT:    ld1d { z3.d }, p2/z, [x13]
 ; CHECK-NEXT:    ld1d { z4.d }, p3/z, [x14, #1, mul vl]
+; CHECK-NEXT:    ld1d { z3.d }, p2/z, [x13]
 ; CHECK-NEXT:    ld1d { z5.d }, p2/z, [x14]
-; CHECK-NEXT:    fcmla z6.d, p0/m, z5.d, z3.d, #0
 ; CHECK-NEXT:    fcmla z7.d, p0/m, z4.d, z2.d, #0
-; CHECK-NEXT:    fcmla z6.d, p0/m, z5.d, z3.d, #90
+; CHECK-NEXT:    fcmla z6.d, p0/m, z5.d, z3.d, #0
 ; CHECK-NEXT:    fcmla z7.d, p0/m, z4.d, z2.d, #90
+; CHECK-NEXT:    fcmla z6.d, p0/m, z5.d, z3.d, #90
 ; CHECK-NEXT:    mov z0.d, p3/m, z7.d
 ; CHECK-NEXT:    mov z1.d, p2/m, z6.d
 ; CHECK-NEXT:    b.mi .LBB2_1
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll
index 956d2d941ac714d..1696ac8709d4060 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll
@@ -214,21 +214,21 @@ define %"class.std::complex" @complex_mul_v2f64_unrolled(ptr %a, ptr %b) {
 ; CHECK-NEXT:    ld1b { z4.b }, p1/z, [x0, x8]
 ; CHECK-NEXT:    ld1d { z5.d }, p0/z, [x14, #1, mul vl]
 ; CHECK-NEXT:    ld1b { z6.b }, p1/z, [x12, x8]
-; CHECK-NEXT:    ld1d { z7.d }, p0/z, [x15, #1, mul vl]
-; CHECK-NEXT:    ld1b { z16.b }, p1/z, [x1, x8]
-; CHECK-NEXT:    ld1d { z17.d }, p0/z, [x16, #1, mul vl]
+; CHECK-NEXT:    ld1b { z7.b }, p1/z, [x1, x8]
+; CHECK-NEXT:    ld1d { z16.d }, p0/z, [x16, #1, mul vl]
+; CHECK-NEXT:    ld1d { z17.d }, p0/z, [x15, #1, mul vl]
 ; CHECK-NEXT:    ld1b { z18.b }, p1/z, [x11, x8]
 ; CHECK-NEXT:    ld1d { z19.d }, p0/z, [x17, #1, mul vl]
 ; CHECK-NEXT:    adds x10, x10, x9
 ; CHECK-NEXT:    add x8, x8, x13
-; CHECK-NEXT:    fcmla z1.d, p0/m, z16.d, z4.d, #0
-; CHECK-NEXT:    fcmla z0.d, p0/m, z17.d, z5.d, #0
+; CHECK-NEXT:    fcmla z1.d, p0/m, z7.d, z4.d, #0
+; CHECK-NEXT:    fcmla z0.d, p0/m, z16.d, z5.d, #0
 ; CHECK-NEXT:    fcmla z2.d, p0/m, z18.d, z6.d, #0
-; CHECK-NEXT:    fcmla z3.d, p0/m, z19.d, z7.d, #0
-; CHECK-NEXT:    fcmla z1.d, p0/m, z16.d, z4.d, #90
-; CHECK-NEXT:    fcmla z0.d, p0/m, z17.d, z5.d, #90
+; CHECK-NEXT:    fcmla z3.d, p0/m, z19.d, z17.d, #0
+; CHECK-NEXT:    fcmla z1.d, p0/m, z7.d, z4.d, #90
+; CHECK-NEXT:    fcmla z0.d, p0/m, z16.d, z5.d, #90
 ; CHECK-NEXT:    fcmla z2.d, p0/m, z18.d, z6.d, #90
-; CHECK-NEXT:    fcmla z3.d, p0/m, z19.d, z7.d, #90
+; CHECK-NEXT:    fcmla z3.d, p0/m, z19.d, z17.d, #90
 ; CHECK-NEXT:    b.ne .LBB2_1
 ; CHECK-NEXT:  // %bb.2: // %exit.block
 ; CHECK-NEXT:    uzp1 z4.d, z2.d, z3.d
@@ -335,15 +335,15 @@ define dso_local %"class.std::complex" @reduction_mix(ptr %a, ptr %b, ptr noalia
 ; CHECK-NEXT:    zip1 z1.d, z2.d, z2.d
 ; CHECK-NEXT:  .LBB3_1: // %vector.body
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    ld1w { z3.d }, p0/z, [x3, x8, lsl #2]
-; CHECK-NEXT:    ld1d { z4.d }, p0/z, [x0]
-; CHECK-NEXT:    ld1d { z5.d }, p0/z, [x0, #1, mul vl]
-; CHECK-NEXT:    add x8, x8, x9
+; CHECK-NEXT:    ld1d { z3.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1d { z4.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    add x0, x0, x11
+; CHECK-NEXT:    ld1w { z5.d }, p0/z, [x3, x8, lsl #2]
+; CHECK-NEXT:    add x8, x8, x9
 ; CHECK-NEXT:    cmp x10, x8
-; CHECK-NEXT:    fadd z0.d, z5.d, z0.d
-; CHECK-NEXT:    fadd z1.d, z4.d, z1.d
-; CHECK-NEXT:    add z2.d, z3.d, z2.d
+; CHECK-NEXT:    fadd z0.d, z4.d, z0.d
+; CHECK-NEXT:    fadd z1.d, z3.d, z1.d
+; CHECK-NEXT:    add z2.d, z5.d, z2.d
 ; CHECK-NEXT:    b.ne .LBB3_1
 ; CHECK-NEXT:  // %bb.2: // %middle.block
 ; CHECK-NEXT:    uaddv d2, p0, z2.d
diff --git a/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll b/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll
index f5d14779f6586e3..b80ea04823e9f69 100644
--- a/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll
@@ -75,11 +75,11 @@ define <vscale x 8 x i8> @vec_scalable_subvec_fixed_idx_zero_i8(<vscale x 8 x i8
 ; CHECK-LABEL: vec_scalable_subvec_fixed_idx_zero_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ldr d0, [x1]
 ; CHECK-NEXT:    ptrue p1.h, vl8
-; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0]
-; CHECK-NEXT:    ldr d1, [x1]
-; CHECK-NEXT:    ushll v1.8h, v1.8b, #0
-; CHECK-NEXT:    mov z0.h, p1/m, z1.h
+; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
+; CHECK-NEXT:    ld1b { z1.h }, p0/z, [x0]
+; CHECK-NEXT:    sel z0.h, p1, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %vec = load <vscale x 8 x i8>, <vscale x 8 x i8>* %a
   %subvec = load <8 x i8>, <8 x i8>* %b
@@ -94,17 +94,17 @@ define <vscale x 8 x i8> @vec_scalable_subvec_fixed_idx_nonzero_i8(<vscale x 8 x
 ; CHECK-NEXT:    addvl sp, sp, #-1
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    cnth x8
-; CHECK-NEXT:    mov w9, #8 // =0x8
+; CHECK-NEXT:    ldr d0, [x1]
 ; CHECK-NEXT:    sub x8, x8, #8
+; CHECK-NEXT:    mov w9, #8 // =0x8
 ; CHECK-NEXT:    cmp x8, #8
+; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
 ; CHECK-NEXT:    csel x8, x8, x9, lo
 ; CHECK-NEXT:    mov x9, sp
 ; CHECK-NEXT:    lsl x8, x8, #1
-; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0]
-; CHECK-NEXT:    ldr d1, [x1]
-; CHECK-NEXT:    ushll v1.8h, v1.8b, #0
-; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK-NEXT:    str q1, [x9, x8]
+; CHECK-NEXT:    ld1b { z1.h }, p0/z, [x0]
+; CHECK-NEXT:    st1h { z1.h }, p0, [sp]
+; CHECK-NEXT:    str q0, [x9, x8]
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [sp]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -119,11 +119,11 @@ define <vscale x 4 x i16> @vec_scalable_subvec_fixed_idx_zero_i16(<vscale x 4 x
 ; CHECK-LABEL: vec_scalable_subvec_fixed_idx_zero_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ldr d0, [x1]
 ; CHECK-NEXT:    ptrue p1.s, vl4
-; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
-; CHECK-NEXT:    ldr d1, [x1]
-; CHECK-NEXT:    ushll v1.4s, v1.4h, #0
-; CHECK-NEXT:    mov z0.s, p1/m, z1.s
+; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
+; CHECK-NEXT:    ld1h { z1.s }, p0/z, [x0]
+; CHECK-NEXT:    sel z0.s, p1, z0.s, z1.s
 ; CHECK-NEXT:    ret
   %vec = load <vscale x 4 x i16>, <vscale x 4 x i16>* %a
   %subvec = load <4 x i16>, <4 x i16>* %b
@@ -138,17 +138,17 @@ define <vscale x 4 x i16> @vec_scalable_subvec_fixed_idx_nonzero_i16(<vscale x 4
 ; CHECK-NEXT:    addvl sp, sp, #-1
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    cntw x8
-; CHECK-NEXT:    mov w9, #4 // =0x4
+; CHECK-NEXT:    ldr d0, [x1]
 ; CHECK-NEXT:    sub x8, x8, #4
+; CHECK-NEXT:    mov w9, #4 // =0x4
 ; CHECK-NEXT:    cmp x8, #4
+; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
 ; CHECK-NEXT:    csel x8, x8, x9, lo
 ; CHECK-NEXT:    mov x9, sp
 ; CHECK-NEXT:    lsl x8, x8, #2
-; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
-; CHECK-NEXT:    ldr d1, [x1]
-; CHECK-NEXT:    ushll v1.4s, v1.4h, #0
-; CHECK-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK-NEXT:    str q1, [x9, x8]
+; CHECK-NEXT:    ld1h { z1.s }, p0/z, [x0]
+; CHECK-NEXT:    st1w { z1.s }, p0, [sp]
+; CHECK-NEXT:    str q0, [x9, x8]
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [sp]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -163,11 +163,11 @@ define <vscale x 2 x i32> @vec_scalable_subvec_fixed_idx_zero_i32(<vscale x 2 x
 ; CHECK-LABEL: vec_scalable_subvec_fixed_idx_zero_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ldr d0, [x1]
 ; CHECK-NEXT:    ptrue p1.d, vl2
-; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
-; CHECK-NEXT:    ldr d1, [x1]
-; CHECK-NEXT:    ushll v1.2d, v1.2s, #0
-; CHECK-NEXT:    mov z0.d, p1/m, z1.d
+; CHECK-NEXT:    ushll v0.2d, v0.2s, #0
+; CHECK-NEXT:    ld1w { z1.d }, p0/z, [x0]
+; CHECK-NEXT:    sel z0.d, p1, z0.d, z1.d
 ; CHECK-NEXT:    ret
   %vec = load <vscale x 2 x i32>, <vscale x 2 x i32>* %a
   %subvec = load <2 x i32>, <2 x i32>* %b
@@ -182,17 +182,17 @@ define <vscale x 2 x i32> @vec_scalable_subvec_fixed_idx_nonzero_i32(<vscale x 2
 ; CHECK-NEXT:    addvl sp, sp, #-1
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    cntd x8
-; CHECK-NEXT:    mov w9, #2 // =0x2
+; CHECK-NEXT:    ldr d0, [x1]
 ; CHECK-NEXT:    sub x8, x8, #2
+; CHECK-NEXT:    mov w9, #2 // =0x2
 ; CHECK-NEXT:    cmp x8, #2
+; CHECK-NEXT:    ushll v0.2d, v0.2s, #0
 ; CHECK-NEXT:    csel x8, x8, x9, lo
 ; CHECK-NEXT:    mov x9, sp
 ; CHECK-NEXT:    lsl x8, x8, #3
-; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
-; CHECK-NEXT:    ldr d1, [x1]
-; CHECK-NEXT:    ushll v1.2d, v1.2s, #0
-; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK-NEXT:    str q1, [x9, x8]
+; CHECK-NEXT:    ld1w { z1.d }, p0/z, [x0]
+; CHECK-NEXT:    st1d { z1.d }, p0, [sp]
+; CHECK-NEXT:    str q0, [x9, x8]
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
index 6738bddb8af3442..783878fe738068c 100644
--- a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
+++ b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
@@ -265,19 +265,19 @@ define void @aavpcs2(float %s0, float %s1, float %s2, float %s3, float %s4, floa
 ; CHECK-NEXT:    ldp x8, x9, [sp]
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x8]
 ; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x7]
-; CHECK-NEXT:    ld1w { z2.s }, p0/z, [x6]
-; CHECK-NEXT:    ld1w { z3.s }, p0/z, [x5]
-; CHECK-NEXT:    ld1w { z4.s }, p0/z, [x4]
-; CHECK-NEXT:    ld1w { z5.s }, p0/z, [x3]
-; CHECK-NEXT:    ld1w { z6.s }, p0/z, [x1]
-; CHECK-NEXT:    ld1w { z24.s }, p0/z, [x0]
+; CHECK-NEXT:    ld1w { z2.s }, p0/z, [x0]
+; CHECK-NEXT:    ld1w { z3.s }, p0/z, [x6]
+; CHECK-NEXT:    ld1w { z4.s }, p0/z, [x5]
+; CHECK-NEXT:    ld1w { z5.s }, p0/z, [x1]
+; CHECK-NEXT:    ld1w { z6.s }, p0/z, [x4]
+; CHECK-NEXT:    ld1w { z24.s }, p0/z, [x3]
 ; CHECK-NEXT:    st1w { z7.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z2.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z5.s }, p0, [x9]
 ; CHECK-NEXT:    st1w { z24.s }, p0, [x9]
 ; CHECK-NEXT:    st1w { z6.s }, p0, [x9]
-; CHECK-NEXT:    st1w { z5.s }, p0, [x9]
 ; CHECK-NEXT:    st1w { z4.s }, p0, [x9]
 ; CHECK-NEXT:    st1w { z3.s }, p0, [x9]
-; CHECK-NEXT:    st1w { z2.s }, p0, [x9]
 ; CHECK-NEXT:    st1w { z1.s }, p0, [x9]
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x9]
 ; CHECK-NEXT:    ret
@@ -301,25 +301,25 @@ define void @aavpcs3(float %s0, float %s1, float %s2, float %s3, float %s4, floa
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ldr x8, [sp]
-; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x8]
-; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x7]
-; CHECK-NEXT:    ld1w { z2.s }, p0/z, [x6]
-; CHECK-NEXT:    ld1w { z3.s }, p0/z, [x5]
-; CHECK-NEXT:    ld1w { z4.s }, p0/z, [x4]
-; CHECK-NEXT:    ld1w { z5.s }, p0/z, [x3]
+; CHECK-NEXT:    ldr x9, [sp, #16]
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x8]
+; CHECK-NEXT:    ld1w { z2.s }, p0/z, [x7]
+; CHECK-NEXT:    ld1w { z3.s }, p0/z, [x1]
+; CHECK-NEXT:    ld1w { z4.s }, p0/z, [x6]
+; CHECK-NEXT:    ld1w { z5.s }, p0/z, [x5]
 ; CHECK-NEXT:    ld1w { z6.s }, p0/z, [x2]
-; CHECK-NEXT:    ld1w { z7.s }, p0/z, [x1]
-; CHECK-NEXT:    ld1w { z24.s }, p0/z, [x0]
-; CHECK-NEXT:    ldr x8, [sp, #16]
-; CHECK-NEXT:    st1w { z24.s }, p0, [x8]
-; CHECK-NEXT:    st1w { z7.s }, p0, [x8]
-; CHECK-NEXT:    st1w { z6.s }, p0, [x8]
-; CHECK-NEXT:    st1w { z5.s }, p0, [x8]
-; CHECK-NEXT:    st1w { z4.s }, p0, [x8]
-; CHECK-NEXT:    st1w { z3.s }, p0, [x8]
-; CHECK-NEXT:    st1w { z2.s }, p0, [x8]
-; CHECK-NEXT:    st1w { z1.s }, p0, [x8]
-; CHECK-NEXT:    st1w { z0.s }, p0, [x8]
+; CHECK-NEXT:    ld1w { z7.s }, p0/z, [x4]
+; CHECK-NEXT:    ld1w { z24.s }, p0/z, [x3]
+; CHECK-NEXT:    st1w { z0.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z3.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z6.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z24.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z7.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z5.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z4.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z2.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z1.s }, p0, [x9]
 ; CHECK-NEXT:    ret
 entry:
   store volatile <vscale x 4 x float> %s8, <vscale x 4 x float>* %ptr
@@ -373,25 +373,25 @@ define <vscale x 4 x float> @aavpcs5(float %s0, float %s1, float %s2, float %s3,
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ldr x8, [sp]
+; CHECK-NEXT:    ldr x9, [sp, #16]
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x8]
 ; CHECK-NEXT:    ld1w { z2.s }, p0/z, [x7]
-; CHECK-NEXT:    ld1w { z3.s }, p0/z, [x6]
-; CHECK-NEXT:    ld1w { z4.s }, p0/z, [x5]
-; CHECK-NEXT:    ld1w { z5.s }, p0/z, [x4]
-; CHECK-NEXT:    ld1w { z6.s }, p0/z, [x3]
-; CHECK-NEXT:    ld1w { z7.s }, p0/z, [x2]
-; CHECK-NEXT:    ld1w { z24.s }, p0/z, [x1]
-; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
-; CHECK-NEXT:    ldr x8, [sp, #16]
-; CHECK-NEXT:    st1w { z0.s }, p0, [x8]
-; CHECK-NEXT:    st1w { z24.s }, p0, [x8]
-; CHECK-NEXT:    st1w { z7.s }, p0, [x8]
-; CHECK-NEXT:    st1w { z6.s }, p0, [x8]
-; CHECK-NEXT:    st1w { z5.s }, p0, [x8]
-; CHECK-NEXT:    st1w { z4.s }, p0, [x8]
-; CHECK-NEXT:    st1w { z3.s }, p0, [x8]
-; CHECK-NEXT:    st1w { z2.s }, p0, [x8]
-; CHECK-NEXT:    st1w { z1.s }, p0, [x8]
+; CHECK-NEXT:    ld1w { z3.s }, p0/z, [x1]
+; CHECK-NEXT:    ld1w { z4.s }, p0/z, [x6]
+; CHECK-NEXT:    ld1w { z5.s }, p0/z, [x5]
+; CHECK-NEXT:    ld1w { z6.s }, p0/z, [x2]
+; CHECK-NEXT:    ld1w { z7.s }, p0/z, [x4]
+; CHECK-NEXT:    ld1w { z24.s }, p0/z, [x3]
+; CHECK-NEXT:    st1w { z0.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z3.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z6.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z24.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z7.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z5.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z4.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z2.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z1.s }, p0, [x9]
 ; CHECK-NEXT:    ret
 entry:
   store volatile <vscale x 4 x float> %s8, <vscale x 4 x float>* %ptr
@@ -411,25 +411,25 @@ define void @aapcs1(float %s0, float %s1, float %s2, float %s3, float %s4, float
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ldr x8, [sp]
-; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x8]
-; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x7]
-; CHECK-NEXT:    ld1w { z2.s }, p0/z, [x6]
-; CHECK-NEXT:    ld1w { z3.s }, p0/z, [x5]
-; CHECK-NEXT:    ld1w { z4.s }, p0/z, [x4]
-; CHECK-NEXT:    ld1w { z5.s }, p0/z, [x3]
+; CHECK-NEXT:    ldr x9, [sp, #16]
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x8]
+; CHECK-NEXT:    ld1w { z2.s }, p0/z, [x7]
+; CHECK-NEXT:    ld1w { z3.s }, p0/z, [x1]
+; CHECK-NEXT:    ld1w { z4.s }, p0/z, [x6]
+; CHECK-NEXT:    ld1w { z5.s }, p0/z, [x5]
 ; CHECK-NEXT:    ld1w { z6.s }, p0/z, [x2]
-; CHECK-NEXT:    ld1w { z7.s }, p0/z, [x1]
-; CHECK-NEXT:    ld1w { z16.s }, p0/z, [x0]
-; CHECK-NEXT:    ldr x8, [sp, #16]
-; CHECK-NEXT:    st1w { z16.s }, p0, [x8]
-; CHECK-NEXT:    st1w { z7.s }, p0, [x8]
-; CHECK-NEXT:    st1w { z6.s }, p0, [x8]
-; CHECK-NEXT:    st1w { z5.s }, p0, [x8]
-; CHECK-NEXT:    st1w { z4.s }, p0, [x8]
-; CHECK-NEXT:    st1w { z3.s }, p0, [x8]
-; CHECK-NEXT:    st1w { z2.s }, p0, [x8]
-; CHECK-NEXT:    st1w { z1.s }, p0, [x8]
-; CHECK-NEXT:    st1w { z0.s }, p0, [x8]
+; CHECK-NEXT:    ld1w { z7.s }, p0/z, [x4]
+; CHECK-NEXT:    ld1w { z16.s }, p0/z, [x3]
+; CHECK-NEXT:    st1w { z0.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z3.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z6.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z16.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z7.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z5.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z4.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z2.s }, p0, [x9]
+; CHECK-NEXT:    st1w { z1.s }, p0, [x9]
 ; CHECK-NEXT:    ret
 entry:
   store volatile <vscale x 4 x float> %s8, <vscale x 4 x float>* %ptr
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-concat.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-concat.ll
index e54d22b140bf60c..65cb448cac117ce 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-concat.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-concat.ll
@@ -57,10 +57,10 @@ define void @concat_v64i8(ptr %a, ptr %b, ptr %c) #0 {
 ; VBITS_GE_256:       // %bb.0:
 ; VBITS_GE_256-NEXT:    ptrue p0.b, vl32
 ; VBITS_GE_256-NEXT:    mov w8, #32 // =0x20
-; VBITS_GE_256-NEXT:    ld1b { z0.b }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    st1b { z1.b }, p0, [x2, x8]
-; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x2]
+; VBITS_GE_256-NEXT:    ld1b { z0.b }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x2, x8]
+; VBITS_GE_256-NEXT:    st1b { z1.b }, p0, [x2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: concat_v64i8:
@@ -215,10 +215,10 @@ define void @concat_v32i16(ptr %a, ptr %b, ptr %c) #0 {
 ; VBITS_GE_256:       // %bb.0:
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
-; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x2, x8, lsl #1]
-; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x2]
+; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x2, x8, lsl #1]
+; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: concat_v32i16:
@@ -344,10 +344,10 @@ define void @concat_v16i32(ptr %a, ptr %b, ptr %c) #0 {
 ; VBITS_GE_256:       // %bb.0:
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
-; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x2, x8, lsl #2]
-; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x2]
+; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x2, x8, lsl #2]
+; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: concat_v16i32:
@@ -449,10 +449,10 @@ define void @concat_v8i64(ptr %a, ptr %b, ptr %c) #0 {
 ; VBITS_GE_256:       // %bb.0:
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
-; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x2, x8, lsl #3]
-; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x2]
+; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x2, x8, lsl #3]
+; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: concat_v8i64:
@@ -558,10 +558,10 @@ define void @concat_v32f16(ptr %a, ptr %b, ptr %c) #0 {
 ; VBITS_GE_256:       // %bb.0:
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
-; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x2, x8, lsl #1]
-; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x2]
+; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x2, x8, lsl #1]
+; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: concat_v32f16:
@@ -687,10 +687,10 @@ define void @concat_v16f32(ptr %a, ptr %b, ptr %c) #0 {
 ; VBITS_GE_256:       // %bb.0:
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
-; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x2, x8, lsl #2]
-; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x2]
+; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x2, x8, lsl #2]
+; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: concat_v16f32:
@@ -792,10 +792,10 @@ define void @concat_v8f64(ptr %a, ptr %b, ptr %c) #0 {
 ; VBITS_GE_256:       // %bb.0:
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
-; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x2, x8, lsl #3]
-; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x2]
+; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x2, x8, lsl #3]
+; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: concat_v8f64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fcopysign.ll
index 9a9804329458413..bca3dfe5717efc8 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fcopysign.ll
@@ -68,15 +68,15 @@ define void @test_copysign_v32f16_v32f16(ptr %ap, ptr %bp) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    and z2.h, z2.h, #0x8000
+; VBITS_GE_256-NEXT:    and z1.h, z1.h, #0x8000
 ; VBITS_GE_256-NEXT:    and z0.h, z0.h, #0x7fff
-; VBITS_GE_256-NEXT:    and z1.h, z1.h, #0x7fff
+; VBITS_GE_256-NEXT:    and z2.h, z2.h, #0x7fff
 ; VBITS_GE_256-NEXT:    and z3.h, z3.h, #0x8000
-; VBITS_GE_256-NEXT:    orr z0.d, z0.d, z2.d
-; VBITS_GE_256-NEXT:    orr z1.d, z1.d, z3.d
+; VBITS_GE_256-NEXT:    orr z0.d, z0.d, z1.d
+; VBITS_GE_256-NEXT:    orr z1.d, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -192,15 +192,15 @@ define void @test_copysign_v16f32_v16f32(ptr %ap, ptr %bp) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    and z2.s, z2.s, #0x80000000
+; VBITS_GE_256-NEXT:    and z1.s, z1.s, #0x80000000
 ; VBITS_GE_256-NEXT:    and z0.s, z0.s, #0x7fffffff
-; VBITS_GE_256-NEXT:    and z1.s, z1.s, #0x7fffffff
+; VBITS_GE_256-NEXT:    and z2.s, z2.s, #0x7fffffff
 ; VBITS_GE_256-NEXT:    and z3.s, z3.s, #0x80000000
-; VBITS_GE_256-NEXT:    orr z0.d, z0.d, z2.d
-; VBITS_GE_256-NEXT:    orr z1.d, z1.d, z3.d
+; VBITS_GE_256-NEXT:    orr z0.d, z0.d, z1.d
+; VBITS_GE_256-NEXT:    orr z1.d, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -301,15 +301,15 @@ define void @test_copysign_v8f64_v8f64(ptr %ap, ptr %bp) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    and z2.d, z2.d, #0x8000000000000000
+; VBITS_GE_256-NEXT:    and z1.d, z1.d, #0x8000000000000000
 ; VBITS_GE_256-NEXT:    and z0.d, z0.d, #0x7fffffffffffffff
-; VBITS_GE_256-NEXT:    and z1.d, z1.d, #0x7fffffffffffffff
+; VBITS_GE_256-NEXT:    and z2.d, z2.d, #0x7fffffffffffffff
 ; VBITS_GE_256-NEXT:    and z3.d, z3.d, #0x8000000000000000
-; VBITS_GE_256-NEXT:    orr z0.d, z0.d, z2.d
-; VBITS_GE_256-NEXT:    orr z1.d, z1.d, z3.d
+; VBITS_GE_256-NEXT:    orr z0.d, z0.d, z1.d
+; VBITS_GE_256-NEXT:    orr z1.d, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -394,13 +394,13 @@ define void @test_copysign_v4f32_v4f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
 ; CHECK-LABEL: test_copysign_v4f32_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d, vl4
-; CHECK-NEXT:    ldr q0, [x0]
-; CHECK-NEXT:    mvni v2.4s, #128, lsl #24
+; CHECK-NEXT:    mvni v1.4s, #128, lsl #24
+; CHECK-NEXT:    ldr q2, [x0]
 ; CHECK-NEXT:    ptrue p1.d
-; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
-; CHECK-NEXT:    fcvt z1.s, p1/m, z1.d
-; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
-; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x1]
+; CHECK-NEXT:    fcvt z0.s, p1/m, z0.d
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    bit v0.16b, v2.16b, v1.16b
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %ap
@@ -439,25 +439,25 @@ define void @test_copysign_v4f64_v4f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
 ; CHECK_NO_EXTEND_ROUND-LABEL: test_copysign_v4f64_v4f32:
 ; CHECK_NO_EXTEND_ROUND:       // %bb.0:
 ; CHECK_NO_EXTEND_ROUND-NEXT:    ptrue p0.d, vl4
-; CHECK_NO_EXTEND_ROUND-NEXT:    ld1d { z0.d }, p0/z, [x0]
-; CHECK_NO_EXTEND_ROUND-NEXT:    ld1w { z1.d }, p0/z, [x1]
-; CHECK_NO_EXTEND_ROUND-NEXT:    fcvt z1.d, p0/m, z1.s
-; CHECK_NO_EXTEND_ROUND-NEXT:    and z0.d, z0.d, #0x7fffffffffffffff
-; CHECK_NO_EXTEND_ROUND-NEXT:    and z1.d, z1.d, #0x8000000000000000
-; CHECK_NO_EXTEND_ROUND-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK_NO_EXTEND_ROUND-NEXT:    ld1w { z0.d }, p0/z, [x1]
+; CHECK_NO_EXTEND_ROUND-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; CHECK_NO_EXTEND_ROUND-NEXT:    fcvt z0.d, p0/m, z0.s
+; CHECK_NO_EXTEND_ROUND-NEXT:    and z1.d, z1.d, #0x7fffffffffffffff
+; CHECK_NO_EXTEND_ROUND-NEXT:    and z0.d, z0.d, #0x8000000000000000
+; CHECK_NO_EXTEND_ROUND-NEXT:    orr z0.d, z1.d, z0.d
 ; CHECK_NO_EXTEND_ROUND-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK_NO_EXTEND_ROUND-NEXT:    ret
 ;
 ; CHECK_EXTEND_ROUND-LABEL: test_copysign_v4f64_v4f32:
 ; CHECK_EXTEND_ROUND:       // %bb.0:
 ; CHECK_EXTEND_ROUND-NEXT:    ptrue p0.d, vl4
-; CHECK_EXTEND_ROUND-NEXT:    ld1d { z0.d }, p0/z, [x0]
-; CHECK_EXTEND_ROUND-NEXT:    ldr q1, [x1]
-; CHECK_EXTEND_ROUND-NEXT:    uunpklo z1.d, z1.s
-; CHECK_EXTEND_ROUND-NEXT:    and z0.d, z0.d, #0x7fffffffffffffff
-; CHECK_EXTEND_ROUND-NEXT:    fcvt z1.d, p0/m, z1.s
-; CHECK_EXTEND_ROUND-NEXT:    and z1.d, z1.d, #0x8000000000000000
-; CHECK_EXTEND_ROUND-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK_EXTEND_ROUND-NEXT:    ldr q0, [x1]
+; CHECK_EXTEND_ROUND-NEXT:    uunpklo z0.d, z0.s
+; CHECK_EXTEND_ROUND-NEXT:    fcvt z0.d, p0/m, z0.s
+; CHECK_EXTEND_ROUND-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; CHECK_EXTEND_ROUND-NEXT:    and z1.d, z1.d, #0x7fffffffffffffff
+; CHECK_EXTEND_ROUND-NEXT:    and z0.d, z0.d, #0x8000000000000000
+; CHECK_EXTEND_ROUND-NEXT:    orr z0.d, z1.d, z0.d
 ; CHECK_EXTEND_ROUND-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK_EXTEND_ROUND-NEXT:    ret
   %a = load <4 x double>, ptr %ap
@@ -492,14 +492,14 @@ define void @test_copysign_v4f16_v4f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
 ; CHECK-LABEL: test_copysign_v4f16_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d, vl4
-; CHECK-NEXT:    ldr d0, [x0]
-; CHECK-NEXT:    mvni v2.4h, #128, lsl #8
+; CHECK-NEXT:    mvni v1.4h, #128, lsl #8
+; CHECK-NEXT:    ldr d2, [x0]
 ; CHECK-NEXT:    ptrue p1.d
-; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
-; CHECK-NEXT:    fcvt z1.h, p1/m, z1.d
-; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
-; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
-; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x1]
+; CHECK-NEXT:    fcvt z0.h, p1/m, z0.d
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    bit v0.8b, v2.8b, v1.8b
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
   %a = load <4 x half>, ptr %ap
@@ -519,13 +519,13 @@ define void @test_copysign_v8f16_v8f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
 ; CHECK-LABEL: test_copysign_v8f16_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s, vl8
-; CHECK-NEXT:    ldr q0, [x0]
-; CHECK-NEXT:    mvni v2.8h, #128, lsl #8
+; CHECK-NEXT:    mvni v1.8h, #128, lsl #8
+; CHECK-NEXT:    ldr q2, [x0]
 ; CHECK-NEXT:    ptrue p1.s
-; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
-; CHECK-NEXT:    fcvt z1.h, p1/m, z1.s
-; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
-; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x1]
+; CHECK-NEXT:    fcvt z0.h, p1/m, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    bit v0.16b, v2.16b, v1.16b
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, ptr %ap
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll
index 3f831ea54bc817e..64c4eea0ee5f36b 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll
@@ -51,13 +51,14 @@ define void @fadd_v32f16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fadd z0.h, p0/m, z0.h, z2.h
+; VBITS_GE_256-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    fadd z1.h, p0/m, z1.h, z3.h
-; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fadd_v32f16:
@@ -149,13 +150,14 @@ define void @fadd_v16f32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fadd z0.s, p0/m, z0.s, z2.s
+; VBITS_GE_256-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    fadd z1.s, p0/m, z1.s, z3.s
-; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fadd_v16f32:
@@ -247,13 +249,14 @@ define void @fadd_v8f64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fadd z0.d, p0/m, z0.d, z2.d
+; VBITS_GE_256-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    fadd z1.d, p0/m, z1.d, z3.d
-; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fadd_v8f64:
@@ -349,10 +352,10 @@ define void @fdiv_v32f16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
-; VBITS_GE_256-NEXT:    fdiv z0.h, p0/m, z0.h, z2.h
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    fdiv z0.h, p0/m, z0.h, z1.h
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    fdiv z1.h, p0/m, z1.h, z2.h
 ; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
@@ -447,10 +450,10 @@ define void @fdiv_v16f32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
-; VBITS_GE_256-NEXT:    fdiv z0.s, p0/m, z0.s, z2.s
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    fdiv z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    fdiv z1.s, p0/m, z1.s, z2.s
 ; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
@@ -545,10 +548,10 @@ define void @fdiv_v8f64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
-; VBITS_GE_256-NEXT:    fdiv z0.d, p0/m, z0.d, z2.d
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    fdiv z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    fdiv z1.d, p0/m, z1.d, z2.d
 ; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
@@ -651,15 +654,16 @@ define void @fma_v32f16(ptr %a, ptr %b, ptr %c) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    ld1h { z4.h }, p0/z, [x2, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x2, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    ld1h { z4.h }, p0/z, [x1]
 ; VBITS_GE_256-NEXT:    ld1h { z5.h }, p0/z, [x2]
-; VBITS_GE_256-NEXT:    fmad z0.h, p0/m, z2.h, z4.h
-; VBITS_GE_256-NEXT:    fmad z1.h, p0/m, z3.h, z5.h
-; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
+; VBITS_GE_256-NEXT:    fmad z0.h, p0/m, z1.h, z2.h
+; VBITS_GE_256-NEXT:    movprfx z1, z5
+; VBITS_GE_256-NEXT:    fmla z1.h, p0/m, z3.h, z4.h
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fma_v32f16:
@@ -761,15 +765,16 @@ define void @fma_v16f32(ptr %a, ptr %b, ptr %c) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    ld1w { z4.s }, p0/z, [x2, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x2, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    ld1w { z4.s }, p0/z, [x1]
 ; VBITS_GE_256-NEXT:    ld1w { z5.s }, p0/z, [x2]
-; VBITS_GE_256-NEXT:    fmad z0.s, p0/m, z2.s, z4.s
-; VBITS_GE_256-NEXT:    fmad z1.s, p0/m, z3.s, z5.s
-; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
+; VBITS_GE_256-NEXT:    fmad z0.s, p0/m, z1.s, z2.s
+; VBITS_GE_256-NEXT:    movprfx z1, z5
+; VBITS_GE_256-NEXT:    fmla z1.s, p0/m, z3.s, z4.s
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fma_v16f32:
@@ -870,15 +875,16 @@ define void @fma_v8f64(ptr %a, ptr %b, ptr %c) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    ld1d { z4.d }, p0/z, [x2, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x2, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    ld1d { z4.d }, p0/z, [x1]
 ; VBITS_GE_256-NEXT:    ld1d { z5.d }, p0/z, [x2]
-; VBITS_GE_256-NEXT:    fmad z0.d, p0/m, z2.d, z4.d
-; VBITS_GE_256-NEXT:    fmad z1.d, p0/m, z3.d, z5.d
-; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
+; VBITS_GE_256-NEXT:    fmad z0.d, p0/m, z1.d, z2.d
+; VBITS_GE_256-NEXT:    movprfx z1, z5
+; VBITS_GE_256-NEXT:    fmla z1.d, p0/m, z3.d, z4.d
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fma_v8f64:
@@ -980,13 +986,14 @@ define void @fmul_v32f16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fmul z0.h, p0/m, z0.h, z2.h
+; VBITS_GE_256-NEXT:    fmul z0.h, p0/m, z0.h, z1.h
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    fmul z1.h, p0/m, z1.h, z3.h
-; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fmul_v32f16:
@@ -1078,13 +1085,14 @@ define void @fmul_v16f32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fmul z0.s, p0/m, z0.s, z2.s
+; VBITS_GE_256-NEXT:    fmul z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    fmul z1.s, p0/m, z1.s, z3.s
-; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fmul_v16f32:
@@ -1176,13 +1184,14 @@ define void @fmul_v8f64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fmul z0.d, p0/m, z0.d, z2.d
+; VBITS_GE_256-NEXT:    fmul z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    fmul z1.d, p0/m, z1.d, z3.d
-; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fmul_v8f64:
@@ -1544,8 +1553,8 @@ define void @fsqrt_v32f16(ptr %a) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    fsqrt z0.h, p0/m, z0.h
 ; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    fsqrt z0.h, p0/m, z0.h
 ; VBITS_GE_256-NEXT:    fsqrt z1.h, p0/m, z1.h
 ; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
@@ -1632,8 +1641,8 @@ define void @fsqrt_v16f32(ptr %a) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    fsqrt z0.s, p0/m, z0.s
 ; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    fsqrt z0.s, p0/m, z0.s
 ; VBITS_GE_256-NEXT:    fsqrt z1.s, p0/m, z1.s
 ; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
@@ -1720,8 +1729,8 @@ define void @fsqrt_v8f64(ptr %a) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    fsqrt z0.d, p0/m, z0.d
 ; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    fsqrt z0.d, p0/m, z0.d
 ; VBITS_GE_256-NEXT:    fsqrt z1.d, p0/m, z1.d
 ; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
@@ -1814,13 +1823,14 @@ define void @fsub_v32f16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fsub z0.h, p0/m, z0.h, z2.h
+; VBITS_GE_256-NEXT:    fsub z0.h, p0/m, z0.h, z1.h
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    fsub z1.h, p0/m, z1.h, z3.h
-; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fsub_v32f16:
@@ -1912,13 +1922,14 @@ define void @fsub_v16f32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fsub z0.s, p0/m, z0.s, z2.s
+; VBITS_GE_256-NEXT:    fsub z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    fsub z1.s, p0/m, z1.s, z3.s
-; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fsub_v16f32:
@@ -2010,13 +2021,14 @@ define void @fsub_v8f64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fsub z0.d, p0/m, z0.d, z2.d
+; VBITS_GE_256-NEXT:    fsub z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    fsub z1.d, p0/m, z1.d, z3.d
-; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fsub_v8f64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-compares.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-compares.ll
index b97a5f7b0559732..20f71bb380652e3 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-compares.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-compares.ll
@@ -55,11 +55,11 @@ define void @fcmp_oeq_v32f16(ptr %a, ptr %b, ptr %c) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fcmeq p1.h, p0/z, z0.h, z2.h
-; VBITS_GE_256-NEXT:    fcmeq p2.h, p0/z, z1.h, z3.h
+; VBITS_GE_256-NEXT:    fcmeq p1.h, p0/z, z0.h, z1.h
+; VBITS_GE_256-NEXT:    fcmeq p2.h, p0/z, z2.h, z3.h
 ; VBITS_GE_256-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
 ; VBITS_GE_256-NEXT:    mov z1.h, p2/z, #-1 // =0xffffffffffffffff
 ; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x2, x8, lsl #1]
@@ -165,11 +165,11 @@ define void @fcmp_oeq_v16f32(ptr %a, ptr %b, ptr %c) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fcmeq p1.s, p0/z, z0.s, z2.s
-; VBITS_GE_256-NEXT:    fcmeq p2.s, p0/z, z1.s, z3.s
+; VBITS_GE_256-NEXT:    fcmeq p1.s, p0/z, z0.s, z1.s
+; VBITS_GE_256-NEXT:    fcmeq p2.s, p0/z, z2.s, z3.s
 ; VBITS_GE_256-NEXT:    mov z0.s, p1/z, #-1 // =0xffffffffffffffff
 ; VBITS_GE_256-NEXT:    mov z1.s, p2/z, #-1 // =0xffffffffffffffff
 ; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x2, x8, lsl #2]
@@ -275,11 +275,11 @@ define void @fcmp_oeq_v8f64(ptr %a, ptr %b, ptr %c) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fcmeq p1.d, p0/z, z0.d, z2.d
-; VBITS_GE_256-NEXT:    fcmeq p2.d, p0/z, z1.d, z3.d
+; VBITS_GE_256-NEXT:    fcmeq p1.d, p0/z, z0.d, z1.d
+; VBITS_GE_256-NEXT:    fcmeq p2.d, p0/z, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    mov z0.d, p1/z, #-1 // =0xffffffffffffffff
 ; VBITS_GE_256-NEXT:    mov z1.d, p2/z, #-1 // =0xffffffffffffffff
 ; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x2, x8, lsl #3]
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-fma.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-fma.ll
index 7f65f4b10277ff1..d3713b989301b24 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-fma.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-fma.ll
@@ -58,15 +58,16 @@ define void @fma_v32f16(ptr %a, ptr %b, ptr %c) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    ld1h { z4.h }, p0/z, [x2, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x2, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    ld1h { z4.h }, p0/z, [x1]
 ; VBITS_GE_256-NEXT:    ld1h { z5.h }, p0/z, [x2]
-; VBITS_GE_256-NEXT:    fmad z0.h, p0/m, z2.h, z4.h
-; VBITS_GE_256-NEXT:    fmad z1.h, p0/m, z3.h, z5.h
-; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
+; VBITS_GE_256-NEXT:    fmad z0.h, p0/m, z1.h, z2.h
+; VBITS_GE_256-NEXT:    movprfx z1, z5
+; VBITS_GE_256-NEXT:    fmla z1.h, p0/m, z3.h, z4.h
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fma_v32f16:
@@ -174,15 +175,16 @@ define void @fma_v16f32(ptr %a, ptr %b, ptr %c) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    ld1w { z4.s }, p0/z, [x2, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x2, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    ld1w { z4.s }, p0/z, [x1]
 ; VBITS_GE_256-NEXT:    ld1w { z5.s }, p0/z, [x2]
-; VBITS_GE_256-NEXT:    fmad z0.s, p0/m, z2.s, z4.s
-; VBITS_GE_256-NEXT:    fmad z1.s, p0/m, z3.s, z5.s
-; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
+; VBITS_GE_256-NEXT:    fmad z0.s, p0/m, z1.s, z2.s
+; VBITS_GE_256-NEXT:    movprfx z1, z5
+; VBITS_GE_256-NEXT:    fmla z1.s, p0/m, z3.s, z4.s
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fma_v16f32:
@@ -289,15 +291,16 @@ define void @fma_v8f64(ptr %a, ptr %b, ptr %c) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    ld1d { z4.d }, p0/z, [x2, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x2, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    ld1d { z4.d }, p0/z, [x1]
 ; VBITS_GE_256-NEXT:    ld1d { z5.d }, p0/z, [x2]
-; VBITS_GE_256-NEXT:    fmad z0.d, p0/m, z2.d, z4.d
-; VBITS_GE_256-NEXT:    fmad z1.d, p0/m, z3.d, z5.d
-; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
+; VBITS_GE_256-NEXT:    fmad z0.d, p0/m, z1.d, z2.d
+; VBITS_GE_256-NEXT:    movprfx z1, z5
+; VBITS_GE_256-NEXT:    fmla z1.d, p0/m, z3.d, z4.d
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fma_v8f64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-minmax.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-minmax.ll
index 6e81876adc3a034..d20f083066a8323 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-minmax.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-minmax.ll
@@ -51,13 +51,14 @@ define void @fmaxnm_v32f16(ptr %a, ptr %b) #0 {
 ; VBITS_EQ_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_EQ_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_EQ_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_EQ_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_EQ_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_EQ_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_EQ_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_EQ_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_EQ_256-NEXT:    fmaxnm z0.h, p0/m, z0.h, z2.h
+; VBITS_EQ_256-NEXT:    fmaxnm z0.h, p0/m, z0.h, z1.h
+; VBITS_EQ_256-NEXT:    movprfx z1, z2
 ; VBITS_EQ_256-NEXT:    fmaxnm z1.h, p0/m, z1.h, z3.h
-; VBITS_EQ_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_EQ_256-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_EQ_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_EQ_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fmaxnm_v32f16:
@@ -149,13 +150,14 @@ define void @fmaxnm_v16f32(ptr %a, ptr %b) #0 {
 ; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_EQ_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_EQ_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_EQ_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_EQ_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_EQ_256-NEXT:    fmaxnm z0.s, p0/m, z0.s, z2.s
+; VBITS_EQ_256-NEXT:    fmaxnm z0.s, p0/m, z0.s, z1.s
+; VBITS_EQ_256-NEXT:    movprfx z1, z2
 ; VBITS_EQ_256-NEXT:    fmaxnm z1.s, p0/m, z1.s, z3.s
-; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_EQ_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_EQ_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fmaxnm_v16f32:
@@ -247,13 +249,14 @@ define void @fmaxnm_v8f64(ptr %a, ptr %b) #0 {
 ; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_EQ_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_EQ_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_EQ_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_EQ_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_EQ_256-NEXT:    fmaxnm z0.d, p0/m, z0.d, z2.d
+; VBITS_EQ_256-NEXT:    fmaxnm z0.d, p0/m, z0.d, z1.d
+; VBITS_EQ_256-NEXT:    movprfx z1, z2
 ; VBITS_EQ_256-NEXT:    fmaxnm z1.d, p0/m, z1.d, z3.d
-; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_EQ_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_EQ_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fmaxnm_v8f64:
@@ -349,13 +352,14 @@ define void @fminnm_v32f16(ptr %a, ptr %b) #0 {
 ; VBITS_EQ_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_EQ_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_EQ_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_EQ_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_EQ_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_EQ_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_EQ_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_EQ_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_EQ_256-NEXT:    fminnm z0.h, p0/m, z0.h, z2.h
+; VBITS_EQ_256-NEXT:    fminnm z0.h, p0/m, z0.h, z1.h
+; VBITS_EQ_256-NEXT:    movprfx z1, z2
 ; VBITS_EQ_256-NEXT:    fminnm z1.h, p0/m, z1.h, z3.h
-; VBITS_EQ_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_EQ_256-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_EQ_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_EQ_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fminnm_v32f16:
@@ -447,13 +451,14 @@ define void @fminnm_v16f32(ptr %a, ptr %b) #0 {
 ; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_EQ_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_EQ_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_EQ_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_EQ_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_EQ_256-NEXT:    fminnm z0.s, p0/m, z0.s, z2.s
+; VBITS_EQ_256-NEXT:    fminnm z0.s, p0/m, z0.s, z1.s
+; VBITS_EQ_256-NEXT:    movprfx z1, z2
 ; VBITS_EQ_256-NEXT:    fminnm z1.s, p0/m, z1.s, z3.s
-; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_EQ_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_EQ_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fminnm_v16f32:
@@ -545,13 +550,14 @@ define void @fminnm_v8f64(ptr %a, ptr %b) #0 {
 ; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_EQ_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_EQ_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_EQ_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_EQ_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_EQ_256-NEXT:    fminnm z0.d, p0/m, z0.d, z2.d
+; VBITS_EQ_256-NEXT:    fminnm z0.d, p0/m, z0.d, z1.d
+; VBITS_EQ_256-NEXT:    movprfx z1, z2
 ; VBITS_EQ_256-NEXT:    fminnm z1.d, p0/m, z1.d, z3.d
-; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_EQ_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_EQ_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fminnm_v8f64:
@@ -647,13 +653,14 @@ define void @fmax_v32f16(ptr %a, ptr %b) #0 {
 ; VBITS_EQ_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_EQ_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_EQ_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_EQ_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_EQ_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_EQ_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_EQ_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_EQ_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_EQ_256-NEXT:    fmax z0.h, p0/m, z0.h, z2.h
+; VBITS_EQ_256-NEXT:    fmax z0.h, p0/m, z0.h, z1.h
+; VBITS_EQ_256-NEXT:    movprfx z1, z2
 ; VBITS_EQ_256-NEXT:    fmax z1.h, p0/m, z1.h, z3.h
-; VBITS_EQ_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_EQ_256-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_EQ_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_EQ_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fmax_v32f16:
@@ -745,13 +752,14 @@ define void @fmax_v16f32(ptr %a, ptr %b) #0 {
 ; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_EQ_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_EQ_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_EQ_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_EQ_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_EQ_256-NEXT:    fmax z0.s, p0/m, z0.s, z2.s
+; VBITS_EQ_256-NEXT:    fmax z0.s, p0/m, z0.s, z1.s
+; VBITS_EQ_256-NEXT:    movprfx z1, z2
 ; VBITS_EQ_256-NEXT:    fmax z1.s, p0/m, z1.s, z3.s
-; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_EQ_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_EQ_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fmax_v16f32:
@@ -843,13 +851,14 @@ define void @fmax_v8f64(ptr %a, ptr %b) #0 {
 ; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_EQ_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_EQ_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_EQ_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_EQ_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_EQ_256-NEXT:    fmax z0.d, p0/m, z0.d, z2.d
+; VBITS_EQ_256-NEXT:    fmax z0.d, p0/m, z0.d, z1.d
+; VBITS_EQ_256-NEXT:    movprfx z1, z2
 ; VBITS_EQ_256-NEXT:    fmax z1.d, p0/m, z1.d, z3.d
-; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_EQ_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_EQ_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fmax_v8f64:
@@ -945,13 +954,14 @@ define void @fmin_v32f16(ptr %a, ptr %b) #0 {
 ; VBITS_EQ_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_EQ_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_EQ_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_EQ_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_EQ_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_EQ_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_EQ_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_EQ_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_EQ_256-NEXT:    fmin z0.h, p0/m, z0.h, z2.h
+; VBITS_EQ_256-NEXT:    fmin z0.h, p0/m, z0.h, z1.h
+; VBITS_EQ_256-NEXT:    movprfx z1, z2
 ; VBITS_EQ_256-NEXT:    fmin z1.h, p0/m, z1.h, z3.h
-; VBITS_EQ_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_EQ_256-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_EQ_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_EQ_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fmin_v32f16:
@@ -1043,13 +1053,14 @@ define void @fmin_v16f32(ptr %a, ptr %b) #0 {
 ; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_EQ_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_EQ_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_EQ_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_EQ_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_EQ_256-NEXT:    fmin z0.s, p0/m, z0.s, z2.s
+; VBITS_EQ_256-NEXT:    fmin z0.s, p0/m, z0.s, z1.s
+; VBITS_EQ_256-NEXT:    movprfx z1, z2
 ; VBITS_EQ_256-NEXT:    fmin z1.s, p0/m, z1.s, z3.s
-; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_EQ_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_EQ_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fmin_v16f32:
@@ -1141,13 +1152,14 @@ define void @fmin_v8f64(ptr %a, ptr %b) #0 {
 ; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_EQ_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_EQ_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_EQ_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_EQ_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_EQ_256-NEXT:    fmin z0.d, p0/m, z0.d, z2.d
+; VBITS_EQ_256-NEXT:    fmin z0.d, p0/m, z0.d, z1.d
+; VBITS_EQ_256-NEXT:    movprfx z1, z2
 ; VBITS_EQ_256-NEXT:    fmin z1.d, p0/m, z1.d, z3.d
-; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_EQ_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_EQ_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fmin_v8f64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-reduce.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-reduce.ll
index bbd9613819b19ef..8053a401e5f45ea 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-reduce.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-reduce.ll
@@ -55,11 +55,11 @@ define half @fadda_v32f16(half %start, ptr %a) #0 {
 ; VBITS_GE_256-LABEL: fadda_v32f16:
 ; VBITS_GE_256:       // %bb.0:
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
-; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    // kill: def $h0 killed $h0 def $z0
+; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    fadda h0, p0, h0, z1.h
 ; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    fadda h0, p0, h0, z2.h
 ; VBITS_GE_256-NEXT:    fadda h0, p0, h0, z1.h
 ; VBITS_GE_256-NEXT:    // kill: def $h0 killed $h0 killed $z0
 ; VBITS_GE_256-NEXT:    ret
@@ -151,11 +151,11 @@ define float @fadda_v16f32(float %start, ptr %a) #0 {
 ; VBITS_GE_256-LABEL: fadda_v16f32:
 ; VBITS_GE_256:       // %bb.0:
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
-; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    // kill: def $s0 killed $s0 def $z0
+; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    fadda s0, p0, s0, z1.s
 ; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    fadda s0, p0, s0, z2.s
 ; VBITS_GE_256-NEXT:    fadda s0, p0, s0, z1.s
 ; VBITS_GE_256-NEXT:    // kill: def $s0 killed $s0 killed $z0
 ; VBITS_GE_256-NEXT:    ret
@@ -243,11 +243,11 @@ define double @fadda_v8f64(double %start, ptr %a) #0 {
 ; VBITS_GE_256-LABEL: fadda_v8f64:
 ; VBITS_GE_256:       // %bb.0:
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
-; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    // kill: def $d0 killed $d0 def $z0
+; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    fadda d0, p0, d0, z1.d
 ; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    fadda d0, p0, d0, z2.d
 ; VBITS_GE_256-NEXT:    fadda d0, p0, d0, z1.d
 ; VBITS_GE_256-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; VBITS_GE_256-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll
index 3566bbc2b456141..9efe0b33910c8ac 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll
@@ -54,13 +54,13 @@ define void @select_v32f16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fcmeq p1.h, p0/z, z0.h, z2.h
-; VBITS_GE_256-NEXT:    fcmeq p2.h, p0/z, z1.h, z3.h
-; VBITS_GE_256-NEXT:    sel z0.h, p1, z0.h, z2.h
-; VBITS_GE_256-NEXT:    sel z1.h, p2, z1.h, z3.h
+; VBITS_GE_256-NEXT:    fcmeq p1.h, p0/z, z0.h, z1.h
+; VBITS_GE_256-NEXT:    fcmeq p2.h, p0/z, z2.h, z3.h
+; VBITS_GE_256-NEXT:    sel z0.h, p1, z0.h, z1.h
+; VBITS_GE_256-NEXT:    sel z1.h, p2, z2.h, z3.h
 ; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -167,13 +167,13 @@ define void @select_v16f32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fcmeq p1.s, p0/z, z0.s, z2.s
-; VBITS_GE_256-NEXT:    fcmeq p2.s, p0/z, z1.s, z3.s
-; VBITS_GE_256-NEXT:    sel z0.s, p1, z0.s, z2.s
-; VBITS_GE_256-NEXT:    sel z1.s, p2, z1.s, z3.s
+; VBITS_GE_256-NEXT:    fcmeq p1.s, p0/z, z0.s, z1.s
+; VBITS_GE_256-NEXT:    fcmeq p2.s, p0/z, z2.s, z3.s
+; VBITS_GE_256-NEXT:    sel z0.s, p1, z0.s, z1.s
+; VBITS_GE_256-NEXT:    sel z1.s, p2, z2.s, z3.s
 ; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -281,13 +281,13 @@ define void @select_v8f64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fcmeq p1.d, p0/z, z0.d, z2.d
-; VBITS_GE_256-NEXT:    fcmeq p2.d, p0/z, z1.d, z3.d
-; VBITS_GE_256-NEXT:    sel z0.d, p1, z0.d, z2.d
-; VBITS_GE_256-NEXT:    sel z1.d, p2, z1.d, z3.d
+; VBITS_GE_256-NEXT:    fcmeq p1.d, p0/z, z0.d, z1.d
+; VBITS_GE_256-NEXT:    fcmeq p2.d, p0/z, z2.d, z3.d
+; VBITS_GE_256-NEXT:    sel z0.d, p1, z0.d, z1.d
+; VBITS_GE_256-NEXT:    sel z1.d, p2, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests-crash.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests-crash.ll
index d73ef4731d48fc7..a4b5ccd69fdb759 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests-crash.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests-crash.ll
@@ -21,24 +21,24 @@ define dso_local void @func1(ptr %v1, ptr %v2, ptr %v3, ptr %v4, ptr %v5, ptr %v
 ; CHECK-NEXT:    .cfi_offset w22, -32
 ; CHECK-NEXT:    .cfi_offset w29, -48
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    add x8, sp, #48
-; CHECK-NEXT:    add x9, sp, #112
-; CHECK-NEXT:    add x10, sp, #144
 ; CHECK-NEXT:    add x11, sp, #176
+; CHECK-NEXT:    add x10, sp, #144
+; CHECK-NEXT:    add x9, sp, #112
+; CHECK-NEXT:    add x8, sp, #48
 ; CHECK-NEXT:    add x20, sp, #176
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8]
-; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x9]
-; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x10]
-; CHECK-NEXT:    ld1d { z3.d }, p0/z, [x11]
-; CHECK-NEXT:    ldp x9, x8, [sp, #328]
-; CHECK-NEXT:    ldp x11, x10, [sp, #312]
+; CHECK-NEXT:    ldp x13, x12, [sp, #328]
 ; CHECK-NEXT:    ldr x15, [sp, #104]
-; CHECK-NEXT:    ldp x13, x12, [sp, #296]
 ; CHECK-NEXT:    ldur q4, [sp, #88]
-; CHECK-NEXT:    ldp x18, x14, [sp, #280]
-; CHECK-NEXT:    ldr x19, [sp, #272]
 ; CHECK-NEXT:    ldp x16, x17, [sp, #208]
+; CHECK-NEXT:    ldr x19, [sp, #272]
 ; CHECK-NEXT:    ldp x21, x22, [sp, #352]
+; CHECK-NEXT:    ld1d { z3.d }, p0/z, [x11]
+; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x10]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x9]
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8]
+; CHECK-NEXT:    ldp x8, x14, [sp, #312]
+; CHECK-NEXT:    ldp x10, x9, [sp, #296]
+; CHECK-NEXT:    ldp x18, x11, [sp, #280]
 ; CHECK-NEXT:    st1d { z3.d }, p0, [x20]
 ; CHECK-NEXT:    add x20, sp, #144
 ; CHECK-NEXT:    st1d { z2.d }, p0, [x20]
@@ -53,10 +53,10 @@ define dso_local void @func1(ptr %v1, ptr %v2, ptr %v3, ptr %v4, ptr %v5, ptr %v
 ; CHECK-NEXT:    stp x16, x17, [sp, #208]
 ; CHECK-NEXT:    stur q4, [sp, #88]
 ; CHECK-NEXT:    str x15, [sp, #104]
-; CHECK-NEXT:    stp x14, x13, [sp, #288]
-; CHECK-NEXT:    stp x12, x11, [sp, #304]
-; CHECK-NEXT:    stp x10, x9, [sp, #320]
-; CHECK-NEXT:    str x8, [sp, #336]
+; CHECK-NEXT:    stp x11, x10, [sp, #288]
+; CHECK-NEXT:    stp x9, x8, [sp, #304]
+; CHECK-NEXT:    stp x14, x13, [sp, #320]
+; CHECK-NEXT:    str x12, [sp, #336]
 ; CHECK-NEXT:    ldr x29, [sp], #48 // 8-byte Folded Reload
 ; CHECK-NEXT:    b func2
                              ptr %v9, ptr %v10, ptr %v11, ptr %v12, ptr %v13, ptr %v14,  ptr %v15, ptr %v16,
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-arith.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-arith.ll
index 6a8d7e47a2bd3a8..8f2aeade48a0f8f 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-arith.ll
@@ -51,11 +51,11 @@ define void @add_v64i8(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.b, vl32
 ; VBITS_GE_256-NEXT:    mov w8, #32 // =0x20
 ; VBITS_GE_256-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
-; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1b { z3.b }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    add z0.b, z0.b, z2.b
-; VBITS_GE_256-NEXT:    add z1.b, z1.b, z3.b
+; VBITS_GE_256-NEXT:    add z0.b, z0.b, z1.b
+; VBITS_GE_256-NEXT:    add z1.b, z2.b, z3.b
 ; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    st1b { z1.b }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -149,11 +149,11 @@ define void @add_v32i16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    add z0.h, z0.h, z2.h
-; VBITS_GE_256-NEXT:    add z1.h, z1.h, z3.h
+; VBITS_GE_256-NEXT:    add z0.h, z0.h, z1.h
+; VBITS_GE_256-NEXT:    add z1.h, z2.h, z3.h
 ; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -247,11 +247,11 @@ define void @add_v16i32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    add z0.s, z0.s, z2.s
-; VBITS_GE_256-NEXT:    add z1.s, z1.s, z3.s
+; VBITS_GE_256-NEXT:    add z0.s, z0.s, z1.s
+; VBITS_GE_256-NEXT:    add z1.s, z2.s, z3.s
 ; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -345,11 +345,11 @@ define void @add_v8i64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    add z0.d, z0.d, z2.d
-; VBITS_GE_256-NEXT:    add z1.d, z1.d, z3.d
+; VBITS_GE_256-NEXT:    add z0.d, z0.d, z1.d
+; VBITS_GE_256-NEXT:    add z1.d, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -391,11 +391,11 @@ define void @add_v32i64(ptr %a, ptr %b) vscale_range(8,0) #0 {
 ; CHECK-NEXT:    ptrue p0.d, vl16
 ; CHECK-NEXT:    mov x8, #16 // =0x10
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; CHECK-NEXT:    add z0.d, z0.d, z2.d
-; CHECK-NEXT:    add z1.d, z1.d, z3.d
+; CHECK-NEXT:    add z0.d, z0.d, z1.d
+; CHECK-NEXT:    add z1.d, z2.d, z3.d
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; CHECK-NEXT:    st1d { z1.d }, p0, [x0]
 ; CHECK-NEXT:    ret
@@ -452,13 +452,14 @@ define void @mul_v64i8(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.b, vl32
 ; VBITS_GE_256-NEXT:    mov w8, #32 // =0x20
 ; VBITS_GE_256-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
-; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1b { z3.b }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    mul z0.b, p0/m, z0.b, z2.b
+; VBITS_GE_256-NEXT:    mul z0.b, p0/m, z0.b, z1.b
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    mul z1.b, p0/m, z1.b, z3.b
-; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: mul_v64i8:
@@ -550,13 +551,14 @@ define void @mul_v32i16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    mul z0.h, p0/m, z0.h, z2.h
+; VBITS_GE_256-NEXT:    mul z0.h, p0/m, z0.h, z1.h
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    mul z1.h, p0/m, z1.h, z3.h
-; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: mul_v32i16:
@@ -648,13 +650,14 @@ define void @mul_v16i32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    mul z0.s, p0/m, z0.s, z2.s
+; VBITS_GE_256-NEXT:    mul z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    mul z1.s, p0/m, z1.s, z3.s
-; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: mul_v16i32:
@@ -752,13 +755,14 @@ define void @mul_v8i64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    mul z0.d, p0/m, z0.d, z2.d
+; VBITS_GE_256-NEXT:    mul z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    mul z1.d, p0/m, z1.d, z3.d
-; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: mul_v8i64:
@@ -854,11 +858,11 @@ define void @sub_v64i8(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.b, vl32
 ; VBITS_GE_256-NEXT:    mov w8, #32 // =0x20
 ; VBITS_GE_256-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
-; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1b { z3.b }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    sub z0.b, z0.b, z2.b
-; VBITS_GE_256-NEXT:    sub z1.b, z1.b, z3.b
+; VBITS_GE_256-NEXT:    sub z0.b, z0.b, z1.b
+; VBITS_GE_256-NEXT:    sub z1.b, z2.b, z3.b
 ; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    st1b { z1.b }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -952,11 +956,11 @@ define void @sub_v32i16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    sub z0.h, z0.h, z2.h
-; VBITS_GE_256-NEXT:    sub z1.h, z1.h, z3.h
+; VBITS_GE_256-NEXT:    sub z0.h, z0.h, z1.h
+; VBITS_GE_256-NEXT:    sub z1.h, z2.h, z3.h
 ; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -1050,11 +1054,11 @@ define void @sub_v16i32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    sub z0.s, z0.s, z2.s
-; VBITS_GE_256-NEXT:    sub z1.s, z1.s, z3.s
+; VBITS_GE_256-NEXT:    sub z0.s, z0.s, z1.s
+; VBITS_GE_256-NEXT:    sub z1.s, z2.s, z3.s
 ; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -1148,11 +1152,11 @@ define void @sub_v8i64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    sub z0.d, z0.d, z2.d
-; VBITS_GE_256-NEXT:    sub z1.d, z1.d, z3.d
+; VBITS_GE_256-NEXT:    sub z0.d, z0.d, z1.d
+; VBITS_GE_256-NEXT:    sub z1.d, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -1393,27 +1397,25 @@ define void @abs_v128i16(ptr %a) vscale_range(2,0) #0 {
 ; CHECK-NEXT:    ld1h { z4.h }, p0/z, [x0, x12, lsl #1]
 ; CHECK-NEXT:    ld1h { z5.h }, p0/z, [x0, x13, lsl #1]
 ; CHECK-NEXT:    ld1h { z6.h }, p0/z, [x0, x14, lsl #1]
-; CHECK-NEXT:    ld1h { z7.h }, p0/z, [x0]
 ; CHECK-NEXT:    abs z0.h, p0/m, z0.h
 ; CHECK-NEXT:    abs z1.h, p0/m, z1.h
 ; CHECK-NEXT:    abs z2.h, p0/m, z2.h
 ; CHECK-NEXT:    abs z3.h, p0/m, z3.h
+; CHECK-NEXT:    abs z4.h, p0/m, z4.h
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
-; CHECK-NEXT:    movprfx z0, z4
-; CHECK-NEXT:    abs z0.h, p0/m, z4.h
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    st1h { z1.h }, p0, [x0, x9, lsl #1]
 ; CHECK-NEXT:    movprfx z1, z5
 ; CHECK-NEXT:    abs z1.h, p0/m, z5.h
 ; CHECK-NEXT:    st1h { z2.h }, p0, [x0, x10, lsl #1]
 ; CHECK-NEXT:    movprfx z2, z6
 ; CHECK-NEXT:    abs z2.h, p0/m, z6.h
+; CHECK-NEXT:    abs z0.h, p0/m, z0.h
 ; CHECK-NEXT:    st1h { z3.h }, p0, [x0, x11, lsl #1]
-; CHECK-NEXT:    movprfx z3, z7
-; CHECK-NEXT:    abs z3.h, p0/m, z7.h
-; CHECK-NEXT:    st1h { z0.h }, p0, [x0, x12, lsl #1]
+; CHECK-NEXT:    st1h { z4.h }, p0, [x0, x12, lsl #1]
 ; CHECK-NEXT:    st1h { z1.h }, p0, [x0, x13, lsl #1]
 ; CHECK-NEXT:    st1h { z2.h }, p0, [x0, x14, lsl #1]
-; CHECK-NEXT:    st1h { z3.h }, p0, [x0]
+; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <128 x i16>, ptr %a
   %res = call <128 x i16> @llvm.abs.v128i16(<128 x i16> %op1, i1 false)
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-compares.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-compares.ll
index 254dd912545573f..7a598df3158a49d 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-compares.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-compares.ll
@@ -55,11 +55,11 @@ define void @icmp_eq_v64i8(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.b, vl32
 ; VBITS_GE_256-NEXT:    mov w8, #32 // =0x20
 ; VBITS_GE_256-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
-; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1b { z3.b }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    cmpeq p1.b, p0/z, z0.b, z2.b
-; VBITS_GE_256-NEXT:    cmpeq p2.b, p0/z, z1.b, z3.b
+; VBITS_GE_256-NEXT:    cmpeq p1.b, p0/z, z0.b, z1.b
+; VBITS_GE_256-NEXT:    cmpeq p2.b, p0/z, z2.b, z3.b
 ; VBITS_GE_256-NEXT:    mov z0.b, p1/z, #-1 // =0xffffffffffffffff
 ; VBITS_GE_256-NEXT:    mov z1.b, p2/z, #-1 // =0xffffffffffffffff
 ; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
@@ -165,11 +165,11 @@ define void @icmp_eq_v32i16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    cmpeq p1.h, p0/z, z0.h, z2.h
-; VBITS_GE_256-NEXT:    cmpeq p2.h, p0/z, z1.h, z3.h
+; VBITS_GE_256-NEXT:    cmpeq p1.h, p0/z, z0.h, z1.h
+; VBITS_GE_256-NEXT:    cmpeq p2.h, p0/z, z2.h, z3.h
 ; VBITS_GE_256-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
 ; VBITS_GE_256-NEXT:    mov z1.h, p2/z, #-1 // =0xffffffffffffffff
 ; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
@@ -275,11 +275,11 @@ define void @icmp_eq_v16i32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    cmpeq p1.s, p0/z, z0.s, z2.s
-; VBITS_GE_256-NEXT:    cmpeq p2.s, p0/z, z1.s, z3.s
+; VBITS_GE_256-NEXT:    cmpeq p1.s, p0/z, z0.s, z1.s
+; VBITS_GE_256-NEXT:    cmpeq p2.s, p0/z, z2.s, z3.s
 ; VBITS_GE_256-NEXT:    mov z0.s, p1/z, #-1 // =0xffffffffffffffff
 ; VBITS_GE_256-NEXT:    mov z1.s, p2/z, #-1 // =0xffffffffffffffff
 ; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
@@ -385,11 +385,11 @@ define void @icmp_eq_v8i64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    cmpeq p1.d, p0/z, z0.d, z2.d
-; VBITS_GE_256-NEXT:    cmpeq p2.d, p0/z, z1.d, z3.d
+; VBITS_GE_256-NEXT:    cmpeq p1.d, p0/z, z0.d, z1.d
+; VBITS_GE_256-NEXT:    cmpeq p2.d, p0/z, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    mov z0.d, p1/z, #-1 // =0xffffffffffffffff
 ; VBITS_GE_256-NEXT:    mov z1.d, p2/z, #-1 // =0xffffffffffffffff
 ; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-div.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-div.ll
index a01ef8bf064e835..11ed69513917c32 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-div.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-div.ll
@@ -561,10 +561,10 @@ define void @sdiv_v16i32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
-; VBITS_GE_256-NEXT:    sdiv z0.s, p0/m, z0.s, z2.s
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    sdiv z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    sdiv z1.s, p0/m, z1.s, z2.s
 ; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
@@ -683,10 +683,10 @@ define void @sdiv_v8i64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
-; VBITS_GE_256-NEXT:    sdiv z0.d, p0/m, z0.d, z2.d
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    sdiv z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    sdiv z1.d, p0/m, z1.d, z2.d
 ; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
@@ -1272,10 +1272,10 @@ define void @udiv_v16i32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
-; VBITS_GE_256-NEXT:    udiv z0.s, p0/m, z0.s, z2.s
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    udiv z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    udiv z1.s, p0/m, z1.s, z2.s
 ; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
@@ -1394,10 +1394,10 @@ define void @udiv_v8i64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
-; VBITS_GE_256-NEXT:    udiv z0.d, p0/m, z0.d, z2.d
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    udiv z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    udiv z1.d, p0/m, z1.d, z2.d
 ; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-log.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-log.ll
index 149ce49665c64f4..1285a5783677e5e 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-log.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-log.ll
@@ -51,11 +51,11 @@ define void @and_v64i8(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.b, vl32
 ; VBITS_GE_256-NEXT:    mov w8, #32 // =0x20
 ; VBITS_GE_256-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
-; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1b { z3.b }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    and z0.d, z0.d, z2.d
-; VBITS_GE_256-NEXT:    and z1.d, z1.d, z3.d
+; VBITS_GE_256-NEXT:    and z0.d, z0.d, z1.d
+; VBITS_GE_256-NEXT:    and z1.d, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    st1b { z1.b }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -149,11 +149,11 @@ define void @and_v32i16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    and z0.d, z0.d, z2.d
-; VBITS_GE_256-NEXT:    and z1.d, z1.d, z3.d
+; VBITS_GE_256-NEXT:    and z0.d, z0.d, z1.d
+; VBITS_GE_256-NEXT:    and z1.d, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -247,11 +247,11 @@ define void @and_v16i32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    and z0.d, z0.d, z2.d
-; VBITS_GE_256-NEXT:    and z1.d, z1.d, z3.d
+; VBITS_GE_256-NEXT:    and z0.d, z0.d, z1.d
+; VBITS_GE_256-NEXT:    and z1.d, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -345,11 +345,11 @@ define void @and_v8i64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    and z0.d, z0.d, z2.d
-; VBITS_GE_256-NEXT:    and z1.d, z1.d, z3.d
+; VBITS_GE_256-NEXT:    and z0.d, z0.d, z1.d
+; VBITS_GE_256-NEXT:    and z1.d, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -447,11 +447,11 @@ define void @or_v64i8(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.b, vl32
 ; VBITS_GE_256-NEXT:    mov w8, #32 // =0x20
 ; VBITS_GE_256-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
-; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1b { z3.b }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    orr z0.d, z0.d, z2.d
-; VBITS_GE_256-NEXT:    orr z1.d, z1.d, z3.d
+; VBITS_GE_256-NEXT:    orr z0.d, z0.d, z1.d
+; VBITS_GE_256-NEXT:    orr z1.d, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    st1b { z1.b }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -545,11 +545,11 @@ define void @or_v32i16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    orr z0.d, z0.d, z2.d
-; VBITS_GE_256-NEXT:    orr z1.d, z1.d, z3.d
+; VBITS_GE_256-NEXT:    orr z0.d, z0.d, z1.d
+; VBITS_GE_256-NEXT:    orr z1.d, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -643,11 +643,11 @@ define void @or_v16i32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    orr z0.d, z0.d, z2.d
-; VBITS_GE_256-NEXT:    orr z1.d, z1.d, z3.d
+; VBITS_GE_256-NEXT:    orr z0.d, z0.d, z1.d
+; VBITS_GE_256-NEXT:    orr z1.d, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -741,11 +741,11 @@ define void @or_v8i64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    orr z0.d, z0.d, z2.d
-; VBITS_GE_256-NEXT:    orr z1.d, z1.d, z3.d
+; VBITS_GE_256-NEXT:    orr z0.d, z0.d, z1.d
+; VBITS_GE_256-NEXT:    orr z1.d, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -843,11 +843,11 @@ define void @xor_v64i8(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.b, vl32
 ; VBITS_GE_256-NEXT:    mov w8, #32 // =0x20
 ; VBITS_GE_256-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
-; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1b { z3.b }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    eor z0.d, z0.d, z2.d
-; VBITS_GE_256-NEXT:    eor z1.d, z1.d, z3.d
+; VBITS_GE_256-NEXT:    eor z0.d, z0.d, z1.d
+; VBITS_GE_256-NEXT:    eor z1.d, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    st1b { z1.b }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -941,11 +941,11 @@ define void @xor_v32i16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    eor z0.d, z0.d, z2.d
-; VBITS_GE_256-NEXT:    eor z1.d, z1.d, z3.d
+; VBITS_GE_256-NEXT:    eor z0.d, z0.d, z1.d
+; VBITS_GE_256-NEXT:    eor z1.d, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -1039,11 +1039,11 @@ define void @xor_v16i32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    eor z0.d, z0.d, z2.d
-; VBITS_GE_256-NEXT:    eor z1.d, z1.d, z3.d
+; VBITS_GE_256-NEXT:    eor z0.d, z0.d, z1.d
+; VBITS_GE_256-NEXT:    eor z1.d, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -1137,11 +1137,11 @@ define void @xor_v8i64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    eor z0.d, z0.d, z2.d
-; VBITS_GE_256-NEXT:    eor z1.d, z1.d, z3.d
+; VBITS_GE_256-NEXT:    eor z0.d, z0.d, z1.d
+; VBITS_GE_256-NEXT:    eor z1.d, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-minmax.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-minmax.ll
index 4091c01fe93fbd3..296e4818b1cd7f6 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-minmax.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-minmax.ll
@@ -51,13 +51,14 @@ define void @smax_v64i8(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.b, vl32
 ; VBITS_GE_256-NEXT:    mov w8, #32 // =0x20
 ; VBITS_GE_256-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
-; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1b { z3.b }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    smax z0.b, p0/m, z0.b, z2.b
+; VBITS_GE_256-NEXT:    smax z0.b, p0/m, z0.b, z1.b
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    smax z1.b, p0/m, z1.b, z3.b
-; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: smax_v64i8:
@@ -149,13 +150,14 @@ define void @smax_v32i16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    smax z0.h, p0/m, z0.h, z2.h
+; VBITS_GE_256-NEXT:    smax z0.h, p0/m, z0.h, z1.h
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    smax z1.h, p0/m, z1.h, z3.h
-; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: smax_v32i16:
@@ -247,13 +249,14 @@ define void @smax_v16i32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    smax z0.s, p0/m, z0.s, z2.s
+; VBITS_GE_256-NEXT:    smax z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    smax z1.s, p0/m, z1.s, z3.s
-; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: smax_v16i32:
@@ -353,13 +356,14 @@ define void @smax_v8i64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    smax z0.d, p0/m, z0.d, z2.d
+; VBITS_GE_256-NEXT:    smax z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    smax z1.d, p0/m, z1.d, z3.d
-; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: smax_v8i64:
@@ -455,13 +459,14 @@ define void @smin_v64i8(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.b, vl32
 ; VBITS_GE_256-NEXT:    mov w8, #32 // =0x20
 ; VBITS_GE_256-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
-; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1b { z3.b }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    smin z0.b, p0/m, z0.b, z2.b
+; VBITS_GE_256-NEXT:    smin z0.b, p0/m, z0.b, z1.b
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    smin z1.b, p0/m, z1.b, z3.b
-; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: smin_v64i8:
@@ -553,13 +558,14 @@ define void @smin_v32i16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    smin z0.h, p0/m, z0.h, z2.h
+; VBITS_GE_256-NEXT:    smin z0.h, p0/m, z0.h, z1.h
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    smin z1.h, p0/m, z1.h, z3.h
-; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: smin_v32i16:
@@ -651,13 +657,14 @@ define void @smin_v16i32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    smin z0.s, p0/m, z0.s, z2.s
+; VBITS_GE_256-NEXT:    smin z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    smin z1.s, p0/m, z1.s, z3.s
-; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: smin_v16i32:
@@ -757,13 +764,14 @@ define void @smin_v8i64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    smin z0.d, p0/m, z0.d, z2.d
+; VBITS_GE_256-NEXT:    smin z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    smin z1.d, p0/m, z1.d, z3.d
-; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: smin_v8i64:
@@ -859,13 +867,14 @@ define void @umax_v64i8(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.b, vl32
 ; VBITS_GE_256-NEXT:    mov w8, #32 // =0x20
 ; VBITS_GE_256-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
-; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1b { z3.b }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    umax z0.b, p0/m, z0.b, z2.b
+; VBITS_GE_256-NEXT:    umax z0.b, p0/m, z0.b, z1.b
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    umax z1.b, p0/m, z1.b, z3.b
-; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: umax_v64i8:
@@ -957,13 +966,14 @@ define void @umax_v32i16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    umax z0.h, p0/m, z0.h, z2.h
+; VBITS_GE_256-NEXT:    umax z0.h, p0/m, z0.h, z1.h
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    umax z1.h, p0/m, z1.h, z3.h
-; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: umax_v32i16:
@@ -1055,13 +1065,14 @@ define void @umax_v16i32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    umax z0.s, p0/m, z0.s, z2.s
+; VBITS_GE_256-NEXT:    umax z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    umax z1.s, p0/m, z1.s, z3.s
-; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: umax_v16i32:
@@ -1161,13 +1172,14 @@ define void @umax_v8i64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    umax z0.d, p0/m, z0.d, z2.d
+; VBITS_GE_256-NEXT:    umax z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    umax z1.d, p0/m, z1.d, z3.d
-; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: umax_v8i64:
@@ -1263,13 +1275,14 @@ define void @umin_v64i8(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.b, vl32
 ; VBITS_GE_256-NEXT:    mov w8, #32 // =0x20
 ; VBITS_GE_256-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
-; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1b { z3.b }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    umin z0.b, p0/m, z0.b, z2.b
+; VBITS_GE_256-NEXT:    umin z0.b, p0/m, z0.b, z1.b
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    umin z1.b, p0/m, z1.b, z3.b
-; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: umin_v64i8:
@@ -1361,13 +1374,14 @@ define void @umin_v32i16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    umin z0.h, p0/m, z0.h, z2.h
+; VBITS_GE_256-NEXT:    umin z0.h, p0/m, z0.h, z1.h
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    umin z1.h, p0/m, z1.h, z3.h
-; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: umin_v32i16:
@@ -1459,13 +1473,14 @@ define void @umin_v16i32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    umin z0.s, p0/m, z0.s, z2.s
+; VBITS_GE_256-NEXT:    umin z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    umin z1.s, p0/m, z1.s, z3.s
-; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: umin_v16i32:
@@ -1565,13 +1580,14 @@ define void @umin_v8i64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    umin z0.d, p0/m, z0.d, z2.d
+; VBITS_GE_256-NEXT:    umin z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    umin z1.d, p0/m, z1.d, z3.d
-; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: umin_v8i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-mulh.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-mulh.ll
index ae230fc2c92d93c..995da2be0cc1700 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-mulh.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-mulh.ll
@@ -76,13 +76,14 @@ define void @smulh_v64i8(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.b, vl32
 ; VBITS_GE_256-NEXT:    mov w8, #32 // =0x20
 ; VBITS_GE_256-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
-; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1b { z3.b }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    smulh z0.b, p0/m, z0.b, z2.b
+; VBITS_GE_256-NEXT:    smulh z0.b, p0/m, z0.b, z1.b
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    smulh z1.b, p0/m, z1.b, z3.b
-; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: smulh_v64i8:
@@ -208,13 +209,14 @@ define void @smulh_v32i16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    smulh z0.h, p0/m, z0.h, z2.h
+; VBITS_GE_256-NEXT:    smulh z0.h, p0/m, z0.h, z1.h
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    smulh z1.h, p0/m, z1.h, z3.h
-; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: smulh_v32i16:
@@ -338,13 +340,14 @@ define void @smulh_v16i32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    smulh z0.s, p0/m, z0.s, z2.s
+; VBITS_GE_256-NEXT:    smulh z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    smulh z1.s, p0/m, z1.s, z3.s
-; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: smulh_v16i32:
@@ -470,13 +473,14 @@ define void @smulh_v8i64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    smulh z0.d, p0/m, z0.d, z2.d
+; VBITS_GE_256-NEXT:    smulh z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    smulh z1.d, p0/m, z1.d, z3.d
-; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: smulh_v8i64:
@@ -605,13 +609,14 @@ define void @umulh_v64i8(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.b, vl32
 ; VBITS_GE_256-NEXT:    mov w8, #32 // =0x20
 ; VBITS_GE_256-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
-; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1b { z3.b }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    umulh z0.b, p0/m, z0.b, z2.b
+; VBITS_GE_256-NEXT:    umulh z0.b, p0/m, z0.b, z1.b
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    umulh z1.b, p0/m, z1.b, z3.b
-; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: umulh_v64i8:
@@ -738,13 +743,14 @@ define void @umulh_v32i16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    umulh z0.h, p0/m, z0.h, z2.h
+; VBITS_GE_256-NEXT:    umulh z0.h, p0/m, z0.h, z1.h
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    umulh z1.h, p0/m, z1.h, z3.h
-; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: umulh_v32i16:
@@ -870,13 +876,14 @@ define void @umulh_v16i32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    umulh z0.s, p0/m, z0.s, z2.s
+; VBITS_GE_256-NEXT:    umulh z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    umulh z1.s, p0/m, z1.s, z3.s
-; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: umulh_v16i32:
@@ -1000,13 +1007,14 @@ define void @umulh_v8i64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    umulh z0.d, p0/m, z0.d, z2.d
+; VBITS_GE_256-NEXT:    umulh z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    umulh z1.d, p0/m, z1.d, z3.d
-; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: umulh_v8i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-rem.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-rem.ll
index b0edf0ddebd4e05..45960617f1d9355 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-rem.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-rem.ll
@@ -608,17 +608,18 @@ define void @srem_v16i32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    movprfx z2, z0
+; VBITS_GE_256-NEXT:    sdiv z2.s, p0/m, z2.s, z1.s
+; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z4.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    movprfx z3, z0
-; VBITS_GE_256-NEXT:    sdiv z3.s, p0/m, z3.s, z2.s
-; VBITS_GE_256-NEXT:    movprfx z5, z1
+; VBITS_GE_256-NEXT:    movprfx z5, z3
 ; VBITS_GE_256-NEXT:    sdiv z5.s, p0/m, z5.s, z4.s
-; VBITS_GE_256-NEXT:    mls z0.s, p0/m, z3.s, z2.s
+; VBITS_GE_256-NEXT:    mls z0.s, p0/m, z2.s, z1.s
+; VBITS_GE_256-NEXT:    movprfx z1, z3
 ; VBITS_GE_256-NEXT:    mls z1.s, p0/m, z5.s, z4.s
-; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: srem_v16i32:
@@ -756,17 +757,18 @@ define void @srem_v8i64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    movprfx z2, z0
+; VBITS_GE_256-NEXT:    sdiv z2.d, p0/m, z2.d, z1.d
+; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z4.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    movprfx z3, z0
-; VBITS_GE_256-NEXT:    sdiv z3.d, p0/m, z3.d, z2.d
-; VBITS_GE_256-NEXT:    movprfx z5, z1
+; VBITS_GE_256-NEXT:    movprfx z5, z3
 ; VBITS_GE_256-NEXT:    sdiv z5.d, p0/m, z5.d, z4.d
-; VBITS_GE_256-NEXT:    mls z0.d, p0/m, z3.d, z2.d
+; VBITS_GE_256-NEXT:    mls z0.d, p0/m, z2.d, z1.d
+; VBITS_GE_256-NEXT:    movprfx z1, z3
 ; VBITS_GE_256-NEXT:    mls z1.d, p0/m, z5.d, z4.d
-; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: srem_v8i64:
@@ -1424,17 +1426,18 @@ define void @urem_v16i32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    movprfx z2, z0
+; VBITS_GE_256-NEXT:    udiv z2.s, p0/m, z2.s, z1.s
+; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z4.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    movprfx z3, z0
-; VBITS_GE_256-NEXT:    udiv z3.s, p0/m, z3.s, z2.s
-; VBITS_GE_256-NEXT:    movprfx z5, z1
+; VBITS_GE_256-NEXT:    movprfx z5, z3
 ; VBITS_GE_256-NEXT:    udiv z5.s, p0/m, z5.s, z4.s
-; VBITS_GE_256-NEXT:    mls z0.s, p0/m, z3.s, z2.s
+; VBITS_GE_256-NEXT:    mls z0.s, p0/m, z2.s, z1.s
+; VBITS_GE_256-NEXT:    movprfx z1, z3
 ; VBITS_GE_256-NEXT:    mls z1.s, p0/m, z5.s, z4.s
-; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: urem_v16i32:
@@ -1572,17 +1575,18 @@ define void @urem_v8i64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    movprfx z2, z0
+; VBITS_GE_256-NEXT:    udiv z2.d, p0/m, z2.d, z1.d
+; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z4.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    movprfx z3, z0
-; VBITS_GE_256-NEXT:    udiv z3.d, p0/m, z3.d, z2.d
-; VBITS_GE_256-NEXT:    movprfx z5, z1
+; VBITS_GE_256-NEXT:    movprfx z5, z3
 ; VBITS_GE_256-NEXT:    udiv z5.d, p0/m, z5.d, z4.d
-; VBITS_GE_256-NEXT:    mls z0.d, p0/m, z3.d, z2.d
+; VBITS_GE_256-NEXT:    mls z0.d, p0/m, z2.d, z1.d
+; VBITS_GE_256-NEXT:    movprfx z1, z3
 ; VBITS_GE_256-NEXT:    mls z1.d, p0/m, z5.d, z4.d
-; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: urem_v8i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-shifts.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-shifts.ll
index 0424773c14fd08c..7184cc9705973df 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-shifts.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-shifts.ll
@@ -53,13 +53,14 @@ define void @ashr_v64i8(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.b, vl32
 ; VBITS_GE_256-NEXT:    mov w8, #32 // =0x20
 ; VBITS_GE_256-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
-; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1b { z3.b }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    asr z0.b, p0/m, z0.b, z2.b
+; VBITS_GE_256-NEXT:    asr z0.b, p0/m, z0.b, z1.b
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    asr z1.b, p0/m, z1.b, z3.b
-; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: ashr_v64i8:
@@ -153,13 +154,14 @@ define void @ashr_v32i16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    asr z0.h, p0/m, z0.h, z2.h
+; VBITS_GE_256-NEXT:    asr z0.h, p0/m, z0.h, z1.h
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    asr z1.h, p0/m, z1.h, z3.h
-; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: ashr_v32i16:
@@ -253,13 +255,14 @@ define void @ashr_v16i32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    asr z0.s, p0/m, z0.s, z2.s
+; VBITS_GE_256-NEXT:    asr z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    asr z1.s, p0/m, z1.s, z3.s
-; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: ashr_v16i32:
@@ -353,13 +356,14 @@ define void @ashr_v8i64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    asr z0.d, p0/m, z0.d, z2.d
+; VBITS_GE_256-NEXT:    asr z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    asr z1.d, p0/m, z1.d, z3.d
-; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: ashr_v8i64:
@@ -457,13 +461,14 @@ define void @lshr_v64i8(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.b, vl32
 ; VBITS_GE_256-NEXT:    mov w8, #32 // =0x20
 ; VBITS_GE_256-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
-; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1b { z3.b }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    lsr z0.b, p0/m, z0.b, z2.b
+; VBITS_GE_256-NEXT:    lsr z0.b, p0/m, z0.b, z1.b
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    lsr z1.b, p0/m, z1.b, z3.b
-; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: lshr_v64i8:
@@ -557,13 +562,14 @@ define void @lshr_v32i16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    lsr z0.h, p0/m, z0.h, z2.h
+; VBITS_GE_256-NEXT:    lsr z0.h, p0/m, z0.h, z1.h
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    lsr z1.h, p0/m, z1.h, z3.h
-; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: lshr_v32i16:
@@ -657,13 +663,14 @@ define void @lshr_v16i32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    lsr z0.s, p0/m, z0.s, z2.s
+; VBITS_GE_256-NEXT:    lsr z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    lsr z1.s, p0/m, z1.s, z3.s
-; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: lshr_v16i32:
@@ -757,13 +764,14 @@ define void @lshr_v8i64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    lsr z0.d, p0/m, z0.d, z2.d
+; VBITS_GE_256-NEXT:    lsr z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    lsr z1.d, p0/m, z1.d, z3.d
-; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: lshr_v8i64:
@@ -859,13 +867,14 @@ define void @shl_v64i8(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.b, vl32
 ; VBITS_GE_256-NEXT:    mov w8, #32 // =0x20
 ; VBITS_GE_256-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
-; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1b { z3.b }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    lsl z0.b, p0/m, z0.b, z2.b
+; VBITS_GE_256-NEXT:    lsl z0.b, p0/m, z0.b, z1.b
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    lsl z1.b, p0/m, z1.b, z3.b
-; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: shl_v64i8:
@@ -957,13 +966,14 @@ define void @shl_v32i16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    lsl z0.h, p0/m, z0.h, z2.h
+; VBITS_GE_256-NEXT:    lsl z0.h, p0/m, z0.h, z1.h
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    lsl z1.h, p0/m, z1.h, z3.h
-; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: shl_v32i16:
@@ -1055,13 +1065,14 @@ define void @shl_v16i32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    lsl z0.s, p0/m, z0.s, z2.s
+; VBITS_GE_256-NEXT:    lsl z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    lsl z1.s, p0/m, z1.s, z3.s
-; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: shl_v16i32:
@@ -1153,13 +1164,14 @@ define void @shl_v8i64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    lsl z0.d, p0/m, z0.d, z2.d
+; VBITS_GE_256-NEXT:    lsl z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_256-NEXT:    movprfx z1, z2
 ; VBITS_GE_256-NEXT:    lsl z1.d, p0/m, z1.d, z3.d
-; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: shl_v8i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll
index 2c6ffeaeefd6d95..9cebbc4aab9b7b8 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll
@@ -53,13 +53,13 @@ define void @select_v64i8(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.b, vl32
 ; VBITS_GE_256-NEXT:    mov w8, #32 // =0x20
 ; VBITS_GE_256-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
-; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1b { z3.b }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    cmpeq p1.b, p0/z, z0.b, z2.b
-; VBITS_GE_256-NEXT:    cmpeq p2.b, p0/z, z1.b, z3.b
-; VBITS_GE_256-NEXT:    sel z0.b, p1, z0.b, z2.b
-; VBITS_GE_256-NEXT:    sel z1.b, p2, z1.b, z3.b
+; VBITS_GE_256-NEXT:    cmpeq p1.b, p0/z, z0.b, z1.b
+; VBITS_GE_256-NEXT:    cmpeq p2.b, p0/z, z2.b, z3.b
+; VBITS_GE_256-NEXT:    sel z0.b, p1, z0.b, z1.b
+; VBITS_GE_256-NEXT:    sel z1.b, p2, z2.b, z3.b
 ; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; VBITS_GE_256-NEXT:    st1b { z1.b }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -166,13 +166,13 @@ define void @select_v32i16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    cmpeq p1.h, p0/z, z0.h, z2.h
-; VBITS_GE_256-NEXT:    cmpeq p2.h, p0/z, z1.h, z3.h
-; VBITS_GE_256-NEXT:    sel z0.h, p1, z0.h, z2.h
-; VBITS_GE_256-NEXT:    sel z1.h, p2, z1.h, z3.h
+; VBITS_GE_256-NEXT:    cmpeq p1.h, p0/z, z0.h, z1.h
+; VBITS_GE_256-NEXT:    cmpeq p2.h, p0/z, z2.h, z3.h
+; VBITS_GE_256-NEXT:    sel z0.h, p1, z0.h, z1.h
+; VBITS_GE_256-NEXT:    sel z1.h, p2, z2.h, z3.h
 ; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -279,13 +279,13 @@ define void @select_v16i32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    cmpeq p1.s, p0/z, z0.s, z2.s
-; VBITS_GE_256-NEXT:    cmpeq p2.s, p0/z, z1.s, z3.s
-; VBITS_GE_256-NEXT:    sel z0.s, p1, z0.s, z2.s
-; VBITS_GE_256-NEXT:    sel z1.s, p2, z1.s, z3.s
+; VBITS_GE_256-NEXT:    cmpeq p1.s, p0/z, z0.s, z1.s
+; VBITS_GE_256-NEXT:    cmpeq p2.s, p0/z, z2.s, z3.s
+; VBITS_GE_256-NEXT:    sel z0.s, p1, z0.s, z1.s
+; VBITS_GE_256-NEXT:    sel z1.s, p2, z2.s, z3.s
 ; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
@@ -393,13 +393,13 @@ define void @select_v8i64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    cmpeq p1.d, p0/z, z0.d, z2.d
-; VBITS_GE_256-NEXT:    cmpeq p2.d, p0/z, z1.d, z3.d
-; VBITS_GE_256-NEXT:    sel z0.d, p1, z0.d, z2.d
-; VBITS_GE_256-NEXT:    sel z1.d, p2, z1.d, z3.d
+; VBITS_GE_256-NEXT:    cmpeq p1.d, p0/z, z0.d, z1.d
+; VBITS_GE_256-NEXT:    cmpeq p2.d, p0/z, z2.d, z3.d
+; VBITS_GE_256-NEXT:    sel z0.d, p1, z0.d, z1.d
+; VBITS_GE_256-NEXT:    sel z1.d, p2, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
index f62d3c14d19e4e9..4ac0abcb851d44e 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
@@ -1199,9 +1199,9 @@ define void @masked_gather_passthru(ptr %a, ptr %b, ptr %c) vscale_range(16,0) #
 ; CHECK-NEXT:    ptrue p0.s, vl32
 ; CHECK-NEXT:    ptrue p2.d, vl32
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x2]
 ; CHECK-NEXT:    fcmeq p1.s, p0/z, z0.s, #0.0
 ; CHECK-NEXT:    ld1d { z0.d }, p2/z, [x1]
-; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x2]
 ; CHECK-NEXT:    punpklo p3.h, p1.b
 ; CHECK-NEXT:    ld1w { z0.d }, p3/z, [z0.d]
 ; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll
index fee233643a8e569..467378e7da59bc2 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll
@@ -92,11 +92,11 @@ define void @masked_load_v16f32(ptr %ap, ptr %bp, ptr %c) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fcmeq p1.s, p0/z, z0.s, z2.s
-; VBITS_GE_256-NEXT:    fcmeq p2.s, p0/z, z1.s, z3.s
+; VBITS_GE_256-NEXT:    fcmeq p1.s, p0/z, z0.s, z1.s
+; VBITS_GE_256-NEXT:    fcmeq p2.s, p0/z, z2.s, z3.s
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p1/z, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ld1w { z1.s }, p2/z, [x0]
 ; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x2, x8, lsl #2]
@@ -162,11 +162,11 @@ define void @masked_load_v64i8(ptr %ap, ptr %bp, ptr %c) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.b, vl32
 ; VBITS_GE_256-NEXT:    mov w8, #32 // =0x20
 ; VBITS_GE_256-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
-; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1b { z3.b }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    cmpeq p1.b, p0/z, z0.b, z2.b
-; VBITS_GE_256-NEXT:    cmpeq p2.b, p0/z, z1.b, z3.b
+; VBITS_GE_256-NEXT:    cmpeq p1.b, p0/z, z0.b, z1.b
+; VBITS_GE_256-NEXT:    cmpeq p2.b, p0/z, z2.b, z3.b
 ; VBITS_GE_256-NEXT:    ld1b { z0.b }, p1/z, [x0, x8]
 ; VBITS_GE_256-NEXT:    ld1b { z1.b }, p2/z, [x0]
 ; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x2, x8]
@@ -196,11 +196,11 @@ define void @masked_load_v32i16(ptr %ap, ptr %bp, ptr %c) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    cmpeq p1.h, p0/z, z0.h, z2.h
-; VBITS_GE_256-NEXT:    cmpeq p2.h, p0/z, z1.h, z3.h
+; VBITS_GE_256-NEXT:    cmpeq p1.h, p0/z, z0.h, z1.h
+; VBITS_GE_256-NEXT:    cmpeq p2.h, p0/z, z2.h, z3.h
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p1/z, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    ld1h { z1.h }, p2/z, [x0]
 ; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x2, x8, lsl #1]
@@ -230,11 +230,11 @@ define void @masked_load_v16i32(ptr %ap, ptr %bp, ptr %c) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    cmpeq p1.s, p0/z, z0.s, z2.s
-; VBITS_GE_256-NEXT:    cmpeq p2.s, p0/z, z1.s, z3.s
+; VBITS_GE_256-NEXT:    cmpeq p1.s, p0/z, z0.s, z1.s
+; VBITS_GE_256-NEXT:    cmpeq p2.s, p0/z, z2.s, z3.s
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p1/z, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ld1w { z1.s }, p2/z, [x0]
 ; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x2, x8, lsl #2]
@@ -264,11 +264,11 @@ define void @masked_load_v8i64(ptr %ap, ptr %bp, ptr %c) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    cmpeq p1.d, p0/z, z0.d, z2.d
-; VBITS_GE_256-NEXT:    cmpeq p2.d, p0/z, z1.d, z3.d
+; VBITS_GE_256-NEXT:    cmpeq p1.d, p0/z, z0.d, z1.d
+; VBITS_GE_256-NEXT:    cmpeq p2.d, p0/z, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p1/z, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ld1d { z1.d }, p2/z, [x0]
 ; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x2, x8, lsl #3]
@@ -298,15 +298,15 @@ define void @masked_load_passthru_v8i64(ptr %ap, ptr %bp, ptr %c) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    cmpeq p1.d, p0/z, z0.d, z2.d
-; VBITS_GE_256-NEXT:    cmpeq p2.d, p0/z, z1.d, z3.d
+; VBITS_GE_256-NEXT:    cmpeq p1.d, p0/z, z0.d, z1.d
+; VBITS_GE_256-NEXT:    cmpeq p2.d, p0/z, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p1/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p2/z, [x0]
-; VBITS_GE_256-NEXT:    sel z0.d, p1, z0.d, z2.d
-; VBITS_GE_256-NEXT:    sel z1.d, p2, z1.d, z3.d
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p2/z, [x0]
+; VBITS_GE_256-NEXT:    sel z0.d, p1, z0.d, z1.d
+; VBITS_GE_256-NEXT:    sel z1.d, p2, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x2, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x2]
 ; VBITS_GE_256-NEXT:    ret
@@ -335,15 +335,15 @@ define void @masked_load_passthru_v8f64(ptr %ap, ptr %bp, ptr %c) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fcmeq p1.d, p0/z, z0.d, z2.d
-; VBITS_GE_256-NEXT:    fcmeq p2.d, p0/z, z1.d, z3.d
+; VBITS_GE_256-NEXT:    fcmeq p1.d, p0/z, z0.d, z1.d
+; VBITS_GE_256-NEXT:    fcmeq p2.d, p0/z, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p1/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p2/z, [x0]
-; VBITS_GE_256-NEXT:    sel z0.d, p1, z0.d, z2.d
-; VBITS_GE_256-NEXT:    sel z1.d, p2, z1.d, z3.d
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p2/z, [x0]
+; VBITS_GE_256-NEXT:    sel z0.d, p1, z0.d, z1.d
+; VBITS_GE_256-NEXT:    sel z1.d, p2, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x2, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x2]
 ; VBITS_GE_256-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-scatter.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-scatter.ll
index 70987df1c9c04ec..e2d341c22efc268 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-scatter.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-scatter.ll
@@ -65,7 +65,7 @@ define void @masked_scatter_v8i8(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    zip2 v1.8b, v1.8b, v0.8b
 ; VBITS_GE_256-NEXT:    zip2 v0.8b, v0.8b, v0.8b
 ; VBITS_GE_256-NEXT:    uunpklo z3.s, z3.h
-; VBITS_GE_256-NEXT:    ld1d { z4.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z4.d }, p0/z, [x1]
 ; VBITS_GE_256-NEXT:    shl v2.4h, v2.4h, #8
 ; VBITS_GE_256-NEXT:    shl v1.4h, v1.4h, #8
 ; VBITS_GE_256-NEXT:    uunpklo z0.s, z0.h
@@ -78,10 +78,10 @@ define void @masked_scatter_v8i8(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    sunpklo z2.d, z2.s
 ; VBITS_GE_256-NEXT:    sunpklo z1.d, z1.s
 ; VBITS_GE_256-NEXT:    cmpne p1.d, p0/z, z2.d, #0
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    cmpne p0.d, p0/z, z1.d, #0
-; VBITS_GE_256-NEXT:    st1b { z3.d }, p1, [z2.d]
-; VBITS_GE_256-NEXT:    st1b { z0.d }, p0, [z4.d]
+; VBITS_GE_256-NEXT:    st1b { z3.d }, p1, [z4.d]
+; VBITS_GE_256-NEXT:    st1b { z0.d }, p0, [z2.d]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: masked_scatter_v8i8:
@@ -212,16 +212,16 @@ define void @masked_scatter_v8i16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ext v1.16b, v1.16b, v1.16b, #8
 ; VBITS_GE_256-NEXT:    uunpklo z0.s, z0.h
 ; VBITS_GE_256-NEXT:    uunpklo z3.d, z3.s
-; VBITS_GE_256-NEXT:    ld1d { z4.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z4.d }, p0/z, [x1]
 ; VBITS_GE_256-NEXT:    sunpklo z1.s, z1.h
 ; VBITS_GE_256-NEXT:    sunpklo z2.d, z2.s
 ; VBITS_GE_256-NEXT:    uunpklo z0.d, z0.s
 ; VBITS_GE_256-NEXT:    sunpklo z1.d, z1.s
 ; VBITS_GE_256-NEXT:    cmpne p1.d, p0/z, z2.d, #0
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    st1h { z3.d }, p1, [z2.d]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    st1h { z3.d }, p1, [z4.d]
 ; VBITS_GE_256-NEXT:    cmpne p0.d, p0/z, z1.d, #0
-; VBITS_GE_256-NEXT:    st1h { z0.d }, p0, [z4.d]
+; VBITS_GE_256-NEXT:    st1h { z0.d }, p0, [z2.d]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: masked_scatter_v8i16:
@@ -342,13 +342,13 @@ define void @masked_scatter_v8i32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    punpklo p2.h, p1.b
 ; VBITS_GE_256-NEXT:    uunpklo z0.d, z0.s
 ; VBITS_GE_256-NEXT:    ext z1.b, z1.b, z1.b, #16
-; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z4.d }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    ld1d { z4.d }, p0/z, [x1, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    sunpklo z1.d, z1.s
 ; VBITS_GE_256-NEXT:    and p1.b, p2/z, p2.b, p0.b
 ; VBITS_GE_256-NEXT:    cmpne p0.d, p0/z, z1.d, #0
-; VBITS_GE_256-NEXT:    st1w { z2.d }, p1, [z4.d]
-; VBITS_GE_256-NEXT:    st1w { z0.d }, p0, [z3.d]
+; VBITS_GE_256-NEXT:    st1w { z2.d }, p1, [z3.d]
+; VBITS_GE_256-NEXT:    st1w { z0.d }, p0, [z4.d]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: masked_scatter_v8i32:
@@ -469,14 +469,14 @@ define void @masked_scatter_v8i64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256:       // %bb.0:
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
-; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    cmpeq p1.d, p0/z, z1.d, #0
-; VBITS_GE_256-NEXT:    cmpeq p0.d, p0/z, z0.d, #0
-; VBITS_GE_256-NEXT:    st1d { z1.d }, p1, [z3.d]
-; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [z2.d]
+; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    cmpeq p1.d, p0/z, z0.d, #0
+; VBITS_GE_256-NEXT:    cmpeq p0.d, p0/z, z2.d, #0
+; VBITS_GE_256-NEXT:    st1d { z0.d }, p1, [z1.d]
+; VBITS_GE_256-NEXT:    st1d { z2.d }, p0, [z3.d]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: masked_scatter_v8i64:
@@ -590,16 +590,16 @@ define void @masked_scatter_v8f16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ext v1.16b, v1.16b, v1.16b, #8
 ; VBITS_GE_256-NEXT:    uunpklo z0.s, z0.h
 ; VBITS_GE_256-NEXT:    uunpklo z3.d, z3.s
-; VBITS_GE_256-NEXT:    ld1d { z4.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z4.d }, p0/z, [x1]
 ; VBITS_GE_256-NEXT:    sunpklo z1.s, z1.h
 ; VBITS_GE_256-NEXT:    sunpklo z2.d, z2.s
 ; VBITS_GE_256-NEXT:    uunpklo z0.d, z0.s
 ; VBITS_GE_256-NEXT:    sunpklo z1.d, z1.s
 ; VBITS_GE_256-NEXT:    cmpne p1.d, p0/z, z2.d, #0
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    st1h { z3.d }, p1, [z2.d]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    st1h { z3.d }, p1, [z4.d]
 ; VBITS_GE_256-NEXT:    cmpne p0.d, p0/z, z1.d, #0
-; VBITS_GE_256-NEXT:    st1h { z0.d }, p0, [z4.d]
+; VBITS_GE_256-NEXT:    st1h { z0.d }, p0, [z2.d]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: masked_scatter_v8f16:
@@ -719,14 +719,14 @@ define void @masked_scatter_v8f32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    mov z1.s, p1/z, #-1 // =0xffffffffffffffff
 ; VBITS_GE_256-NEXT:    punpklo p2.h, p1.b
 ; VBITS_GE_256-NEXT:    uunpklo z0.d, z0.s
-; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z4.d }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    ld1d { z4.d }, p0/z, [x1, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ext z1.b, z1.b, z1.b, #16
 ; VBITS_GE_256-NEXT:    sunpklo z1.d, z1.s
 ; VBITS_GE_256-NEXT:    and p1.b, p2/z, p2.b, p0.b
 ; VBITS_GE_256-NEXT:    cmpne p0.d, p0/z, z1.d, #0
-; VBITS_GE_256-NEXT:    st1w { z2.d }, p1, [z4.d]
-; VBITS_GE_256-NEXT:    st1w { z0.d }, p0, [z3.d]
+; VBITS_GE_256-NEXT:    st1w { z2.d }, p1, [z3.d]
+; VBITS_GE_256-NEXT:    st1w { z0.d }, p0, [z4.d]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: masked_scatter_v8f32:
@@ -847,14 +847,14 @@ define void @masked_scatter_v8f64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256:       // %bb.0:
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
-; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fcmeq p1.d, p0/z, z1.d, #0.0
-; VBITS_GE_256-NEXT:    fcmeq p0.d, p0/z, z0.d, #0.0
-; VBITS_GE_256-NEXT:    st1d { z1.d }, p1, [z3.d]
-; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [z2.d]
+; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    fcmeq p1.d, p0/z, z0.d, #0.0
+; VBITS_GE_256-NEXT:    fcmeq p0.d, p0/z, z2.d, #0.0
+; VBITS_GE_256-NEXT:    st1d { z0.d }, p1, [z1.d]
+; VBITS_GE_256-NEXT:    st1d { z2.d }, p0, [z3.d]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: masked_scatter_v8f64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-stores.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-stores.ll
index db0a5b2ba942532..68fb4cc6afb093e 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-stores.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-stores.ll
@@ -87,13 +87,13 @@ define void @masked_store_v16f32(ptr %ap, ptr %bp) #0 {
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fcmeq p1.s, p0/z, z0.s, z2.s
-; VBITS_GE_256-NEXT:    fcmeq p0.s, p0/z, z1.s, z3.s
+; VBITS_GE_256-NEXT:    fcmeq p1.s, p0/z, z0.s, z1.s
+; VBITS_GE_256-NEXT:    fcmeq p0.s, p0/z, z2.s, z3.s
 ; VBITS_GE_256-NEXT:    st1w { z0.s }, p1, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1w { z2.s }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: masked_store_v16f32:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll
index 42b3196fdf1dd66..e07645c27df728d 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll
@@ -140,13 +140,13 @@ define void @zip_v4f64(ptr %a, ptr %b) #0 {
 ; VBITS_EQ_512-NEXT:    .cfi_offset w29, -16
 ; VBITS_EQ_512-NEXT:    ptrue p0.d, vl4
 ; VBITS_EQ_512-NEXT:    mov x8, sp
-; VBITS_EQ_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
-; VBITS_EQ_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
-; VBITS_EQ_512-NEXT:    mov z2.d, z1.d[3]
-; VBITS_EQ_512-NEXT:    mov z3.d, z0.d[3]
-; VBITS_EQ_512-NEXT:    mov z4.d, z1.d[2]
-; VBITS_EQ_512-NEXT:    mov z5.d, z0.d[2]
-; VBITS_EQ_512-NEXT:    zip1 z0.d, z0.d, z1.d
+; VBITS_EQ_512-NEXT:    ld1d { z0.d }, p0/z, [x1]
+; VBITS_EQ_512-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; VBITS_EQ_512-NEXT:    mov z2.d, z0.d[3]
+; VBITS_EQ_512-NEXT:    mov z3.d, z1.d[3]
+; VBITS_EQ_512-NEXT:    mov z4.d, z0.d[2]
+; VBITS_EQ_512-NEXT:    mov z5.d, z1.d[2]
+; VBITS_EQ_512-NEXT:    zip1 z0.d, z1.d, z0.d
 ; VBITS_EQ_512-NEXT:    stp d3, d2, [sp, #16]
 ; VBITS_EQ_512-NEXT:    stp d5, d4, [sp]
 ; VBITS_EQ_512-NEXT:    ld1d { z1.d }, p0/z, [x8]
@@ -243,15 +243,15 @@ define void @trn_v32i16(ptr %a, ptr %b) #0 {
 ; VBITS_EQ_256-NEXT:    ptrue p0.h
 ; VBITS_EQ_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_EQ_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_EQ_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; VBITS_EQ_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_EQ_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_EQ_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_EQ_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_EQ_256-NEXT:    trn1 z4.h, z0.h, z2.h
-; VBITS_EQ_256-NEXT:    trn2 z0.h, z0.h, z2.h
-; VBITS_EQ_256-NEXT:    trn1 z2.h, z1.h, z3.h
-; VBITS_EQ_256-NEXT:    trn2 z1.h, z1.h, z3.h
+; VBITS_EQ_256-NEXT:    trn1 z4.h, z0.h, z1.h
+; VBITS_EQ_256-NEXT:    trn2 z0.h, z0.h, z1.h
+; VBITS_EQ_256-NEXT:    trn1 z1.h, z2.h, z3.h
+; VBITS_EQ_256-NEXT:    trn2 z2.h, z2.h, z3.h
 ; VBITS_EQ_256-NEXT:    add z0.h, z4.h, z0.h
-; VBITS_EQ_256-NEXT:    add z1.h, z2.h, z1.h
+; VBITS_EQ_256-NEXT:    add z1.h, z1.h, z2.h
 ; VBITS_EQ_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
 ; VBITS_EQ_256-NEXT:    st1h { z1.h }, p0, [x0]
 ; VBITS_EQ_256-NEXT:    ret
@@ -515,18 +515,18 @@ define void @uzp_v32i16(ptr %a, ptr %b) #1 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    mov x8, #16 // =0x10
-; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; CHECK-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
-; CHECK-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; CHECK-NEXT:    uzp1 z4.h, z3.h, z2.h
-; CHECK-NEXT:    uzp2 z2.h, z3.h, z2.h
-; CHECK-NEXT:    uzp1 z3.h, z1.h, z0.h
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x1, x8, lsl #1]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    ld1h { z2.h }, p0/z, [x0, x8, lsl #1]
+; CHECK-NEXT:    ld1h { z3.h }, p0/z, [x0]
+; CHECK-NEXT:    uzp1 z4.h, z1.h, z0.h
 ; CHECK-NEXT:    uzp2 z0.h, z1.h, z0.h
-; CHECK-NEXT:    add z1.h, z4.h, z2.h
-; CHECK-NEXT:    add z0.h, z3.h, z0.h
-; CHECK-NEXT:    st1h { z1.h }, p0, [x0, x8, lsl #1]
-; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-NEXT:    uzp1 z1.h, z3.h, z2.h
+; CHECK-NEXT:    uzp2 z2.h, z3.h, z2.h
+; CHECK-NEXT:    add z0.h, z4.h, z0.h
+; CHECK-NEXT:    add z1.h, z1.h, z2.h
+; CHECK-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
+; CHECK-NEXT:    st1h { z1.h }, p0, [x0]
 ; CHECK-NEXT:    ret
   %tmp1 = load <32 x i16>, ptr %a
   %tmp2 = load <32 x i16>, ptr %b
@@ -657,13 +657,13 @@ define void @zip_vscale2_4(ptr %a, ptr %b) #2 {
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    ptrue p0.d, vl4
 ; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
-; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
-; CHECK-NEXT:    mov z2.d, z1.d[3]
-; CHECK-NEXT:    mov z3.d, z0.d[3]
-; CHECK-NEXT:    mov z4.d, z1.d[2]
-; CHECK-NEXT:    mov z5.d, z0.d[2]
-; CHECK-NEXT:    zip1 z0.d, z0.d, z1.d
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x1]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; CHECK-NEXT:    mov z2.d, z0.d[3]
+; CHECK-NEXT:    mov z3.d, z1.d[3]
+; CHECK-NEXT:    mov z4.d, z0.d[2]
+; CHECK-NEXT:    mov z5.d, z1.d[2]
+; CHECK-NEXT:    zip1 z0.d, z1.d, z0.d
 ; CHECK-NEXT:    stp d3, d2, [sp, #16]
 ; CHECK-NEXT:    stp d5, d4, [sp]
 ; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x8]
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-subvector.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-subvector.ll
index 557b349d482f03d..aef19d23109b4f5 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-subvector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-subvector.ll
@@ -421,11 +421,11 @@ bb1:
 define void @no_subvector_binop_hang(ptr %in, ptr %out, i1 %cond) #0 {
 ; CHECK-LABEL: no_subvector_binop_hang:
 ; CHECK:       // %bb.0:
+; CHECK-NEXT:    tbz w2, #0, .LBB23_2
+; CHECK-NEXT:  // %bb.1: // %bb.1
 ; CHECK-NEXT:    ptrue p0.s, vl8
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
-; CHECK-NEXT:    tbz w2, #0, .LBB23_2
-; CHECK-NEXT:  // %bb.1: // %bb.1
 ; CHECK-NEXT:    orr z0.d, z0.d, z1.d
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x1]
 ; CHECK-NEXT:  .LBB23_2: // %bb.2
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle-tbl.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle-tbl.ll
index f646319ba5fccb3..bae3c6582c6b0c2 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle-tbl.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle-tbl.ll
@@ -242,9 +242,9 @@ define <8 x i8> @shuffle_index_size_op1_maxhw(ptr %a, ptr %b) "target-features"=
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    adrp x8, .LCPI6_0
 ; CHECK-NEXT:    add x8, x8, :lo12:.LCPI6_0
-; CHECK-NEXT:    ldr d0, [x0]
-; CHECK-NEXT:    ld1b { z1.b }, p0/z, [x8]
-; CHECK-NEXT:    tbl z0.b, { z0.b }, z1.b
+; CHECK-NEXT:    ldr d1, [x0]
+; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x8]
+; CHECK-NEXT:    tbl z0.b, { z1.b }, z0.b
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %op1 = load <8 x i8>, ptr %a
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll
index f32175a42d8eaa1..1967926d85d8676 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll
@@ -52,17 +52,17 @@ define void @shuffle_ext_byone_v64i8(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256:       // %bb.0:
 ; VBITS_GE_256-NEXT:    ptrue p0.b, vl32
 ; VBITS_GE_256-NEXT:    mov w8, #32 // =0x20
-; VBITS_GE_256-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
-; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x1, x8]
-; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    mov z3.b, z2.b[31]
-; VBITS_GE_256-NEXT:    mov z0.b, z0.b[31]
-; VBITS_GE_256-NEXT:    fmov w9, s3
-; VBITS_GE_256-NEXT:    insr z1.b, w9
-; VBITS_GE_256-NEXT:    fmov w9, s0
-; VBITS_GE_256-NEXT:    insr z2.b, w9
-; VBITS_GE_256-NEXT:    st1b { z1.b }, p0, [x0, x8]
-; VBITS_GE_256-NEXT:    st1b { z2.b }, p0, [x0]
+; VBITS_GE_256-NEXT:    ld1b { z0.b }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x0, x8]
+; VBITS_GE_256-NEXT:    ld1b { z3.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT:    mov z2.b, z0.b[31]
+; VBITS_GE_256-NEXT:    mov z1.b, z1.b[31]
+; VBITS_GE_256-NEXT:    fmov w9, s2
+; VBITS_GE_256-NEXT:    insr z3.b, w9
+; VBITS_GE_256-NEXT:    fmov w9, s1
+; VBITS_GE_256-NEXT:    insr z0.b, w9
+; VBITS_GE_256-NEXT:    st1b { z3.b }, p0, [x0, x8]
+; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: shuffle_ext_byone_v64i8:
@@ -217,17 +217,17 @@ define void @shuffle_ext_byone_v32i16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256:       // %bb.0:
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
-; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    mov z3.h, z2.h[15]
-; VBITS_GE_256-NEXT:    mov z0.h, z0.h[15]
-; VBITS_GE_256-NEXT:    fmov w9, s3
-; VBITS_GE_256-NEXT:    insr z1.h, w9
-; VBITS_GE_256-NEXT:    fmov w9, s0
-; VBITS_GE_256-NEXT:    insr z2.h, w9
-; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    st1h { z2.h }, p0, [x0]
+; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    mov z2.h, z0.h[15]
+; VBITS_GE_256-NEXT:    mov z1.h, z1.h[15]
+; VBITS_GE_256-NEXT:    fmov w9, s2
+; VBITS_GE_256-NEXT:    insr z3.h, w9
+; VBITS_GE_256-NEXT:    fmov w9, s1
+; VBITS_GE_256-NEXT:    insr z0.h, w9
+; VBITS_GE_256-NEXT:    st1h { z3.h }, p0, [x0, x8, lsl #1]
+; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: shuffle_ext_byone_v32i16:
@@ -353,17 +353,17 @@ define void @shuffle_ext_byone_v16i32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256:       // %bb.0:
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
-; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    mov z3.s, z2.s[7]
-; VBITS_GE_256-NEXT:    mov z0.s, z0.s[7]
-; VBITS_GE_256-NEXT:    fmov w9, s3
-; VBITS_GE_256-NEXT:    insr z1.s, w9
-; VBITS_GE_256-NEXT:    fmov w9, s0
-; VBITS_GE_256-NEXT:    insr z2.s, w9
-; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    st1w { z2.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    mov z2.s, z0.s[7]
+; VBITS_GE_256-NEXT:    mov z1.s, z1.s[7]
+; VBITS_GE_256-NEXT:    fmov w9, s2
+; VBITS_GE_256-NEXT:    insr z3.s, w9
+; VBITS_GE_256-NEXT:    fmov w9, s1
+; VBITS_GE_256-NEXT:    insr z0.s, w9
+; VBITS_GE_256-NEXT:    st1w { z3.s }, p0, [x0, x8, lsl #2]
+; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: shuffle_ext_byone_v16i32:
@@ -465,17 +465,17 @@ define void @shuffle_ext_byone_v8i64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256:       // %bb.0:
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
-; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    mov z3.d, z2.d[3]
-; VBITS_GE_256-NEXT:    mov z0.d, z0.d[3]
-; VBITS_GE_256-NEXT:    fmov x9, d3
-; VBITS_GE_256-NEXT:    insr z1.d, x9
-; VBITS_GE_256-NEXT:    fmov x9, d0
-; VBITS_GE_256-NEXT:    insr z2.d, x9
-; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    st1d { z2.d }, p0, [x0]
+; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    mov z2.d, z0.d[3]
+; VBITS_GE_256-NEXT:    mov z1.d, z1.d[3]
+; VBITS_GE_256-NEXT:    fmov x9, d2
+; VBITS_GE_256-NEXT:    insr z3.d, x9
+; VBITS_GE_256-NEXT:    fmov x9, d1
+; VBITS_GE_256-NEXT:    insr z0.d, x9
+; VBITS_GE_256-NEXT:    st1d { z3.d }, p0, [x0, x8, lsl #3]
+; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: shuffle_ext_byone_v8i64:
@@ -580,15 +580,15 @@ define void @shuffle_ext_byone_v32f16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256:       // %bb.0:
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
-; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    mov z3.h, z2.h[15]
-; VBITS_GE_256-NEXT:    mov z0.h, z0.h[15]
-; VBITS_GE_256-NEXT:    insr z1.h, h3
-; VBITS_GE_256-NEXT:    insr z2.h, h0
-; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    st1h { z2.h }, p0, [x0]
+; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    mov z3.h, z0.h[15]
+; VBITS_GE_256-NEXT:    mov z1.h, z1.h[15]
+; VBITS_GE_256-NEXT:    insr z2.h, h3
+; VBITS_GE_256-NEXT:    insr z0.h, h1
+; VBITS_GE_256-NEXT:    st1h { z2.h }, p0, [x0, x8, lsl #1]
+; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: shuffle_ext_byone_v32f16:
@@ -712,15 +712,15 @@ define void @shuffle_ext_byone_v16f32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256:       // %bb.0:
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
-; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    mov z3.s, z2.s[7]
-; VBITS_GE_256-NEXT:    mov z0.s, z0.s[7]
-; VBITS_GE_256-NEXT:    insr z1.s, s3
-; VBITS_GE_256-NEXT:    insr z2.s, s0
-; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    st1w { z2.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    mov z3.s, z0.s[7]
+; VBITS_GE_256-NEXT:    mov z1.s, z1.s[7]
+; VBITS_GE_256-NEXT:    insr z2.s, s3
+; VBITS_GE_256-NEXT:    insr z0.s, s1
+; VBITS_GE_256-NEXT:    st1w { z2.s }, p0, [x0, x8, lsl #2]
+; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: shuffle_ext_byone_v16f32:
@@ -820,15 +820,15 @@ define void @shuffle_ext_byone_v8f64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256:       // %bb.0:
 ; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
-; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    mov z3.d, z2.d[3]
-; VBITS_GE_256-NEXT:    mov z0.d, z0.d[3]
-; VBITS_GE_256-NEXT:    insr z1.d, d3
-; VBITS_GE_256-NEXT:    insr z2.d, d0
-; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    st1d { z2.d }, p0, [x0]
+; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    mov z3.d, z0.d[3]
+; VBITS_GE_256-NEXT:    mov z1.d, z1.d[3]
+; VBITS_GE_256-NEXT:    insr z2.d, d3
+; VBITS_GE_256-NEXT:    insr z0.d, d1
+; VBITS_GE_256-NEXT:    st1d { z2.d }, p0, [x0, x8, lsl #3]
+; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: shuffle_ext_byone_v8f64:
@@ -893,11 +893,11 @@ define void @shuffle_ext_byone_reverse(ptr %a, ptr %b) vscale_range(2,0) #0 {
 ; CHECK-LABEL: shuffle_ext_byone_reverse:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d, vl4
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
-; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
-; CHECK-NEXT:    mov z1.d, z1.d[3]
-; CHECK-NEXT:    insr z0.d, d1
-; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x1]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; CHECK-NEXT:    mov z0.d, z0.d[3]
+; CHECK-NEXT:    insr z1.d, d0
+; CHECK-NEXT:    st1d { z1.d }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <4 x double>, ptr %a
   %op2 = load <4 x double>, ptr %b
@@ -918,13 +918,13 @@ define void @shuffle_ext_invalid(ptr %a, ptr %b) vscale_range(2,0) #0 {
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    ptrue p0.d, vl4
 ; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
-; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
-; CHECK-NEXT:    mov z2.d, z1.d[1]
-; CHECK-NEXT:    mov z3.d, z0.d[3]
-; CHECK-NEXT:    mov z0.d, z0.d[2]
-; CHECK-NEXT:    stp d1, d2, [sp, #16]
-; CHECK-NEXT:    stp d0, d3, [sp]
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x1]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; CHECK-NEXT:    mov z2.d, z0.d[1]
+; CHECK-NEXT:    mov z3.d, z1.d[3]
+; CHECK-NEXT:    mov z1.d, z1.d[2]
+; CHECK-NEXT:    stp d0, d2, [sp, #16]
+; CHECK-NEXT:    stp d1, d3, [sp]
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8]
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    mov sp, x29
diff --git a/llvm/test/CodeGen/AArch64/sve-fptrunc-store.ll b/llvm/test/CodeGen/AArch64/sve-fptrunc-store.ll
index d1ca2ba0f95815f..106be131bf10ba1 100644
--- a/llvm/test/CodeGen/AArch64/sve-fptrunc-store.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fptrunc-store.ll
@@ -65,18 +65,18 @@ define void @fptrunc8_f64_f16(<vscale x 8 x half> *%dst, <vscale x 8 x double> *
 ; CHECK-LABEL: fptrunc8_f64_f16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x1]
-; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1, #1, mul vl]
-; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x1, #2, mul vl]
-; CHECK-NEXT:    ld1d { z3.d }, p0/z, [x1, #3, mul vl]
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x1, #3, mul vl]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x1, #1, mul vl]
+; CHECK-NEXT:    ld1d { z3.d }, p0/z, [x1, #2, mul vl]
+; CHECK-NEXT:    fcvt z0.h, p0/m, z0.d
 ; CHECK-NEXT:    fcvt z2.h, p0/m, z2.d
 ; CHECK-NEXT:    fcvt z1.h, p0/m, z1.d
-; CHECK-NEXT:    fcvt z0.h, p0/m, z0.d
 ; CHECK-NEXT:    fcvt z3.h, p0/m, z3.d
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
-; CHECK-NEXT:    uzp1 z2.s, z2.s, z3.s
-; CHECK-NEXT:    uzp1 z0.h, z0.h, z2.h
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z2.s
+; CHECK-NEXT:    uzp1 z0.s, z3.s, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z1.h, z0.h
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll b/llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll
index 9028d36528f2627..34918237433cadc 100644
--- a/llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll
+++ b/llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll
@@ -78,17 +78,17 @@ define <vscale x 16 x i8> @narrow_i64_gather_index_i8_zext(i8* %out, i8* %in, <v
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    add x8, x1, x2
-; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x1, x2]
-; CHECK-NEXT:    ld1b { z1.s }, p0/z, [x8, #1, mul vl]
-; CHECK-NEXT:    ld1b { z2.s }, p0/z, [x8, #2, mul vl]
-; CHECK-NEXT:    ld1b { z3.s }, p0/z, [x8, #3, mul vl]
-; CHECK-NEXT:    ld1b { z3.s }, p0/z, [x1, z3.s, uxtw]
-; CHECK-NEXT:    ld1b { z2.s }, p0/z, [x1, z2.s, uxtw]
+; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x8, #3, mul vl]
+; CHECK-NEXT:    ld1b { z1.s }, p0/z, [x1, x2]
+; CHECK-NEXT:    ld1b { z2.s }, p0/z, [x8, #1, mul vl]
+; CHECK-NEXT:    ld1b { z3.s }, p0/z, [x8, #2, mul vl]
 ; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x1, z0.s, uxtw]
+; CHECK-NEXT:    ld1b { z3.s }, p0/z, [x1, z3.s, uxtw]
 ; CHECK-NEXT:    ld1b { z1.s }, p0/z, [x1, z1.s, uxtw]
-; CHECK-NEXT:    uzp1 z2.h, z2.h, z3.h
-; CHECK-NEXT:    uzp1 z0.h, z0.h, z1.h
-; CHECK-NEXT:    uzp1 z0.b, z0.b, z2.b
+; CHECK-NEXT:    ld1b { z2.s }, p0/z, [x1, z2.s, uxtw]
+; CHECK-NEXT:    uzp1 z0.h, z3.h, z0.h
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z2.h
+; CHECK-NEXT:    uzp1 z0.b, z1.b, z0.b
 ; CHECK-NEXT:    ret
   %1 = getelementptr inbounds i8, i8* %in, i64 %ptr
   %2 = bitcast i8* %1 to <vscale x 16 x i8>*
@@ -104,17 +104,17 @@ define <vscale x 16 x i8> @narrow_i64_gather_index_i8_sext(i8* %out, i8* %in, <v
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    add x8, x1, x2
-; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x1, x2]
-; CHECK-NEXT:    ld1sb { z1.s }, p0/z, [x8, #1, mul vl]
-; CHECK-NEXT:    ld1sb { z2.s }, p0/z, [x8, #2, mul vl]
-; CHECK-NEXT:    ld1sb { z3.s }, p0/z, [x8, #3, mul vl]
-; CHECK-NEXT:    ld1b { z3.s }, p0/z, [x1, z3.s, sxtw]
-; CHECK-NEXT:    ld1b { z2.s }, p0/z, [x1, z2.s, sxtw]
+; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x8, #3, mul vl]
+; CHECK-NEXT:    ld1sb { z1.s }, p0/z, [x1, x2]
+; CHECK-NEXT:    ld1sb { z2.s }, p0/z, [x8, #1, mul vl]
+; CHECK-NEXT:    ld1sb { z3.s }, p0/z, [x8, #2, mul vl]
 ; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x1, z0.s, sxtw]
+; CHECK-NEXT:    ld1b { z3.s }, p0/z, [x1, z3.s, sxtw]
 ; CHECK-NEXT:    ld1b { z1.s }, p0/z, [x1, z1.s, sxtw]
-; CHECK-NEXT:    uzp1 z2.h, z2.h, z3.h
-; CHECK-NEXT:    uzp1 z0.h, z0.h, z1.h
-; CHECK-NEXT:    uzp1 z0.b, z0.b, z2.b
+; CHECK-NEXT:    ld1b { z2.s }, p0/z, [x1, z2.s, sxtw]
+; CHECK-NEXT:    uzp1 z0.h, z3.h, z0.h
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z2.h
+; CHECK-NEXT:    uzp1 z0.b, z1.b, z0.b
 ; CHECK-NEXT:    ret
   %1 = getelementptr inbounds i8, i8* %in, i64 %ptr
   %2 = bitcast i8* %1 to <vscale x 16 x i8>*
diff --git a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
index de2efe288ac4707..b7f9ef839090306 100644
--- a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
@@ -198,14 +198,14 @@ define void @insert_v2i64_nxv16i64(<2 x i64> %sv0, <2 x i64> %sv1, <vscale x 16
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
 ; CHECK-NEXT:    str q1, [sp, #32]
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp]
-; CHECK-NEXT:    ld1d { z1.d }, p0/z, [sp, #1, mul vl]
-; CHECK-NEXT:    ld1d { z2.d }, p0/z, [sp, #2, mul vl]
-; CHECK-NEXT:    ld1d { z3.d }, p0/z, [sp, #3, mul vl]
-; CHECK-NEXT:    st1d { z3.d }, p0, [x0, #3, mul vl]
-; CHECK-NEXT:    st1d { z2.d }, p0, [x0, #2, mul vl]
-; CHECK-NEXT:    st1d { z1.d }, p0, [x0, #1, mul vl]
-; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp, #3, mul vl]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [sp, #2, mul vl]
+; CHECK-NEXT:    ld1d { z2.d }, p0/z, [sp, #1, mul vl]
+; CHECK-NEXT:    ld1d { z3.d }, p0/z, [sp]
+; CHECK-NEXT:    st1d { z0.d }, p0, [x0, #3, mul vl]
+; CHECK-NEXT:    st1d { z1.d }, p0, [x0, #2, mul vl]
+; CHECK-NEXT:    st1d { z2.d }, p0, [x0, #1, mul vl]
+; CHECK-NEXT:    st1d { z3.d }, p0, [x0]
 ; CHECK-NEXT:    addvl sp, sp, #4
 ; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -242,10 +242,10 @@ define void @insert_v2i64_nxv16i64_lo2(<2 x i64>* %psv, <vscale x 16 x i64>* %ou
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    str q0, [sp, #16]
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp]
-; CHECK-NEXT:    ld1d { z1.d }, p0/z, [sp, #1, mul vl]
-; CHECK-NEXT:    st1d { z1.d }, p0, [x1, #1, mul vl]
-; CHECK-NEXT:    st1d { z0.d }, p0, [x1]
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp, #1, mul vl]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [sp]
+; CHECK-NEXT:    st1d { z0.d }, p0, [x1, #1, mul vl]
+; CHECK-NEXT:    st1d { z1.d }, p0, [x1]
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll b/llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll
index 79ef20270eda85b..da5dc5c5b34d908 100644
--- a/llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll
@@ -43,10 +43,10 @@ define void @test_post_ld1_int_fixed(ptr %data, i64 %idx, ptr %addr, ptr %res_pt
 ; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    ptrue p1.d, vl1
 ; CHECK-NEXT:    mov z1.d, x8
-; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x2]
-; CHECK-NEXT:    cmpeq p2.d, p0/z, z0.d, z1.d
 ; CHECK-NEXT:    ldr x8, [x0]
 ; CHECK-NEXT:    ldr x9, [x0, x1, lsl #3]
+; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x2]
+; CHECK-NEXT:    cmpeq p2.d, p0/z, z0.d, z1.d
 ; CHECK-NEXT:    mov z0.d, z2.d
 ; CHECK-NEXT:    mov z2.d, p2/m, x9
 ; CHECK-NEXT:    mov z0.d, p1/m, x8
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll b/llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll
index e266ab78603fb0b..de8579652803b7c 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll
@@ -173,11 +173,11 @@ define <vscale x 4 x i64> @masked_sload_x2_4i8_4i64(ptr %a, ptr %b, <vscale x 4
 ; CHECK-NEXT:    punpkhi p1.h, p0.b
 ; CHECK-NEXT:    punpklo p0.h, p0.b
 ; CHECK-NEXT:    ld1sb { z1.d }, p1/z, [x0, #1, mul vl]
-; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1sb { z2.d }, p1/z, [x1, #1, mul vl]
+; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1sb { z3.d }, p0/z, [x1]
-; CHECK-NEXT:    add z0.d, z0.d, z3.d
 ; CHECK-NEXT:    add z1.d, z1.d, z2.d
+; CHECK-NEXT:    add z0.d, z0.d, z3.d
 ; CHECK-NEXT:    ret
   %aval = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8> *%a, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer)
   %bval = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8> *%b, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer)
@@ -193,11 +193,11 @@ define <vscale x 4 x i64> @masked_sload_x2_4i16_4i64(ptr %a, ptr %b, <vscale x 4
 ; CHECK-NEXT:    punpkhi p1.h, p0.b
 ; CHECK-NEXT:    punpklo p0.h, p0.b
 ; CHECK-NEXT:    ld1sh { z1.d }, p1/z, [x0, #1, mul vl]
-; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1sh { z2.d }, p1/z, [x1, #1, mul vl]
+; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1sh { z3.d }, p0/z, [x1]
-; CHECK-NEXT:    add z0.d, z0.d, z3.d
 ; CHECK-NEXT:    add z1.d, z1.d, z2.d
+; CHECK-NEXT:    add z0.d, z0.d, z3.d
 ; CHECK-NEXT:    ret
   %aval = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16> *%a, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer)
   %bval = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16> *%b, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer)
@@ -213,11 +213,11 @@ define <vscale x 8 x i32> @masked_sload_x2_8i8_8i32(ptr %a, ptr %b, <vscale x 8
 ; CHECK-NEXT:    punpkhi p1.h, p0.b
 ; CHECK-NEXT:    punpklo p0.h, p0.b
 ; CHECK-NEXT:    ld1sb { z1.s }, p1/z, [x0, #1, mul vl]
-; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ld1sb { z2.s }, p1/z, [x1, #1, mul vl]
+; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ld1sb { z3.s }, p0/z, [x1]
-; CHECK-NEXT:    add z0.s, z0.s, z3.s
 ; CHECK-NEXT:    add z1.s, z1.s, z2.s
+; CHECK-NEXT:    add z0.s, z0.s, z3.s
 ; CHECK-NEXT:    ret
   %aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%a, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
   %bval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%b, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
@@ -237,17 +237,17 @@ define <vscale x 8 x i64> @masked_sload_x2_8i8_8i64(ptr %a, ptr %b, <vscale x 8
 ; CHECK-NEXT:    punpkhi p3.h, p0.b
 ; CHECK-NEXT:    punpklo p0.h, p0.b
 ; CHECK-NEXT:    ld1sb { z3.d }, p2/z, [x0, #3, mul vl]
+; CHECK-NEXT:    ld1sb { z5.d }, p2/z, [x1, #3, mul vl]
 ; CHECK-NEXT:    ld1sb { z2.d }, p1/z, [x0, #2, mul vl]
+; CHECK-NEXT:    ld1sb { z6.d }, p1/z, [x1, #2, mul vl]
 ; CHECK-NEXT:    ld1sb { z1.d }, p3/z, [x0, #1, mul vl]
+; CHECK-NEXT:    ld1sb { z7.d }, p3/z, [x1, #1, mul vl]
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0]
-; CHECK-NEXT:    ld1sb { z4.d }, p2/z, [x1, #3, mul vl]
-; CHECK-NEXT:    ld1sb { z5.d }, p1/z, [x1, #2, mul vl]
-; CHECK-NEXT:    ld1sb { z6.d }, p3/z, [x1, #1, mul vl]
-; CHECK-NEXT:    ld1sb { z7.d }, p0/z, [x1]
-; CHECK-NEXT:    add z2.d, z2.d, z5.d
-; CHECK-NEXT:    add z3.d, z3.d, z4.d
-; CHECK-NEXT:    add z0.d, z0.d, z7.d
-; CHECK-NEXT:    add z1.d, z1.d, z6.d
+; CHECK-NEXT:    ld1sb { z4.d }, p0/z, [x1]
+; CHECK-NEXT:    add z3.d, z3.d, z5.d
+; CHECK-NEXT:    add z2.d, z2.d, z6.d
+; CHECK-NEXT:    add z1.d, z1.d, z7.d
+; CHECK-NEXT:    add z0.d, z0.d, z4.d
 ; CHECK-NEXT:    ret
   %aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%a, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
   %bval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%b, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll b/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
index 81863e7217d3a1a..da338b741872870 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
@@ -168,11 +168,11 @@ define <vscale x 4 x i64> @masked_zload_x2_4i8_4i64(ptr %a, ptr %b, <vscale x 4
 ; CHECK-NEXT:    punpkhi p1.h, p0.b
 ; CHECK-NEXT:    punpklo p0.h, p0.b
 ; CHECK-NEXT:    ld1b { z1.d }, p1/z, [x0, #1, mul vl]
-; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1b { z2.d }, p1/z, [x1, #1, mul vl]
+; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1b { z3.d }, p0/z, [x1]
-; CHECK-NEXT:    add z0.d, z0.d, z3.d
 ; CHECK-NEXT:    add z1.d, z1.d, z2.d
+; CHECK-NEXT:    add z0.d, z0.d, z3.d
 ; CHECK-NEXT:    ret
   %aval = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8> *%a, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer)
   %bval = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8> *%b, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer)
@@ -188,11 +188,11 @@ define <vscale x 4 x i64> @masked_zload_x2_4i16_4i64(ptr %a, ptr %b, <vscale x 4
 ; CHECK-NEXT:    punpkhi p1.h, p0.b
 ; CHECK-NEXT:    punpklo p0.h, p0.b
 ; CHECK-NEXT:    ld1h { z1.d }, p1/z, [x0, #1, mul vl]
-; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1h { z2.d }, p1/z, [x1, #1, mul vl]
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1h { z3.d }, p0/z, [x1]
-; CHECK-NEXT:    add z0.d, z0.d, z3.d
 ; CHECK-NEXT:    add z1.d, z1.d, z2.d
+; CHECK-NEXT:    add z0.d, z0.d, z3.d
 ; CHECK-NEXT:    ret
   %aval = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16> *%a, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer)
   %bval = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16> *%b, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer)
@@ -208,11 +208,11 @@ define <vscale x 8 x i32> @masked_zload_x2_8i8_8i32(ptr %a, ptr %b, <vscale x 8
 ; CHECK-NEXT:    punpkhi p1.h, p0.b
 ; CHECK-NEXT:    punpklo p0.h, p0.b
 ; CHECK-NEXT:    ld1b { z1.s }, p1/z, [x0, #1, mul vl]
-; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ld1b { z2.s }, p1/z, [x1, #1, mul vl]
+; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ld1b { z3.s }, p0/z, [x1]
-; CHECK-NEXT:    add z0.s, z0.s, z3.s
 ; CHECK-NEXT:    add z1.s, z1.s, z2.s
+; CHECK-NEXT:    add z0.s, z0.s, z3.s
 ; CHECK-NEXT:    ret
   %aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%a, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
   %bval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%b, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
@@ -232,17 +232,17 @@ define <vscale x 8 x i64> @masked_zload_x2_8i8_8i64(ptr %a, ptr %b, <vscale x 8
 ; CHECK-NEXT:    punpkhi p3.h, p0.b
 ; CHECK-NEXT:    punpklo p0.h, p0.b
 ; CHECK-NEXT:    ld1b { z3.d }, p2/z, [x0, #3, mul vl]
+; CHECK-NEXT:    ld1b { z5.d }, p2/z, [x1, #3, mul vl]
 ; CHECK-NEXT:    ld1b { z2.d }, p1/z, [x0, #2, mul vl]
+; CHECK-NEXT:    ld1b { z6.d }, p1/z, [x1, #2, mul vl]
 ; CHECK-NEXT:    ld1b { z1.d }, p3/z, [x0, #1, mul vl]
+; CHECK-NEXT:    ld1b { z7.d }, p3/z, [x1, #1, mul vl]
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0]
-; CHECK-NEXT:    ld1b { z4.d }, p2/z, [x1, #3, mul vl]
-; CHECK-NEXT:    ld1b { z5.d }, p1/z, [x1, #2, mul vl]
-; CHECK-NEXT:    ld1b { z6.d }, p3/z, [x1, #1, mul vl]
-; CHECK-NEXT:    ld1b { z7.d }, p0/z, [x1]
-; CHECK-NEXT:    add z2.d, z2.d, z5.d
-; CHECK-NEXT:    add z3.d, z3.d, z4.d
-; CHECK-NEXT:    add z0.d, z0.d, z7.d
-; CHECK-NEXT:    add z1.d, z1.d, z6.d
+; CHECK-NEXT:    ld1b { z4.d }, p0/z, [x1]
+; CHECK-NEXT:    add z3.d, z3.d, z5.d
+; CHECK-NEXT:    add z2.d, z2.d, z6.d
+; CHECK-NEXT:    add z1.d, z1.d, z7.d
+; CHECK-NEXT:    add z0.d, z0.d, z4.d
 ; CHECK-NEXT:    ret
   %aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%a, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
   %bval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%b, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll
index 782098a82f177d6..251a7c3b18a9ffc 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll
@@ -66,14 +66,14 @@ define <16 x i32> @load_sext_v16i8i32(ptr %ap)  {
 ; CHECK-NEXT:    mov w8, #4 // =0x4
 ; CHECK-NEXT:    mov w9, #8 // =0x8
 ; CHECK-NEXT:    mov w10, #12 // =0xc
+; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ld1sb { z1.s }, p0/z, [x0, x8]
 ; CHECK-NEXT:    ld1sb { z2.s }, p0/z, [x0, x9]
 ; CHECK-NEXT:    ld1sb { z3.s }, p0/z, [x0, x10]
-; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
 ; CHECK-NEXT:    // kill: def $q2 killed $q2 killed $z2
 ; CHECK-NEXT:    // kill: def $q3 killed $q3 killed $z3
-; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, ptr %ap
   %val = sext <16 x i8> %a to <16 x i32>
@@ -165,26 +165,26 @@ define <16 x i64> @load_zext_v16i16i64(ptr %ap)  {
 ; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    mov x8, #2 // =0x2
 ; CHECK-NEXT:    mov x9, #4 // =0x4
+; CHECK-NEXT:    mov x10, #6 // =0x6
 ; CHECK-NEXT:    ld1h { z1.d }, p0/z, [x0, x8, lsl #1]
-; CHECK-NEXT:    mov x8, #6 // =0x6
+; CHECK-NEXT:    mov x8, #8 // =0x8
 ; CHECK-NEXT:    ld1h { z2.d }, p0/z, [x0, x9, lsl #1]
-; CHECK-NEXT:    mov x9, #8 // =0x8
-; CHECK-NEXT:    ld1h { z3.d }, p0/z, [x0, x8, lsl #1]
-; CHECK-NEXT:    mov x8, #10 // =0xa
-; CHECK-NEXT:    ld1h { z4.d }, p0/z, [x0, x9, lsl #1]
-; CHECK-NEXT:    mov x9, #12 // =0xc
-; CHECK-NEXT:    ld1h { z5.d }, p0/z, [x0, x8, lsl #1]
+; CHECK-NEXT:    mov x9, #10 // =0xa
+; CHECK-NEXT:    ld1h { z3.d }, p0/z, [x0, x10, lsl #1]
+; CHECK-NEXT:    mov x10, #12 // =0xc
+; CHECK-NEXT:    ld1h { z4.d }, p0/z, [x0, x8, lsl #1]
 ; CHECK-NEXT:    mov x8, #14 // =0xe
-; CHECK-NEXT:    ld1h { z6.d }, p0/z, [x0, x9, lsl #1]
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z5.d }, p0/z, [x0, x9, lsl #1]
+; CHECK-NEXT:    ld1h { z6.d }, p0/z, [x0, x10, lsl #1]
+; CHECK-NEXT:    ld1h { z7.d }, p0/z, [x0, x8, lsl #1]
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
 ; CHECK-NEXT:    // kill: def $q2 killed $q2 killed $z2
 ; CHECK-NEXT:    // kill: def $q3 killed $q3 killed $z3
 ; CHECK-NEXT:    // kill: def $q4 killed $q4 killed $z4
 ; CHECK-NEXT:    // kill: def $q5 killed $q5 killed $z5
-; CHECK-NEXT:    ld1h { z7.d }, p0/z, [x0, x8, lsl #1]
-; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    // kill: def $q6 killed $q6 killed $z6
-; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    // kill: def $q7 killed $q7 killed $z7
 ; CHECK-NEXT:    ret
   %a = load <16 x i16>, ptr %ap
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
index fa35f4e438883f3..da9b79a56a9518e 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
@@ -330,12 +330,12 @@ define void @test_copysign_v2f64_v2f32(ptr %ap, ptr %bp) {
 ; SVE2-LABEL: test_copysign_v2f64_v2f32:
 ; SVE2:       // %bb.0:
 ; SVE2-NEXT:    ptrue p0.d, vl2
-; SVE2-NEXT:    ldr q0, [x0]
-; SVE2-NEXT:    mov z2.d, #0x7fffffffffffffff
-; SVE2-NEXT:    ld1w { z1.d }, p0/z, [x1]
-; SVE2-NEXT:    fcvt z1.d, p0/m, z1.s
-; SVE2-NEXT:    bsl z0.d, z0.d, z1.d, z2.d
-; SVE2-NEXT:    str q0, [x0]
+; SVE2-NEXT:    mov z1.d, #0x7fffffffffffffff
+; SVE2-NEXT:    ldr q2, [x0]
+; SVE2-NEXT:    ld1w { z0.d }, p0/z, [x1]
+; SVE2-NEXT:    fcvt z0.d, p0/m, z0.s
+; SVE2-NEXT:    bsl z2.d, z2.d, z0.d, z1.d
+; SVE2-NEXT:    str q2, [x0]
 ; SVE2-NEXT:    ret
   %a = load <2 x double>, ptr %ap
   %b = load < 2 x float>, ptr %bp
@@ -353,17 +353,17 @@ define void @test_copysign_v4f64_v4f32(ptr %ap, ptr %bp) {
 ; SVE:       // %bb.0:
 ; SVE-NEXT:    ptrue p0.d, vl2
 ; SVE-NEXT:    mov x8, #2 // =0x2
-; SVE-NEXT:    ldp q0, q1, [x0]
-; SVE-NEXT:    and z0.d, z0.d, #0x7fffffffffffffff
-; SVE-NEXT:    and z1.d, z1.d, #0x7fffffffffffffff
-; SVE-NEXT:    ld1w { z2.d }, p0/z, [x1, x8, lsl #2]
-; SVE-NEXT:    ld1w { z3.d }, p0/z, [x1]
-; SVE-NEXT:    fcvt z3.d, p0/m, z3.s
-; SVE-NEXT:    fcvt z2.d, p0/m, z2.s
-; SVE-NEXT:    and z3.d, z3.d, #0x8000000000000000
-; SVE-NEXT:    and z2.d, z2.d, #0x8000000000000000
-; SVE-NEXT:    orr z0.d, z0.d, z3.d
-; SVE-NEXT:    orr z1.d, z1.d, z2.d
+; SVE-NEXT:    ldp q2, q3, [x0]
+; SVE-NEXT:    and z2.d, z2.d, #0x7fffffffffffffff
+; SVE-NEXT:    and z3.d, z3.d, #0x7fffffffffffffff
+; SVE-NEXT:    ld1w { z0.d }, p0/z, [x1]
+; SVE-NEXT:    ld1w { z1.d }, p0/z, [x1, x8, lsl #2]
+; SVE-NEXT:    fcvt z0.d, p0/m, z0.s
+; SVE-NEXT:    fcvt z1.d, p0/m, z1.s
+; SVE-NEXT:    and z0.d, z0.d, #0x8000000000000000
+; SVE-NEXT:    and z1.d, z1.d, #0x8000000000000000
+; SVE-NEXT:    orr z0.d, z2.d, z0.d
+; SVE-NEXT:    orr z1.d, z3.d, z1.d
 ; SVE-NEXT:    stp q0, q1, [x0]
 ; SVE-NEXT:    ret
 ;
@@ -371,15 +371,15 @@ define void @test_copysign_v4f64_v4f32(ptr %ap, ptr %bp) {
 ; SVE2:       // %bb.0:
 ; SVE2-NEXT:    ptrue p0.d, vl2
 ; SVE2-NEXT:    mov x8, #2 // =0x2
-; SVE2-NEXT:    mov z4.d, #0x7fffffffffffffff
-; SVE2-NEXT:    ldp q0, q1, [x0]
-; SVE2-NEXT:    ld1w { z2.d }, p0/z, [x1, x8, lsl #2]
-; SVE2-NEXT:    ld1w { z3.d }, p0/z, [x1]
-; SVE2-NEXT:    fcvt z3.d, p0/m, z3.s
-; SVE2-NEXT:    fcvt z2.d, p0/m, z2.s
-; SVE2-NEXT:    bsl z0.d, z0.d, z3.d, z4.d
-; SVE2-NEXT:    bsl z1.d, z1.d, z2.d, z4.d
-; SVE2-NEXT:    stp q0, q1, [x0]
+; SVE2-NEXT:    mov z2.d, #0x7fffffffffffffff
+; SVE2-NEXT:    ldp q3, q4, [x0]
+; SVE2-NEXT:    ld1w { z0.d }, p0/z, [x1]
+; SVE2-NEXT:    ld1w { z1.d }, p0/z, [x1, x8, lsl #2]
+; SVE2-NEXT:    fcvt z0.d, p0/m, z0.s
+; SVE2-NEXT:    fcvt z1.d, p0/m, z1.s
+; SVE2-NEXT:    bsl z3.d, z3.d, z0.d, z2.d
+; SVE2-NEXT:    bsl z4.d, z4.d, z1.d, z2.d
+; SVE2-NEXT:    stp q3, q4, [x0]
 ; SVE2-NEXT:    ret
   %a = load <4 x double>, ptr %ap
   %b = load <4 x float>, ptr %bp
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll
index 4018ab02fd58648..37cc79011a9866b 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll
@@ -134,16 +134,16 @@ define void @fcvt_v16f16_v16f32(ptr %a, ptr %b) {
 ; CHECK-NEXT:    mov x8, #8 // =0x8
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, x8, lsl #1]
 ; CHECK-NEXT:    mov x8, #12 // =0xc
+; CHECK-NEXT:    ld1h { z2.s }, p0/z, [x0]
 ; CHECK-NEXT:    ld1h { z1.s }, p0/z, [x0, x8, lsl #1]
 ; CHECK-NEXT:    mov x8, #4 // =0x4
-; CHECK-NEXT:    ld1h { z2.s }, p0/z, [x0, x8, lsl #1]
-; CHECK-NEXT:    ld1h { z3.s }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z3.s }, p0/z, [x0, x8, lsl #1]
 ; CHECK-NEXT:    fcvt z0.s, p0/m, z0.h
+; CHECK-NEXT:    fcvt z2.s, p0/m, z2.h
 ; CHECK-NEXT:    fcvt z1.s, p0/m, z1.h
 ; CHECK-NEXT:    fcvt z3.s, p0/m, z3.h
-; CHECK-NEXT:    fcvt z2.s, p0/m, z2.h
 ; CHECK-NEXT:    stp q0, q1, [x1, #32]
-; CHECK-NEXT:    stp q3, q2, [x1]
+; CHECK-NEXT:    stp q2, q3, [x1]
 ; CHECK-NEXT:    ret
   %op1 = load <16 x half>, ptr %a
   %res = fpext <16 x half> %op1 to <16 x float>
@@ -206,16 +206,16 @@ define void @fcvt_v8f16_v8f64(ptr %a, ptr %b) {
 ; CHECK-NEXT:    mov x8, #4 // =0x4
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, x8, lsl #1]
 ; CHECK-NEXT:    mov x8, #6 // =0x6
+; CHECK-NEXT:    ld1h { z2.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1h { z1.d }, p0/z, [x0, x8, lsl #1]
 ; CHECK-NEXT:    mov x8, #2 // =0x2
-; CHECK-NEXT:    ld1h { z2.d }, p0/z, [x0, x8, lsl #1]
-; CHECK-NEXT:    ld1h { z3.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z3.d }, p0/z, [x0, x8, lsl #1]
 ; CHECK-NEXT:    fcvt z0.d, p0/m, z0.h
+; CHECK-NEXT:    fcvt z2.d, p0/m, z2.h
 ; CHECK-NEXT:    fcvt z1.d, p0/m, z1.h
 ; CHECK-NEXT:    fcvt z3.d, p0/m, z3.h
-; CHECK-NEXT:    fcvt z2.d, p0/m, z2.h
 ; CHECK-NEXT:    stp q0, q1, [x1, #32]
-; CHECK-NEXT:    stp q3, q2, [x1]
+; CHECK-NEXT:    stp q2, q3, [x1]
 ; CHECK-NEXT:    ret
   %op1 = load <8 x half>, ptr %a
   %res = fpext <8 x half> %op1 to <8 x double>
@@ -230,6 +230,7 @@ define void @fcvt_v16f16_v16f64(ptr %a, ptr %b) {
 ; CHECK-NEXT:    mov x8, #12 // =0xc
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, x8, lsl #1]
 ; CHECK-NEXT:    mov x8, #14 // =0xe
+; CHECK-NEXT:    ld1h { z6.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1h { z1.d }, p0/z, [x0, x8, lsl #1]
 ; CHECK-NEXT:    mov x8, #8 // =0x8
 ; CHECK-NEXT:    ld1h { z2.d }, p0/z, [x0, x8, lsl #1]
@@ -238,23 +239,22 @@ define void @fcvt_v16f16_v16f64(ptr %a, ptr %b) {
 ; CHECK-NEXT:    mov x8, #4 // =0x4
 ; CHECK-NEXT:    fcvt z0.d, p0/m, z0.h
 ; CHECK-NEXT:    ld1h { z4.d }, p0/z, [x0, x8, lsl #1]
-; CHECK-NEXT:    mov x8, #6 // =0x6
 ; CHECK-NEXT:    fcvt z1.d, p0/m, z1.h
+; CHECK-NEXT:    mov x8, #6 // =0x6
+; CHECK-NEXT:    fcvt z2.d, p0/m, z2.h
 ; CHECK-NEXT:    ld1h { z5.d }, p0/z, [x0, x8, lsl #1]
 ; CHECK-NEXT:    mov x8, #2 // =0x2
-; CHECK-NEXT:    fcvt z2.d, p0/m, z2.h
-; CHECK-NEXT:    ld1h { z6.d }, p0/z, [x0, x8, lsl #1]
-; CHECK-NEXT:    ld1h { z7.d }, p0/z, [x0]
 ; CHECK-NEXT:    fcvt z3.d, p0/m, z3.h
+; CHECK-NEXT:    ld1h { z7.d }, p0/z, [x0, x8, lsl #1]
 ; CHECK-NEXT:    fcvt z4.d, p0/m, z4.h
 ; CHECK-NEXT:    stp q0, q1, [x1, #96]
 ; CHECK-NEXT:    movprfx z0, z5
 ; CHECK-NEXT:    fcvt z0.d, p0/m, z5.h
-; CHECK-NEXT:    movprfx z1, z7
-; CHECK-NEXT:    fcvt z1.d, p0/m, z7.h
+; CHECK-NEXT:    movprfx z1, z6
+; CHECK-NEXT:    fcvt z1.d, p0/m, z6.h
 ; CHECK-NEXT:    stp q2, q3, [x1, #64]
-; CHECK-NEXT:    movprfx z2, z6
-; CHECK-NEXT:    fcvt z2.d, p0/m, z6.h
+; CHECK-NEXT:    movprfx z2, z7
+; CHECK-NEXT:    fcvt z2.d, p0/m, z7.h
 ; CHECK-NEXT:    stp q1, q2, [x1]
 ; CHECK-NEXT:    stp q4, q0, [x1, #32]
 ; CHECK-NEXT:    ret
@@ -319,16 +319,16 @@ define void @fcvt_v8f32_v8f64(ptr %a, ptr %b) {
 ; CHECK-NEXT:    mov x8, #4 // =0x4
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, x8, lsl #2]
 ; CHECK-NEXT:    mov x8, #6 // =0x6
+; CHECK-NEXT:    ld1w { z2.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1w { z1.d }, p0/z, [x0, x8, lsl #2]
 ; CHECK-NEXT:    mov x8, #2 // =0x2
-; CHECK-NEXT:    ld1w { z2.d }, p0/z, [x0, x8, lsl #2]
-; CHECK-NEXT:    ld1w { z3.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1w { z3.d }, p0/z, [x0, x8, lsl #2]
 ; CHECK-NEXT:    fcvt z0.d, p0/m, z0.s
+; CHECK-NEXT:    fcvt z2.d, p0/m, z2.s
 ; CHECK-NEXT:    fcvt z1.d, p0/m, z1.s
 ; CHECK-NEXT:    fcvt z3.d, p0/m, z3.s
-; CHECK-NEXT:    fcvt z2.d, p0/m, z2.s
 ; CHECK-NEXT:    stp q0, q1, [x1, #32]
-; CHECK-NEXT:    stp q3, q2, [x1]
+; CHECK-NEXT:    stp q2, q3, [x1]
 ; CHECK-NEXT:    ret
   %op1 = load <8 x float>, ptr %a
   %res = fpext <8 x float> %op1 to <8 x double>
diff --git a/llvm/test/CodeGen/AArch64/sve-vscale-attr.ll b/llvm/test/CodeGen/AArch64/sve-vscale-attr.ll
index 1323ffcf12db0f1..5f746861d868e5b 100644
--- a/llvm/test/CodeGen/AArch64/sve-vscale-attr.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vscale-attr.ll
@@ -65,11 +65,11 @@ define void @func_vscale2_2(ptr %a, ptr %b) #2 {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov x8, #8 // =0x8
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; CHECK-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; CHECK-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; CHECK-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; CHECK-NEXT:    add z0.s, z0.s, z2.s
-; CHECK-NEXT:    add z1.s, z1.s, z3.s
+; CHECK-NEXT:    add z0.s, z0.s, z1.s
+; CHECK-NEXT:    add z1.s, z2.s, z3.s
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; CHECK-NEXT:    st1w { z1.s }, p0, [x0]
 ; CHECK-NEXT:    ret
@@ -88,11 +88,11 @@ define void @func_vscale2_4(ptr %a, ptr %b) #3 {
 ; CHECK-NEXT:    ptrue p0.s, vl8
 ; CHECK-NEXT:    mov x8, #8 // =0x8
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0]
-; CHECK-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; CHECK-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; CHECK-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; CHECK-NEXT:    add z0.s, z0.s, z2.s
-; CHECK-NEXT:    add z1.s, z1.s, z3.s
+; CHECK-NEXT:    add z0.s, z0.s, z1.s
+; CHECK-NEXT:    add z1.s, z2.s, z3.s
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; CHECK-NEXT:    st1w { z1.s }, p0, [x0]
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve2-fixed-length-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve2-fixed-length-fcopysign.ll
index e77f85bd46dd384..5735c36558bdbb4 100644
--- a/llvm/test/CodeGen/AArch64/sve2-fixed-length-fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-fixed-length-fcopysign.ll
@@ -69,13 +69,13 @@ define void @test_copysign_v32f16_v32f16(ptr %ap, ptr %bp) #0 {
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    mov z0.h, #32767 // =0x7fff
 ; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z4.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    bsl z1.d, z1.d, z3.d, z0.d
-; VBITS_GE_256-NEXT:    bsl z2.d, z2.d, z4.d, z0.d
+; VBITS_GE_256-NEXT:    bsl z1.d, z1.d, z2.d, z0.d
+; VBITS_GE_256-NEXT:    bsl z3.d, z3.d, z4.d, z0.d
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT:    st1h { z2.h }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1h { z3.h }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: test_copysign_v32f16_v32f16:
@@ -186,13 +186,13 @@ define void @test_copysign_v16f32_v16f32(ptr %ap, ptr %bp) #0 {
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    mov z0.s, #0x7fffffff
 ; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z4.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    bsl z1.d, z1.d, z3.d, z0.d
-; VBITS_GE_256-NEXT:    bsl z2.d, z2.d, z4.d, z0.d
+; VBITS_GE_256-NEXT:    bsl z1.d, z1.d, z2.d, z0.d
+; VBITS_GE_256-NEXT:    bsl z3.d, z3.d, z4.d, z0.d
 ; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT:    st1w { z2.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1w { z3.s }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: test_copysign_v16f32_v16f32:
@@ -288,13 +288,13 @@ define void @test_copysign_v8f64_v8f64(ptr %ap, ptr %bp) #0 {
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    mov z0.d, #0x7fffffffffffffff
 ; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z4.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    bsl z1.d, z1.d, z3.d, z0.d
-; VBITS_GE_256-NEXT:    bsl z2.d, z2.d, z4.d, z0.d
+; VBITS_GE_256-NEXT:    bsl z1.d, z1.d, z2.d, z0.d
+; VBITS_GE_256-NEXT:    bsl z3.d, z3.d, z4.d, z0.d
 ; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT:    st1d { z2.d }, p0, [x0]
+; VBITS_GE_256-NEXT:    st1d { z3.d }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: test_copysign_v8f64_v8f64:
@@ -374,13 +374,13 @@ define void @test_copysign_v4f32_v4f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
 ; CHECK-LABEL: test_copysign_v4f32_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d, vl4
-; CHECK-NEXT:    ldr q0, [x0]
-; CHECK-NEXT:    mvni v2.4s, #128, lsl #24
+; CHECK-NEXT:    mvni v1.4s, #128, lsl #24
+; CHECK-NEXT:    ldr q2, [x0]
 ; CHECK-NEXT:    ptrue p1.d
-; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
-; CHECK-NEXT:    fcvt z1.s, p1/m, z1.d
-; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
-; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x1]
+; CHECK-NEXT:    fcvt z0.s, p1/m, z0.d
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    bit v0.16b, v2.16b, v1.16b
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %ap
@@ -419,24 +419,24 @@ define void @test_copysign_v4f64_v4f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
 ; CHECK_NO_EXTEND_ROUND-LABEL: test_copysign_v4f64_v4f32:
 ; CHECK_NO_EXTEND_ROUND:       // %bb.0:
 ; CHECK_NO_EXTEND_ROUND-NEXT:    ptrue p0.d, vl4
-; CHECK_NO_EXTEND_ROUND-NEXT:    mov z2.d, #0x7fffffffffffffff
-; CHECK_NO_EXTEND_ROUND-NEXT:    ld1d { z0.d }, p0/z, [x0]
-; CHECK_NO_EXTEND_ROUND-NEXT:    ld1w { z1.d }, p0/z, [x1]
-; CHECK_NO_EXTEND_ROUND-NEXT:    fcvt z1.d, p0/m, z1.s
-; CHECK_NO_EXTEND_ROUND-NEXT:    bsl z0.d, z0.d, z1.d, z2.d
-; CHECK_NO_EXTEND_ROUND-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK_NO_EXTEND_ROUND-NEXT:    mov z1.d, #0x7fffffffffffffff
+; CHECK_NO_EXTEND_ROUND-NEXT:    ld1w { z0.d }, p0/z, [x1]
+; CHECK_NO_EXTEND_ROUND-NEXT:    ld1d { z2.d }, p0/z, [x0]
+; CHECK_NO_EXTEND_ROUND-NEXT:    fcvt z0.d, p0/m, z0.s
+; CHECK_NO_EXTEND_ROUND-NEXT:    bsl z2.d, z2.d, z0.d, z1.d
+; CHECK_NO_EXTEND_ROUND-NEXT:    st1d { z2.d }, p0, [x0]
 ; CHECK_NO_EXTEND_ROUND-NEXT:    ret
 ;
 ; CHECK_EXTEND_ROUND-LABEL: test_copysign_v4f64_v4f32:
 ; CHECK_EXTEND_ROUND:       // %bb.0:
 ; CHECK_EXTEND_ROUND-NEXT:    ptrue p0.d, vl4
-; CHECK_EXTEND_ROUND-NEXT:    mov z2.d, #0x7fffffffffffffff
-; CHECK_EXTEND_ROUND-NEXT:    ld1d { z0.d }, p0/z, [x0]
-; CHECK_EXTEND_ROUND-NEXT:    ldr q1, [x1]
-; CHECK_EXTEND_ROUND-NEXT:    uunpklo z1.d, z1.s
-; CHECK_EXTEND_ROUND-NEXT:    fcvt z1.d, p0/m, z1.s
-; CHECK_EXTEND_ROUND-NEXT:    bsl z0.d, z0.d, z1.d, z2.d
-; CHECK_EXTEND_ROUND-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK_EXTEND_ROUND-NEXT:    ldr q0, [x1]
+; CHECK_EXTEND_ROUND-NEXT:    mov z1.d, #0x7fffffffffffffff
+; CHECK_EXTEND_ROUND-NEXT:    uunpklo z0.d, z0.s
+; CHECK_EXTEND_ROUND-NEXT:    fcvt z0.d, p0/m, z0.s
+; CHECK_EXTEND_ROUND-NEXT:    ld1d { z2.d }, p0/z, [x0]
+; CHECK_EXTEND_ROUND-NEXT:    bsl z2.d, z2.d, z0.d, z1.d
+; CHECK_EXTEND_ROUND-NEXT:    st1d { z2.d }, p0, [x0]
 ; CHECK_EXTEND_ROUND-NEXT:    ret
   %a = load <4 x double>, ptr %ap
   %b = load <4 x float>, ptr %bp
@@ -470,14 +470,14 @@ define void @test_copysign_v4f16_v4f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
 ; CHECK-LABEL: test_copysign_v4f16_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d, vl4
-; CHECK-NEXT:    ldr d0, [x0]
-; CHECK-NEXT:    mvni v2.4h, #128, lsl #8
+; CHECK-NEXT:    mvni v1.4h, #128, lsl #8
+; CHECK-NEXT:    ldr d2, [x0]
 ; CHECK-NEXT:    ptrue p1.d
-; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
-; CHECK-NEXT:    fcvt z1.h, p1/m, z1.d
-; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
-; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
-; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x1]
+; CHECK-NEXT:    fcvt z0.h, p1/m, z0.d
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    bit v0.8b, v2.8b, v1.8b
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
   %a = load <4 x half>, ptr %ap
@@ -497,13 +497,13 @@ define void @test_copysign_v8f16_v8f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
 ; CHECK-LABEL: test_copysign_v8f16_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s, vl8
-; CHECK-NEXT:    ldr q0, [x0]
-; CHECK-NEXT:    mvni v2.8h, #128, lsl #8
+; CHECK-NEXT:    mvni v1.8h, #128, lsl #8
+; CHECK-NEXT:    ldr q2, [x0]
 ; CHECK-NEXT:    ptrue p1.s
-; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
-; CHECK-NEXT:    fcvt z1.h, p1/m, z1.s
-; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
-; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x1]
+; CHECK-NEXT:    fcvt z0.h, p1/m, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    bit v0.16b, v2.16b, v1.16b
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, ptr %ap


