[llvm] AArch64: Use PTRUE_B as much as possible to increase CSE (PR #137042)

Matthias Braun via llvm-commits llvm-commits at lists.llvm.org
Wed Apr 23 12:09:26 PDT 2025


https://github.com/MatzeB created https://github.com/llvm/llvm-project/pull/137042

None
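
Background (not part of the author's description, which is empty): the patch below replaces the element-sized all-true predicate patterns (PTRUE_H, PTRUE_S, PTRUE_D) with PTRUE_B throughout instruction selection. With the ALL pattern (31), ptrue p<n>.b sets every bit of the predicate register, and SVE predicated instructions consult only the lowest bit of each element-sized predicate segment, so the byte form is an equally valid all-true governing predicate for .h, .s and .d operations. Canonicalizing on a single form lets MachineCSE merge the ptrues that mixed-element-width code would otherwise materialize separately. On any individual test the update is a pure spelling change, e.g. from llvm/test/CodeGen/AArch64/GlobalISel/sve-load-store.ll below:

  before:  ptrue p0.h
           ld1h { z0.h }, p0/z, [x0]
  after:   ptrue p0.b
           ld1h { z0.h }, p0/z, [x0]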

From 7fe7774beb37196400c0bac11876bcee440dd2b8 Mon Sep 17 00:00:00 2001
From: Matthias Braun <matze at braunis.de>
Date: Wed, 23 Apr 2025 11:45:22 -0700
Subject: [PATCH] AArch64: Use PTRUE_B as much as possible to increase CSE

---
 .../lib/Target/AArch64/AArch64SVEInstrInfo.td |  235 ++-
 llvm/lib/Target/AArch64/SVEInstrFormats.td    |    6 +-
 .../test/CodeGen/AArch64/DAGCombine_vscale.ll |    2 +-
 .../AArch64/GlobalISel/sve-formal-argument.ll |    6 +-
 .../AArch64/GlobalISel/sve-load-store.ll      |    6 +-
 .../AArch64/aarch64-sve-fill-spill-pair.ll    |    2 +-
 ...interleaving-add-mull-scalable-contract.ll |    6 +-
 ...x-deinterleaving-add-mull-scalable-fast.ll |    8 +-
 ...complex-deinterleaving-f16-add-scalable.ll |    6 +-
 ...complex-deinterleaving-f16-mul-scalable.ll |    6 +-
 ...complex-deinterleaving-f32-add-scalable.ll |    6 +-
 ...complex-deinterleaving-f32-mul-scalable.ll |    6 +-
 ...complex-deinterleaving-f64-add-scalable.ll |    6 +-
 ...complex-deinterleaving-f64-mul-scalable.ll |    6 +-
 ...rleaving-reductions-predicated-scalable.ll |   97 +-
 ...plex-deinterleaving-reductions-scalable.ll |   16 +-
 .../complex-deinterleaving-splat-scalable.ll  |    4 +-
 .../AArch64/consecutive-stores-of-faddv.ll    |    8 +-
 .../CodeGen/AArch64/extract-vector-elt-sve.ll |   16 +-
 .../CodeGen/AArch64/fp-veclib-expansion.ll    |    8 +-
 .../insert-subvector-res-legalization.ll      |   31 +-
 .../AArch64/intrinsic-cttz-elts-sve.ll        |   40 +-
 .../CodeGen/AArch64/llvm-ir-to-intrinsic.ll   |   18 +-
 llvm/test/CodeGen/AArch64/load-insert-zero.ll |   24 +-
 .../AArch64/named-vector-shuffles-sve.ll      |    4 +-
 llvm/test/CodeGen/AArch64/nontemporal-load.ll |    2 +-
 llvm/test/CodeGen/AArch64/rcpc3-sve.ll        |    4 +-
 llvm/test/CodeGen/AArch64/reassocmls.ll       |    7 +-
 llvm/test/CodeGen/AArch64/reduce-or-opt.ll    |   14 +-
 llvm/test/CodeGen/AArch64/sinksplat.ll        |    2 +-
 .../CodeGen/AArch64/sme-framelower-use-bp.ll  |    2 +-
 llvm/test/CodeGen/AArch64/sve-aba.ll          |    6 +-
 llvm/test/CodeGen/AArch64/sve-abd.ll          |   12 +-
 llvm/test/CodeGen/AArch64/sve-aliasing.ll     |   63 +-
 .../CodeGen/AArch64/sve-bf16-int-converts.ll  |   14 +-
 .../CodeGen/AArch64/sve-bit-counting-pred.ll  |   30 +-
 llvm/test/CodeGen/AArch64/sve-bitcast.ll      |  288 ++-
 .../CodeGen/AArch64/sve-dead-masked-store.ll  |    2 +-
 llvm/test/CodeGen/AArch64/sve-extload-icmp.ll |    9 +-
 .../AArch64/sve-extract-fixed-vector.ll       |    2 +-
 .../AArch64/sve-extract-scalable-vector.ll    |    2 +-
 llvm/test/CodeGen/AArch64/sve-fcmp.ll         |   27 +-
 .../sve-fixed-length-insert-vector-elt.ll     |   30 +-
 .../AArch64/sve-fixed-length-offsets.ll       |   34 +-
 .../sve-fold-loadext-and-splat-vector.ll      |    5 +-
 llvm/test/CodeGen/AArch64/sve-fold-vscale.ll  |    4 +-
 .../CodeGen/AArch64/sve-forward-st-to-ld.ll   |    2 +-
 .../CodeGen/AArch64/sve-fp-int-min-max.ll     |    5 +-
 .../CodeGen/AArch64/sve-fp-reduce-fadda.ll    |    4 +-
 llvm/test/CodeGen/AArch64/sve-fpext-load.ll   |    8 +-
 llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll   |  187 +-
 llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll   |  139 +-
 .../test/CodeGen/AArch64/sve-fptrunc-store.ll |    8 +-
 .../AArch64/sve-gather-scatter-addr-opts.ll   |    8 +-
 .../AArch64/sve-gather-scatter-dag-combine.ll |   14 +-
 llvm/test/CodeGen/AArch64/sve-gep.ll          |    3 +-
 llvm/test/CodeGen/AArch64/sve-hadd.ll         |   70 +-
 .../AArch64/sve-implicit-zero-filling.ll      |    4 +-
 .../CodeGen/AArch64/sve-insert-element.ll     |   87 +-
 .../test/CodeGen/AArch64/sve-insert-vector.ll |   10 +-
 llvm/test/CodeGen/AArch64/sve-int-arith.ll    |    3 +-
 .../CodeGen/AArch64/sve-int-pred-reduce.ll    |   35 +-
 .../sve-intrinsics-int-compares-with-imm.ll   |    2 +-
 .../AArch64/sve-intrinsics-ldst-ext.ll        |   32 +-
 .../CodeGen/AArch64/sve-intrinsics-loads.ll   |   32 +-
 ...sve-intrinsics-reinterpret-no-streaming.ll |    2 +-
 .../AArch64/sve-intrinsics-reinterpret.ll     |   10 +-
 llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll  |   20 +-
 .../sve-ld1-addressing-mode-reg-imm.ll        |    9 +-
 .../sve-ld1-addressing-mode-reg-reg.ll        |   48 +-
 llvm/test/CodeGen/AArch64/sve-ld1r.ll         |  112 +-
 llvm/test/CodeGen/AArch64/sve-llrint.ll       | 1695 +++++++++--------
 .../AArch64/sve-load-store-strict-align.ll    |    6 +-
 llvm/test/CodeGen/AArch64/sve-lrint.ll        | 1695 +++++++++--------
 .../sve-lsr-scaled-index-addressing-mode.ll   |    8 +-
 llvm/test/CodeGen/AArch64/sve-lsrchain.ll     |    4 +-
 .../AArch64/sve-masked-gather-legalize.ll     |    9 +-
 .../CodeGen/AArch64/sve-masked-ldst-sext.ll   |    2 +-
 .../AArch64/sve-partial-reduce-dot-product.ll |   12 +-
 .../AArch64/sve-partial-reduce-wide-add.ll    |    2 +-
 llvm/test/CodeGen/AArch64/sve-pr92779.ll      |    2 +-
 .../CodeGen/AArch64/sve-pred-selectop2.ll     |  180 +-
 .../CodeGen/AArch64/sve-pred-selectop3.ll     |   99 +-
 .../AArch64/sve-ptest-removal-cmple.ll        |   20 +-
 llvm/test/CodeGen/AArch64/sve-reassocadd.ll   |   14 +-
 llvm/test/CodeGen/AArch64/sve-scmp.ll         |    9 +-
 llvm/test/CodeGen/AArch64/sve-setcc.ll        |    2 +-
 llvm/test/CodeGen/AArch64/sve-sext-zext.ll    |   20 +-
 llvm/test/CodeGen/AArch64/sve-smulo-sdnode.ll |   42 +-
 .../AArch64/sve-splat-one-and-ptrue.ll        |    8 +-
 llvm/test/CodeGen/AArch64/sve-splat-sext.ll   |    6 +-
 .../CodeGen/AArch64/sve-split-insert-elt.ll   |    4 +-
 .../CodeGen/AArch64/sve-split-int-reduce.ll   |    3 +-
 llvm/test/CodeGen/AArch64/sve-split-load.ll   |    2 +-
 llvm/test/CodeGen/AArch64/sve-split-store.ll  |    2 +-
 .../sve-st1-addressing-mode-reg-imm.ll        |    8 +-
 .../sve-st1-addressing-mode-reg-reg.ll        |   36 +-
 ...ing-mode-fixed-length-insert-vector-elt.ll |   36 +-
 ...mode-fixed-length-masked-gather-scatter.ll |    2 +-
 llvm/test/CodeGen/AArch64/sve-trunc.ll        |    2 +-
 .../test/CodeGen/AArch64/sve-unary-movprfx.ll |   24 +-
 .../CodeGen/AArch64/sve-vector-compress.ll    |   11 +-
 llvm/test/CodeGen/AArch64/sve-vector-splat.ll |    4 +-
 llvm/test/CodeGen/AArch64/sve2-histcnt.ll     |    3 +-
 .../AArch64/sve2-intrinsics-combine-rshrnb.ll |   27 +-
 llvm/test/CodeGen/AArch64/veclib-llvm.modf.ll |    4 +-
 .../CodeGen/AArch64/veclib-llvm.sincos.ll     |    4 +-
 .../CodeGen/AArch64/veclib-llvm.sincospi.ll   |    4 +-
 llvm/test/CodeGen/AArch64/zext-to-tbl.ll      |    4 +-
 .../AArch64/vscale-fixups.ll                  |   15 +-
 110 files changed, 3114 insertions(+), 2902 deletions(-)
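
A hedged sketch of the intended CSE win; this function is illustrative and is not one of the tests touched by the patch. Two unpredicated stores of different element widths previously selected two distinct predicates (ptrue p.h and ptrue p.s); with the new patterns both select PTRUE_B, so a single ptrue can govern both stores:

  ; Hypothetical input, not part of the patch.
  define void @store_mixed(<vscale x 8 x i16> %a, <vscale x 4 x i32> %b,
                           ptr %p, ptr %q) {
    store <vscale x 8 x i16> %a, ptr %p
    store <vscale x 4 x i32> %b, ptr %q
    ret void
  }

  ; Expected codegen after this change (sketch): one shared predicate.
  ;   ptrue p0.b
  ;   st1h { z0.h }, p0, [x0]
  ;   st1w { z1.s }, p0, [x1]
  ;   ret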

diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index a2f326c994c2f..f0cb865524644 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -973,7 +973,7 @@ let Predicates = [HasSVE_or_SME] in {
   // Define pattern for `nxv1i1 splat_vector(1)`.
   // We do this here instead of in ISelLowering such that PatFrag's can still
   // recognize a splat.
-  def : Pat<(nxv1i1 immAllOnesV), (PUNPKLO_PP (PTRUE_D 31))>;
+  def : Pat<(nxv1i1 immAllOnesV), (PUNPKLO_PP (PTRUE_B 31))>;
 
   defm MOVPRFX_ZPzZ : sve_int_movprfx_pred_zero<0b000, "movprfx">;
   defm MOVPRFX_ZPmZ : sve_int_movprfx_pred_merge<0b001, "movprfx">;
@@ -1096,9 +1096,9 @@ let Predicates = [HasSVE_or_SME] in {
           (load_instr_scalar (ptrue 31), GPR64sp:$Xn, $idx)>;
   }
   defm : sve_ld1rq_duplane_pat<nxv16i8, v16i8, AArch64duplane128, LD1RQ_B_IMM, PTRUE_B, LD1RQ_B, am_sve_regreg_lsl0>;
-  defm : sve_ld1rq_duplane_pat<nxv8i16, v8i16, AArch64duplane128, LD1RQ_H_IMM, PTRUE_H, LD1RQ_H, am_sve_regreg_lsl1>;
-  defm : sve_ld1rq_duplane_pat<nxv4i32, v4i32, AArch64duplane128, LD1RQ_W_IMM, PTRUE_S, LD1RQ_W, am_sve_regreg_lsl2>;
-  defm : sve_ld1rq_duplane_pat<nxv2i64, v2i64, AArch64duplane128, LD1RQ_D_IMM, PTRUE_D, LD1RQ_D, am_sve_regreg_lsl3>;
+  defm : sve_ld1rq_duplane_pat<nxv8i16, v8i16, AArch64duplane128, LD1RQ_H_IMM, PTRUE_B, LD1RQ_H, am_sve_regreg_lsl1>;
+  defm : sve_ld1rq_duplane_pat<nxv4i32, v4i32, AArch64duplane128, LD1RQ_W_IMM, PTRUE_B, LD1RQ_W, am_sve_regreg_lsl2>;
+  defm : sve_ld1rq_duplane_pat<nxv2i64, v2i64, AArch64duplane128, LD1RQ_D_IMM, PTRUE_B, LD1RQ_D, am_sve_regreg_lsl3>;
 
   // continuous load with reg+reg addressing.
   defm LD1B    : sve_mem_cld_ss<0b0000, "ld1b",  Z_b, ZPR8,  GPR64NoXZRshifted8>;
@@ -2053,13 +2053,13 @@ let Predicates = [HasSVE_or_SME] in {
             (LASTB_VPZ_B (PTRUE_B 31), ZPR:$Z1), bsub))>;
   def : Pat<(nxv8i16 (vector_splice nxv8i16:$Z1, nxv8i16:$Z2, (i64 -1))),
             (INSR_ZV_H ZPR:$Z2, (INSERT_SUBREG (IMPLICIT_DEF),
-            (LASTB_VPZ_H (PTRUE_H 31), ZPR:$Z1), hsub))>;
+            (LASTB_VPZ_H (PTRUE_B 31), ZPR:$Z1), hsub))>;
   def : Pat<(nxv4i32 (vector_splice nxv4i32:$Z1, nxv4i32:$Z2, (i64 -1))),
             (INSR_ZV_S ZPR:$Z2, (INSERT_SUBREG (IMPLICIT_DEF),
-            (LASTB_VPZ_S (PTRUE_S 31), ZPR:$Z1), ssub))>;
+            (LASTB_VPZ_S (PTRUE_B 31), ZPR:$Z1), ssub))>;
   def : Pat<(nxv2i64 (vector_splice nxv2i64:$Z1, nxv2i64:$Z2, (i64 -1))),
             (INSR_ZV_D ZPR:$Z2, (INSERT_SUBREG (IMPLICIT_DEF),
-            (LASTB_VPZ_D (PTRUE_D 31), ZPR:$Z1), dsub))>;
+            (LASTB_VPZ_D (PTRUE_B 31), ZPR:$Z1), dsub))>;
 
   // Splice with lane bigger or equal to 0
   foreach VT = [nxv16i8] in
@@ -2152,16 +2152,16 @@ let Predicates = [HasSVE_or_SME] in {
                         (BRKB_PPzP (PTRUE_B 31), PPR:$Op1))>;
 
   def : Pat<(i64 (AArch64CttzElts nxv8i1:$Op1)),
-            (CNTP_XPP_H (BRKB_PPzP (PTRUE_H 31), PPR:$Op1),
-                        (BRKB_PPzP (PTRUE_H 31), PPR:$Op1))>;
+            (CNTP_XPP_H (BRKB_PPzP (PTRUE_B 31), PPR:$Op1),
+                        (BRKB_PPzP (PTRUE_B 31), PPR:$Op1))>;
 
   def : Pat<(i64 (AArch64CttzElts nxv4i1:$Op1)),
-            (CNTP_XPP_S (BRKB_PPzP (PTRUE_S 31), PPR:$Op1),
-                        (BRKB_PPzP (PTRUE_S 31), PPR:$Op1))>;
+            (CNTP_XPP_S (BRKB_PPzP (PTRUE_B 31), PPR:$Op1),
+                        (BRKB_PPzP (PTRUE_B 31), PPR:$Op1))>;
 
   def : Pat<(i64 (AArch64CttzElts nxv2i1:$Op1)),
-            (CNTP_XPP_D (BRKB_PPzP (PTRUE_D 31), PPR:$Op1),
-                        (BRKB_PPzP (PTRUE_D 31), PPR:$Op1))>;
+            (CNTP_XPP_D (BRKB_PPzP (PTRUE_B 31), PPR:$Op1),
+                        (BRKB_PPzP (PTRUE_B 31), PPR:$Op1))>;
 }
 
   defm INCB_XPiI : sve_int_pred_pattern_a<0b000, "incb", add, int_aarch64_sve_cntb>;
@@ -2256,26 +2256,26 @@ let Predicates = [HasSVE_or_SME] in {
                             sub_32)>;
 
   def : Pat<(i64 (add GPR64:$Op1, (i64 (AArch64CttzElts nxv8i1:$Op2)))),
-            (INCP_XP_H (BRKB_PPzP (PTRUE_H 31), PPR:$Op2), GPR64:$Op1)>;
+            (INCP_XP_H (BRKB_PPzP (PTRUE_B 31), PPR:$Op2), GPR64:$Op1)>;
 
   def : Pat<(i32 (add GPR32:$Op1, (trunc (i64 (AArch64CttzElts nxv8i1:$Op2))))),
-            (EXTRACT_SUBREG (INCP_XP_H (BRKB_PPzP (PTRUE_H 31), PPR:$Op2),
+            (EXTRACT_SUBREG (INCP_XP_H (BRKB_PPzP (PTRUE_B 31), PPR:$Op2),
                                        (INSERT_SUBREG (IMPLICIT_DEF), GPR32:$Op1, sub_32)),
                             sub_32)>;
 
   def : Pat<(i64 (add GPR64:$Op1, (i64 (AArch64CttzElts nxv4i1:$Op2)))),
-            (INCP_XP_S (BRKB_PPzP (PTRUE_S 31), PPR:$Op2), GPR64:$Op1)>;
+            (INCP_XP_S (BRKB_PPzP (PTRUE_B 31), PPR:$Op2), GPR64:$Op1)>;
 
   def : Pat<(i32 (add GPR32:$Op1, (trunc (i64 (AArch64CttzElts nxv4i1:$Op2))))),
-            (EXTRACT_SUBREG (INCP_XP_S (BRKB_PPzP (PTRUE_S 31), PPR:$Op2),
+            (EXTRACT_SUBREG (INCP_XP_S (BRKB_PPzP (PTRUE_B 31), PPR:$Op2),
                                        (INSERT_SUBREG (IMPLICIT_DEF), GPR32:$Op1, sub_32)),
                             sub_32)>;
 
   def : Pat<(i64 (add GPR64:$Op1, (i64 (AArch64CttzElts nxv2i1:$Op2)))),
-            (INCP_XP_D (BRKB_PPzP (PTRUE_D 31), PPR:$Op2), GPR64:$Op1)>;
+            (INCP_XP_D (BRKB_PPzP (PTRUE_B 31), PPR:$Op2), GPR64:$Op1)>;
 
   def : Pat<(i32 (add GPR32:$Op1, (trunc (i64 (AArch64CttzElts nxv2i1:$Op2))))),
-            (EXTRACT_SUBREG (INCP_XP_D (BRKB_PPzP (PTRUE_D 31), PPR:$Op2),
+            (EXTRACT_SUBREG (INCP_XP_D (BRKB_PPzP (PTRUE_B 31), PPR:$Op2),
                                        (INSERT_SUBREG (IMPLICIT_DEF), GPR32:$Op1, sub_32)),
                             sub_32)>;
 
@@ -2571,36 +2571,36 @@ let Predicates = [HasSVE_or_SME] in {
 
   // LDR1 of 8-bit data
   defm : LD1RPat<nxv16i8, extloadi8,  LD1RB_IMM,    PTRUE_B, i32, am_indexed8_6b, uimm6s1>;
-  defm : LD1RPat<nxv8i16, zextloadi8, LD1RB_H_IMM,  PTRUE_H, i32, am_indexed8_6b, uimm6s1>;
-  defm : LD1RPat<nxv4i32, zextloadi8, LD1RB_S_IMM,  PTRUE_S, i32, am_indexed8_6b, uimm6s1>;
-  defm : LD1RPat<nxv2i64, zextloadi8, LD1RB_D_IMM,  PTRUE_D, i64, am_indexed8_6b, uimm6s1>;
-  defm : LD1RPat<nxv8i16, sextloadi8, LD1RSB_H_IMM, PTRUE_H, i32, am_indexed8_6b, uimm6s1>;
-  defm : LD1RPat<nxv4i32, sextloadi8, LD1RSB_S_IMM, PTRUE_S, i32, am_indexed8_6b, uimm6s1>;
-  defm : LD1RPat<nxv2i64, sextloadi8, LD1RSB_D_IMM, PTRUE_D, i64, am_indexed8_6b, uimm6s1>;
+  defm : LD1RPat<nxv8i16, zextloadi8, LD1RB_H_IMM,  PTRUE_B, i32, am_indexed8_6b, uimm6s1>;
+  defm : LD1RPat<nxv4i32, zextloadi8, LD1RB_S_IMM,  PTRUE_B, i32, am_indexed8_6b, uimm6s1>;
+  defm : LD1RPat<nxv2i64, zextloadi8, LD1RB_D_IMM,  PTRUE_B, i64, am_indexed8_6b, uimm6s1>;
+  defm : LD1RPat<nxv8i16, sextloadi8, LD1RSB_H_IMM, PTRUE_B, i32, am_indexed8_6b, uimm6s1>;
+  defm : LD1RPat<nxv4i32, sextloadi8, LD1RSB_S_IMM, PTRUE_B, i32, am_indexed8_6b, uimm6s1>;
+  defm : LD1RPat<nxv2i64, sextloadi8, LD1RSB_D_IMM, PTRUE_B, i64, am_indexed8_6b, uimm6s1>;
 
   // LDR1 of 16-bit data
-  defm : LD1RPat<nxv8i16, extloadi16,  LD1RH_IMM,    PTRUE_H, i32, am_indexed16_6b, uimm6s2>;
-  defm : LD1RPat<nxv4i32, zextloadi16, LD1RH_S_IMM,  PTRUE_S, i32, am_indexed16_6b, uimm6s2>;
-  defm : LD1RPat<nxv2i64, zextloadi16, LD1RH_D_IMM,  PTRUE_D, i64, am_indexed16_6b, uimm6s2>;
-  defm : LD1RPat<nxv4i32, sextloadi16, LD1RSH_S_IMM, PTRUE_S, i32, am_indexed16_6b, uimm6s2>;
-  defm : LD1RPat<nxv2i64, sextloadi16, LD1RSH_D_IMM, PTRUE_D, i64, am_indexed16_6b, uimm6s2>;
+  defm : LD1RPat<nxv8i16, extloadi16,  LD1RH_IMM,    PTRUE_B, i32, am_indexed16_6b, uimm6s2>;
+  defm : LD1RPat<nxv4i32, zextloadi16, LD1RH_S_IMM,  PTRUE_B, i32, am_indexed16_6b, uimm6s2>;
+  defm : LD1RPat<nxv2i64, zextloadi16, LD1RH_D_IMM,  PTRUE_B, i64, am_indexed16_6b, uimm6s2>;
+  defm : LD1RPat<nxv4i32, sextloadi16, LD1RSH_S_IMM, PTRUE_B, i32, am_indexed16_6b, uimm6s2>;
+  defm : LD1RPat<nxv2i64, sextloadi16, LD1RSH_D_IMM, PTRUE_B, i64, am_indexed16_6b, uimm6s2>;
 
   // LDR1 of 32-bit data
-  defm : LD1RPat<nxv4i32, load,        LD1RW_IMM,   PTRUE_S, i32, am_indexed32_6b, uimm6s4>;
-  defm : LD1RPat<nxv2i64, zextloadi32, LD1RW_D_IMM, PTRUE_D, i64, am_indexed32_6b, uimm6s4>;
-  defm : LD1RPat<nxv2i64, sextloadi32, LD1RSW_IMM,  PTRUE_D, i64, am_indexed32_6b, uimm6s4>;
+  defm : LD1RPat<nxv4i32, load,        LD1RW_IMM,   PTRUE_B, i32, am_indexed32_6b, uimm6s4>;
+  defm : LD1RPat<nxv2i64, zextloadi32, LD1RW_D_IMM, PTRUE_B, i64, am_indexed32_6b, uimm6s4>;
+  defm : LD1RPat<nxv2i64, sextloadi32, LD1RSW_IMM,  PTRUE_B, i64, am_indexed32_6b, uimm6s4>;
 
   // LDR1 of 64-bit data
-  defm : LD1RPat<nxv2i64, load, LD1RD_IMM, PTRUE_D, i64, am_indexed64_6b, uimm6s8>;
+  defm : LD1RPat<nxv2i64, load, LD1RD_IMM, PTRUE_B, i64, am_indexed64_6b, uimm6s8>;
 
   let Predicates = [HasSVE_or_SME, UseSVEFPLD1R] in {
     // LD1R of FP data
-    defm : LD1RPat<nxv8f16, load, LD1RH_IMM,   PTRUE_H, f16, am_indexed16_6b, uimm6s2>;
-    defm : LD1RPat<nxv4f16, load, LD1RH_S_IMM, PTRUE_S, f16, am_indexed16_6b, uimm6s2>;
-    defm : LD1RPat<nxv2f16, load, LD1RH_D_IMM, PTRUE_D, f16, am_indexed16_6b, uimm6s2>;
-    defm : LD1RPat<nxv4f32, load, LD1RW_IMM,   PTRUE_S, f32, am_indexed32_6b, uimm6s4>;
-    defm : LD1RPat<nxv2f32, load, LD1RW_D_IMM, PTRUE_D, f32, am_indexed32_6b, uimm6s4>;
-    defm : LD1RPat<nxv2f64, load, LD1RD_IMM,   PTRUE_D, f64, am_indexed64_6b, uimm6s8>;
+    defm : LD1RPat<nxv8f16, load, LD1RH_IMM,   PTRUE_B, f16, am_indexed16_6b, uimm6s2>;
+    defm : LD1RPat<nxv4f16, load, LD1RH_S_IMM, PTRUE_B, f16, am_indexed16_6b, uimm6s2>;
+    defm : LD1RPat<nxv2f16, load, LD1RH_D_IMM, PTRUE_B, f16, am_indexed16_6b, uimm6s2>;
+    defm : LD1RPat<nxv4f32, load, LD1RW_IMM,   PTRUE_B, f32, am_indexed32_6b, uimm6s4>;
+    defm : LD1RPat<nxv2f32, load, LD1RW_D_IMM, PTRUE_B, f32, am_indexed32_6b, uimm6s4>;
+    defm : LD1RPat<nxv2f64, load, LD1RD_IMM,   PTRUE_B, f64, am_indexed64_6b, uimm6s8>;
   }
 
 // LD1R of 128-bit masked data
@@ -2620,12 +2620,12 @@ let Predicates = [HasSVE_or_SME] in {
   defm : ld1rq_pat<nxv4i32, AArch64ld1rq_z, LD1RQ_W, am_sve_regreg_lsl2>;
   defm : ld1rq_pat<nxv2i64, AArch64ld1rq_z, LD1RQ_D, am_sve_regreg_lsl3>;
 
-  def : Pat<(sext_inreg nxv2i64:$Zs, nxv2i32), (SXTW_ZPmZ_D_UNDEF (IMPLICIT_DEF), (PTRUE_D 31), ZPR:$Zs)>;
-  def : Pat<(sext_inreg nxv2i64:$Zs, nxv2i16), (SXTH_ZPmZ_D_UNDEF (IMPLICIT_DEF), (PTRUE_D 31), ZPR:$Zs)>;
-  def : Pat<(sext_inreg nxv2i64:$Zs, nxv2i8),  (SXTB_ZPmZ_D_UNDEF (IMPLICIT_DEF), (PTRUE_D 31), ZPR:$Zs)>;
-  def : Pat<(sext_inreg nxv4i32:$Zs, nxv4i16), (SXTH_ZPmZ_S_UNDEF (IMPLICIT_DEF), (PTRUE_S 31), ZPR:$Zs)>;
-  def : Pat<(sext_inreg nxv4i32:$Zs, nxv4i8),  (SXTB_ZPmZ_S_UNDEF (IMPLICIT_DEF), (PTRUE_S 31), ZPR:$Zs)>;
-  def : Pat<(sext_inreg nxv8i16:$Zs, nxv8i8),  (SXTB_ZPmZ_H_UNDEF (IMPLICIT_DEF), (PTRUE_H 31), ZPR:$Zs)>;
+  def : Pat<(sext_inreg nxv2i64:$Zs, nxv2i32), (SXTW_ZPmZ_D_UNDEF (IMPLICIT_DEF), (PTRUE_B 31), ZPR:$Zs)>;
+  def : Pat<(sext_inreg nxv2i64:$Zs, nxv2i16), (SXTH_ZPmZ_D_UNDEF (IMPLICIT_DEF), (PTRUE_B 31), ZPR:$Zs)>;
+  def : Pat<(sext_inreg nxv2i64:$Zs, nxv2i8),  (SXTB_ZPmZ_D_UNDEF (IMPLICIT_DEF), (PTRUE_B 31), ZPR:$Zs)>;
+  def : Pat<(sext_inreg nxv4i32:$Zs, nxv4i16), (SXTH_ZPmZ_S_UNDEF (IMPLICIT_DEF), (PTRUE_B 31), ZPR:$Zs)>;
+  def : Pat<(sext_inreg nxv4i32:$Zs, nxv4i8),  (SXTB_ZPmZ_S_UNDEF (IMPLICIT_DEF), (PTRUE_B 31), ZPR:$Zs)>;
+  def : Pat<(sext_inreg nxv8i16:$Zs, nxv8i8),  (SXTB_ZPmZ_H_UNDEF (IMPLICIT_DEF), (PTRUE_B 31), ZPR:$Zs)>;
 
   // General case that we ideally never want to match.
   def : Pat<(vscale GPR64:$scale), (MADDXrrr (UBFMXri (RDVLI_XI 1), 4, 63), $scale, XZR)>;
@@ -2787,14 +2787,13 @@ let Predicates = [HasSVE_or_SME] in {
   def : Pat<(nxv16i1 (and PPR:$Ps1, PPR:$Ps2)),
             (AND_PPzPP (PTRUE_B 31), PPR:$Ps1, PPR:$Ps2)>;
   def : Pat<(nxv8i1 (and PPR:$Ps1, PPR:$Ps2)),
-            (AND_PPzPP (PTRUE_H 31), PPR:$Ps1, PPR:$Ps2)>;
+            (AND_PPzPP (PTRUE_B 31), PPR:$Ps1, PPR:$Ps2)>;
   def : Pat<(nxv4i1 (and PPR:$Ps1, PPR:$Ps2)),
-            (AND_PPzPP (PTRUE_S 31), PPR:$Ps1, PPR:$Ps2)>;
+            (AND_PPzPP (PTRUE_B 31), PPR:$Ps1, PPR:$Ps2)>;
   def : Pat<(nxv2i1 (and PPR:$Ps1, PPR:$Ps2)),
-            (AND_PPzPP (PTRUE_D 31), PPR:$Ps1, PPR:$Ps2)>;
-  // Emulate .Q operation using a PTRUE_D when the other lanes don't matter.
+            (AND_PPzPP (PTRUE_B 31), PPR:$Ps1, PPR:$Ps2)>;
   def : Pat<(nxv1i1 (and PPR:$Ps1, PPR:$Ps2)),
-            (AND_PPzPP (PTRUE_D 31), PPR:$Ps1, PPR:$Ps2)>;
+            (AND_PPzPP (PTRUE_B 31), PPR:$Ps1, PPR:$Ps2)>;
 
   // Add more complex addressing modes here as required
   multiclass pred_load<ValueType Ty, ValueType PredTy, SDPatternOperator Load,
@@ -2912,24 +2911,24 @@ let Predicates = [HasSVE_or_SME] in {
   }
 
   defm : unpred_store<         store, nxv16i8,    ST1B,   ST1B_IMM, PTRUE_B, am_sve_regreg_lsl0>;
-  defm : unpred_store< truncstorevi8, nxv8i16,  ST1B_H, ST1B_H_IMM, PTRUE_H, am_sve_regreg_lsl0>;
-  defm : unpred_store< truncstorevi8, nxv4i32,  ST1B_S, ST1B_S_IMM, PTRUE_S, am_sve_regreg_lsl0>;
-  defm : unpred_store< truncstorevi8, nxv2i64,  ST1B_D, ST1B_D_IMM, PTRUE_D, am_sve_regreg_lsl0>;
-  defm : unpred_store<         store, nxv8i16,    ST1H,   ST1H_IMM, PTRUE_H, am_sve_regreg_lsl1>;
-  defm : unpred_store<truncstorevi16, nxv4i32,  ST1H_S, ST1H_S_IMM, PTRUE_S, am_sve_regreg_lsl1>;
-  defm : unpred_store<truncstorevi16, nxv2i64,  ST1H_D, ST1H_D_IMM, PTRUE_D, am_sve_regreg_lsl1>;
-  defm : unpred_store<         store, nxv4i32,    ST1W,   ST1W_IMM, PTRUE_S, am_sve_regreg_lsl2>;
-  defm : unpred_store<truncstorevi32, nxv2i64,  ST1W_D, ST1W_D_IMM, PTRUE_D, am_sve_regreg_lsl2>;
-  defm : unpred_store<         store, nxv2i64,    ST1D,   ST1D_IMM, PTRUE_D, am_sve_regreg_lsl3>;
-  defm : unpred_store<         store, nxv8f16,    ST1H,   ST1H_IMM, PTRUE_H, am_sve_regreg_lsl1>;
-  defm : unpred_store<         store, nxv8bf16,   ST1H,   ST1H_IMM, PTRUE_H, am_sve_regreg_lsl1>;
-  defm : unpred_store<         store, nxv4f16,  ST1H_S, ST1H_S_IMM, PTRUE_S, am_sve_regreg_lsl1>;
-  defm : unpred_store<         store, nxv4bf16, ST1H_S, ST1H_S_IMM, PTRUE_S, am_sve_regreg_lsl1>;
-  defm : unpred_store<         store, nxv2f16,  ST1H_D, ST1H_D_IMM, PTRUE_D, am_sve_regreg_lsl1>;
-  defm : unpred_store<         store, nxv2bf16, ST1H_D, ST1H_D_IMM, PTRUE_D, am_sve_regreg_lsl1>;
-  defm : unpred_store<         store, nxv4f32,    ST1W,   ST1W_IMM, PTRUE_S, am_sve_regreg_lsl2>;
-  defm : unpred_store<         store, nxv2f32,  ST1W_D, ST1W_D_IMM, PTRUE_D, am_sve_regreg_lsl2>;
-  defm : unpred_store<         store, nxv2f64,    ST1D,   ST1D_IMM, PTRUE_D, am_sve_regreg_lsl3>;
+  defm : unpred_store< truncstorevi8, nxv8i16,  ST1B_H, ST1B_H_IMM, PTRUE_B, am_sve_regreg_lsl0>;
+  defm : unpred_store< truncstorevi8, nxv4i32,  ST1B_S, ST1B_S_IMM, PTRUE_B, am_sve_regreg_lsl0>;
+  defm : unpred_store< truncstorevi8, nxv2i64,  ST1B_D, ST1B_D_IMM, PTRUE_B, am_sve_regreg_lsl0>;
+  defm : unpred_store<         store, nxv8i16,    ST1H,   ST1H_IMM, PTRUE_B, am_sve_regreg_lsl1>;
+  defm : unpred_store<truncstorevi16, nxv4i32,  ST1H_S, ST1H_S_IMM, PTRUE_B, am_sve_regreg_lsl1>;
+  defm : unpred_store<truncstorevi16, nxv2i64,  ST1H_D, ST1H_D_IMM, PTRUE_B, am_sve_regreg_lsl1>;
+  defm : unpred_store<         store, nxv4i32,    ST1W,   ST1W_IMM, PTRUE_B, am_sve_regreg_lsl2>;
+  defm : unpred_store<truncstorevi32, nxv2i64,  ST1W_D, ST1W_D_IMM, PTRUE_B, am_sve_regreg_lsl2>;
+  defm : unpred_store<         store, nxv2i64,    ST1D,   ST1D_IMM, PTRUE_B, am_sve_regreg_lsl3>;
+  defm : unpred_store<         store, nxv8f16,    ST1H,   ST1H_IMM, PTRUE_B, am_sve_regreg_lsl1>;
+  defm : unpred_store<         store, nxv8bf16,   ST1H,   ST1H_IMM, PTRUE_B, am_sve_regreg_lsl1>;
+  defm : unpred_store<         store, nxv4f16,  ST1H_S, ST1H_S_IMM, PTRUE_B, am_sve_regreg_lsl1>;
+  defm : unpred_store<         store, nxv4bf16, ST1H_S, ST1H_S_IMM, PTRUE_B, am_sve_regreg_lsl1>;
+  defm : unpred_store<         store, nxv2f16,  ST1H_D, ST1H_D_IMM, PTRUE_B, am_sve_regreg_lsl1>;
+  defm : unpred_store<         store, nxv2bf16, ST1H_D, ST1H_D_IMM, PTRUE_B, am_sve_regreg_lsl1>;
+  defm : unpred_store<         store, nxv4f32,    ST1W,   ST1W_IMM, PTRUE_B, am_sve_regreg_lsl2>;
+  defm : unpred_store<         store, nxv2f32,  ST1W_D, ST1W_D_IMM, PTRUE_B, am_sve_regreg_lsl2>;
+  defm : unpred_store<         store, nxv2f64,    ST1D,   ST1D_IMM, PTRUE_B, am_sve_regreg_lsl3>;
 
   multiclass unpred_load<PatFrag Load, ValueType Ty, Instruction RegRegInst,
                          Instruction RegImmInst, Instruction PTrue,
@@ -2948,36 +2947,36 @@ let Predicates = [HasSVE_or_SME] in {
   }
 
   defm : unpred_load<        load, nxv16i8,    LD1B,    LD1B_IMM, PTRUE_B, am_sve_regreg_lsl0>;
-  defm : unpred_load< zextloadvi8, nxv8i16,  LD1B_H,  LD1B_H_IMM, PTRUE_H, am_sve_regreg_lsl0>;
-  defm : unpred_load< zextloadvi8, nxv4i32,  LD1B_S,  LD1B_S_IMM, PTRUE_S, am_sve_regreg_lsl0>;
-  defm : unpred_load< zextloadvi8, nxv2i64,  LD1B_D,  LD1B_D_IMM, PTRUE_D, am_sve_regreg_lsl0>;
-  defm : unpred_load<  extloadvi8, nxv8i16,  LD1B_H,  LD1B_H_IMM, PTRUE_H, am_sve_regreg_lsl0>;
-  defm : unpred_load<  extloadvi8, nxv4i32,  LD1B_S,  LD1B_S_IMM, PTRUE_S, am_sve_regreg_lsl0>;
-  defm : unpred_load<  extloadvi8, nxv2i64,  LD1B_D,  LD1B_D_IMM, PTRUE_D, am_sve_regreg_lsl0>;
-  defm : unpred_load< sextloadvi8, nxv8i16, LD1SB_H, LD1SB_H_IMM, PTRUE_H, am_sve_regreg_lsl0>;
-  defm : unpred_load< sextloadvi8, nxv4i32, LD1SB_S, LD1SB_S_IMM, PTRUE_S, am_sve_regreg_lsl0>;
-  defm : unpred_load< sextloadvi8, nxv2i64, LD1SB_D, LD1SB_D_IMM, PTRUE_D, am_sve_regreg_lsl0>;
-  defm : unpred_load<        load, nxv8i16,    LD1H,    LD1H_IMM, PTRUE_H, am_sve_regreg_lsl1>;
-  defm : unpred_load<zextloadvi16, nxv4i32,  LD1H_S,  LD1H_S_IMM, PTRUE_S, am_sve_regreg_lsl1>;
-  defm : unpred_load<zextloadvi16, nxv2i64,  LD1H_D,  LD1H_D_IMM, PTRUE_D, am_sve_regreg_lsl1>;
-  defm : unpred_load< extloadvi16, nxv4i32,  LD1H_S,  LD1H_S_IMM, PTRUE_S, am_sve_regreg_lsl1>;
-  defm : unpred_load< extloadvi16, nxv2i64,  LD1H_D,  LD1H_D_IMM, PTRUE_D, am_sve_regreg_lsl1>;
-  defm : unpred_load<sextloadvi16, nxv4i32, LD1SH_S, LD1SH_S_IMM, PTRUE_S, am_sve_regreg_lsl1>;
-  defm : unpred_load<sextloadvi16, nxv2i64, LD1SH_D, LD1SH_D_IMM, PTRUE_D, am_sve_regreg_lsl1>;
-  defm : unpred_load<        load, nxv4i32,    LD1W,    LD1W_IMM, PTRUE_S, am_sve_regreg_lsl2>;
-  defm : unpred_load<zextloadvi32, nxv2i64,  LD1W_D,  LD1W_D_IMM, PTRUE_D, am_sve_regreg_lsl2>;
-  defm : unpred_load< extloadvi32, nxv2i64,  LD1W_D,  LD1W_D_IMM, PTRUE_D, am_sve_regreg_lsl2>;
-  defm : unpred_load<sextloadvi32, nxv2i64, LD1SW_D, LD1SW_D_IMM, PTRUE_D, am_sve_regreg_lsl2>;
-  defm : unpred_load<        load, nxv2i64,    LD1D,    LD1D_IMM, PTRUE_D, am_sve_regreg_lsl3>;
-  defm : unpred_load<        load, nxv8f16,    LD1H,    LD1H_IMM, PTRUE_H, am_sve_regreg_lsl1>;
-  defm : unpred_load<        load, nxv8bf16,   LD1H,    LD1H_IMM, PTRUE_H, am_sve_regreg_lsl1>;
-  defm : unpred_load<        load, nxv4f16,  LD1H_S,  LD1H_S_IMM, PTRUE_S, am_sve_regreg_lsl1>;
-  defm : unpred_load<        load, nxv4bf16, LD1H_S,  LD1H_S_IMM, PTRUE_S, am_sve_regreg_lsl1>;
-  defm : unpred_load<        load, nxv2f16,  LD1H_D,  LD1H_D_IMM, PTRUE_D, am_sve_regreg_lsl1>;
-  defm : unpred_load<        load, nxv2bf16, LD1H_D,  LD1H_D_IMM, PTRUE_D, am_sve_regreg_lsl1>;
-  defm : unpred_load<        load, nxv4f32,    LD1W,    LD1W_IMM, PTRUE_S, am_sve_regreg_lsl2>;
-  defm : unpred_load<        load, nxv2f32,  LD1W_D,  LD1W_D_IMM, PTRUE_D, am_sve_regreg_lsl2>;
-  defm : unpred_load<        load, nxv2f64,    LD1D,    LD1D_IMM, PTRUE_D, am_sve_regreg_lsl3>;
+  defm : unpred_load< zextloadvi8, nxv8i16,  LD1B_H,  LD1B_H_IMM, PTRUE_B, am_sve_regreg_lsl0>;
+  defm : unpred_load< zextloadvi8, nxv4i32,  LD1B_S,  LD1B_S_IMM, PTRUE_B, am_sve_regreg_lsl0>;
+  defm : unpred_load< zextloadvi8, nxv2i64,  LD1B_D,  LD1B_D_IMM, PTRUE_B, am_sve_regreg_lsl0>;
+  defm : unpred_load<  extloadvi8, nxv8i16,  LD1B_H,  LD1B_H_IMM, PTRUE_B, am_sve_regreg_lsl0>;
+  defm : unpred_load<  extloadvi8, nxv4i32,  LD1B_S,  LD1B_S_IMM, PTRUE_B, am_sve_regreg_lsl0>;
+  defm : unpred_load<  extloadvi8, nxv2i64,  LD1B_D,  LD1B_D_IMM, PTRUE_B, am_sve_regreg_lsl0>;
+  defm : unpred_load< sextloadvi8, nxv8i16, LD1SB_H, LD1SB_H_IMM, PTRUE_B, am_sve_regreg_lsl0>;
+  defm : unpred_load< sextloadvi8, nxv4i32, LD1SB_S, LD1SB_S_IMM, PTRUE_B, am_sve_regreg_lsl0>;
+  defm : unpred_load< sextloadvi8, nxv2i64, LD1SB_D, LD1SB_D_IMM, PTRUE_B, am_sve_regreg_lsl0>;
+  defm : unpred_load<        load, nxv8i16,    LD1H,    LD1H_IMM, PTRUE_B, am_sve_regreg_lsl1>;
+  defm : unpred_load<zextloadvi16, nxv4i32,  LD1H_S,  LD1H_S_IMM, PTRUE_B, am_sve_regreg_lsl1>;
+  defm : unpred_load<zextloadvi16, nxv2i64,  LD1H_D,  LD1H_D_IMM, PTRUE_B, am_sve_regreg_lsl1>;
+  defm : unpred_load< extloadvi16, nxv4i32,  LD1H_S,  LD1H_S_IMM, PTRUE_B, am_sve_regreg_lsl1>;
+  defm : unpred_load< extloadvi16, nxv2i64,  LD1H_D,  LD1H_D_IMM, PTRUE_B, am_sve_regreg_lsl1>;
+  defm : unpred_load<sextloadvi16, nxv4i32, LD1SH_S, LD1SH_S_IMM, PTRUE_B, am_sve_regreg_lsl1>;
+  defm : unpred_load<sextloadvi16, nxv2i64, LD1SH_D, LD1SH_D_IMM, PTRUE_B, am_sve_regreg_lsl1>;
+  defm : unpred_load<        load, nxv4i32,    LD1W,    LD1W_IMM, PTRUE_B, am_sve_regreg_lsl2>;
+  defm : unpred_load<zextloadvi32, nxv2i64,  LD1W_D,  LD1W_D_IMM, PTRUE_B, am_sve_regreg_lsl2>;
+  defm : unpred_load< extloadvi32, nxv2i64,  LD1W_D,  LD1W_D_IMM, PTRUE_B, am_sve_regreg_lsl2>;
+  defm : unpred_load<sextloadvi32, nxv2i64, LD1SW_D, LD1SW_D_IMM, PTRUE_B, am_sve_regreg_lsl2>;
+  defm : unpred_load<        load, nxv2i64,    LD1D,    LD1D_IMM, PTRUE_B, am_sve_regreg_lsl3>;
+  defm : unpred_load<        load, nxv8f16,    LD1H,    LD1H_IMM, PTRUE_B, am_sve_regreg_lsl1>;
+  defm : unpred_load<        load, nxv8bf16,   LD1H,    LD1H_IMM, PTRUE_B, am_sve_regreg_lsl1>;
+  defm : unpred_load<        load, nxv4f16,  LD1H_S,  LD1H_S_IMM, PTRUE_B, am_sve_regreg_lsl1>;
+  defm : unpred_load<        load, nxv4bf16, LD1H_S,  LD1H_S_IMM, PTRUE_B, am_sve_regreg_lsl1>;
+  defm : unpred_load<        load, nxv2f16,  LD1H_D,  LD1H_D_IMM, PTRUE_B, am_sve_regreg_lsl1>;
+  defm : unpred_load<        load, nxv2bf16, LD1H_D,  LD1H_D_IMM, PTRUE_B, am_sve_regreg_lsl1>;
+  defm : unpred_load<        load, nxv4f32,    LD1W,    LD1W_IMM, PTRUE_B, am_sve_regreg_lsl2>;
+  defm : unpred_load<        load, nxv2f32,  LD1W_D,  LD1W_D_IMM, PTRUE_B, am_sve_regreg_lsl2>;
+  defm : unpred_load<        load, nxv2f64,    LD1D,    LD1D_IMM, PTRUE_B, am_sve_regreg_lsl3>;
 
   // Allow using the reg+reg form of ld1b/st1b for memory accesses with the
   // same width as nxv16i8.  This saves an add in cases where we would
@@ -3236,20 +3235,20 @@ let Predicates = [HasSVE_or_SME] in {
   def : Pat<(nxv16i8 (vector_insert nxv16i8:$vec, (i32 GPR32:$src), 0)),
             (CPY_ZPmR_B ZPR:$vec, (PTRUE_B 1), GPR32:$src)>;
   def : Pat<(nxv8i16 (vector_insert nxv8i16:$vec, (i32 GPR32:$src), 0)),
-            (CPY_ZPmR_H ZPR:$vec, (PTRUE_H 1), GPR32:$src)>;
+            (CPY_ZPmR_H ZPR:$vec, (PTRUE_B 1), GPR32:$src)>;
   def : Pat<(nxv4i32 (vector_insert nxv4i32:$vec, (i32 GPR32:$src), 0)),
-            (CPY_ZPmR_S ZPR:$vec, (PTRUE_S 1), GPR32:$src)>;
+            (CPY_ZPmR_S ZPR:$vec, (PTRUE_B 1), GPR32:$src)>;
   def : Pat<(nxv2i64 (vector_insert nxv2i64:$vec, (i64 GPR64:$src), 0)),
-            (CPY_ZPmR_D ZPR:$vec, (PTRUE_D 1), GPR64:$src)>;
+            (CPY_ZPmR_D ZPR:$vec, (PTRUE_B 1), GPR64:$src)>;
 
   def : Pat<(nxv8f16 (vector_insert nxv8f16:$vec, (f16 FPR16:$src), 0)),
-            (SEL_ZPZZ_H (PTRUE_H 1), (INSERT_SUBREG (IMPLICIT_DEF), FPR16:$src, hsub), ZPR:$vec)>;
+            (SEL_ZPZZ_H (PTRUE_B 1), (INSERT_SUBREG (IMPLICIT_DEF), FPR16:$src, hsub), ZPR:$vec)>;
   def : Pat<(nxv8bf16 (vector_insert nxv8bf16:$vec, (bf16 FPR16:$src), 0)),
-            (SEL_ZPZZ_H (PTRUE_H 1), (INSERT_SUBREG (IMPLICIT_DEF), FPR16:$src, hsub), ZPR:$vec)>;
+            (SEL_ZPZZ_H (PTRUE_B 1), (INSERT_SUBREG (IMPLICIT_DEF), FPR16:$src, hsub), ZPR:$vec)>;
   def : Pat<(nxv4f32 (vector_insert nxv4f32:$vec, (f32 FPR32:$src), 0)),
-            (SEL_ZPZZ_S (PTRUE_S 1), (INSERT_SUBREG (IMPLICIT_DEF), FPR32:$src, ssub), ZPR:$vec)>;
+            (SEL_ZPZZ_S (PTRUE_B 1), (INSERT_SUBREG (IMPLICIT_DEF), FPR32:$src, ssub), ZPR:$vec)>;
   def : Pat<(nxv2f64 (vector_insert nxv2f64:$vec, (f64 FPR64:$src), 0)),
-            (SEL_ZPZZ_D (PTRUE_D 1), (INSERT_SUBREG (IMPLICIT_DEF), FPR64:$src, dsub), ZPR:$vec)>;
+            (SEL_ZPZZ_D (PTRUE_B 1), (INSERT_SUBREG (IMPLICIT_DEF), FPR64:$src, dsub), ZPR:$vec)>;
 
   // Insert scalar into vector with scalar index
   def : Pat<(nxv16i8 (vector_insert nxv16i8:$vec, GPR32:$src, GPR64:$index)),
@@ -3260,19 +3259,19 @@ let Predicates = [HasSVE_or_SME] in {
                         GPR32:$src)>;
   def : Pat<(nxv8i16 (vector_insert nxv8i16:$vec, GPR32:$src, GPR64:$index)),
             (CPY_ZPmR_H ZPR:$vec,
-                        (CMPEQ_PPzZZ_H (PTRUE_H 31),
+                        (CMPEQ_PPzZZ_H (PTRUE_B 31),
                                        (INDEX_II_H 0, 1),
                                        (DUP_ZR_H (i32 (EXTRACT_SUBREG GPR64:$index, sub_32)))),
                         GPR32:$src)>;
   def : Pat<(nxv4i32 (vector_insert nxv4i32:$vec, GPR32:$src, GPR64:$index)),
             (CPY_ZPmR_S ZPR:$vec,
-                        (CMPEQ_PPzZZ_S (PTRUE_S 31),
+                        (CMPEQ_PPzZZ_S (PTRUE_B 31),
                                        (INDEX_II_S 0, 1),
                                        (DUP_ZR_S (i32 (EXTRACT_SUBREG GPR64:$index, sub_32)))),
                         GPR32:$src)>;
   def : Pat<(nxv2i64 (vector_insert nxv2i64:$vec, GPR64:$src, GPR64:$index)),
             (CPY_ZPmR_D ZPR:$vec,
-                        (CMPEQ_PPzZZ_D (PTRUE_D 31),
+                        (CMPEQ_PPzZZ_D (PTRUE_B 31),
                                        (INDEX_II_D 0, 1),
                                        (DUP_ZR_D GPR64:$index)),
                         GPR64:$src)>;
@@ -3280,55 +3279,55 @@ let Predicates = [HasSVE_or_SME] in {
   // Insert FP scalar into vector with scalar index
   def : Pat<(nxv2f16 (vector_insert nxv2f16:$vec, (f16 FPR16:$src), GPR64:$index)),
             (CPY_ZPmV_H ZPR:$vec,
-                        (CMPEQ_PPzZZ_D (PTRUE_D 31),
+                        (CMPEQ_PPzZZ_D (PTRUE_B 31),
                                        (INDEX_II_D 0, 1),
                                        (DUP_ZR_D GPR64:$index)),
                         $src)>;
   def : Pat<(nxv4f16 (vector_insert nxv4f16:$vec, (f16 FPR16:$src), GPR64:$index)),
             (CPY_ZPmV_H ZPR:$vec,
-                        (CMPEQ_PPzZZ_S (PTRUE_S 31),
+                        (CMPEQ_PPzZZ_S (PTRUE_B 31),
                                        (INDEX_II_S 0, 1),
                                        (DUP_ZR_S (i32 (EXTRACT_SUBREG GPR64:$index, sub_32)))),
                         $src)>;
   def : Pat<(nxv8f16 (vector_insert nxv8f16:$vec, (f16 FPR16:$src), GPR64:$index)),
             (CPY_ZPmV_H ZPR:$vec,
-                        (CMPEQ_PPzZZ_H (PTRUE_H 31),
+                        (CMPEQ_PPzZZ_H (PTRUE_B 31),
                                        (INDEX_II_H 0, 1),
                                        (DUP_ZR_H (i32 (EXTRACT_SUBREG GPR64:$index, sub_32)))),
                         $src)>;
   def : Pat<(nxv2bf16 (vector_insert nxv2bf16:$vec, (bf16 FPR16:$src), GPR64:$index)),
             (CPY_ZPmV_H ZPR:$vec,
-                        (CMPEQ_PPzZZ_D (PTRUE_D 31),
+                        (CMPEQ_PPzZZ_D (PTRUE_B 31),
                                        (INDEX_II_D 0, 1),
                                        (DUP_ZR_D GPR64:$index)),
                         $src)>;
   def : Pat<(nxv4bf16 (vector_insert nxv4bf16:$vec, (bf16 FPR16:$src), GPR64:$index)),
             (CPY_ZPmV_H ZPR:$vec,
-                        (CMPEQ_PPzZZ_S (PTRUE_S 31),
+                        (CMPEQ_PPzZZ_S (PTRUE_B 31),
                                        (INDEX_II_S 0, 1),
                                        (DUP_ZR_S (i32 (EXTRACT_SUBREG GPR64:$index, sub_32)))),
                         $src)>;
   def : Pat<(nxv8bf16 (vector_insert nxv8bf16:$vec, (bf16 FPR16:$src), GPR64:$index)),
             (CPY_ZPmV_H ZPR:$vec,
-                        (CMPEQ_PPzZZ_H (PTRUE_H 31),
+                        (CMPEQ_PPzZZ_H (PTRUE_B 31),
                                        (INDEX_II_H 0, 1),
                                        (DUP_ZR_H (i32 (EXTRACT_SUBREG GPR64:$index, sub_32)))),
                         $src)>;
   def : Pat<(nxv2f32 (vector_insert nxv2f32:$vec, (f32 FPR32:$src), GPR64:$index)),
             (CPY_ZPmV_S ZPR:$vec,
-                        (CMPEQ_PPzZZ_D (PTRUE_D 31),
+                        (CMPEQ_PPzZZ_D (PTRUE_B 31),
                                        (INDEX_II_D 0, 1),
                                        (DUP_ZR_D GPR64:$index)),
                         $src) >;
   def : Pat<(nxv4f32 (vector_insert nxv4f32:$vec, (f32 FPR32:$src), GPR64:$index)),
             (CPY_ZPmV_S ZPR:$vec,
-                        (CMPEQ_PPzZZ_S (PTRUE_S 31),
+                        (CMPEQ_PPzZZ_S (PTRUE_B 31),
                                        (INDEX_II_S 0, 1),
                                        (DUP_ZR_S (i32 (EXTRACT_SUBREG GPR64:$index, sub_32)))),
                         $src)>;
   def : Pat<(nxv2f64 (vector_insert nxv2f64:$vec, (f64 FPR64:$src), GPR64:$index)),
             (CPY_ZPmV_D ZPR:$vec,
-                        (CMPEQ_PPzZZ_D (PTRUE_D 31),
+                        (CMPEQ_PPzZZ_D (PTRUE_B 31),
                                        (INDEX_II_D 0, 1),
                                        (DUP_ZR_D $index)),
                         $src)>;
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index c56713783289e..f5ec128548542 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -470,9 +470,9 @@ let Predicates = [HasSVE_or_SME] in {
   defm PTRUES : sve_int_ptrue<0b001, "ptrues", null_frag>;
 
   def : Pat<(nxv16i1 immAllOnesV), (PTRUE_B 31)>;
-  def : Pat<(nxv8i1 immAllOnesV), (PTRUE_H 31)>;
-  def : Pat<(nxv4i1 immAllOnesV), (PTRUE_S 31)>;
-  def : Pat<(nxv2i1 immAllOnesV), (PTRUE_D 31)>;
+  def : Pat<(nxv8i1 immAllOnesV), (PTRUE_B 31)>;
+  def : Pat<(nxv4i1 immAllOnesV), (PTRUE_B 31)>;
+  def : Pat<(nxv2i1 immAllOnesV), (PTRUE_B 31)>;
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/AArch64/DAGCombine_vscale.ll b/llvm/test/CodeGen/AArch64/DAGCombine_vscale.ll
index 487b726253cc7..64334d13635b8 100644
--- a/llvm/test/CodeGen/AArch64/DAGCombine_vscale.ll
+++ b/llvm/test/CodeGen/AArch64/DAGCombine_vscale.ll
@@ -7,7 +7,7 @@
 define <vscale x 4 x i32> @sext_inreg(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: sext_inreg:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxth z0.s, p0/m, z0.s
 ; CHECK-NEXT:    ret
   %sext = shl <vscale x 4 x i32> %a, splat(i32 16)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/sve-formal-argument.ll b/llvm/test/CodeGen/AArch64/GlobalISel/sve-formal-argument.ll
index aa9a671087f41..a4cf8bf06c96b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/sve-formal-argument.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/sve-formal-argument.ll
@@ -15,7 +15,7 @@ define void @formal_argument_nxv16i8(<vscale x 16 x i8> %0, ptr %p) {
 define void @formal_argument_nxv8i16(<vscale x 8 x i16> %0, ptr %p) {
 ; CHECK-LABEL: formal_argument_nxv8i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret
   store <vscale x 8 x i16> %0, ptr %p, align 16
@@ -25,7 +25,7 @@ define void @formal_argument_nxv8i16(<vscale x 8 x i16> %0, ptr %p) {
 define void @formal_argument_nxv4i32(<vscale x 4 x i32> %0, ptr %p) {
 ; CHECK-LABEL: formal_argument_nxv4i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
   store <vscale x 4 x i32> %0, ptr %p, align 16
@@ -35,7 +35,7 @@ define void @formal_argument_nxv4i32(<vscale x 4 x i32> %0, ptr %p) {
 define void @formal_argument_nxv2i64(<vscale x 2 x i64> %0, ptr %p) {
 ; CHECK-LABEL: formal_argument_nxv2i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
   store <vscale x 2 x i64> %0, ptr %p, align 16
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/sve-load-store.ll b/llvm/test/CodeGen/AArch64/GlobalISel/sve-load-store.ll
index 95a5bfa4b038f..137f9c32aa48e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/sve-load-store.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/sve-load-store.ll
@@ -16,7 +16,7 @@ define void @scalable_v16i8(ptr %l0, ptr %l1) {
 define void @scalable_v8i16(ptr %l0, ptr %l1) {
 ; CHECK-LABEL: scalable_v8i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x1]
 ; CHECK-NEXT:    ret
@@ -28,7 +28,7 @@ define void @scalable_v8i16(ptr %l0, ptr %l1) {
 define void @scalable_v4i32(ptr %l0, ptr %l1) {
 ; CHECK-LABEL: scalable_v4i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x1]
 ; CHECK-NEXT:    ret
@@ -40,7 +40,7 @@ define void @scalable_v4i32(ptr %l0, ptr %l1) {
 define void @scalable_v2i64(ptr %l0, ptr %l1) {
 ; CHECK-LABEL: scalable_v2i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x1]
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll b/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll
index 503ead4eba2db..2817f2a3ae5c1 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll
@@ -245,7 +245,7 @@ define void @nxv2f64_32b_aligned(ptr %ldptr, ptr %stptr) {
 ;
 ; CHECK-BE-LABEL: nxv2f64_32b_aligned:
 ; CHECK-BE:       // %bb.0:
-; CHECK-BE-NEXT:    ptrue p0.d
+; CHECK-BE-NEXT:    ptrue p0.b
 ; CHECK-BE-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-BE-NEXT:    ld1d { z1.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-BE-NEXT:    st1d { z0.d }, p0, [x1]
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-contract.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-contract.ll
index 533e831de0df8..ab428f1e55f79 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-contract.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-contract.ll
@@ -54,7 +54,7 @@ define <vscale x 4 x double> @mul_add_mull(<vscale x 4 x double> %a, <vscale x 4
 ; CHECK-NEXT:    movi v25.2d, #0000000000000000
 ; CHECK-NEXT:    movi v26.2d, #0000000000000000
 ; CHECK-NEXT:    movi v27.2d, #0000000000000000
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fcmla z24.d, p0/m, z2.d, z0.d, #0
 ; CHECK-NEXT:    fcmla z25.d, p0/m, z3.d, z1.d, #0
 ; CHECK-NEXT:    fcmla z27.d, p0/m, z6.d, z4.d, #0
@@ -105,7 +105,7 @@ define <vscale x 4 x double> @mul_sub_mull(<vscale x 4 x double> %a, <vscale x 4
 ; CHECK-NEXT:    movi v25.2d, #0000000000000000
 ; CHECK-NEXT:    movi v26.2d, #0000000000000000
 ; CHECK-NEXT:    movi v27.2d, #0000000000000000
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fcmla z24.d, p0/m, z2.d, z0.d, #0
 ; CHECK-NEXT:    fcmla z25.d, p0/m, z3.d, z1.d, #0
 ; CHECK-NEXT:    fcmla z27.d, p0/m, z6.d, z4.d, #0
@@ -156,7 +156,7 @@ define <vscale x 4 x double> @mul_conj_mull(<vscale x 4 x double> %a, <vscale x
 ; CHECK-NEXT:    movi v25.2d, #0000000000000000
 ; CHECK-NEXT:    movi v26.2d, #0000000000000000
 ; CHECK-NEXT:    movi v27.2d, #0000000000000000
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fcmla z24.d, p0/m, z2.d, z0.d, #0
 ; CHECK-NEXT:    fcmla z25.d, p0/m, z3.d, z1.d, #0
 ; CHECK-NEXT:    fcmla z27.d, p0/m, z4.d, z6.d, #0
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-fast.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-fast.ll
index 1eed9722f57be..951b37dd85810 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-fast.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-fast.ll
@@ -7,7 +7,7 @@ target triple = "aarch64"
 define <vscale x 4 x double> @mull_add(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %c) {
 ; CHECK-LABEL: mull_add:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fcmla z4.d, p0/m, z0.d, z2.d, #0
 ; CHECK-NEXT:    fcmla z5.d, p0/m, z1.d, z3.d, #0
 ; CHECK-NEXT:    fcmla z4.d, p0/m, z0.d, z2.d, #90
@@ -43,7 +43,7 @@ define <vscale x 4 x double> @mul_add_mull(<vscale x 4 x double> %a, <vscale x 4
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi v24.2d, #0000000000000000
 ; CHECK-NEXT:    movi v25.2d, #0000000000000000
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fcmla z25.d, p0/m, z6.d, z4.d, #0
 ; CHECK-NEXT:    fcmla z24.d, p0/m, z7.d, z5.d, #0
 ; CHECK-NEXT:    fcmla z25.d, p0/m, z0.d, z2.d, #0
@@ -92,7 +92,7 @@ define <vscale x 4 x double> @mul_sub_mull(<vscale x 4 x double> %a, <vscale x 4
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi v24.2d, #0000000000000000
 ; CHECK-NEXT:    movi v25.2d, #0000000000000000
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fcmla z25.d, p0/m, z6.d, z4.d, #270
 ; CHECK-NEXT:    fcmla z24.d, p0/m, z7.d, z5.d, #270
 ; CHECK-NEXT:    fcmla z25.d, p0/m, z0.d, z2.d, #0
@@ -141,7 +141,7 @@ define <vscale x 4 x double> @mul_conj_mull(<vscale x 4 x double> %a, <vscale x
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi v24.2d, #0000000000000000
 ; CHECK-NEXT:    movi v25.2d, #0000000000000000
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fcmla z25.d, p0/m, z0.d, z2.d, #0
 ; CHECK-NEXT:    fcmla z24.d, p0/m, z1.d, z3.d, #0
 ; CHECK-NEXT:    fcmla z25.d, p0/m, z0.d, z2.d, #90
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-add-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-add-scalable.ll
index c2fc959d8e101..e58c494b95308 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-add-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-add-scalable.ll
@@ -40,7 +40,7 @@ entry:
 define <vscale x 8 x half> @complex_add_v8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
 ; CHECK-LABEL: complex_add_v8f16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fcadd z1.h, p0/m, z1.h, z0.h, #90
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -61,7 +61,7 @@ entry:
 define <vscale x 16 x half> @complex_add_v16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b) {
 ; CHECK-LABEL: complex_add_v16f16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fcadd z2.h, p0/m, z2.h, z0.h, #90
 ; CHECK-NEXT:    fcadd z3.h, p0/m, z3.h, z1.h, #90
 ; CHECK-NEXT:    mov z0.d, z2.d
@@ -84,7 +84,7 @@ entry:
 define <vscale x 32 x half> @complex_add_v32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b) {
 ; CHECK-LABEL: complex_add_v32f16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fcadd z4.h, p0/m, z4.h, z0.h, #90
 ; CHECK-NEXT:    fcadd z5.h, p0/m, z5.h, z1.h, #90
 ; CHECK-NEXT:    fcadd z6.h, p0/m, z6.h, z2.h, #90
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-mul-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-mul-scalable.ll
index a7442cae84c2d..e53954b0df542 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-mul-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-mul-scalable.ll
@@ -47,7 +47,7 @@ define <vscale x 8 x half> @complex_mul_v8f16(<vscale x 8 x half> %a, <vscale x
 ; CHECK-LABEL: complex_mul_v8f16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi v2.2d, #0000000000000000
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fcmla z2.h, p0/m, z1.h, z0.h, #0
 ; CHECK-NEXT:    fcmla z2.h, p0/m, z1.h, z0.h, #90
 ; CHECK-NEXT:    mov z0.d, z2.d
@@ -74,7 +74,7 @@ define <vscale x 16 x half> @complex_mul_v16f16(<vscale x 16 x half> %a, <vscale
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi v4.2d, #0000000000000000
 ; CHECK-NEXT:    movi v5.2d, #0000000000000000
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fcmla z5.h, p0/m, z2.h, z0.h, #0
 ; CHECK-NEXT:    fcmla z4.h, p0/m, z3.h, z1.h, #0
 ; CHECK-NEXT:    fcmla z5.h, p0/m, z2.h, z0.h, #90
@@ -107,7 +107,7 @@ define <vscale x 32 x half> @complex_mul_v32f16(<vscale x 32 x half> %a, <vscale
 ; CHECK-NEXT:    movi v25.2d, #0000000000000000
 ; CHECK-NEXT:    movi v26.2d, #0000000000000000
 ; CHECK-NEXT:    movi v27.2d, #0000000000000000
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fcmla z24.h, p0/m, z4.h, z0.h, #0
 ; CHECK-NEXT:    fcmla z25.h, p0/m, z5.h, z1.h, #0
 ; CHECK-NEXT:    fcmla z27.h, p0/m, z6.h, z2.h, #0
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f32-add-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f32-add-scalable.ll
index 47ad9ea2451a1..25c7ca1a1d79f 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f32-add-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f32-add-scalable.ll
@@ -7,7 +7,7 @@ target triple = "aarch64"
 define <vscale x 4 x float> @complex_add_v4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
 ; CHECK-LABEL: complex_add_v4f32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fcadd z1.s, p0/m, z1.s, z0.s, #90
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -28,7 +28,7 @@ entry:
 define <vscale x 8 x float> @complex_add_v8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b) {
 ; CHECK-LABEL: complex_add_v8f32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fcadd z2.s, p0/m, z2.s, z0.s, #90
 ; CHECK-NEXT:    fcadd z3.s, p0/m, z3.s, z1.s, #90
 ; CHECK-NEXT:    mov z0.d, z2.d
@@ -50,7 +50,7 @@ entry:
 define <vscale x 16 x float> @complex_add_v16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b) {
 ; CHECK-LABEL: complex_add_v16f32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fcadd z4.s, p0/m, z4.s, z0.s, #90
 ; CHECK-NEXT:    fcadd z5.s, p0/m, z5.s, z1.s, #90
 ; CHECK-NEXT:    fcadd z6.s, p0/m, z6.s, z2.s, #90
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f32-mul-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f32-mul-scalable.ll
index 3cad74b7f5fc6..93b0819f2b51b 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f32-mul-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f32-mul-scalable.ll
@@ -8,7 +8,7 @@ define <vscale x 4 x float> @complex_mul_v4f32(<vscale x 4 x float> %a, <vscale
 ; CHECK-LABEL: complex_mul_v4f32:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi v2.2d, #0000000000000000
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fcmla z2.s, p0/m, z1.s, z0.s, #0
 ; CHECK-NEXT:    fcmla z2.s, p0/m, z1.s, z0.s, #90
 ; CHECK-NEXT:    mov z0.d, z2.d
@@ -36,7 +36,7 @@ define <vscale x 8 x float> @complex_mul_v8f32(<vscale x 8 x float> %a, <vscale
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi v4.2d, #0000000000000000
 ; CHECK-NEXT:    movi v5.2d, #0000000000000000
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fcmla z5.s, p0/m, z2.s, z0.s, #0
 ; CHECK-NEXT:    fcmla z4.s, p0/m, z3.s, z1.s, #0
 ; CHECK-NEXT:    fcmla z5.s, p0/m, z2.s, z0.s, #90
@@ -69,7 +69,7 @@ define <vscale x 16 x float> @complex_mul_v16f32(<vscale x 16 x float> %a, <vsca
 ; CHECK-NEXT:    movi v25.2d, #0000000000000000
 ; CHECK-NEXT:    movi v26.2d, #0000000000000000
 ; CHECK-NEXT:    movi v27.2d, #0000000000000000
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fcmla z24.s, p0/m, z4.s, z0.s, #0
 ; CHECK-NEXT:    fcmla z25.s, p0/m, z5.s, z1.s, #0
 ; CHECK-NEXT:    fcmla z27.s, p0/m, z6.s, z2.s, #0
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f64-add-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f64-add-scalable.ll
index c992d63ca2838..eb980f3c9ed5b 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f64-add-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f64-add-scalable.ll
@@ -7,7 +7,7 @@ target triple = "aarch64"
 define <vscale x 2 x double> @complex_add_v2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
 ; CHECK-LABEL: complex_add_v2f64:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fcadd z1.d, p0/m, z1.d, z0.d, #90
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -28,7 +28,7 @@ entry:
 define <vscale x 4 x double> @complex_add_v4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b) {
 ; CHECK-LABEL: complex_add_v4f64:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fcadd z2.d, p0/m, z2.d, z0.d, #90
 ; CHECK-NEXT:    fcadd z3.d, p0/m, z3.d, z1.d, #90
 ; CHECK-NEXT:    mov z0.d, z2.d
@@ -51,7 +51,7 @@ entry:
 define <vscale x 8 x double> @complex_add_v8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b) {
 ; CHECK-LABEL: complex_add_v8f64:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fcadd z4.d, p0/m, z4.d, z0.d, #90
 ; CHECK-NEXT:    fcadd z5.d, p0/m, z5.d, z1.d, #90
 ; CHECK-NEXT:    fcadd z6.d, p0/m, z6.d, z2.d, #90
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f64-mul-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f64-mul-scalable.ll
index e3d99fa457bbc..77a8acbcbde86 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f64-mul-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f64-mul-scalable.ll
@@ -8,7 +8,7 @@ define <vscale x 2 x double> @complex_mul_v2f64(<vscale x 2 x double> %a, <vscal
 ; CHECK-LABEL: complex_mul_v2f64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi v2.2d, #0000000000000000
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fcmla z2.d, p0/m, z1.d, z0.d, #0
 ; CHECK-NEXT:    fcmla z2.d, p0/m, z1.d, z0.d, #90
 ; CHECK-NEXT:    mov z0.d, z2.d
@@ -36,7 +36,7 @@ define <vscale x 4 x double> @complex_mul_v4f64(<vscale x 4 x double> %a, <vscal
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi v4.2d, #0000000000000000
 ; CHECK-NEXT:    movi v5.2d, #0000000000000000
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fcmla z5.d, p0/m, z2.d, z0.d, #0
 ; CHECK-NEXT:    fcmla z4.d, p0/m, z3.d, z1.d, #0
 ; CHECK-NEXT:    fcmla z5.d, p0/m, z2.d, z0.d, #90
@@ -69,7 +69,7 @@ define <vscale x 8 x double> @complex_mul_v8f64(<vscale x 8 x double> %a, <vscal
 ; CHECK-NEXT:    movi v25.2d, #0000000000000000
 ; CHECK-NEXT:    movi v26.2d, #0000000000000000
 ; CHECK-NEXT:    movi v27.2d, #0000000000000000
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fcmla z24.d, p0/m, z4.d, z0.d, #0
 ; CHECK-NEXT:    fcmla z25.d, p0/m, z5.d, z1.d, #0
 ; CHECK-NEXT:    fcmla z27.d, p0/m, z6.d, z2.d, #0
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll
index 880bd2904154c..7850d2da34951 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll
@@ -16,32 +16,33 @@ define %"class.std::complex" @complex_mul_v2f64(ptr %a, ptr %b) {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    mov w8, #100 // =0x64
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    whilelo p2.d, xzr, x8
 ; CHECK-NEXT:    cntd x9
-; CHECK-NEXT:    whilelo p1.d, xzr, x8
 ; CHECK-NEXT:    rdvl x10, #2
-; CHECK-NEXT:    mov x11, x9
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    mov x11, x9
 ; CHECK-NEXT:    zip2 z0.d, z1.d, z1.d
 ; CHECK-NEXT:    zip1 z1.d, z1.d, z1.d
 ; CHECK-NEXT:  .LBB0_1: // %vector.body
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    zip2 p2.d, p1.d, p1.d
+; CHECK-NEXT:    zip2 p3.d, p2.d, p2.d
 ; CHECK-NEXT:    mov z6.d, z1.d
 ; CHECK-NEXT:    mov z7.d, z0.d
-; CHECK-NEXT:    zip1 p1.d, p1.d, p1.d
-; CHECK-NEXT:    ld1d { z2.d }, p2/z, [x0, #1, mul vl]
-; CHECK-NEXT:    ld1d { z4.d }, p2/z, [x1, #1, mul vl]
-; CHECK-NEXT:    ld1d { z3.d }, p1/z, [x0]
-; CHECK-NEXT:    ld1d { z5.d }, p1/z, [x1]
+; CHECK-NEXT:    zip1 p2.d, p2.d, p2.d
+; CHECK-NEXT:    ld1d { z2.d }, p3/z, [x0, #1, mul vl]
+; CHECK-NEXT:    ld1d { z4.d }, p3/z, [x1, #1, mul vl]
+; CHECK-NEXT:    ld1d { z3.d }, p2/z, [x0]
+; CHECK-NEXT:    ld1d { z5.d }, p2/z, [x1]
 ; CHECK-NEXT:    add x1, x1, x10
 ; CHECK-NEXT:    add x0, x0, x10
-; CHECK-NEXT:    fcmla z7.d, p0/m, z4.d, z2.d, #0
-; CHECK-NEXT:    fcmla z6.d, p0/m, z5.d, z3.d, #0
-; CHECK-NEXT:    fcmla z7.d, p0/m, z4.d, z2.d, #90
-; CHECK-NEXT:    fcmla z6.d, p0/m, z5.d, z3.d, #90
-; CHECK-NEXT:    mov z0.d, p2/m, z7.d
-; CHECK-NEXT:    mov z1.d, p1/m, z6.d
-; CHECK-NEXT:    whilelo p1.d, x11, x8
+; CHECK-NEXT:    fcmla z7.d, p1/m, z4.d, z2.d, #0
+; CHECK-NEXT:    fcmla z6.d, p1/m, z5.d, z3.d, #0
+; CHECK-NEXT:    fcmla z7.d, p1/m, z4.d, z2.d, #90
+; CHECK-NEXT:    fcmla z6.d, p1/m, z5.d, z3.d, #90
+; CHECK-NEXT:    mov z0.d, p3/m, z7.d
+; CHECK-NEXT:    mov z1.d, p2/m, z6.d
+; CHECK-NEXT:    whilelo p2.d, x11, x8
 ; CHECK-NEXT:    add x11, x11, x9
 ; CHECK-NEXT:    b.mi .LBB0_1
 ; CHECK-NEXT:  // %bb.2: // %exit.block
@@ -113,8 +114,9 @@ define %"class.std::complex" @complex_mul_predicated_v2f64(ptr %a, ptr %b, ptr %
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    cntd x9
-; CHECK-NEXT:    mov w11, #100 // =0x64
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    neg x10, x9
+; CHECK-NEXT:    mov w11, #100 // =0x64
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov x8, xzr
 ; CHECK-NEXT:    and x10, x10, x11
@@ -123,26 +125,26 @@ define %"class.std::complex" @complex_mul_predicated_v2f64(ptr %a, ptr %b, ptr %
 ; CHECK-NEXT:    zip1 z1.d, z1.d, z1.d
 ; CHECK-NEXT:  .LBB1_1: // %vector.body
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    ld1w { z2.d }, p0/z, [x2, x8, lsl #2]
+; CHECK-NEXT:    ld1w { z2.d }, p1/z, [x2, x8, lsl #2]
 ; CHECK-NEXT:    mov z6.d, z1.d
 ; CHECK-NEXT:    mov z7.d, z0.d
 ; CHECK-NEXT:    add x8, x8, x9
-; CHECK-NEXT:    cmpne p1.d, p0/z, z2.d, #0
+; CHECK-NEXT:    cmpne p2.d, p0/z, z2.d, #0
 ; CHECK-NEXT:    cmp x10, x8
-; CHECK-NEXT:    zip2 p2.d, p1.d, p1.d
-; CHECK-NEXT:    zip1 p1.d, p1.d, p1.d
-; CHECK-NEXT:    ld1d { z2.d }, p2/z, [x0, #1, mul vl]
-; CHECK-NEXT:    ld1d { z4.d }, p2/z, [x1, #1, mul vl]
-; CHECK-NEXT:    ld1d { z3.d }, p1/z, [x0]
-; CHECK-NEXT:    ld1d { z5.d }, p1/z, [x1]
+; CHECK-NEXT:    zip2 p3.d, p2.d, p2.d
+; CHECK-NEXT:    zip1 p2.d, p2.d, p2.d
+; CHECK-NEXT:    ld1d { z2.d }, p3/z, [x0, #1, mul vl]
+; CHECK-NEXT:    ld1d { z4.d }, p3/z, [x1, #1, mul vl]
+; CHECK-NEXT:    ld1d { z3.d }, p2/z, [x0]
+; CHECK-NEXT:    ld1d { z5.d }, p2/z, [x1]
 ; CHECK-NEXT:    add x1, x1, x11
 ; CHECK-NEXT:    add x0, x0, x11
-; CHECK-NEXT:    fcmla z7.d, p0/m, z4.d, z2.d, #0
-; CHECK-NEXT:    fcmla z6.d, p0/m, z5.d, z3.d, #0
-; CHECK-NEXT:    fcmla z7.d, p0/m, z4.d, z2.d, #90
-; CHECK-NEXT:    fcmla z6.d, p0/m, z5.d, z3.d, #90
-; CHECK-NEXT:    mov z0.d, p2/m, z7.d
-; CHECK-NEXT:    mov z1.d, p1/m, z6.d
+; CHECK-NEXT:    fcmla z7.d, p1/m, z4.d, z2.d, #0
+; CHECK-NEXT:    fcmla z6.d, p1/m, z5.d, z3.d, #0
+; CHECK-NEXT:    fcmla z7.d, p1/m, z4.d, z2.d, #90
+; CHECK-NEXT:    fcmla z6.d, p1/m, z5.d, z3.d, #90
+; CHECK-NEXT:    mov z0.d, p3/m, z7.d
+; CHECK-NEXT:    mov z1.d, p2/m, z6.d
 ; CHECK-NEXT:    b.ne .LBB1_1
 ; CHECK-NEXT:  // %bb.2: // %exit.block
 ; CHECK-NEXT:    uzp1 z2.d, z1.d, z0.d
@@ -215,37 +217,38 @@ define %"class.std::complex" @complex_mul_predicated_x2_v2f64(ptr %a, ptr %b, pt
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    mov w8, #100 // =0x64
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    whilelo p2.d, xzr, x8
 ; CHECK-NEXT:    cntd x9
-; CHECK-NEXT:    whilelo p1.d, xzr, x8
 ; CHECK-NEXT:    rdvl x10, #2
-; CHECK-NEXT:    cnth x11
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    cnth x11
 ; CHECK-NEXT:    mov x12, x9
 ; CHECK-NEXT:    zip2 z0.d, z1.d, z1.d
 ; CHECK-NEXT:    zip1 z1.d, z1.d, z1.d
 ; CHECK-NEXT:  .LBB2_1: // %vector.body
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    ld1w { z2.d }, p1/z, [x2]
+; CHECK-NEXT:    ld1w { z2.d }, p2/z, [x2]
 ; CHECK-NEXT:    mov z6.d, z1.d
 ; CHECK-NEXT:    mov z7.d, z0.d
 ; CHECK-NEXT:    add x2, x2, x11
 ; CHECK-NEXT:    and z2.d, z2.d, #0xffffffff
-; CHECK-NEXT:    cmpne p1.d, p1/z, z2.d, #0
-; CHECK-NEXT:    zip2 p2.d, p1.d, p1.d
-; CHECK-NEXT:    zip1 p1.d, p1.d, p1.d
-; CHECK-NEXT:    ld1d { z2.d }, p2/z, [x0, #1, mul vl]
-; CHECK-NEXT:    ld1d { z4.d }, p2/z, [x1, #1, mul vl]
-; CHECK-NEXT:    ld1d { z3.d }, p1/z, [x0]
-; CHECK-NEXT:    ld1d { z5.d }, p1/z, [x1]
+; CHECK-NEXT:    cmpne p2.d, p2/z, z2.d, #0
+; CHECK-NEXT:    zip2 p3.d, p2.d, p2.d
+; CHECK-NEXT:    zip1 p2.d, p2.d, p2.d
+; CHECK-NEXT:    ld1d { z2.d }, p3/z, [x0, #1, mul vl]
+; CHECK-NEXT:    ld1d { z4.d }, p3/z, [x1, #1, mul vl]
+; CHECK-NEXT:    ld1d { z3.d }, p2/z, [x0]
+; CHECK-NEXT:    ld1d { z5.d }, p2/z, [x1]
 ; CHECK-NEXT:    add x1, x1, x10
 ; CHECK-NEXT:    add x0, x0, x10
-; CHECK-NEXT:    fcmla z7.d, p0/m, z4.d, z2.d, #0
-; CHECK-NEXT:    fcmla z6.d, p0/m, z5.d, z3.d, #0
-; CHECK-NEXT:    fcmla z7.d, p0/m, z4.d, z2.d, #90
-; CHECK-NEXT:    fcmla z6.d, p0/m, z5.d, z3.d, #90
-; CHECK-NEXT:    mov z0.d, p2/m, z7.d
-; CHECK-NEXT:    mov z1.d, p1/m, z6.d
-; CHECK-NEXT:    whilelo p1.d, x12, x8
+; CHECK-NEXT:    fcmla z7.d, p1/m, z4.d, z2.d, #0
+; CHECK-NEXT:    fcmla z6.d, p1/m, z5.d, z3.d, #0
+; CHECK-NEXT:    fcmla z7.d, p1/m, z4.d, z2.d, #90
+; CHECK-NEXT:    fcmla z6.d, p1/m, z5.d, z3.d, #90
+; CHECK-NEXT:    mov z0.d, p3/m, z7.d
+; CHECK-NEXT:    mov z1.d, p2/m, z6.d
+; CHECK-NEXT:    whilelo p2.d, x12, x8
 ; CHECK-NEXT:    add x12, x12, x9
 ; CHECK-NEXT:    b.mi .LBB2_1
 ; CHECK-NEXT:  // %bb.2: // %exit.block
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll
index 29be231920305..6803efc85eeee 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll
@@ -18,7 +18,7 @@ define %"class.std::complex" @complex_mul_v2f64(ptr %a, ptr %b) {
 ; CHECK-NEXT:    cntd x8
 ; CHECK-NEXT:    mov w10, #100 // =0x64
 ; CHECK-NEXT:    neg x9, x8
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    and x9, x9, x10
 ; CHECK-NEXT:    rdvl x10, #2
 ; CHECK-NEXT:    zip2 z0.d, z1.d, z1.d
@@ -40,6 +40,7 @@ define %"class.std::complex" @complex_mul_v2f64(ptr %a, ptr %b) {
 ; CHECK-NEXT:  // %bb.2: // %exit.block
 ; CHECK-NEXT:    uzp1 z2.d, z1.d, z0.d
 ; CHECK-NEXT:    uzp2 z1.d, z1.d, z0.d
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    faddv d0, p0, z2.d
 ; CHECK-NEXT:    faddv d1, p0, z1.d
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
@@ -102,14 +103,14 @@ define %"class.std::complex" @complex_mul_nonzero_init_v2f64(ptr %a, ptr %b) {
 ; CHECK-NEXT:    fmov d1, #1.00000000
 ; CHECK-NEXT:    cntd x8
 ; CHECK-NEXT:    fmov d2, #2.00000000
-; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    ptrue p0.b, vl1
 ; CHECK-NEXT:    neg x9, x8
 ; CHECK-NEXT:    mov w10, #100 // =0x64
 ; CHECK-NEXT:    and x9, x9, x10
 ; CHECK-NEXT:    rdvl x10, #2
 ; CHECK-NEXT:    sel z1.d, p0, z1.d, z0.d
 ; CHECK-NEXT:    sel z2.d, p0, z2.d, z0.d
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    zip2 z0.d, z2.d, z1.d
 ; CHECK-NEXT:    zip1 z1.d, z2.d, z1.d
 ; CHECK-NEXT:  .LBB1_1: // %vector.body
@@ -129,6 +130,7 @@ define %"class.std::complex" @complex_mul_nonzero_init_v2f64(ptr %a, ptr %b) {
 ; CHECK-NEXT:  // %bb.2: // %exit.block
 ; CHECK-NEXT:    uzp1 z2.d, z1.d, z0.d
 ; CHECK-NEXT:    uzp2 z1.d, z1.d, z0.d
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    faddv d0, p0, z2.d
 ; CHECK-NEXT:    faddv d1, p0, z1.d
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
@@ -187,7 +189,7 @@ define %"class.std::complex" @complex_mul_v2f64_unrolled(ptr %a, ptr %b) {
 ; CHECK-NEXT:    cntw x8
 ; CHECK-NEXT:    mov w10, #1000 // =0x3e8
 ; CHECK-NEXT:    neg x9, x8
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    and x9, x9, x10
 ; CHECK-NEXT:    rdvl x10, #4
 ; CHECK-NEXT:    zip2 z0.d, z1.d, z1.d
@@ -221,6 +223,7 @@ define %"class.std::complex" @complex_mul_v2f64_unrolled(ptr %a, ptr %b) {
 ; CHECK-NEXT:    uzp1 z5.d, z1.d, z0.d
 ; CHECK-NEXT:    uzp2 z2.d, z2.d, z3.d
 ; CHECK-NEXT:    uzp2 z0.d, z1.d, z0.d
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fadd z1.d, z4.d, z5.d
 ; CHECK-NEXT:    fadd z2.d, z2.d, z0.d
 ; CHECK-NEXT:    faddv d0, p0, z1.d
@@ -313,7 +316,7 @@ define dso_local %"class.std::complex" @reduction_mix(ptr %a, ptr %b, ptr noalia
 ; CHECK-NEXT:    cntd x9
 ; CHECK-NEXT:    mov w11, #100 // =0x64
 ; CHECK-NEXT:    neg x10, x9
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov x8, xzr
 ; CHECK-NEXT:    and x10, x10, x11
 ; CHECK-NEXT:    rdvl x11, #2
@@ -332,14 +335,15 @@ define dso_local %"class.std::complex" @reduction_mix(ptr %a, ptr %b, ptr noalia
 ; CHECK-NEXT:    add z2.d, z5.d, z2.d
 ; CHECK-NEXT:    b.ne .LBB3_1
 ; CHECK-NEXT:  // %bb.2: // %middle.block
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    uzp2 z3.d, z1.d, z0.d
 ; CHECK-NEXT:    uzp1 z1.d, z1.d, z0.d
 ; CHECK-NEXT:    uaddv d2, p0, z2.d
 ; CHECK-NEXT:    faddv d0, p0, z3.d
 ; CHECK-NEXT:    faddv d1, p0, z1.d
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
-; CHECK-NEXT:    str s2, [x4]
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 killed $z1
+; CHECK-NEXT:    str s2, [x4]
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call i64 @llvm.vscale.i64()
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-splat-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-splat-scalable.ll
index 6615313613153..1dca1779cddbf 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-splat-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-splat-scalable.ll
@@ -10,7 +10,7 @@ define <vscale x 4 x double> @complex_mul_const(<vscale x 4 x double> %a, <vscal
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi v4.2d, #0000000000000000
 ; CHECK-NEXT:    movi v5.2d, #0000000000000000
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fmov z6.d, #3.00000000
 ; CHECK-NEXT:    fmov z7.d, #11.00000000
 ; CHECK-NEXT:    fcmla z4.d, p0/m, z0.d, z2.d, #0
@@ -56,7 +56,7 @@ define <vscale x 4 x double> @complex_mul_non_const(<vscale x 4 x double> %a, <v
 ; CHECK-LABEL: complex_mul_non_const:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi v7.2d, #0000000000000000
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    // kill: def $d5 killed $d5 def $z5
 ; CHECK-NEXT:    // kill: def $d4 killed $d4 def $z4
 ; CHECK-NEXT:    movi v6.2d, #0000000000000000
diff --git a/llvm/test/CodeGen/AArch64/consecutive-stores-of-faddv.ll b/llvm/test/CodeGen/AArch64/consecutive-stores-of-faddv.ll
index 64482e15aed81..5976c41612c4c 100644
--- a/llvm/test/CodeGen/AArch64/consecutive-stores-of-faddv.ll
+++ b/llvm/test/CodeGen/AArch64/consecutive-stores-of-faddv.ll
@@ -10,7 +10,7 @@
 define void @consecutive_stores_pair(ptr %dest0, <vscale x 4 x float> %vec0, <vscale x 4 x float> %vec1) {
 ; CHECK-LABEL: consecutive_stores_pair:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    faddv s0, p0, z0.s
 ; CHECK-NEXT:    faddv s1, p0, z1.s
 ; CHECK-NEXT:    mov v0.s[1], v1.s[0]
@@ -27,7 +27,7 @@ define void @consecutive_stores_pair(ptr %dest0, <vscale x 4 x float> %vec0, <vs
 define void @consecutive_stores_quadruple(ptr %dest0, <vscale x 4 x float> %vec0, <vscale x 4 x float> %vec1, <vscale x 4 x float> %vec2, <vscale x 4 x float> %vec3) {
 ; CHECK-LABEL: consecutive_stores_quadruple:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    faddv s0, p0, z0.s
 ; CHECK-NEXT:    faddv s1, p0, z1.s
 ; CHECK-NEXT:    faddv s2, p0, z2.s
@@ -53,7 +53,7 @@ define void @consecutive_stores_quadruple(ptr %dest0, <vscale x 4 x float> %vec0
 define void @consecutive_stores_pair_streaming_function(ptr %dest0, <vscale x 4 x float> %vec0, <vscale x 4 x float> %vec1) "aarch64_pstate_sm_enabled"  {
 ; CHECK-LABEL: consecutive_stores_pair_streaming_function:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    faddv s0, p0, z0.s
 ; CHECK-NEXT:    faddv s1, p0, z1.s
 ; CHECK-NEXT:    stp s0, s1, [x0]
@@ -69,7 +69,7 @@ define void @consecutive_stores_pair_streaming_function(ptr %dest0, <vscale x 4
 define void @consecutive_stores_quadruple_streaming_function(ptr %dest0, <vscale x 4 x float> %vec0, <vscale x 4 x float> %vec1, <vscale x 4 x float> %vec2, <vscale x 4 x float> %vec3) "aarch64_pstate_sm_enabled" {
 ; CHECK-LABEL: consecutive_stores_quadruple_streaming_function:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    faddv s0, p0, z0.s
 ; CHECK-NEXT:    faddv s1, p0, z1.s
 ; CHECK-NEXT:    faddv s2, p0, z2.s
diff --git a/llvm/test/CodeGen/AArch64/extract-vector-elt-sve.ll b/llvm/test/CodeGen/AArch64/extract-vector-elt-sve.ll
index 7705d8949ca1e..d222660ee483e 100644
--- a/llvm/test/CodeGen/AArch64/extract-vector-elt-sve.ll
+++ b/llvm/test/CodeGen/AArch64/extract-vector-elt-sve.ll
@@ -12,7 +12,7 @@
 define <vscale x 2 x i64> @insert_vscale_2_i64_zero(<vscale x 2 x i64> %vec, i64 %elt) {
 ; CHECK-SD-LABEL: insert_vscale_2_i64_zero:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    ptrue p0.d, vl1
+; CHECK-SD-NEXT:    ptrue p0.b, vl1
 ; CHECK-SD-NEXT:    mov z0.d, p0/m, x0
 ; CHECK-SD-NEXT:    ret
 ;
@@ -20,7 +20,7 @@ define <vscale x 2 x i64> @insert_vscale_2_i64_zero(<vscale x 2 x i64> %vec, i64
 ; CHECK-GI:       // %bb.0: // %entry
 ; CHECK-GI-NEXT:    mov x8, xzr
 ; CHECK-GI-NEXT:    index z1.d, #0, #1
-; CHECK-GI-NEXT:    ptrue p0.d
+; CHECK-GI-NEXT:    ptrue p0.b
 ; CHECK-GI-NEXT:    mov z2.d, x8
 ; CHECK-GI-NEXT:    cmpeq p0.d, p0/z, z1.d, z2.d
 ; CHECK-GI-NEXT:    mov z0.d, p0/m, x0
@@ -35,7 +35,7 @@ define <vscale x 2 x i64> @insert_vscale_2_i64(<vscale x 2 x i64> %vec, i64 %elt
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    index z1.d, #0, #1
 ; CHECK-NEXT:    mov z2.d, x1
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cmpeq p0.d, p0/z, z1.d, z2.d
 ; CHECK-NEXT:    mov z0.d, p0/m, x0
 ; CHECK-NEXT:    ret
@@ -47,7 +47,7 @@ entry:
 define <vscale x 4 x i32> @insert_vscale_4_i32_zero(<vscale x 4 x i32> %vec, i32 %elt) {
 ; CHECK-SD-LABEL: insert_vscale_4_i32_zero:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    ptrue p0.s, vl1
+; CHECK-SD-NEXT:    ptrue p0.b, vl1
 ; CHECK-SD-NEXT:    mov z0.s, p0/m, w0
 ; CHECK-SD-NEXT:    ret
 ;
@@ -55,7 +55,7 @@ define <vscale x 4 x i32> @insert_vscale_4_i32_zero(<vscale x 4 x i32> %vec, i32
 ; CHECK-GI:       // %bb.0: // %entry
 ; CHECK-GI-NEXT:    mov w8, wzr
 ; CHECK-GI-NEXT:    index z1.s, #0, #1
-; CHECK-GI-NEXT:    ptrue p0.s
+; CHECK-GI-NEXT:    ptrue p0.b
 ; CHECK-GI-NEXT:    mov z2.s, w8
 ; CHECK-GI-NEXT:    cmpeq p0.s, p0/z, z1.s, z2.s
 ; CHECK-GI-NEXT:    mov z0.s, p0/m, w0
@@ -70,7 +70,7 @@ define <vscale x 4 x i32> @insert_vscale_4_i32(<vscale x 4 x i32> %vec, i32 %elt
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    index z1.s, #0, #1
 ; CHECK-NEXT:    mov z2.s, w1
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cmpeq p0.s, p0/z, z1.s, z2.s
 ; CHECK-NEXT:    mov z0.s, p0/m, w0
 ; CHECK-NEXT:    ret
@@ -82,7 +82,7 @@ entry:
 define <vscale x 8 x i16> @insert_vscale_8_i16_zero(<vscale x 8 x i16> %vec, i16 %elt) {
 ; CHECK-LABEL: insert_vscale_8_i16_zero:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.h, vl1
+; CHECK-NEXT:    ptrue p0.b, vl1
 ; CHECK-NEXT:    mov z0.h, p0/m, w0
 ; CHECK-NEXT:    ret
 entry:
@@ -95,7 +95,7 @@ define <vscale x 8 x i16> @insert_vscale_8_i16(<vscale x 8 x i16> %vec, i16 %elt
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    index z1.h, #0, #1
 ; CHECK-NEXT:    mov z2.h, w1
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cmpeq p0.h, p0/z, z1.h, z2.h
 ; CHECK-NEXT:    mov z0.h, p0/m, w0
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/fp-veclib-expansion.ll b/llvm/test/CodeGen/AArch64/fp-veclib-expansion.ll
index 2c8e2190f8209..72f0f7e108a4d 100644
--- a/llvm/test/CodeGen/AArch64/fp-veclib-expansion.ll
+++ b/llvm/test/CodeGen/AArch64/fp-veclib-expansion.ll
@@ -64,7 +64,7 @@ define <vscale x 4 x float> @frem_nxv4f32(<vscale x 4 x float> %unused, <vscale
 ; ARMPL-NEXT:    .cfi_offset w30, -16
 ; ARMPL-NEXT:    mov z0.d, z1.d
 ; ARMPL-NEXT:    mov z1.d, z2.d
-; ARMPL-NEXT:    ptrue p0.s
+; ARMPL-NEXT:    ptrue p0.b
 ; ARMPL-NEXT:    bl armpl_svfmod_f32_x
 ; ARMPL-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; ARMPL-NEXT:    ret
@@ -76,7 +76,7 @@ define <vscale x 4 x float> @frem_nxv4f32(<vscale x 4 x float> %unused, <vscale
 ; SLEEF-NEXT:    .cfi_offset w30, -16
 ; SLEEF-NEXT:    mov z0.d, z1.d
 ; SLEEF-NEXT:    mov z1.d, z2.d
-; SLEEF-NEXT:    ptrue p0.s
+; SLEEF-NEXT:    ptrue p0.b
 ; SLEEF-NEXT:    bl _ZGVsMxvv_fmodf
 ; SLEEF-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; SLEEF-NEXT:    ret
@@ -92,7 +92,7 @@ define <vscale x 2 x double> @frem_strict_nxv2f64(<vscale x 2 x double> %unused,
 ; ARMPL-NEXT:    .cfi_offset w30, -16
 ; ARMPL-NEXT:    mov z0.d, z1.d
 ; ARMPL-NEXT:    mov z1.d, z2.d
-; ARMPL-NEXT:    ptrue p0.d
+; ARMPL-NEXT:    ptrue p0.b
 ; ARMPL-NEXT:    bl armpl_svfmod_f64_x
 ; ARMPL-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; ARMPL-NEXT:    ret
@@ -104,7 +104,7 @@ define <vscale x 2 x double> @frem_strict_nxv2f64(<vscale x 2 x double> %unused,
 ; SLEEF-NEXT:    .cfi_offset w30, -16
 ; SLEEF-NEXT:    mov z0.d, z1.d
 ; SLEEF-NEXT:    mov z1.d, z2.d
-; SLEEF-NEXT:    ptrue p0.d
+; SLEEF-NEXT:    ptrue p0.b
 ; SLEEF-NEXT:    bl _ZGVsMxvv_fmod
 ; SLEEF-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; SLEEF-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll b/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll
index d5b9d17a98d55..f1a0adf9ce0d4 100644
--- a/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll
@@ -8,9 +8,8 @@ target triple = "aarch64-unknown-linux-gnu"
 define <vscale x 8 x i8> @vec_scalable_subvec_scalable_idx_zero_i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: vec_scalable_subvec_scalable_idx_zero_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0]
-; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1b { z1.s }, p0/z, [x1]
 ; CHECK-NEXT:    uunpkhi z0.s, z0.h
 ; CHECK-NEXT:    uzp1 z0.h, z1.h, z0.h
@@ -24,9 +23,8 @@ define <vscale x 8 x i8> @vec_scalable_subvec_scalable_idx_zero_i8(ptr %a, ptr %
 define <vscale x 8 x i8> @vec_scalable_subvec_scalable_idx_nonzero_i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: vec_scalable_subvec_scalable_idx_nonzero_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0]
-; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1b { z1.s }, p0/z, [x1]
 ; CHECK-NEXT:    uunpklo z0.s, z0.h
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z1.h
@@ -40,9 +38,8 @@ define <vscale x 8 x i8> @vec_scalable_subvec_scalable_idx_nonzero_i8(ptr %a, pt
 define <vscale x 4 x i16> @vec_scalable_subvec_scalable_idx_zero_i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: vec_scalable_subvec_scalable_idx_zero_i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
-; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1h { z1.d }, p0/z, [x1]
 ; CHECK-NEXT:    uunpkhi z0.d, z0.s
 ; CHECK-NEXT:    uzp1 z0.s, z1.s, z0.s
@@ -56,9 +53,8 @@ define <vscale x 4 x i16> @vec_scalable_subvec_scalable_idx_zero_i16(ptr %a, ptr
 define <vscale x 4 x i16> @vec_scalable_subvec_scalable_idx_nonzero_i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: vec_scalable_subvec_scalable_idx_nonzero_i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
-; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1h { z1.d }, p0/z, [x1]
 ; CHECK-NEXT:    uunpklo z0.d, z0.s
 ; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
@@ -74,7 +70,7 @@ define <vscale x 4 x i16> @vec_scalable_subvec_scalable_idx_nonzero_i16(ptr %a,
 define <vscale x 8 x i8> @vec_scalable_subvec_fixed_idx_zero_i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: vec_scalable_subvec_fixed_idx_zero_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ldr d0, [x1]
 ; CHECK-NEXT:    ld1b { z1.h }, p0/z, [x0]
 ; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
@@ -93,7 +89,7 @@ define <vscale x 8 x i8> @vec_scalable_subvec_fixed_idx_nonzero_i8(ptr %a, ptr %
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
 ; CHECK-NEXT:    cnth x8
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sub x8, x8, #8
 ; CHECK-NEXT:    mov w9, #8 // =0x8
@@ -118,7 +114,7 @@ define <vscale x 8 x i8> @vec_scalable_subvec_fixed_idx_nonzero_i8(ptr %a, ptr %
 define <vscale x 4 x i16> @vec_scalable_subvec_fixed_idx_zero_i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: vec_scalable_subvec_fixed_idx_zero_i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ldr d0, [x1]
 ; CHECK-NEXT:    ld1h { z1.s }, p0/z, [x0]
 ; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
@@ -137,7 +133,7 @@ define <vscale x 4 x i16> @vec_scalable_subvec_fixed_idx_nonzero_i16(ptr %a, ptr
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
 ; CHECK-NEXT:    cntw x8
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sub x8, x8, #4
 ; CHECK-NEXT:    mov w9, #4 // =0x4
@@ -162,7 +158,7 @@ define <vscale x 4 x i16> @vec_scalable_subvec_fixed_idx_nonzero_i16(ptr %a, ptr
 define <vscale x 2 x i32> @vec_scalable_subvec_fixed_idx_zero_i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: vec_scalable_subvec_fixed_idx_zero_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ldr d0, [x1]
 ; CHECK-NEXT:    ld1w { z1.d }, p0/z, [x0]
 ; CHECK-NEXT:    ushll v0.2d, v0.2s, #0
@@ -181,7 +177,7 @@ define <vscale x 2 x i32> @vec_scalable_subvec_fixed_idx_nonzero_i32(ptr %a, ptr
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
 ; CHECK-NEXT:    cntd x8
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sub x8, x8, #2
 ; CHECK-NEXT:    mov w9, #2 // =0x2
@@ -208,11 +204,12 @@ define <vscale x 2 x i32> @vec_scalable_subvec_fixed_idx_nonzero_large_i32(ptr %
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    ptrue p1.d, vl8
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl8
 ; CHECK-NEXT:    str z0, [sp]
-; CHECK-NEXT:    ld1w { z0.d }, p1/z, [x1]
+; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x1]
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
 ; CHECK-NEXT:    ldr z0, [sp]
 ; CHECK-NEXT:    addvl sp, sp, #1
diff --git a/llvm/test/CodeGen/AArch64/intrinsic-cttz-elts-sve.ll b/llvm/test/CodeGen/AArch64/intrinsic-cttz-elts-sve.ll
index cdf2a962f9322..7131b721a6dc2 100644
--- a/llvm/test/CodeGen/AArch64/intrinsic-cttz-elts-sve.ll
+++ b/llvm/test/CodeGen/AArch64/intrinsic-cttz-elts-sve.ll
@@ -142,7 +142,7 @@ define i64 @vscale_4096_poison(<vscale x 16 x i8> %a) #1 {
 define i32 @ctz_nxv2i1(<vscale x 2 x i1> %a) {
 ; CHECK-LABEL: ctz_nxv2i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    brkb p0.b, p1/z, p0.b
 ; CHECK-NEXT:    cntp x0, p0, p0.d
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
@@ -154,7 +154,7 @@ define i32 @ctz_nxv2i1(<vscale x 2 x i1> %a) {
 define i32 @ctz_nxv2i1_poison(<vscale x 2 x i1> %a) {
 ; CHECK-LABEL: ctz_nxv2i1_poison:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    brkb p0.b, p1/z, p0.b
 ; CHECK-NEXT:    cntp x0, p0, p0.d
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
@@ -166,7 +166,7 @@ define i32 @ctz_nxv2i1_poison(<vscale x 2 x i1> %a) {
 define i64 @add_i64_ctz_nxv2i1_poison(<vscale x 2 x i1> %a, i64 %b) {
 ; CHECK-LABEL: add_i64_ctz_nxv2i1_poison:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    brkb p0.b, p1/z, p0.b
 ; CHECK-NEXT:    incp x0, p0.d
 ; CHECK-NEXT:    ret
@@ -178,7 +178,7 @@ define i64 @add_i64_ctz_nxv2i1_poison(<vscale x 2 x i1> %a, i64 %b) {
 define i32 @add_i32_ctz_nxv2i1_poison(<vscale x 2 x i1> %a, i32 %b) {
 ; CHECK-LABEL: add_i32_ctz_nxv2i1_poison:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
 ; CHECK-NEXT:    brkb p0.b, p1/z, p0.b
 ; CHECK-NEXT:    incp x0, p0.d
@@ -193,7 +193,7 @@ define i32 @add_i32_ctz_nxv2i1_poison(<vscale x 2 x i1> %a, i32 %b) {
 define i32 @ctz_nxv4i1(<vscale x 4 x i1> %a) {
 ; CHECK-LABEL: ctz_nxv4i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    brkb p0.b, p1/z, p0.b
 ; CHECK-NEXT:    cntp x0, p0, p0.s
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
@@ -205,7 +205,7 @@ define i32 @ctz_nxv4i1(<vscale x 4 x i1> %a) {
 define i32 @ctz_nxv4i1_poison(<vscale x 4 x i1> %a) {
 ; CHECK-LABEL: ctz_nxv4i1_poison:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    brkb p0.b, p1/z, p0.b
 ; CHECK-NEXT:    cntp x0, p0, p0.s
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
@@ -217,7 +217,7 @@ define i32 @ctz_nxv4i1_poison(<vscale x 4 x i1> %a) {
 define i64 @add_i64_ctz_nxv4i1_poison(<vscale x 4 x i1> %a, i64 %b) {
 ; CHECK-LABEL: add_i64_ctz_nxv4i1_poison:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    brkb p0.b, p1/z, p0.b
 ; CHECK-NEXT:    incp x0, p0.s
 ; CHECK-NEXT:    ret
@@ -229,7 +229,7 @@ define i64 @add_i64_ctz_nxv4i1_poison(<vscale x 4 x i1> %a, i64 %b) {
 define i32 @add_i32_ctz_nxv4i1_poison(<vscale x 4 x i1> %a, i32 %b) {
 ; CHECK-LABEL: add_i32_ctz_nxv4i1_poison:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
 ; CHECK-NEXT:    brkb p0.b, p1/z, p0.b
 ; CHECK-NEXT:    incp x0, p0.s
@@ -244,7 +244,7 @@ define i32 @add_i32_ctz_nxv4i1_poison(<vscale x 4 x i1> %a, i32 %b) {
 define i32 @ctz_nxv8i1(<vscale x 8 x i1> %a) {
 ; CHECK-LABEL: ctz_nxv8i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.h
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    brkb p0.b, p1/z, p0.b
 ; CHECK-NEXT:    cntp x0, p0, p0.h
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
@@ -256,7 +256,7 @@ define i32 @ctz_nxv8i1(<vscale x 8 x i1> %a) {
 define i32 @ctz_nxv8i1_poison(<vscale x 8 x i1> %a) {
 ; CHECK-LABEL: ctz_nxv8i1_poison:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.h
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    brkb p0.b, p1/z, p0.b
 ; CHECK-NEXT:    cntp x0, p0, p0.h
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
@@ -268,7 +268,7 @@ define i32 @ctz_nxv8i1_poison(<vscale x 8 x i1> %a) {
 define i64 @add_i64_ctz_nxv8i1_poison(<vscale x 8 x i1> %a, i64 %b) {
 ; CHECK-LABEL: add_i64_ctz_nxv8i1_poison:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.h
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    brkb p0.b, p1/z, p0.b
 ; CHECK-NEXT:    incp x0, p0.h
 ; CHECK-NEXT:    ret
@@ -280,7 +280,7 @@ define i64 @add_i64_ctz_nxv8i1_poison(<vscale x 8 x i1> %a, i64 %b) {
 define i32 @add_i32_ctz_nxv8i1_poison(<vscale x 8 x i1> %a, i32 %b) {
 ; CHECK-LABEL: add_i32_ctz_nxv8i1_poison:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.h
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
 ; CHECK-NEXT:    brkb p0.b, p1/z, p0.b
 ; CHECK-NEXT:    incp x0, p0.h
@@ -510,7 +510,7 @@ define i32 @ctz_v4i1(<4 x i1> %a) {
 ; NONSTREAMING:       // %bb.0:
 ; NONSTREAMING-NEXT:    shl v0.4h, v0.4h, #15
 ; NONSTREAMING-NEXT:    ptrue p0.h, vl4
-; NONSTREAMING-NEXT:    ptrue p1.h
+; NONSTREAMING-NEXT:    ptrue p1.b
 ; NONSTREAMING-NEXT:    cmlt v0.4h, v0.4h, #0
 ; NONSTREAMING-NEXT:    cmpne p0.h, p0/z, z0.h, #0
 ; NONSTREAMING-NEXT:    brkb p0.b, p1/z, p0.b
@@ -523,7 +523,7 @@ define i32 @ctz_v4i1(<4 x i1> %a) {
 ; STREAMING-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; STREAMING-NEXT:    ptrue p0.h, vl4
 ; STREAMING-NEXT:    lsl z0.h, z0.h, #15
-; STREAMING-NEXT:    ptrue p1.h
+; STREAMING-NEXT:    ptrue p1.b
 ; STREAMING-NEXT:    asr z0.h, z0.h, #15
 ; STREAMING-NEXT:    cmpne p0.h, p0/z, z0.h, #0
 ; STREAMING-NEXT:    brkb p0.b, p1/z, p0.b
@@ -539,7 +539,7 @@ define i32 @ctz_v4i1_poison(<4 x i1> %a) {
 ; NONSTREAMING:       // %bb.0:
 ; NONSTREAMING-NEXT:    shl v0.4h, v0.4h, #15
 ; NONSTREAMING-NEXT:    ptrue p0.h, vl4
-; NONSTREAMING-NEXT:    ptrue p1.h
+; NONSTREAMING-NEXT:    ptrue p1.b
 ; NONSTREAMING-NEXT:    cmlt v0.4h, v0.4h, #0
 ; NONSTREAMING-NEXT:    cmpne p0.h, p0/z, z0.h, #0
 ; NONSTREAMING-NEXT:    brkb p0.b, p1/z, p0.b
@@ -552,7 +552,7 @@ define i32 @ctz_v4i1_poison(<4 x i1> %a) {
 ; STREAMING-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; STREAMING-NEXT:    ptrue p0.h, vl4
 ; STREAMING-NEXT:    lsl z0.h, z0.h, #15
-; STREAMING-NEXT:    ptrue p1.h
+; STREAMING-NEXT:    ptrue p1.b
 ; STREAMING-NEXT:    asr z0.h, z0.h, #15
 ; STREAMING-NEXT:    cmpne p0.h, p0/z, z0.h, #0
 ; STREAMING-NEXT:    brkb p0.b, p1/z, p0.b
@@ -568,7 +568,7 @@ define i32 @ctz_v2i1(<2 x i1> %a) {
 ; NONSTREAMING:       // %bb.0:
 ; NONSTREAMING-NEXT:    shl v0.2s, v0.2s, #31
 ; NONSTREAMING-NEXT:    ptrue p0.s, vl2
-; NONSTREAMING-NEXT:    ptrue p1.s
+; NONSTREAMING-NEXT:    ptrue p1.b
 ; NONSTREAMING-NEXT:    cmlt v0.2s, v0.2s, #0
 ; NONSTREAMING-NEXT:    cmpne p0.s, p0/z, z0.s, #0
 ; NONSTREAMING-NEXT:    brkb p0.b, p1/z, p0.b
@@ -581,7 +581,7 @@ define i32 @ctz_v2i1(<2 x i1> %a) {
 ; STREAMING-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; STREAMING-NEXT:    ptrue p0.s, vl2
 ; STREAMING-NEXT:    lsl z0.s, z0.s, #31
-; STREAMING-NEXT:    ptrue p1.s
+; STREAMING-NEXT:    ptrue p1.b
 ; STREAMING-NEXT:    asr z0.s, z0.s, #31
 ; STREAMING-NEXT:    cmpne p0.s, p0/z, z0.s, #0
 ; STREAMING-NEXT:    brkb p0.b, p1/z, p0.b
@@ -597,7 +597,7 @@ define i32 @ctz_v2i1_poison(<2 x i1> %a) {
 ; NONSTREAMING:       // %bb.0:
 ; NONSTREAMING-NEXT:    shl v0.2s, v0.2s, #31
 ; NONSTREAMING-NEXT:    ptrue p0.s, vl2
-; NONSTREAMING-NEXT:    ptrue p1.s
+; NONSTREAMING-NEXT:    ptrue p1.b
 ; NONSTREAMING-NEXT:    cmlt v0.2s, v0.2s, #0
 ; NONSTREAMING-NEXT:    cmpne p0.s, p0/z, z0.s, #0
 ; NONSTREAMING-NEXT:    brkb p0.b, p1/z, p0.b
@@ -610,7 +610,7 @@ define i32 @ctz_v2i1_poison(<2 x i1> %a) {
 ; STREAMING-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; STREAMING-NEXT:    ptrue p0.s, vl2
 ; STREAMING-NEXT:    lsl z0.s, z0.s, #31
-; STREAMING-NEXT:    ptrue p1.s
+; STREAMING-NEXT:    ptrue p1.b
 ; STREAMING-NEXT:    asr z0.s, z0.s, #31
 ; STREAMING-NEXT:    cmpne p0.s, p0/z, z0.s, #0
 ; STREAMING-NEXT:    brkb p0.b, p1/z, p0.b
diff --git a/llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll b/llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll
index 47fae5a01c931..9207937502e59 100644
--- a/llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll
+++ b/llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll
@@ -83,9 +83,10 @@ define <vscale x 8 x i32> @sdiv_split_i32(<vscale x 8 x i32> %a, <vscale x 8 x i
 define <vscale x 2 x i32> @sdiv_widen_i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: sdiv_widen_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtw z1.d, p0/m, z1.d
 ; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    sdiv z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
   %div = sdiv <vscale x 2 x i32> %a, %b
@@ -459,9 +460,10 @@ define <vscale x 4 x i64> @smin_split_i64(<vscale x 4 x i64> %a, <vscale x 4 x i
 define <vscale x 8 x i8> @smin_promote_i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
 ; CHECK-LABEL: smin_promote_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtb z1.h, p0/m, z1.h
 ; CHECK-NEXT:    sxtb z0.h, p0/m, z0.h
+; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    smin z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %cmp = icmp slt <vscale x 8 x i8> %a, %b
@@ -472,9 +474,10 @@ define <vscale x 8 x i8> @smin_promote_i8(<vscale x 8 x i8> %a, <vscale x 8 x i8
 define <vscale x 4 x i16> @smin_promote_i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
 ; CHECK-LABEL: smin_promote_i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxth z1.s, p0/m, z1.s
 ; CHECK-NEXT:    sxth z0.s, p0/m, z0.s
+; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    smin z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
   %cmp = icmp slt <vscale x 4 x i16> %a, %b
@@ -485,9 +488,10 @@ define <vscale x 4 x i16> @smin_promote_i16(<vscale x 4 x i16> %a, <vscale x 4 x
 define <vscale x 2 x i32> @smin_promote_i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: smin_promote_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtw z1.d, p0/m, z1.d
 ; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    smin z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
   %cmp = icmp slt <vscale x 2 x i32> %a, %b
@@ -631,9 +635,10 @@ define <vscale x 8 x i32> @smax_split_i32(<vscale x 8 x i32> %a, <vscale x 8 x i
 define <vscale x 4 x i16> @smax_promote_i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
 ; CHECK-LABEL: smax_promote_i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxth z1.s, p0/m, z1.s
 ; CHECK-NEXT:    sxth z0.s, p0/m, z0.s
+; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    smax z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
   %cmp = icmp sgt <vscale x 4 x i16> %a, %b
@@ -772,9 +777,10 @@ define <vscale x 16 x i16> @asr_split_i16(<vscale x 16 x i16> %a, <vscale x 16 x
 define <vscale x 2 x i32> @asr_promote_i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b){
 ; CHECK-LABEL: asr_promote_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    and z1.d, z1.d, #0xffffffff
 ; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    asr z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
   %shr = ashr <vscale x 2 x i32> %a, %b
diff --git a/llvm/test/CodeGen/AArch64/load-insert-zero.ll b/llvm/test/CodeGen/AArch64/load-insert-zero.ll
index 8b4cc7bcc0311..8cf9dc85c6fed 100644
--- a/llvm/test/CodeGen/AArch64/load-insert-zero.ll
+++ b/llvm/test/CodeGen/AArch64/load-insert-zero.ll
@@ -923,7 +923,7 @@ define <vscale x 8 x i8> @loadnxv8i8(ptr %p) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ldrb w8, [x0]
-; CHECK-NEXT:    ptrue p0.h, vl1
+; CHECK-NEXT:    ptrue p0.b, vl1
 ; CHECK-NEXT:    mov z0.h, p0/m, w8
 ; CHECK-NEXT:    ret
   %l = load i8, ptr %p
@@ -946,7 +946,7 @@ define <vscale x 4 x i16> @loadnxv4i16(ptr %p) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ldrh w8, [x0]
-; CHECK-NEXT:    ptrue p0.s, vl1
+; CHECK-NEXT:    ptrue p0.b, vl1
 ; CHECK-NEXT:    mov z0.s, p0/m, w8
 ; CHECK-NEXT:    ret
   %l = load i16, ptr %p
@@ -969,7 +969,7 @@ define <vscale x 2 x i32> @loadnxv2i32(ptr %p) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ldr w8, [x0]
-; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    ptrue p0.b, vl1
 ; CHECK-NEXT:    mov z0.d, p0/m, x8
 ; CHECK-NEXT:    ret
   %l = load i32, ptr %p
@@ -1003,7 +1003,7 @@ define <vscale x 4 x half> @loadnxv4f16(ptr %p) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, wzr
 ; CHECK-NEXT:    index z0.s, #0, #1
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, z1.s
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
@@ -1030,7 +1030,7 @@ define <vscale x 4 x bfloat> @loadnxv4bf16(ptr %p) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, wzr
 ; CHECK-NEXT:    index z0.s, #0, #1
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, z1.s
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
@@ -1057,7 +1057,7 @@ define <vscale x 2 x float> @loadnxv2f32(ptr %p) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov x8, xzr
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z1.d, x8
 ; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, z1.d
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
@@ -1097,7 +1097,7 @@ define <vscale x 8 x i8> @loadnxv8i8_offset(ptr %p) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ldrb w8, [x0, #1]
-; CHECK-NEXT:    ptrue p0.h, vl1
+; CHECK-NEXT:    ptrue p0.b, vl1
 ; CHECK-NEXT:    mov z0.h, p0/m, w8
 ; CHECK-NEXT:    ret
   %g = getelementptr inbounds i8, ptr %p, i64 1
@@ -1122,7 +1122,7 @@ define <vscale x 4 x i16> @loadnxv4i16_offset(ptr %p) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ldurh w8, [x0, #1]
-; CHECK-NEXT:    ptrue p0.s, vl1
+; CHECK-NEXT:    ptrue p0.b, vl1
 ; CHECK-NEXT:    mov z0.s, p0/m, w8
 ; CHECK-NEXT:    ret
   %g = getelementptr inbounds i8, ptr %p, i64 1
@@ -1147,7 +1147,7 @@ define <vscale x 2 x i32> @loadnxv2i32_offset(ptr %p) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ldur w8, [x0, #1]
-; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    ptrue p0.b, vl1
 ; CHECK-NEXT:    mov z0.d, p0/m, x8
 ; CHECK-NEXT:    ret
   %g = getelementptr inbounds i8, ptr %p, i64 1
@@ -1184,7 +1184,7 @@ define <vscale x 4 x half> @loadnxv4f16_offset(ptr %p) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, wzr
 ; CHECK-NEXT:    index z0.s, #0, #1
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, z1.s
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
@@ -1213,7 +1213,7 @@ define <vscale x 4 x bfloat> @loadnxv4bf16_offset(ptr %p) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, wzr
 ; CHECK-NEXT:    index z0.s, #0, #1
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, z1.s
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
@@ -1242,7 +1242,7 @@ define <vscale x 2 x float> @loadnxv2f32_offset(ptr %p) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov x8, xzr
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z1.d, x8
 ; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, z1.d
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
diff --git a/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll b/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll
index 69e805d9ca2ee..f6cdc55d7b0ab 100644
--- a/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll
+++ b/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll
@@ -351,7 +351,7 @@ define <vscale x 16 x float> @splice_nxv16f32_16(<vscale x 16 x float> %a, <vsca
 ; CHECK-NEXT:    addvl sp, sp, #-8
 ; CHECK-NEXT:    rdvl x8, #1
 ; CHECK-NEXT:    mov w9, #16 // =0x10
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sub x8, x8, #1
 ; CHECK-NEXT:    str z3, [sp, #3, mul vl]
 ; CHECK-NEXT:    cmp x8, #16
@@ -874,7 +874,7 @@ define <vscale x 8 x i32> @splice_nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i
 ; CHECK-NEXT:    addvl sp, sp, #-4
 ; CHECK-NEXT:    rdvl x8, #2
 ; CHECK-NEXT:    mov x9, sp
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    add x8, x9, x8
 ; CHECK-NEXT:    str z1, [sp, #1, mul vl]
 ; CHECK-NEXT:    mov x9, #-8 // =0xfffffffffffffff8
diff --git a/llvm/test/CodeGen/AArch64/nontemporal-load.ll b/llvm/test/CodeGen/AArch64/nontemporal-load.ll
index 959ac7f68e351..9659b1024acb2 100644
--- a/llvm/test/CodeGen/AArch64/nontemporal-load.ll
+++ b/llvm/test/CodeGen/AArch64/nontemporal-load.ll
@@ -621,7 +621,7 @@ define <vscale x 20 x float> @test_ldnp_v20f32_vscale(ptr %A) {
 ;
 ; CHECK-BE-LABEL: test_ldnp_v20f32_vscale:
 ; CHECK-BE:       // %bb.0:
-; CHECK-BE-NEXT:    ptrue p0.s
+; CHECK-BE-NEXT:    ptrue p0.b
 ; CHECK-BE-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; CHECK-BE-NEXT:    ld1w { z1.s }, p0/z, [x0, #1, mul vl]
 ; CHECK-BE-NEXT:    ld1w { z2.s }, p0/z, [x0, #2, mul vl]
diff --git a/llvm/test/CodeGen/AArch64/rcpc3-sve.ll b/llvm/test/CodeGen/AArch64/rcpc3-sve.ll
index 6b03e5d12bfd3..35131f06b2c01 100644
--- a/llvm/test/CodeGen/AArch64/rcpc3-sve.ll
+++ b/llvm/test/CodeGen/AArch64/rcpc3-sve.ll
@@ -9,7 +9,7 @@ define hidden <vscale x 2 x i64> @test_load_sve_lane0(ptr nocapture noundef read
 ; CHECK-LABEL: test_load_sve_lane0:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldapr x8, [x0]
-; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    ptrue p0.b, vl1
 ; CHECK-NEXT:    mov z0.d, p0/m, x8
 ; CHECK-NEXT:    ret
   %1 = load atomic i64, ptr %a acquire, align 8
@@ -22,7 +22,7 @@ define hidden <vscale x 2 x i64> @test_load_sve_lane1(ptr nocapture noundef read
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #1 // =0x1
 ; CHECK-NEXT:    index z1.d, #0, #1
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z2.d, x8
 ; CHECK-NEXT:    ldapr x8, [x0]
 ; CHECK-NEXT:    cmpeq p0.d, p0/z, z1.d, z2.d
diff --git a/llvm/test/CodeGen/AArch64/reassocmls.ll b/llvm/test/CodeGen/AArch64/reassocmls.ll
index acbf9fc584a2e..2b7b05c833bdd 100644
--- a/llvm/test/CodeGen/AArch64/reassocmls.ll
+++ b/llvm/test/CodeGen/AArch64/reassocmls.ll
@@ -268,13 +268,14 @@ define <8 x i16> @mla_v8i16_C(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16
 define <vscale x 8 x i16> @smlsl_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i8> %d, <vscale x 8 x i8> %e) {
 ; CHECK-LABEL: smlsl_nxv8i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    ptrue p1.h
 ; CHECK-NEXT:    sxtb z3.h, p0/m, z3.h
 ; CHECK-NEXT:    sxtb z4.h, p0/m, z4.h
 ; CHECK-NEXT:    sxtb z1.h, p0/m, z1.h
 ; CHECK-NEXT:    sxtb z2.h, p0/m, z2.h
-; CHECK-NEXT:    mls z0.h, p0/m, z4.h, z3.h
-; CHECK-NEXT:    mls z0.h, p0/m, z2.h, z1.h
+; CHECK-NEXT:    mls z0.h, p1/m, z4.h, z3.h
+; CHECK-NEXT:    mls z0.h, p1/m, z2.h, z1.h
 ; CHECK-NEXT:    ret
   %be = sext <vscale x 8 x i8> %b to <vscale x 8 x i16>
   %ce = sext <vscale x 8 x i8> %c to <vscale x 8 x i16>
diff --git a/llvm/test/CodeGen/AArch64/reduce-or-opt.ll b/llvm/test/CodeGen/AArch64/reduce-or-opt.ll
index f5df5ea53c990..618b52b2e5eb7 100644
--- a/llvm/test/CodeGen/AArch64/reduce-or-opt.ll
+++ b/llvm/test/CodeGen/AArch64/reduce-or-opt.ll
@@ -94,14 +94,15 @@ define i64 @select_or_reduce_nxv2i1(ptr nocapture noundef readonly %src) {
 ; CHECK-LABEL: select_or_reduce_nxv2i1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cntd x8
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov x9, xzr
 ; CHECK-NEXT:    neg x10, x8
+; CHECK-NEXT:    ptrue p1.d
 ; CHECK-NEXT:    add x10, x10, #4
 ; CHECK-NEXT:  .LBB2_1: // %vector.body
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, x9, lsl #3]
-; CHECK-NEXT:    cmpeq p1.d, p0/z, z0.d, #0
+; CHECK-NEXT:    cmpeq p2.d, p1/z, z0.d, #0
 ; CHECK-NEXT:    b.ne .LBB2_3
 ; CHECK-NEXT:  // %bb.2: // %vector.body
 ; CHECK-NEXT:    // in Loop: Header=BB2_1 Depth=1
@@ -109,7 +110,7 @@ define i64 @select_or_reduce_nxv2i1(ptr nocapture noundef readonly %src) {
 ; CHECK-NEXT:    add x9, x9, x8
 ; CHECK-NEXT:    b.ne .LBB2_1
 ; CHECK-NEXT:  .LBB2_3: // %middle.split
-; CHECK-NEXT:    ptest p0, p1.b
+; CHECK-NEXT:    ptest p1, p2.b
 ; CHECK-NEXT:    cset w0, ne
 ; CHECK-NEXT:    ret
 entry:
@@ -137,14 +138,15 @@ define i64 @br_or_reduce_nxv2i1(ptr nocapture noundef readonly %src, ptr noundef
 ; CHECK-LABEL: br_or_reduce_nxv2i1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cntd x8
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov x9, xzr
 ; CHECK-NEXT:    neg x10, x8
+; CHECK-NEXT:    ptrue p1.d
 ; CHECK-NEXT:    add x10, x10, #4
 ; CHECK-NEXT:  .LBB3_1: // %vector.body
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, x9, lsl #3]
-; CHECK-NEXT:    cmpeq p1.d, p0/z, z0.d, #0
+; CHECK-NEXT:    cmpeq p2.d, p1/z, z0.d, #0
 ; CHECK-NEXT:    b.ne .LBB3_3
 ; CHECK-NEXT:  // %bb.2: // %vector.body
 ; CHECK-NEXT:    // in Loop: Header=BB3_1 Depth=1
@@ -152,7 +154,7 @@ define i64 @br_or_reduce_nxv2i1(ptr nocapture noundef readonly %src, ptr noundef
 ; CHECK-NEXT:    add x9, x9, x8
 ; CHECK-NEXT:    b.ne .LBB3_1
 ; CHECK-NEXT:  .LBB3_3: // %middle.split
-; CHECK-NEXT:    ptest p0, p1.b
+; CHECK-NEXT:    ptest p1, p2.b
 ; CHECK-NEXT:    b.eq .LBB3_5
 ; CHECK-NEXT:  // %bb.4: // %found
 ; CHECK-NEXT:    mov w8, #56 // =0x38
diff --git a/llvm/test/CodeGen/AArch64/sinksplat.ll b/llvm/test/CodeGen/AArch64/sinksplat.ll
index 5743dc7cce580..1a3a03f27cd13 100644
--- a/llvm/test/CodeGen/AArch64/sinksplat.ll
+++ b/llvm/test/CodeGen/AArch64/sinksplat.ll
@@ -508,7 +508,7 @@ l2:
 define <vscale x 4 x float> @fmul_scalable(ptr %x, ptr %y) "target-features"="+sve" {
 ; CHECK-LABEL: fmul_scalable:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    rdvl x8, #1
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    sxtw x8, w8
diff --git a/llvm/test/CodeGen/AArch64/sme-framelower-use-bp.ll b/llvm/test/CodeGen/AArch64/sme-framelower-use-bp.ll
index f49bb910b5bd1..f34c7ecaae19f 100644
--- a/llvm/test/CodeGen/AArch64/sme-framelower-use-bp.ll
+++ b/llvm/test/CodeGen/AArch64/sme-framelower-use-bp.ll
@@ -511,7 +511,7 @@ define void @quux() #1 {
 ; CHECK-NEXT:    mov p8.b, p0.b
 ; CHECK-NEXT:    pext { p3.s, p4.s }, pn8[0]
 ; CHECK-NEXT:    mov p0.b, p3.b
-; CHECK-NEXT:    ptrue p2.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    and p0.b, p0/z, p0.b, p2.b
 ; CHECK-NEXT:    mov p1.b, p4.b
 ; CHECK-NEXT:    and p1.b, p1/z, p1.b, p2.b
diff --git a/llvm/test/CodeGen/AArch64/sve-aba.ll b/llvm/test/CodeGen/AArch64/sve-aba.ll
index ffb3e2d658364..629b7df66d59f 100644
--- a/llvm/test/CodeGen/AArch64/sve-aba.ll
+++ b/llvm/test/CodeGen/AArch64/sve-aba.ll
@@ -75,7 +75,7 @@ define <vscale x 8 x i16> @saba_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b,
 define <vscale x 8 x i16> @saba_h_promoted_ops(<vscale x 8 x i16> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c) #0 {
 ; CHECK-LABEL: saba_h_promoted_ops:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtb z2.h, p0/m, z2.h
 ; CHECK-NEXT:    sxtb z1.h, p0/m, z1.h
 ; CHECK-NEXT:    saba z0.h, z1.h, z2.h
@@ -126,7 +126,7 @@ define <vscale x 4 x i32> @saba_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b,
 define <vscale x 4 x i32> @saba_s_promoted_ops(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c) #0 {
 ; CHECK-LABEL: saba_s_promoted_ops:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxth z2.s, p0/m, z2.s
 ; CHECK-NEXT:    sxth z1.s, p0/m, z1.s
 ; CHECK-NEXT:    saba z0.s, z1.s, z2.s
@@ -177,7 +177,7 @@ define <vscale x 2 x i64> @saba_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b,
 define <vscale x 2 x i64> @saba_d_promoted_ops(<vscale x 2 x i64> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c) #0 {
 ; CHECK-LABEL: saba_d_promoted_ops:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtw z2.d, p0/m, z2.d
 ; CHECK-NEXT:    sxtw z1.d, p0/m, z1.d
 ; CHECK-NEXT:    saba z0.d, z1.d, z2.d
diff --git a/llvm/test/CodeGen/AArch64/sve-abd.ll b/llvm/test/CodeGen/AArch64/sve-abd.ll
index 72790155d046f..7f5d3711471d3 100644
--- a/llvm/test/CodeGen/AArch64/sve-abd.ll
+++ b/llvm/test/CodeGen/AArch64/sve-abd.ll
@@ -52,9 +52,10 @@ define <vscale x 8 x i16> @sabd_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
 define <vscale x 8 x i16> @sabd_h_promoted_ops(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) #0 {
 ; CHECK-LABEL: sabd_h_promoted_ops:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtb z1.h, p0/m, z1.h
 ; CHECK-NEXT:    sxtb z0.h, p0/m, z0.h
+; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    sabd z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %a.sext = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>
@@ -81,9 +82,10 @@ define <vscale x 4 x i32> @sabd_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
 define <vscale x 4 x i32> @sabd_s_promoted_ops(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) #0 {
 ; CHECK-LABEL: sabd_s_promoted_ops:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxth z1.s, p0/m, z1.s
 ; CHECK-NEXT:    sxth z0.s, p0/m, z0.s
+; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    sabd z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
   %a.sext = sext <vscale x 4 x i16> %a to <vscale x 4 x i32>
@@ -110,9 +112,10 @@ define <vscale x 2 x i64> @sabd_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
 define <vscale x 2 x i64> @sabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) #0 {
 ; CHECK-LABEL: sabd_d_promoted_ops:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtw z1.d, p0/m, z1.d
 ; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    sabd z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
   %a.sext = sext <vscale x 2 x i32> %a to <vscale x 2 x i64>
@@ -280,9 +283,10 @@ define <vscale x 4 x i32> @uabd_non_matching_promoted_ops(<vscale x 4 x i8> %a,
 define <vscale x 4 x i32> @uabd_non_matching_promotion(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) #0 {
 ; CHECK-LABEL: uabd_non_matching_promotion:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    and z0.s, z0.s, #0xff
 ; CHECK-NEXT:    sxtb z1.s, p0/m, z1.s
+; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    sub z0.s, z0.s, z1.s
 ; CHECK-NEXT:    abs z0.s, p0/m, z0.s
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-aliasing.ll b/llvm/test/CodeGen/AArch64/sve-aliasing.ll
index a27429a256250..69e902d051cc4 100644
--- a/llvm/test/CodeGen/AArch64/sve-aliasing.ll
+++ b/llvm/test/CodeGen/AArch64/sve-aliasing.ll
@@ -122,13 +122,14 @@ define void @scalable_v2i64(ptr noalias nocapture noundef %l0) {
 define void @scalable_v8i8(ptr noalias nocapture noundef %l0) {
 ; CHECK-LABEL: scalable_v8i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    ptrue p1.h
 ; CHECK-NEXT:    ld1sb { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ld1sb { z1.h }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    movprfx z2, z0
-; CHECK-NEXT:    mul z2.h, p0/m, z2.h, z0.h
+; CHECK-NEXT:    mul z2.h, p1/m, z2.h, z0.h
 ; CHECK-NEXT:    movprfx z3, z1
-; CHECK-NEXT:    mul z3.h, p0/m, z3.h, z1.h
+; CHECK-NEXT:    mul z3.h, p1/m, z3.h, z1.h
 ; CHECK-NEXT:    eor z0.d, z2.d, z0.d
 ; CHECK-NEXT:    eor z1.d, z3.d, z1.d
 ; CHECK-NEXT:    st1b { z0.h }, p0, [x0]
@@ -155,13 +156,14 @@ define void @scalable_v8i8(ptr noalias nocapture noundef %l0) {
 define void @scalable_v4i8(ptr noalias nocapture noundef %l0) {
 ; CHECK-LABEL: scalable_v4i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    ptrue p1.s
 ; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ld1sb { z1.s }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    movprfx z2, z0
-; CHECK-NEXT:    mul z2.s, p0/m, z2.s, z0.s
+; CHECK-NEXT:    mul z2.s, p1/m, z2.s, z0.s
 ; CHECK-NEXT:    movprfx z3, z1
-; CHECK-NEXT:    mul z3.s, p0/m, z3.s, z1.s
+; CHECK-NEXT:    mul z3.s, p1/m, z3.s, z1.s
 ; CHECK-NEXT:    eor z0.d, z2.d, z0.d
 ; CHECK-NEXT:    eor z1.d, z3.d, z1.d
 ; CHECK-NEXT:    st1b { z0.s }, p0, [x0]
@@ -188,13 +190,14 @@ define void @scalable_v4i8(ptr noalias nocapture noundef %l0) {
 define void @scalable_v2i8(ptr noalias nocapture noundef %l0) {
 ; CHECK-LABEL: scalable_v2i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    ptrue p1.d
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1sb { z1.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    movprfx z2, z0
-; CHECK-NEXT:    mul z2.d, p0/m, z2.d, z0.d
+; CHECK-NEXT:    mul z2.d, p1/m, z2.d, z0.d
 ; CHECK-NEXT:    movprfx z3, z1
-; CHECK-NEXT:    mul z3.d, p0/m, z3.d, z1.d
+; CHECK-NEXT:    mul z3.d, p1/m, z3.d, z1.d
 ; CHECK-NEXT:    eor z0.d, z2.d, z0.d
 ; CHECK-NEXT:    eor z1.d, z3.d, z1.d
 ; CHECK-NEXT:    st1b { z0.d }, p0, [x0]
@@ -221,13 +224,14 @@ define void @scalable_v2i8(ptr noalias nocapture noundef %l0) {
 define void @scalable_v4i16(ptr noalias nocapture noundef %l0) {
 ; CHECK-LABEL: scalable_v4i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    ptrue p1.s
 ; CHECK-NEXT:    ld1sh { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ld1sh { z1.s }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    movprfx z2, z0
-; CHECK-NEXT:    mul z2.s, p0/m, z2.s, z0.s
+; CHECK-NEXT:    mul z2.s, p1/m, z2.s, z0.s
 ; CHECK-NEXT:    movprfx z3, z1
-; CHECK-NEXT:    mul z3.s, p0/m, z3.s, z1.s
+; CHECK-NEXT:    mul z3.s, p1/m, z3.s, z1.s
 ; CHECK-NEXT:    eor z0.d, z2.d, z0.d
 ; CHECK-NEXT:    eor z1.d, z3.d, z1.d
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0]
@@ -254,13 +258,14 @@ define void @scalable_v4i16(ptr noalias nocapture noundef %l0) {
 define void @scalable_v2i16(ptr noalias nocapture noundef %l0) {
 ; CHECK-LABEL: scalable_v2i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    ptrue p1.d
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1sh { z1.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    movprfx z2, z0
-; CHECK-NEXT:    mul z2.d, p0/m, z2.d, z0.d
+; CHECK-NEXT:    mul z2.d, p1/m, z2.d, z0.d
 ; CHECK-NEXT:    movprfx z3, z1
-; CHECK-NEXT:    mul z3.d, p0/m, z3.d, z1.d
+; CHECK-NEXT:    mul z3.d, p1/m, z3.d, z1.d
 ; CHECK-NEXT:    eor z0.d, z2.d, z0.d
 ; CHECK-NEXT:    eor z1.d, z3.d, z1.d
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0]
@@ -287,13 +292,14 @@ define void @scalable_v2i16(ptr noalias nocapture noundef %l0) {
 define void @scalable_v2i32(ptr noalias nocapture noundef %l0) {
 ; CHECK-LABEL: scalable_v2i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    ptrue p1.d
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1sw { z1.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    movprfx z2, z0
-; CHECK-NEXT:    mul z2.d, p0/m, z2.d, z0.d
+; CHECK-NEXT:    mul z2.d, p1/m, z2.d, z0.d
 ; CHECK-NEXT:    movprfx z3, z1
-; CHECK-NEXT:    mul z3.d, p0/m, z3.d, z1.d
+; CHECK-NEXT:    mul z3.d, p1/m, z3.d, z1.d
 ; CHECK-NEXT:    eor z0.d, z2.d, z0.d
 ; CHECK-NEXT:    eor z1.d, z3.d, z1.d
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0]
@@ -350,17 +356,18 @@ define void @negative_tooshort_v16i8(ptr noalias nocapture noundef %l0) {
 define void @negative_scalable_v2i8(ptr noalias nocapture noundef %l0) {
 ; CHECK-LABEL: negative_scalable_v2i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    rdvl x8, #1
+; CHECK-NEXT:    ptrue p1.d
 ; CHECK-NEXT:    lsr x8, x8, #4
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    movprfx z1, z0
-; CHECK-NEXT:    mul z1.d, p0/m, z1.d, z0.d
+; CHECK-NEXT:    mul z1.d, p1/m, z1.d, z0.d
 ; CHECK-NEXT:    eor z0.d, z1.d, z0.d
 ; CHECK-NEXT:    st1b { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0, x8]
 ; CHECK-NEXT:    movprfx z1, z0
-; CHECK-NEXT:    mul z1.d, p0/m, z1.d, z0.d
+; CHECK-NEXT:    mul z1.d, p1/m, z1.d, z0.d
 ; CHECK-NEXT:    eor z0.d, z1.d, z0.d
 ; CHECK-NEXT:    st1b { z0.d }, p0, [x0, x8]
 ; CHECK-NEXT:    ret
@@ -385,17 +392,18 @@ define void @negative_scalable_v2i8(ptr noalias nocapture noundef %l0) {
 define void @negative_scalable_v2i16(ptr noalias nocapture noundef %l0) {
 ; CHECK-LABEL: negative_scalable_v2i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cntd x8
+; CHECK-NEXT:    ptrue p1.d
 ; CHECK-NEXT:    add x8, x0, x8
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    movprfx z1, z0
-; CHECK-NEXT:    mul z1.d, p0/m, z1.d, z0.d
+; CHECK-NEXT:    mul z1.d, p1/m, z1.d, z0.d
 ; CHECK-NEXT:    eor z0.d, z1.d, z0.d
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x8]
 ; CHECK-NEXT:    movprfx z1, z0
-; CHECK-NEXT:    mul z1.d, p0/m, z1.d, z0.d
+; CHECK-NEXT:    mul z1.d, p1/m, z1.d, z0.d
 ; CHECK-NEXT:    eor z0.d, z1.d, z0.d
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x8]
 ; CHECK-NEXT:    ret
@@ -420,17 +428,18 @@ define void @negative_scalable_v2i16(ptr noalias nocapture noundef %l0) {
 define void @negative_scalable_v2i32(ptr noalias nocapture noundef %l0) {
 ; CHECK-LABEL: negative_scalable_v2i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cntw x8
+; CHECK-NEXT:    ptrue p1.d
 ; CHECK-NEXT:    add x8, x0, x8
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    movprfx z1, z0
-; CHECK-NEXT:    mul z1.d, p0/m, z1.d, z0.d
+; CHECK-NEXT:    mul z1.d, p1/m, z1.d, z0.d
 ; CHECK-NEXT:    eor z0.d, z1.d, z0.d
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x8]
 ; CHECK-NEXT:    movprfx z1, z0
-; CHECK-NEXT:    mul z1.d, p0/m, z1.d, z0.d
+; CHECK-NEXT:    mul z1.d, p1/m, z1.d, z0.d
 ; CHECK-NEXT:    eor z0.d, z1.d, z0.d
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x8]
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-bf16-int-converts.ll b/llvm/test/CodeGen/AArch64/sve-bf16-int-converts.ll
index bdfe90c8a6bb7..7ccb17da6afca 100644
--- a/llvm/test/CodeGen/AArch64/sve-bf16-int-converts.ll
+++ b/llvm/test/CodeGen/AArch64/sve-bf16-int-converts.ll
@@ -448,8 +448,9 @@ define <vscale x 2 x bfloat> @sitofp_nxv2i1_to_nxv2bf16(<vscale x 2 x i1> %a) {
 define <vscale x 2 x bfloat> @sitofp_nxv2i8_to_nxv2bf16(<vscale x 2 x i8> %a) {
 ; CHECK-LABEL: sitofp_nxv2i8_to_nxv2bf16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtb z0.d, p0/m, z0.d
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    scvtf z0.s, p0/m, z0.d
 ; CHECK-NEXT:    bfcvt z0.h, p0/m, z0.s
 ; CHECK-NEXT:    ret
@@ -460,8 +461,9 @@ define <vscale x 2 x bfloat> @sitofp_nxv2i8_to_nxv2bf16(<vscale x 2 x i8> %a) {
 define <vscale x 2 x bfloat> @sitofp_nxv2i16_to_nxv2bf16(<vscale x 2 x i16> %a) {
 ; CHECK-LABEL: sitofp_nxv2i16_to_nxv2bf16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxth z0.d, p0/m, z0.d
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    scvtf z0.s, p0/m, z0.d
 ; CHECK-NEXT:    bfcvt z0.h, p0/m, z0.s
 ; CHECK-NEXT:    ret
@@ -510,8 +512,9 @@ define <vscale x 4 x bfloat> @sitofp_nxv4i1_to_nxv4bf16(<vscale x 4 x i1> %a) {
 define <vscale x 4 x bfloat> @sitofp_nxv4i8_to_nxv4bf16(<vscale x 4 x i8> %a) {
 ; CHECK-LABEL: sitofp_nxv4i8_to_nxv4bf16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtb z0.s, p0/m, z0.s
+; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    scvtf z0.s, p0/m, z0.s
 ; CHECK-NEXT:    bfcvt z0.h, p0/m, z0.s
 ; CHECK-NEXT:    ret
@@ -522,8 +525,9 @@ define <vscale x 4 x bfloat> @sitofp_nxv4i8_to_nxv4bf16(<vscale x 4 x i8> %a) {
 define <vscale x 4 x bfloat> @sitofp_nxv4i16_to_nxv4bf16(<vscale x 4 x i16> %a) {
 ; CHECK-LABEL: sitofp_nxv4i16_to_nxv4bf16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxth z0.s, p0/m, z0.s
+; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    scvtf z0.s, p0/m, z0.s
 ; CHECK-NEXT:    bfcvt z0.h, p0/m, z0.s
 ; CHECK-NEXT:    ret
@@ -575,7 +579,7 @@ define <vscale x 8 x bfloat> @sitofp_nxv8i1_to_nxv8bf16(<vscale x 8 x i1> %a) {
 define <vscale x 8 x bfloat> @sitofp_nxv8i8_to_nxv8bf16(<vscale x 8 x i8> %a) {
 ; CHECK-LABEL: sitofp_nxv8i8_to_nxv8bf16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtb z0.h, p0/m, z0.h
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    sunpkhi z1.s, z0.h
diff --git a/llvm/test/CodeGen/AArch64/sve-bit-counting-pred.ll b/llvm/test/CodeGen/AArch64/sve-bit-counting-pred.ll
index 0012dab58b8d3..5ccdfefbe9a1d 100644
--- a/llvm/test/CodeGen/AArch64/sve-bit-counting-pred.ll
+++ b/llvm/test/CodeGen/AArch64/sve-bit-counting-pred.ll
@@ -52,8 +52,9 @@ define <vscale x 16 x i1> @ctlz_nxv16i1(<vscale x 16 x i1> %a) {
 define <vscale x 8 x i1> @ctlz_nxv8i1(<vscale x 8 x i1> %a) {
 ; CHECK-LABEL: ctlz_nxv8i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.h
-; CHECK-NEXT:    not p0.b, p1/z, p0.b
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    ptrue p2.h
+; CHECK-NEXT:    eor p0.b, p2/z, p0.b, p1.b
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i1> @llvm.ctlz.nxv8i1(<vscale x 8 x i1> %a)
   ret <vscale x 8 x i1> %res
@@ -62,8 +63,9 @@ define <vscale x 8 x i1> @ctlz_nxv8i1(<vscale x 8 x i1> %a) {
 define <vscale x 4 x i1> @ctlz_nxv4i1(<vscale x 4 x i1> %a) {
 ; CHECK-LABEL: ctlz_nxv4i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.s
-; CHECK-NEXT:    not p0.b, p1/z, p0.b
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    ptrue p2.s
+; CHECK-NEXT:    eor p0.b, p2/z, p0.b, p1.b
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i1> @llvm.ctlz.nxv4i1(<vscale x 4 x i1> %a)
   ret <vscale x 4 x i1> %res
@@ -72,8 +74,9 @@ define <vscale x 4 x i1> @ctlz_nxv4i1(<vscale x 4 x i1> %a) {
 define <vscale x 2 x i1> @ctlz_nxv2i1(<vscale x 2 x i1> %a) {
 ; CHECK-LABEL: ctlz_nxv2i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.d
-; CHECK-NEXT:    not p0.b, p1/z, p0.b
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    ptrue p2.d
+; CHECK-NEXT:    eor p0.b, p2/z, p0.b, p1.b
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i1> @llvm.ctlz.nxv2i1(<vscale x 2 x i1> %a)
   ret <vscale x 2 x i1> %res
@@ -94,8 +97,9 @@ define <vscale x 16 x i1> @cttz_nxv16i1(<vscale x 16 x i1> %a) {
 define <vscale x 8 x i1> @cttz_nxv8i1(<vscale x 8 x i1> %a) {
 ; CHECK-LABEL: cttz_nxv8i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.h
-; CHECK-NEXT:    not p0.b, p1/z, p0.b
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    ptrue p2.h
+; CHECK-NEXT:    eor p0.b, p2/z, p0.b, p1.b
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i1> @llvm.cttz.nxv8i1(<vscale x 8 x i1> %a)
   ret <vscale x 8 x i1> %res
@@ -104,8 +108,9 @@ define <vscale x 8 x i1> @cttz_nxv8i1(<vscale x 8 x i1> %a) {
 define <vscale x 4 x i1> @cttz_nxv4i1(<vscale x 4 x i1> %a) {
 ; CHECK-LABEL: cttz_nxv4i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.s
-; CHECK-NEXT:    not p0.b, p1/z, p0.b
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    ptrue p2.s
+; CHECK-NEXT:    eor p0.b, p2/z, p0.b, p1.b
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i1> @llvm.cttz.nxv4i1(<vscale x 4 x i1> %a)
   ret <vscale x 4 x i1> %res
@@ -114,8 +119,9 @@ define <vscale x 4 x i1> @cttz_nxv4i1(<vscale x 4 x i1> %a) {
 define <vscale x 2 x i1> @cttz_nxv2i1(<vscale x 2 x i1> %a) {
 ; CHECK-LABEL: cttz_nxv2i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.d
-; CHECK-NEXT:    not p0.b, p1/z, p0.b
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    ptrue p2.d
+; CHECK-NEXT:    eor p0.b, p2/z, p0.b, p1.b
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i1> @llvm.cttz.nxv2i1(<vscale x 2 x i1> %a)
   ret <vscale x 2 x i1> %res
diff --git a/llvm/test/CodeGen/AArch64/sve-bitcast.ll b/llvm/test/CodeGen/AArch64/sve-bitcast.ll
index be80af89f270c..a44594f90315c 100644
--- a/llvm/test/CodeGen/AArch64/sve-bitcast.ll
+++ b/llvm/test/CodeGen/AArch64/sve-bitcast.ll
@@ -871,10 +871,9 @@ define <vscale x 8 x i8> @bitcast_nxv4i16_to_nxv8i8(<vscale x 4 x i16> %v) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    ptrue p1.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.s }, p0, [sp, #1, mul vl]
-; CHECK-NEXT:    ld1b { z0.h }, p1/z, [sp, #1, mul vl]
+; CHECK-NEXT:    ld1b { z0.h }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -883,10 +882,9 @@ define <vscale x 8 x i8> @bitcast_nxv4i16_to_nxv8i8(<vscale x 4 x i16> %v) #0 {
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.h
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1h { z0.s }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1b { z0.h }, p1/z, [sp, #1, mul vl]
+; CHECK_BE-NEXT:    ld1b { z0.h }, p0/z, [sp, #1, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -899,10 +897,9 @@ define <vscale x 8 x i8> @bitcast_nxv2i32_to_nxv8i8(<vscale x 2 x i32> %v) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    ptrue p1.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1w { z0.d }, p0, [sp, #1, mul vl]
-; CHECK-NEXT:    ld1b { z0.h }, p1/z, [sp, #1, mul vl]
+; CHECK-NEXT:    ld1b { z0.h }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -911,10 +908,9 @@ define <vscale x 8 x i8> @bitcast_nxv2i32_to_nxv8i8(<vscale x 2 x i32> %v) #0 {
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.h
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1w { z0.d }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1b { z0.h }, p1/z, [sp, #1, mul vl]
+; CHECK_BE-NEXT:    ld1b { z0.h }, p0/z, [sp, #1, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -943,10 +939,9 @@ define <vscale x 8 x i8> @bitcast_nxv4f16_to_nxv8i8(<vscale x 4 x half> %v) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    ptrue p1.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.s }, p0, [sp, #1, mul vl]
-; CHECK-NEXT:    ld1b { z0.h }, p1/z, [sp, #1, mul vl]
+; CHECK-NEXT:    ld1b { z0.h }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -955,10 +950,9 @@ define <vscale x 8 x i8> @bitcast_nxv4f16_to_nxv8i8(<vscale x 4 x half> %v) #0 {
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.h
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1h { z0.s }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1b { z0.h }, p1/z, [sp, #1, mul vl]
+; CHECK_BE-NEXT:    ld1b { z0.h }, p0/z, [sp, #1, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -971,10 +965,9 @@ define <vscale x 8 x i8> @bitcast_nxv2f32_to_nxv8i8(<vscale x 2 x float> %v) #0
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    ptrue p1.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1w { z0.d }, p0, [sp, #1, mul vl]
-; CHECK-NEXT:    ld1b { z0.h }, p1/z, [sp, #1, mul vl]
+; CHECK-NEXT:    ld1b { z0.h }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -983,10 +976,9 @@ define <vscale x 8 x i8> @bitcast_nxv2f32_to_nxv8i8(<vscale x 2 x float> %v) #0
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.h
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1w { z0.d }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1b { z0.h }, p1/z, [sp, #1, mul vl]
+; CHECK_BE-NEXT:    ld1b { z0.h }, p0/z, [sp, #1, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -1015,10 +1007,9 @@ define <vscale x 8 x i8> @bitcast_nxv4bf16_to_nxv8i8(<vscale x 4 x bfloat> %v) #
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    ptrue p1.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.s }, p0, [sp, #1, mul vl]
-; CHECK-NEXT:    ld1b { z0.h }, p1/z, [sp, #1, mul vl]
+; CHECK-NEXT:    ld1b { z0.h }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -1027,10 +1018,9 @@ define <vscale x 8 x i8> @bitcast_nxv4bf16_to_nxv8i8(<vscale x 4 x bfloat> %v) #
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.h
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1h { z0.s }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1b { z0.h }, p1/z, [sp, #1, mul vl]
+; CHECK_BE-NEXT:    ld1b { z0.h }, p0/z, [sp, #1, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -1047,10 +1037,9 @@ define <vscale x 4 x i16> @bitcast_nxv8i8_to_nxv4i16(<vscale x 8 x i8> %v) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1b { z0.h }, p0, [sp, #1, mul vl]
-; CHECK-NEXT:    ld1h { z0.s }, p1/z, [sp, #1, mul vl]
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -1059,10 +1048,9 @@ define <vscale x 4 x i16> @bitcast_nxv8i8_to_nxv4i16(<vscale x 8 x i8> %v) #0 {
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.s
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1b { z0.h }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1h { z0.s }, p1/z, [sp, #1, mul vl]
+; CHECK_BE-NEXT:    ld1h { z0.s }, p0/z, [sp, #1, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -1075,10 +1063,9 @@ define <vscale x 4 x i16> @bitcast_nxv2i32_to_nxv4i16(<vscale x 2 x i32> %v) #0
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1w { z0.d }, p0, [sp, #1, mul vl]
-; CHECK-NEXT:    ld1h { z0.s }, p1/z, [sp, #1, mul vl]
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -1087,10 +1074,9 @@ define <vscale x 4 x i16> @bitcast_nxv2i32_to_nxv4i16(<vscale x 2 x i32> %v) #0
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.s
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1w { z0.d }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1h { z0.s }, p1/z, [sp, #1, mul vl]
+; CHECK_BE-NEXT:    ld1h { z0.s }, p0/z, [sp, #1, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -1137,10 +1123,9 @@ define <vscale x 4 x i16> @bitcast_nxv2f32_to_nxv4i16(<vscale x 2 x float> %v) #
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1w { z0.d }, p0, [sp, #1, mul vl]
-; CHECK-NEXT:    ld1h { z0.s }, p1/z, [sp, #1, mul vl]
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -1149,10 +1134,9 @@ define <vscale x 4 x i16> @bitcast_nxv2f32_to_nxv4i16(<vscale x 2 x float> %v) #
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.s
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1w { z0.d }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1h { z0.s }, p1/z, [sp, #1, mul vl]
+; CHECK_BE-NEXT:    ld1h { z0.s }, p0/z, [sp, #1, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -1203,10 +1187,9 @@ define <vscale x 2 x i32> @bitcast_nxv8i8_to_nxv2i32(<vscale x 8 x i8> %v) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1b { z0.h }, p0, [sp, #1, mul vl]
-; CHECK-NEXT:    ld1w { z0.d }, p1/z, [sp, #1, mul vl]
+; CHECK-NEXT:    ld1w { z0.d }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -1215,10 +1198,9 @@ define <vscale x 2 x i32> @bitcast_nxv8i8_to_nxv2i32(<vscale x 8 x i8> %v) #0 {
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.d
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1b { z0.h }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1w { z0.d }, p1/z, [sp, #1, mul vl]
+; CHECK_BE-NEXT:    ld1w { z0.d }, p0/z, [sp, #1, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -1231,10 +1213,9 @@ define <vscale x 2 x i32> @bitcast_nxv4i16_to_nxv2i32(<vscale x 4 x i16> %v) #0
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.s }, p0, [sp, #1, mul vl]
-; CHECK-NEXT:    ld1w { z0.d }, p1/z, [sp, #1, mul vl]
+; CHECK-NEXT:    ld1w { z0.d }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -1243,10 +1224,9 @@ define <vscale x 2 x i32> @bitcast_nxv4i16_to_nxv2i32(<vscale x 4 x i16> %v) #0
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.d
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1h { z0.s }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1w { z0.d }, p1/z, [sp, #1, mul vl]
+; CHECK_BE-NEXT:    ld1w { z0.d }, p0/z, [sp, #1, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -1277,10 +1257,9 @@ define <vscale x 2 x i32> @bitcast_nxv4f16_to_nxv2i32(<vscale x 4 x half> %v) #0
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.s }, p0, [sp, #1, mul vl]
-; CHECK-NEXT:    ld1w { z0.d }, p1/z, [sp, #1, mul vl]
+; CHECK-NEXT:    ld1w { z0.d }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -1289,10 +1268,9 @@ define <vscale x 2 x i32> @bitcast_nxv4f16_to_nxv2i32(<vscale x 4 x half> %v) #0
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.d
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1h { z0.s }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1w { z0.d }, p1/z, [sp, #1, mul vl]
+; CHECK_BE-NEXT:    ld1w { z0.d }, p0/z, [sp, #1, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -1339,10 +1317,9 @@ define <vscale x 2 x i32> @bitcast_nxv4bf16_to_nxv2i32(<vscale x 4 x bfloat> %v)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.s }, p0, [sp, #1, mul vl]
-; CHECK-NEXT:    ld1w { z0.d }, p1/z, [sp, #1, mul vl]
+; CHECK-NEXT:    ld1w { z0.d }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -1351,10 +1328,9 @@ define <vscale x 2 x i32> @bitcast_nxv4bf16_to_nxv2i32(<vscale x 4 x bfloat> %v)
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.d
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1h { z0.s }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1w { z0.d }, p1/z, [sp, #1, mul vl]
+; CHECK_BE-NEXT:    ld1w { z0.d }, p0/z, [sp, #1, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -1493,10 +1469,9 @@ define <vscale x 4 x half> @bitcast_nxv8i8_to_nxv4f16(<vscale x 8 x i8> %v) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1b { z0.h }, p0, [sp, #1, mul vl]
-; CHECK-NEXT:    ld1h { z0.s }, p1/z, [sp, #1, mul vl]
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -1505,10 +1480,9 @@ define <vscale x 4 x half> @bitcast_nxv8i8_to_nxv4f16(<vscale x 8 x i8> %v) #0 {
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.s
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1b { z0.h }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1h { z0.s }, p1/z, [sp, #1, mul vl]
+; CHECK_BE-NEXT:    ld1h { z0.s }, p0/z, [sp, #1, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -1537,10 +1511,9 @@ define <vscale x 4 x half> @bitcast_nxv2i32_to_nxv4f16(<vscale x 2 x i32> %v) #0
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1w { z0.d }, p0, [sp, #1, mul vl]
-; CHECK-NEXT:    ld1h { z0.s }, p1/z, [sp, #1, mul vl]
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -1549,10 +1522,9 @@ define <vscale x 4 x half> @bitcast_nxv2i32_to_nxv4f16(<vscale x 2 x i32> %v) #0
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.s
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1w { z0.d }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1h { z0.s }, p1/z, [sp, #1, mul vl]
+; CHECK_BE-NEXT:    ld1h { z0.s }, p0/z, [sp, #1, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -1583,10 +1555,9 @@ define <vscale x 4 x half> @bitcast_nxv2f32_to_nxv4f16(<vscale x 2 x float> %v)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1w { z0.d }, p0, [sp, #1, mul vl]
-; CHECK-NEXT:    ld1h { z0.s }, p1/z, [sp, #1, mul vl]
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -1595,10 +1566,9 @@ define <vscale x 4 x half> @bitcast_nxv2f32_to_nxv4f16(<vscale x 2 x float> %v)
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.s
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1w { z0.d }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1h { z0.s }, p1/z, [sp, #1, mul vl]
+; CHECK_BE-NEXT:    ld1h { z0.s }, p0/z, [sp, #1, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -1645,10 +1615,9 @@ define <vscale x 2 x float> @bitcast_nxv8i8_to_nxv2f32(<vscale x 8 x i8> %v) #0
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1b { z0.h }, p0, [sp, #1, mul vl]
-; CHECK-NEXT:    ld1w { z0.d }, p1/z, [sp, #1, mul vl]
+; CHECK-NEXT:    ld1w { z0.d }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -1657,10 +1626,9 @@ define <vscale x 2 x float> @bitcast_nxv8i8_to_nxv2f32(<vscale x 8 x i8> %v) #0
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.d
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1b { z0.h }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1w { z0.d }, p1/z, [sp, #1, mul vl]
+; CHECK_BE-NEXT:    ld1w { z0.d }, p0/z, [sp, #1, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -1673,10 +1641,9 @@ define <vscale x 2 x float> @bitcast_nxv4i16_to_nxv2f32(<vscale x 4 x i16> %v) #
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.s }, p0, [sp, #1, mul vl]
-; CHECK-NEXT:    ld1w { z0.d }, p1/z, [sp, #1, mul vl]
+; CHECK-NEXT:    ld1w { z0.d }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -1685,10 +1652,9 @@ define <vscale x 2 x float> @bitcast_nxv4i16_to_nxv2f32(<vscale x 4 x i16> %v) #
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.d
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1h { z0.s }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1w { z0.d }, p1/z, [sp, #1, mul vl]
+; CHECK_BE-NEXT:    ld1w { z0.d }, p0/z, [sp, #1, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -1735,10 +1701,9 @@ define <vscale x 2 x float> @bitcast_nxv4f16_to_nxv2f32(<vscale x 4 x half> %v)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.s }, p0, [sp, #1, mul vl]
-; CHECK-NEXT:    ld1w { z0.d }, p1/z, [sp, #1, mul vl]
+; CHECK-NEXT:    ld1w { z0.d }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -1747,10 +1712,9 @@ define <vscale x 2 x float> @bitcast_nxv4f16_to_nxv2f32(<vscale x 4 x half> %v)
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.d
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1h { z0.s }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1w { z0.d }, p1/z, [sp, #1, mul vl]
+; CHECK_BE-NEXT:    ld1w { z0.d }, p0/z, [sp, #1, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -1781,10 +1745,9 @@ define <vscale x 2 x float> @bitcast_nxv4bf16_to_nxv2f32(<vscale x 4 x bfloat> %
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.s }, p0, [sp, #1, mul vl]
-; CHECK-NEXT:    ld1w { z0.d }, p1/z, [sp, #1, mul vl]
+; CHECK-NEXT:    ld1w { z0.d }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -1793,10 +1756,9 @@ define <vscale x 2 x float> @bitcast_nxv4bf16_to_nxv2f32(<vscale x 4 x bfloat> %
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.d
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1h { z0.s }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1w { z0.d }, p1/z, [sp, #1, mul vl]
+; CHECK_BE-NEXT:    ld1w { z0.d }, p0/z, [sp, #1, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -1935,10 +1897,9 @@ define <vscale x 4 x bfloat> @bitcast_nxv8i8_to_nxv4bf16(<vscale x 8 x i8> %v) #
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1b { z0.h }, p0, [sp, #1, mul vl]
-; CHECK-NEXT:    ld1h { z0.s }, p1/z, [sp, #1, mul vl]
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -1947,10 +1908,9 @@ define <vscale x 4 x bfloat> @bitcast_nxv8i8_to_nxv4bf16(<vscale x 8 x i8> %v) #
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.s
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1b { z0.h }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1h { z0.s }, p1/z, [sp, #1, mul vl]
+; CHECK_BE-NEXT:    ld1h { z0.s }, p0/z, [sp, #1, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -1979,10 +1939,9 @@ define <vscale x 4 x bfloat> @bitcast_nxv2i32_to_nxv4bf16(<vscale x 2 x i32> %v)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1w { z0.d }, p0, [sp, #1, mul vl]
-; CHECK-NEXT:    ld1h { z0.s }, p1/z, [sp, #1, mul vl]
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -1991,10 +1950,9 @@ define <vscale x 4 x bfloat> @bitcast_nxv2i32_to_nxv4bf16(<vscale x 2 x i32> %v)
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.s
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1w { z0.d }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1h { z0.s }, p1/z, [sp, #1, mul vl]
+; CHECK_BE-NEXT:    ld1h { z0.s }, p0/z, [sp, #1, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -2037,10 +1995,9 @@ define <vscale x 4 x bfloat> @bitcast_nxv2f32_to_nxv4bf16(<vscale x 2 x float> %
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1w { z0.d }, p0, [sp, #1, mul vl]
-; CHECK-NEXT:    ld1h { z0.s }, p1/z, [sp, #1, mul vl]
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -2049,10 +2006,9 @@ define <vscale x 4 x bfloat> @bitcast_nxv2f32_to_nxv4bf16(<vscale x 2 x float> %
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.s
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1w { z0.d }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1h { z0.s }, p1/z, [sp, #1, mul vl]
+; CHECK_BE-NEXT:    ld1h { z0.s }, p0/z, [sp, #1, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -2087,10 +2043,9 @@ define <vscale x 4 x i8> @bitcast_nxv2i16_to_nxv4i8(<vscale x 2 x i16> %v) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.d }, p0, [sp, #3, mul vl]
-; CHECK-NEXT:    ld1b { z0.s }, p1/z, [sp, #3, mul vl]
+; CHECK-NEXT:    ld1b { z0.s }, p0/z, [sp, #3, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -2099,10 +2054,9 @@ define <vscale x 4 x i8> @bitcast_nxv2i16_to_nxv4i8(<vscale x 2 x i16> %v) #0 {
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.s
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1h { z0.d }, p0, [sp, #3, mul vl]
-; CHECK_BE-NEXT:    ld1b { z0.s }, p1/z, [sp, #3, mul vl]
+; CHECK_BE-NEXT:    ld1b { z0.s }, p0/z, [sp, #3, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -2133,10 +2087,9 @@ define <vscale x 4 x i8> @bitcast_nxv2f16_to_nxv4i8(<vscale x 2 x half> %v) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.d }, p0, [sp, #3, mul vl]
-; CHECK-NEXT:    ld1b { z0.s }, p1/z, [sp, #3, mul vl]
+; CHECK-NEXT:    ld1b { z0.s }, p0/z, [sp, #3, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -2145,10 +2098,9 @@ define <vscale x 4 x i8> @bitcast_nxv2f16_to_nxv4i8(<vscale x 2 x half> %v) #0 {
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.s
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1h { z0.d }, p0, [sp, #3, mul vl]
-; CHECK_BE-NEXT:    ld1b { z0.s }, p1/z, [sp, #3, mul vl]
+; CHECK_BE-NEXT:    ld1b { z0.s }, p0/z, [sp, #3, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -2163,10 +2115,9 @@ define <vscale x 4 x i8> @bitcast_nxv2bf16_to_nxv4i8(<vscale x 2 x bfloat> %v) #
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.d }, p0, [sp, #3, mul vl]
-; CHECK-NEXT:    ld1b { z0.s }, p1/z, [sp, #3, mul vl]
+; CHECK-NEXT:    ld1b { z0.s }, p0/z, [sp, #3, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -2175,10 +2126,9 @@ define <vscale x 4 x i8> @bitcast_nxv2bf16_to_nxv4i8(<vscale x 2 x bfloat> %v) #
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.s
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1h { z0.d }, p0, [sp, #3, mul vl]
-; CHECK_BE-NEXT:    ld1b { z0.s }, p1/z, [sp, #3, mul vl]
+; CHECK_BE-NEXT:    ld1b { z0.s }, p0/z, [sp, #3, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -2195,10 +2145,9 @@ define <vscale x 2 x i16> @bitcast_nxv4i8_to_nxv2i16(<vscale x 4 x i8> %v) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1b { z0.s }, p0, [sp, #3, mul vl]
-; CHECK-NEXT:    ld1h { z0.d }, p1/z, [sp, #3, mul vl]
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [sp, #3, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -2207,10 +2156,9 @@ define <vscale x 2 x i16> @bitcast_nxv4i8_to_nxv2i16(<vscale x 4 x i8> %v) #0 {
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.d
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1b { z0.s }, p0, [sp, #3, mul vl]
-; CHECK_BE-NEXT:    ld1h { z0.d }, p1/z, [sp, #3, mul vl]
+; CHECK_BE-NEXT:    ld1h { z0.d }, p0/z, [sp, #3, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -2365,10 +2313,9 @@ define <vscale x 2 x half> @bitcast_nxv4i8_to_nxv2f16(<vscale x 4 x i8> %v) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1b { z0.s }, p0, [sp, #3, mul vl]
-; CHECK-NEXT:    ld1h { z0.d }, p1/z, [sp, #3, mul vl]
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [sp, #3, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -2377,10 +2324,9 @@ define <vscale x 2 x half> @bitcast_nxv4i8_to_nxv2f16(<vscale x 4 x i8> %v) #0 {
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.d
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1b { z0.s }, p0, [sp, #3, mul vl]
-; CHECK_BE-NEXT:    ld1h { z0.d }, p1/z, [sp, #3, mul vl]
+; CHECK_BE-NEXT:    ld1h { z0.d }, p0/z, [sp, #3, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -2438,10 +2384,9 @@ define <vscale x 2 x bfloat> @bitcast_nxv4i8_to_nxv2bf16(<vscale x 4 x i8> %v) #
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1b { z0.s }, p0, [sp, #3, mul vl]
-; CHECK-NEXT:    ld1h { z0.d }, p1/z, [sp, #3, mul vl]
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [sp, #3, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -2450,10 +2395,9 @@ define <vscale x 2 x bfloat> @bitcast_nxv4i8_to_nxv2bf16(<vscale x 4 x i8> %v) #
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.d
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1b { z0.s }, p0, [sp, #3, mul vl]
-; CHECK_BE-NEXT:    ld1h { z0.d }, p1/z, [sp, #3, mul vl]
+; CHECK_BE-NEXT:    ld1h { z0.d }, p0/z, [sp, #3, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
@@ -2614,10 +2558,10 @@ define <vscale x 2 x float> @bitcast_short_half_to_float(<vscale x 4 x half> %v)
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
 ; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    ptrue p1.d
 ; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.s }, p0, [sp, #1, mul vl]
-; CHECK-NEXT:    ld1w { z0.d }, p1/z, [sp, #1, mul vl]
+; CHECK-NEXT:    ld1w { z0.d }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -2627,10 +2571,10 @@ define <vscale x 2 x float> @bitcast_short_half_to_float(<vscale x 4 x half> %v)
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.d
 ; CHECK_BE-NEXT:    fadd z0.h, p0/m, z0.h, z0.h
+; CHECK_BE-NEXT:    ptrue p0.b
 ; CHECK_BE-NEXT:    st1h { z0.s }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1w { z0.d }, p1/z, [sp, #1, mul vl]
+; CHECK_BE-NEXT:    ld1w { z0.d }, p0/z, [sp, #1, mul vl]
 ; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-dead-masked-store.ll b/llvm/test/CodeGen/AArch64/sve-dead-masked-store.ll
index 766595112e6b2..d9aaec57ac188 100644
--- a/llvm/test/CodeGen/AArch64/sve-dead-masked-store.ll
+++ b/llvm/test/CodeGen/AArch64/sve-dead-masked-store.ll
@@ -34,7 +34,7 @@ define void @dead_masked_store_alltrue_bigger(<vscale x 4 x i16> %val, <vscale x
 define void @dead_masked_store_alltrue_smaller(<vscale x 4 x i32> %val, <vscale x 4 x i16> %val1, ptr %a, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: dead_masked_store_alltrue_smaller:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    st1h { z1.s }, p1, [x0]
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-extload-icmp.ll b/llvm/test/CodeGen/AArch64/sve-extload-icmp.ll
index d5f74e6b9e0c4..091aa4f78fbcb 100644
--- a/llvm/test/CodeGen/AArch64/sve-extload-icmp.ll
+++ b/llvm/test/CodeGen/AArch64/sve-extload-icmp.ll
@@ -6,8 +6,9 @@ target triple = "aarch64-unknown-linux-gnu"
 define <vscale x 8 x i8> @extload_icmp_nxv8i8(ptr %in) #0 {
 ; CHECK-LABEL: extload_icmp_nxv8i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    cnot z0.h, p0/m, z0.h
 ; CHECK-NEXT:    ret
   %ld = load <vscale x 8 x i8>, ptr %in
@@ -32,8 +33,9 @@ define <vscale x 16 x i8> @extload_icmp_nxv16i8(ptr %in) #0 {
 define <vscale x 4 x i16> @extload_icmp_nxv4i16(ptr %in) #0 {
 ; CHECK-LABEL: extload_icmp_nxv4i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    cnot z0.s, p0/m, z0.s
 ; CHECK-NEXT:    ret
   %ld = load <vscale x 4 x i16>, ptr %in
@@ -58,8 +60,9 @@ define <vscale x 8 x i16> @extload_icmp_nxv8i16(ptr %in) #0 {
 define <vscale x 2 x i32> @extload_icmp_nxv2i32(ptr %in) #0 {
 ; CHECK-LABEL: extload_icmp_nxv2i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    cnot z0.d, p0/m, z0.d
 ; CHECK-NEXT:    ret
   %ld = load <vscale x 2 x i32>, ptr %in
diff --git a/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll
index 965af2a745afd..fbfa9c2751705 100644
--- a/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll
@@ -343,7 +343,7 @@ define void @extract_fixed_v4i64_nxv2i64(<vscale x 2 x i64> %vec, ptr %p) nounwi
 define <4 x i32> @typesize_regression_test_v4i32(ptr %addr, i64 %idx) {
 ; CHECK-LABEL: typesize_regression_test_v4i32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll
index cbede1bf8bb74..ec08749960050 100644
--- a/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll
@@ -1055,7 +1055,7 @@ define <vscale x 4 x i32> @extract_nxv4i32_nxv8i32_splat_const() {
 define <vscale x 2 x i1> @extract_nxv2i1_nxv16i1_all_ones() {
 ; CHECK-LABEL: extract_nxv2i1_nxv16i1_all_ones:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ret
   %ext = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> splat(i1 true), i64 0)
   ret <vscale x 2 x i1> %ext
diff --git a/llvm/test/CodeGen/AArch64/sve-fcmp.ll b/llvm/test/CodeGen/AArch64/sve-fcmp.ll
index 607cc92eb4505..a6f877c69bdad 100644
--- a/llvm/test/CodeGen/AArch64/sve-fcmp.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fcmp.ll
@@ -61,8 +61,9 @@ define <vscale x 4 x i1> @ord(<vscale x 4 x float> %x, <vscale x 4 x float> %x2)
 ; CHECK-LABEL: ord:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmuo p1.s, p0/z, z0.s, z1.s
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    ret
   %y = fcmp ord <vscale x 4 x float> %x, %x2
   ret <vscale x 4 x i1> %y
@@ -82,8 +83,9 @@ define <vscale x 4 x i1> @ugt(<vscale x 4 x float> %x, <vscale x 4 x float> %x2)
 ; CHECK-LABEL: ugt:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmge p1.s, p0/z, z1.s, z0.s
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    ret
   %y = fcmp ugt <vscale x 4 x float> %x, %x2
   ret <vscale x 4 x i1> %y
@@ -92,8 +94,9 @@ define <vscale x 4 x i1> @uge(<vscale x 4 x float> %x, <vscale x 4 x float> %x2)
 ; CHECK-LABEL: uge:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmgt p1.s, p0/z, z1.s, z0.s
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    ret
   %y = fcmp uge <vscale x 4 x float> %x, %x2
   ret <vscale x 4 x i1> %y
@@ -102,8 +105,9 @@ define <vscale x 4 x i1> @ult(<vscale x 4 x float> %x, <vscale x 4 x float> %x2)
 ; CHECK-LABEL: ult:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmge p1.s, p0/z, z0.s, z1.s
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    ret
   %y = fcmp ult <vscale x 4 x float> %x, %x2
   ret <vscale x 4 x i1> %y
@@ -112,8 +116,9 @@ define <vscale x 4 x i1> @ule(<vscale x 4 x float> %x, <vscale x 4 x float> %x2)
 ; CHECK-LABEL: ule:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmgt p1.s, p0/z, z0.s, z1.s
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    ret
   %y = fcmp ule <vscale x 4 x float> %x, %x2
   ret <vscale x 4 x i1> %y
@@ -387,8 +392,9 @@ define <vscale x 4 x i1> @ugt_zero(<vscale x 4 x float> %x) {
 ; CHECK-LABEL: ugt_zero:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z0.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    ret
   %y = fcmp ugt <vscale x 4 x float> %x, zeroinitializer
   ret <vscale x 4 x i1> %y
@@ -397,8 +403,9 @@ define <vscale x 4 x i1> @uge_zero(<vscale x 4 x float> %x) {
 ; CHECK-LABEL: uge_zero:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmlt p1.s, p0/z, z0.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    ret
   %y = fcmp uge <vscale x 4 x float> %x, zeroinitializer
   ret <vscale x 4 x i1> %y
@@ -407,8 +414,9 @@ define <vscale x 4 x i1> @ult_zero(<vscale x 4 x float> %x) {
 ; CHECK-LABEL: ult_zero:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmge p1.s, p0/z, z0.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    ret
   %y = fcmp ult <vscale x 4 x float> %x, zeroinitializer
   ret <vscale x 4 x i1> %y
@@ -417,8 +425,9 @@ define <vscale x 4 x i1> @ule_zero(<vscale x 4 x float> %x) {
 ; CHECK-LABEL: ule_zero:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmgt p1.s, p0/z, z0.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    ret
   %y = fcmp ule <vscale x 4 x float> %x, zeroinitializer
   ret <vscale x 4 x i1> %y
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-insert-vector-elt.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-insert-vector-elt.ll
index 6f4d257039bca..43804e2adc540 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-insert-vector-elt.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-insert-vector-elt.ll
@@ -38,7 +38,7 @@ define void @insertelement_v16f16(ptr %a, ptr %b) vscale_range(2,0) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #15 // =0xf
 ; CHECK-NEXT:    index z0.h, #0, #1
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z1.h, w8
 ; CHECK-NEXT:    ptrue p1.h, vl16
 ; CHECK-NEXT:    cmpeq p0.h, p0/z, z0.h, z1.h
@@ -58,7 +58,7 @@ define void @insertelement_v32f16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256:       // %bb.0:
 ; VBITS_GE_256-NEXT:    mov w8, #15 // =0xf
 ; VBITS_GE_256-NEXT:    index z0.h, #0, #1
-; VBITS_GE_256-NEXT:    ptrue p0.h
+; VBITS_GE_256-NEXT:    ptrue p0.b
 ; VBITS_GE_256-NEXT:    mov z1.h, w8
 ; VBITS_GE_256-NEXT:    ptrue p1.h, vl16
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
@@ -75,7 +75,7 @@ define void @insertelement_v32f16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_512:       // %bb.0:
 ; VBITS_GE_512-NEXT:    mov w8, #31 // =0x1f
 ; VBITS_GE_512-NEXT:    index z0.h, #0, #1
-; VBITS_GE_512-NEXT:    ptrue p0.h
+; VBITS_GE_512-NEXT:    ptrue p0.b
 ; VBITS_GE_512-NEXT:    mov z1.h, w8
 ; VBITS_GE_512-NEXT:    ptrue p1.h, vl32
 ; VBITS_GE_512-NEXT:    cmpeq p0.h, p0/z, z0.h, z1.h
@@ -95,7 +95,7 @@ define void @insertelement_v64f16(ptr %a, ptr %b) vscale_range(8,0) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #63 // =0x3f
 ; CHECK-NEXT:    index z0.h, #0, #1
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z1.h, w8
 ; CHECK-NEXT:    ptrue p1.h, vl64
 ; CHECK-NEXT:    cmpeq p0.h, p0/z, z0.h, z1.h
@@ -115,7 +115,7 @@ define void @insertelement_v128f16(ptr %a, ptr %b) vscale_range(16,0) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #127 // =0x7f
 ; CHECK-NEXT:    index z0.h, #0, #1
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z1.h, w8
 ; CHECK-NEXT:    ptrue p1.h, vl128
 ; CHECK-NEXT:    cmpeq p0.h, p0/z, z0.h, z1.h
@@ -159,7 +159,7 @@ define void @insertelement_v8f32(ptr %a, ptr %b) vscale_range(2,0) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #7 // =0x7
 ; CHECK-NEXT:    index z0.s, #0, #1
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    ptrue p1.s, vl8
 ; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, z1.s
@@ -179,7 +179,7 @@ define void @insertelement_v16f32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256:       // %bb.0:
 ; VBITS_GE_256-NEXT:    mov w8, #7 // =0x7
 ; VBITS_GE_256-NEXT:    index z0.s, #0, #1
-; VBITS_GE_256-NEXT:    ptrue p0.s
+; VBITS_GE_256-NEXT:    ptrue p0.b
 ; VBITS_GE_256-NEXT:    mov z1.s, w8
 ; VBITS_GE_256-NEXT:    ptrue p1.s, vl8
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
@@ -196,7 +196,7 @@ define void @insertelement_v16f32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_512:       // %bb.0:
 ; VBITS_GE_512-NEXT:    mov w8, #15 // =0xf
 ; VBITS_GE_512-NEXT:    index z0.s, #0, #1
-; VBITS_GE_512-NEXT:    ptrue p0.s
+; VBITS_GE_512-NEXT:    ptrue p0.b
 ; VBITS_GE_512-NEXT:    mov z1.s, w8
 ; VBITS_GE_512-NEXT:    ptrue p1.s, vl16
 ; VBITS_GE_512-NEXT:    cmpeq p0.s, p0/z, z0.s, z1.s
@@ -216,7 +216,7 @@ define void @insertelement_v32f32(ptr %a, ptr %b) vscale_range(8,0) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #31 // =0x1f
 ; CHECK-NEXT:    index z0.s, #0, #1
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    ptrue p1.s, vl32
 ; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, z1.s
@@ -236,7 +236,7 @@ define void @insertelement_v64f32(ptr %a, ptr %b) vscale_range(16,0) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #63 // =0x3f
 ; CHECK-NEXT:    index z0.s, #0, #1
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    ptrue p1.s, vl64
 ; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, z1.s
@@ -278,7 +278,7 @@ define void @insertelement_v4f64(ptr %a, ptr %b) vscale_range(2,0) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #3 // =0x3
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z1.d, x8
 ; CHECK-NEXT:    ptrue p1.d, vl4
 ; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, z1.d
@@ -298,7 +298,7 @@ define void @insertelement_v8f64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256:       // %bb.0:
 ; VBITS_GE_256-NEXT:    mov w8, #3 // =0x3
 ; VBITS_GE_256-NEXT:    index z0.d, #0, #1
-; VBITS_GE_256-NEXT:    ptrue p0.d
+; VBITS_GE_256-NEXT:    ptrue p0.b
 ; VBITS_GE_256-NEXT:    mov z1.d, x8
 ; VBITS_GE_256-NEXT:    ptrue p1.d, vl4
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
@@ -315,7 +315,7 @@ define void @insertelement_v8f64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_512:       // %bb.0:
 ; VBITS_GE_512-NEXT:    mov w8, #7 // =0x7
 ; VBITS_GE_512-NEXT:    index z0.d, #0, #1
-; VBITS_GE_512-NEXT:    ptrue p0.d
+; VBITS_GE_512-NEXT:    ptrue p0.b
 ; VBITS_GE_512-NEXT:    mov z1.d, x8
 ; VBITS_GE_512-NEXT:    ptrue p1.d, vl8
 ; VBITS_GE_512-NEXT:    cmpeq p0.d, p0/z, z0.d, z1.d
@@ -335,7 +335,7 @@ define void @insertelement_v16f64(ptr %a, ptr %b) vscale_range(8,0) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #15 // =0xf
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z1.d, x8
 ; CHECK-NEXT:    ptrue p1.d, vl16
 ; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, z1.d
@@ -355,7 +355,7 @@ define void @insertelement_v32f64(ptr %a, ptr %b) vscale_range(16,0) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #31 // =0x1f
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z1.d, x8
 ; CHECK-NEXT:    ptrue p1.d, vl32
 ; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, z1.d
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
index d7b67d73a671e..011fa03fbda7a 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
@@ -54,7 +54,7 @@ define void @nxv16i8(ptr %ldptr, ptr %stptr) {
 define void @nxv8i16(ptr %ldptr, ptr %stptr) {
 ; CHECK-LABEL: nxv8i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov x8, #128 // =0x80
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x1, x8, lsl #1]
@@ -99,7 +99,7 @@ define void @nxv8i16(ptr %ldptr, ptr %stptr) {
 define void @nxv4i32(ptr %ldptr, ptr %stptr) {
 ; CHECK-LABEL: nxv4i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov x8, #64 // =0x40
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x1, x8, lsl #2]
@@ -144,7 +144,7 @@ define void @nxv4i32(ptr %ldptr, ptr %stptr) {
 define void @nxv2i64(ptr %ldptr, ptr %stptr) {
 ; CHECK-LABEL: nxv2i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov x8, #32 // =0x20
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x1, x8, lsl #3]
@@ -189,7 +189,7 @@ define void @nxv2i64(ptr %ldptr, ptr %stptr) {
 define void @nxv4i8(ptr %ldptr, ptr %stptr) {
 ; CHECK-LABEL: nxv4i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov w8, #32 // =0x20
 ; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0, x8]
 ; CHECK-NEXT:    st1b { z0.s }, p0, [x1, x8]
@@ -197,7 +197,7 @@ define void @nxv4i8(ptr %ldptr, ptr %stptr) {
 ;
 ; CHECK-128-LABEL: nxv4i8:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.s
+; CHECK-128-NEXT:    ptrue p0.b
 ; CHECK-128-NEXT:    mov w8, #32 // =0x20
 ; CHECK-128-NEXT:    ld1b { z0.s }, p0/z, [x0, x8]
 ; CHECK-128-NEXT:    st1b { z0.s }, p0, [x1, x8]
@@ -205,28 +205,28 @@ define void @nxv4i8(ptr %ldptr, ptr %stptr) {
 ;
 ; CHECK-256-LABEL: nxv4i8:
 ; CHECK-256:       // %bb.0:
-; CHECK-256-NEXT:    ptrue p0.s
+; CHECK-256-NEXT:    ptrue p0.b
 ; CHECK-256-NEXT:    ld1b { z0.s }, p0/z, [x0, #4, mul vl]
 ; CHECK-256-NEXT:    st1b { z0.s }, p0, [x1, #4, mul vl]
 ; CHECK-256-NEXT:    ret
 ;
 ; CHECK-512-LABEL: nxv4i8:
 ; CHECK-512:       // %bb.0:
-; CHECK-512-NEXT:    ptrue p0.s
+; CHECK-512-NEXT:    ptrue p0.b
 ; CHECK-512-NEXT:    ld1b { z0.s }, p0/z, [x0, #2, mul vl]
 ; CHECK-512-NEXT:    st1b { z0.s }, p0, [x1, #2, mul vl]
 ; CHECK-512-NEXT:    ret
 ;
 ; CHECK-1024-LABEL: nxv4i8:
 ; CHECK-1024:       // %bb.0:
-; CHECK-1024-NEXT:    ptrue p0.s
+; CHECK-1024-NEXT:    ptrue p0.b
 ; CHECK-1024-NEXT:    ld1b { z0.s }, p0/z, [x0, #1, mul vl]
 ; CHECK-1024-NEXT:    st1b { z0.s }, p0, [x1, #1, mul vl]
 ; CHECK-1024-NEXT:    ret
 ;
 ; CHECK-2048-LABEL: nxv4i8:
 ; CHECK-2048:       // %bb.0:
-; CHECK-2048-NEXT:    ptrue p0.s
+; CHECK-2048-NEXT:    ptrue p0.b
 ; CHECK-2048-NEXT:    mov w8, #32 // =0x20
 ; CHECK-2048-NEXT:    ld1b { z0.s }, p0/z, [x0, x8]
 ; CHECK-2048-NEXT:    st1b { z0.s }, p0, [x1, x8]
@@ -241,7 +241,7 @@ define void @nxv4i8(ptr %ldptr, ptr %stptr) {
 define void @nxv2f32(ptr %ldptr, ptr %stptr) {
 ; CHECK-LABEL: nxv2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov x8, #16 // =0x10
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, x8, lsl #2]
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x1, x8, lsl #2]
@@ -249,7 +249,7 @@ define void @nxv2f32(ptr %ldptr, ptr %stptr) {
 ;
 ; CHECK-128-LABEL: nxv2f32:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.d
+; CHECK-128-NEXT:    ptrue p0.b
 ; CHECK-128-NEXT:    mov x8, #16 // =0x10
 ; CHECK-128-NEXT:    ld1w { z0.d }, p0/z, [x0, x8, lsl #2]
 ; CHECK-128-NEXT:    st1w { z0.d }, p0, [x1, x8, lsl #2]
@@ -257,28 +257,28 @@ define void @nxv2f32(ptr %ldptr, ptr %stptr) {
 ;
 ; CHECK-256-LABEL: nxv2f32:
 ; CHECK-256:       // %bb.0:
-; CHECK-256-NEXT:    ptrue p0.d
+; CHECK-256-NEXT:    ptrue p0.b
 ; CHECK-256-NEXT:    ld1w { z0.d }, p0/z, [x0, #4, mul vl]
 ; CHECK-256-NEXT:    st1w { z0.d }, p0, [x1, #4, mul vl]
 ; CHECK-256-NEXT:    ret
 ;
 ; CHECK-512-LABEL: nxv2f32:
 ; CHECK-512:       // %bb.0:
-; CHECK-512-NEXT:    ptrue p0.d
+; CHECK-512-NEXT:    ptrue p0.b
 ; CHECK-512-NEXT:    ld1w { z0.d }, p0/z, [x0, #2, mul vl]
 ; CHECK-512-NEXT:    st1w { z0.d }, p0, [x1, #2, mul vl]
 ; CHECK-512-NEXT:    ret
 ;
 ; CHECK-1024-LABEL: nxv2f32:
 ; CHECK-1024:       // %bb.0:
-; CHECK-1024-NEXT:    ptrue p0.d
+; CHECK-1024-NEXT:    ptrue p0.b
 ; CHECK-1024-NEXT:    ld1w { z0.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-1024-NEXT:    st1w { z0.d }, p0, [x1, #1, mul vl]
 ; CHECK-1024-NEXT:    ret
 ;
 ; CHECK-2048-LABEL: nxv2f32:
 ; CHECK-2048:       // %bb.0:
-; CHECK-2048-NEXT:    ptrue p0.d
+; CHECK-2048-NEXT:    ptrue p0.b
 ; CHECK-2048-NEXT:    mov x8, #16 // =0x10
 ; CHECK-2048-NEXT:    ld1w { z0.d }, p0/z, [x0, x8, lsl #2]
 ; CHECK-2048-NEXT:    st1w { z0.d }, p0, [x1, x8, lsl #2]
@@ -293,7 +293,7 @@ define void @nxv2f32(ptr %ldptr, ptr %stptr) {
 define void @nxv4f64(ptr %ldptr, ptr %stptr) {
 ; CHECK-LABEL: nxv4f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov x8, #16 // =0x10
 ; CHECK-NEXT:    add x9, x0, #128
 ; CHECK-NEXT:    ldr z1, [x9, #1, mul vl]
@@ -345,7 +345,7 @@ define void @nxv4f64(ptr %ldptr, ptr %stptr) {
 ;
 ; CHECK-2048-LABEL: nxv4f64:
 ; CHECK-2048:       // %bb.0:
-; CHECK-2048-NEXT:    ptrue p0.d
+; CHECK-2048-NEXT:    ptrue p0.b
 ; CHECK-2048-NEXT:    mov x8, #16 // =0x10
 ; CHECK-2048-NEXT:    add x9, x0, #128
 ; CHECK-2048-NEXT:    ldr z1, [x9, #1, mul vl]
diff --git a/llvm/test/CodeGen/AArch64/sve-fold-loadext-and-splat-vector.ll b/llvm/test/CodeGen/AArch64/sve-fold-loadext-and-splat-vector.ll
index 42ec96ad60498..949513ffe3112 100644
--- a/llvm/test/CodeGen/AArch64/sve-fold-loadext-and-splat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fold-loadext-and-splat-vector.ll
@@ -8,7 +8,7 @@ target triple = "aarch64-unknown-linux-gnu"
 define <vscale x 2 x i64> @fold_loadext_and(ptr %ptr, i32 %needle, <vscale x 2 x i64> %b) #0 {
 ; CHECK-LABEL: fold_loadext_and:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %load = load <vscale x 2 x i32>, ptr %ptr, align 4
@@ -23,9 +23,10 @@ define <vscale x 2 x i64> @fold_loadext_and(ptr %ptr, i32 %needle, <vscale x 2 x
 define <vscale x 2 x i1> @fold_loadext_and_legalize(ptr %ptr, <vscale x 2 x i32> %a) #0 {
 ; CHECK-LABEL: fold_loadext_and_legalize:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    and z0.d, z0.d, #0xffffffff
 ; CHECK-NEXT:    ld1w { z1.d }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    cmpeq p0.d, p0/z, z1.d, z0.d
 ; CHECK-NEXT:    ret
   %load = load <vscale x 2 x i32>, ptr %ptr
diff --git a/llvm/test/CodeGen/AArch64/sve-fold-vscale.ll b/llvm/test/CodeGen/AArch64/sve-fold-vscale.ll
index 7524f8d985f71..e4348417a9ca7 100644
--- a/llvm/test/CodeGen/AArch64/sve-fold-vscale.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fold-vscale.ll
@@ -7,7 +7,7 @@
 define void @ld1w_reg_loop(ptr %addr) {
 ; CHECK-LABEL: ld1w_reg_loop:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov x8, xzr
 ; CHECK-NEXT:    cntw x9
 ; CHECK-NEXT:  .LBB0_1: // %vector.body
@@ -37,7 +37,7 @@ for.cond.cleanup:
 define void @st1w_reg_loop(ptr %addr, <vscale x 4 x i32> %val) {
 ; CHECK-LABEL: st1w_reg_loop:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov x8, xzr
 ; CHECK-NEXT:    cntw x9
 ; CHECK-NEXT:  .LBB1_1: // %vector.body
diff --git a/llvm/test/CodeGen/AArch64/sve-forward-st-to-ld.ll b/llvm/test/CodeGen/AArch64/sve-forward-st-to-ld.ll
index 8620c9a34b5d6..b4d2e62c89edd 100644
--- a/llvm/test/CodeGen/AArch64/sve-forward-st-to-ld.ll
+++ b/llvm/test/CodeGen/AArch64/sve-forward-st-to-ld.ll
@@ -30,7 +30,7 @@ entry:
 define <vscale x 2 x i64> @sti32ldi32ext(ptr nocapture %P, <vscale x 2 x i64> %v) {
 ; CHECK-LABEL: sti32ldi32ext:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
 ; CHECK-NEXT:    st1w { z1.d }, p0, [x0]
diff --git a/llvm/test/CodeGen/AArch64/sve-fp-int-min-max.ll b/llvm/test/CodeGen/AArch64/sve-fp-int-min-max.ll
index afe13851f0b95..4cf1db2d69e6a 100644
--- a/llvm/test/CodeGen/AArch64/sve-fp-int-min-max.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fp-int-min-max.ll
@@ -4,15 +4,16 @@
 define i64 @scalable_int_min_max(ptr %arg, ptr %arg1, <vscale x 2 x ptr> %i37, <vscale x 2 x i64> %i42, <vscale x 2 x i64> %i54) {
 ; CHECK-LABEL: scalable_int_min_max:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov w8, #3745 // =0xea1
 ; CHECK-NEXT:    movk w8, #16618, lsl #16
 ; CHECK-NEXT:    ld1w { z3.d }, p0/z, [x0]
 ; CHECK-NEXT:    mov z4.s, w8
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov w8, #57344 // =0xe000
 ; CHECK-NEXT:    movk w8, #17535, lsl #16
-; CHECK-NEXT:    mov z5.s, w8
 ; CHECK-NEXT:    fmul z4.s, p0/m, z4.s, z3.s
+; CHECK-NEXT:    mov z5.s, w8
 ; CHECK-NEXT:    fadd z4.s, p0/m, z4.s, z5.s
 ; CHECK-NEXT:    mov z5.d, #1023 // =0x3ff
 ; CHECK-NEXT:    fcvtzs z4.d, p0/m, z4.s
diff --git a/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll b/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll
index 4b93900c7d272..1f9297b389396 100644
--- a/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll
@@ -52,7 +52,7 @@ define half @fadda_nxv6f16(<vscale x 6 x half> %v, half %s) {
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    mov w8, #32768 // =0x8000
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    str z0, [sp]
 ; CHECK-NEXT:    mov z2.h, w8
 ; CHECK-NEXT:    fmov s0, s1
@@ -79,7 +79,7 @@ define half @fadda_nxv10f16(<vscale x 10 x half> %v, half %s) {
 ; CHECK-NEXT:    // kill: def $h2 killed $h2 def $z2
 ; CHECK-NEXT:    mov w8, #32768 // =0x8000
 ; CHECK-NEXT:    str z1, [sp]
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    fadda h2, p0, h2, z0.h
 ; CHECK-NEXT:    mov z0.h, w8
 ; CHECK-NEXT:    addvl x8, sp, #1
diff --git a/llvm/test/CodeGen/AArch64/sve-fpext-load.ll b/llvm/test/CodeGen/AArch64/sve-fpext-load.ll
index 9812c3775d416..31af0c694461e 100644
--- a/llvm/test/CodeGen/AArch64/sve-fpext-load.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fpext-load.ll
@@ -5,8 +5,9 @@
 define <vscale x 2 x double> @ext2_f16_f64(ptr %ptr, i64 %index) {
 ; CHECK-LABEL: ext2_f16_f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fcvt z0.d, p0/m, z0.h
 ; CHECK-NEXT:    ret
   %load = load <vscale x 2 x half>, ptr %ptr, align 4
@@ -18,7 +19,7 @@ define <vscale x 2 x double> @ext2_f16_f64(ptr %ptr, i64 %index) {
 define <vscale x 4 x double> @ext4_f16_f64(ptr %ptr, i64 %index) {
 ; CHECK-LABEL: ext4_f16_f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    uunpklo z1.d, z0.s
@@ -62,8 +63,9 @@ define <vscale x 8 x double> @ext8_f16_f64(ptr %ptr, i64 %index) {
 define <vscale x 2 x double> @ext2_f32_f64(ptr %ptr, i64 %index) {
 ; CHECK-LABEL: ext2_f32_f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fcvt z0.d, p0/m, z0.s
 ; CHECK-NEXT:    ret
   %load = load <vscale x 2 x float>, ptr %ptr, align 4
diff --git a/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll b/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll
index 584c29ebcfc04..79649d81a94fa 100644
--- a/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll
@@ -19,13 +19,14 @@ define <vscale x 2 x i32> @test_signed_v2f32_v2i32(<vscale x 2 x float> %f) {
 ; CHECK-NEXT:    mov z2.d, #0xffffffff80000000
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    mov w8, #1325400063 // =0x4effffff
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    mov z3.s, w8
 ; CHECK-NEXT:    fcmge p1.s, p0/z, z0.s, z1.s
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z0.s
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p2.s, p0/z, z0.s, z3.s
 ; CHECK-NEXT:    mov z3.d, #0x7fffffff
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
 ; CHECK-NEXT:    fcmuo p0.s, p0/z, z0.s, z0.s
 ; CHECK-NEXT:    mov z1.d, p1/m, z2.d
 ; CHECK-NEXT:    sel z0.d, p2, z3.d, z1.d
@@ -43,13 +44,14 @@ define <vscale x 4 x i32> @test_signed_v4f32_v4i32(<vscale x 4 x float> %f) {
 ; CHECK-NEXT:    mov z2.s, #0x80000000
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    mov w8, #1325400063 // =0x4effffff
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    mov z3.s, w8
 ; CHECK-NEXT:    fcmge p1.s, p0/z, z0.s, z1.s
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzs z1.s, p0/m, z0.s
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p2.s, p0/z, z0.s, z3.s
 ; CHECK-NEXT:    mov z3.s, #0x7fffffff
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
 ; CHECK-NEXT:    fcmuo p0.s, p0/z, z0.s, z0.s
 ; CHECK-NEXT:    mov z1.s, p1/m, z2.s
 ; CHECK-NEXT:    sel z0.s, p2, z3.s, z1.s
@@ -72,6 +74,7 @@ define <vscale x 8 x i32> @test_signed_v8f32_v8i32(<vscale x 8 x float> %f) {
 ; CHECK-NEXT:    mov z6.s, #0x7fffffff
 ; CHECK-NEXT:    mov z2.s, w8
 ; CHECK-NEXT:    mov w8, #1325400063 // =0x4effffff
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    mov z3.s, w8
 ; CHECK-NEXT:    movprfx z4, z0
 ; CHECK-NEXT:    fcvtzs z4.s, p0/m, z0.s
@@ -80,10 +83,10 @@ define <vscale x 8 x i32> @test_signed_v8f32_v8i32(<vscale x 8 x float> %f) {
 ; CHECK-NEXT:    fcmge p1.s, p0/z, z0.s, z2.s
 ; CHECK-NEXT:    fcmge p2.s, p0/z, z1.s, z2.s
 ; CHECK-NEXT:    mov z2.s, #0x80000000
-; CHECK-NEXT:    fcmgt p3.s, p0/z, z0.s, z3.s
 ; CHECK-NEXT:    fcmgt p4.s, p0/z, z1.s, z3.s
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
+; CHECK-NEXT:    fcmgt p3.s, p0/z, z0.s, z3.s
 ; CHECK-NEXT:    sel z3.s, p1, z2.s, z4.s
 ; CHECK-NEXT:    fcmuo p1.s, p0/z, z0.s, z0.s
 ; CHECK-NEXT:    fcmuo p0.s, p0/z, z1.s, z1.s
@@ -107,14 +110,15 @@ define <vscale x 4 x i16> @test_signed_v4f32_v4i16(<vscale x 4 x float> %f) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    mov w8, #65024 // =0xfe00
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    movk w8, #18175, lsl #16
 ; CHECK-NEXT:    mov z2.s, w8
 ; CHECK-NEXT:    fcmge p1.s, p0/z, z0.s, z1.s
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzs z1.s, p0/m, z0.s
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p2.s, p0/z, z0.s, z2.s
 ; CHECK-NEXT:    mov z2.s, #32767 // =0x7fff
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
 ; CHECK-NEXT:    fcmuo p0.s, p0/z, z0.s, z0.s
 ; CHECK-NEXT:    mov z1.s, p1/m, #-32768 // =0xffffffffffff8000
 ; CHECK-NEXT:    sel z0.s, p2, z2.s, z1.s
@@ -137,6 +141,7 @@ define <vscale x 8 x i16> @test_signed_v8f32_v8i16(<vscale x 8 x float> %f) {
 ; CHECK-NEXT:    mov z5.s, #32767 // =0x7fff
 ; CHECK-NEXT:    mov z2.s, w8
 ; CHECK-NEXT:    mov w8, #65024 // =0xfe00
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    movk w8, #18175, lsl #16
 ; CHECK-NEXT:    movprfx z3, z1
 ; CHECK-NEXT:    fcvtzs z3.s, p0/m, z1.s
@@ -145,10 +150,10 @@ define <vscale x 8 x i16> @test_signed_v8f32_v8i16(<vscale x 8 x float> %f) {
 ; CHECK-NEXT:    fcmge p1.s, p0/z, z1.s, z2.s
 ; CHECK-NEXT:    fcmge p2.s, p0/z, z0.s, z2.s
 ; CHECK-NEXT:    mov z2.s, w8
-; CHECK-NEXT:    fcmgt p3.s, p0/z, z1.s, z2.s
 ; CHECK-NEXT:    fcmgt p4.s, p0/z, z0.s, z2.s
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
+; CHECK-NEXT:    fcmgt p3.s, p0/z, z1.s, z2.s
 ; CHECK-NEXT:    mov z3.s, p1/m, #-32768 // =0xffffffffffff8000
 ; CHECK-NEXT:    fcmuo p1.s, p0/z, z1.s, z1.s
 ; CHECK-NEXT:    fcmuo p0.s, p0/z, z0.s, z0.s
@@ -174,13 +179,14 @@ define <vscale x 2 x i64> @test_signed_v2f32_v2i64(<vscale x 2 x float> %f) {
 ; CHECK-NEXT:    mov z2.d, #0x8000000000000000
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    mov w8, #1593835519 // =0x5effffff
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    mov z3.s, w8
 ; CHECK-NEXT:    fcmge p1.s, p0/z, z0.s, z1.s
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z0.s
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p2.s, p0/z, z0.s, z3.s
 ; CHECK-NEXT:    mov z3.d, #0x7fffffffffffffff
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
 ; CHECK-NEXT:    fcmuo p0.s, p0/z, z0.s, z0.s
 ; CHECK-NEXT:    mov z1.d, p1/m, z2.d
 ; CHECK-NEXT:    sel z0.d, p2, z3.d, z1.d
@@ -204,6 +210,7 @@ define <vscale x 4 x i64> @test_signed_v4f32_v4i64(<vscale x 4 x float> %f) {
 ; CHECK-NEXT:    mov z2.s, w8
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov w8, #1593835519 // =0x5effffff
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    mov z3.s, w8
 ; CHECK-NEXT:    mov z6.d, #0x7fffffffffffffff
 ; CHECK-NEXT:    fcmge p1.s, p0/z, z1.s, z2.s
@@ -213,10 +220,10 @@ define <vscale x 4 x i64> @test_signed_v4f32_v4i64(<vscale x 4 x float> %f) {
 ; CHECK-NEXT:    fcvtzs z4.d, p0/m, z1.s
 ; CHECK-NEXT:    movprfx z5, z0
 ; CHECK-NEXT:    fcvtzs z5.d, p0/m, z0.s
-; CHECK-NEXT:    fcmgt p3.s, p0/z, z1.s, z3.s
 ; CHECK-NEXT:    fcmgt p4.s, p0/z, z0.s, z3.s
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
+; CHECK-NEXT:    fcmgt p3.s, p0/z, z1.s, z3.s
 ; CHECK-NEXT:    sel z3.d, p1, z2.d, z4.d
 ; CHECK-NEXT:    fcmuo p1.s, p0/z, z1.s, z1.s
 ; CHECK-NEXT:    fcmuo p0.s, p0/z, z0.s, z0.s
@@ -251,14 +258,15 @@ define <vscale x 2 x i32> @test_signed_v2f64_v2i32(<vscale x 2 x double> %f) {
 ; CHECK-NEXT:    mov z2.d, #0xffffffff80000000
 ; CHECK-NEXT:    mov z1.d, x8
 ; CHECK-NEXT:    mov x8, #281474972516352 // =0xffffffc00000
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    movk x8, #16863, lsl #48
 ; CHECK-NEXT:    mov z3.d, x8
 ; CHECK-NEXT:    fcmge p1.d, p0/z, z0.d, z1.d
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z0.d
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p2.d, p0/z, z0.d, z3.d
 ; CHECK-NEXT:    mov z3.d, #0x7fffffff
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
 ; CHECK-NEXT:    fcmuo p0.d, p0/z, z0.d, z0.d
 ; CHECK-NEXT:    mov z1.d, p1/m, z2.d
 ; CHECK-NEXT:    sel z0.d, p2, z3.d, z1.d
@@ -281,6 +289,7 @@ define <vscale x 4 x i32> @test_signed_v4f64_v4i32(<vscale x 4 x double> %f) {
 ; CHECK-NEXT:    mov z6.d, #0x7fffffff
 ; CHECK-NEXT:    mov z2.d, x8
 ; CHECK-NEXT:    mov x8, #281474972516352 // =0xffffffc00000
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    movk x8, #16863, lsl #48
 ; CHECK-NEXT:    movprfx z4, z1
 ; CHECK-NEXT:    fcvtzs z4.d, p0/m, z1.d
@@ -290,10 +299,10 @@ define <vscale x 4 x i32> @test_signed_v4f64_v4i32(<vscale x 4 x double> %f) {
 ; CHECK-NEXT:    fcmge p1.d, p0/z, z1.d, z2.d
 ; CHECK-NEXT:    fcmge p2.d, p0/z, z0.d, z2.d
 ; CHECK-NEXT:    mov z2.d, #0xffffffff80000000
-; CHECK-NEXT:    fcmgt p3.d, p0/z, z1.d, z3.d
 ; CHECK-NEXT:    fcmgt p4.d, p0/z, z0.d, z3.d
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
+; CHECK-NEXT:    fcmgt p3.d, p0/z, z1.d, z3.d
 ; CHECK-NEXT:    sel z3.d, p1, z2.d, z4.d
 ; CHECK-NEXT:    fcmuo p1.d, p0/z, z1.d, z1.d
 ; CHECK-NEXT:    fcmuo p0.d, p0/z, z0.d, z0.d
@@ -327,50 +336,51 @@ define <vscale x 8 x i32> @test_signed_v8f64_v8i32(<vscale x 8 x double> %f) {
 ; CHECK-NEXT:    mov z5.d, #0xffffffff80000000
 ; CHECK-NEXT:    mov z4.d, x8
 ; CHECK-NEXT:    mov x8, #281474972516352 // =0xffffffc00000
-; CHECK-NEXT:    mov z26.d, #0x7fffffff
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    movk x8, #16863, lsl #48
-; CHECK-NEXT:    movprfx z7, z0
-; CHECK-NEXT:    fcvtzs z7.d, p0/m, z0.d
+; CHECK-NEXT:    movprfx z7, z1
+; CHECK-NEXT:    fcvtzs z7.d, p0/m, z1.d
 ; CHECK-NEXT:    movprfx z24, z3
 ; CHECK-NEXT:    fcvtzs z24.d, p0/m, z3.d
 ; CHECK-NEXT:    mov z6.d, x8
 ; CHECK-NEXT:    movprfx z25, z2
 ; CHECK-NEXT:    fcvtzs z25.d, p0/m, z2.d
-; CHECK-NEXT:    fcmge p1.d, p0/z, z1.d, z4.d
-; CHECK-NEXT:    fcmge p2.d, p0/z, z0.d, z4.d
-; CHECK-NEXT:    fcmge p3.d, p0/z, z3.d, z4.d
-; CHECK-NEXT:    fcmge p4.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    movprfx z4, z1
-; CHECK-NEXT:    fcvtzs z4.d, p0/m, z1.d
-; CHECK-NEXT:    fcmgt p5.d, p0/z, z1.d, z6.d
-; CHECK-NEXT:    fcmgt p6.d, p0/z, z0.d, z6.d
-; CHECK-NEXT:    fcmgt p7.d, p0/z, z3.d, z6.d
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
-; CHECK-NEXT:    mov z4.d, p1/m, z5.d
-; CHECK-NEXT:    fcmgt p1.d, p0/z, z2.d, z6.d
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
-; CHECK-NEXT:    sel z6.d, p2, z5.d, z7.d
-; CHECK-NEXT:    fcmuo p2.d, p0/z, z1.d, z1.d
-; CHECK-NEXT:    sel z7.d, p3, z5.d, z24.d
+; CHECK-NEXT:    mov z26.d, #0x7fffffff
+; CHECK-NEXT:    fcmge p2.d, p0/z, z1.d, z4.d
+; CHECK-NEXT:    fcmge p3.d, p0/z, z0.d, z4.d
+; CHECK-NEXT:    fcmge p4.d, p0/z, z3.d, z4.d
+; CHECK-NEXT:    fcmge p5.d, p0/z, z2.d, z4.d
+; CHECK-NEXT:    movprfx z4, z0
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z0.d
+; CHECK-NEXT:    fcmgt p6.d, p0/z, z1.d, z6.d
+; CHECK-NEXT:    fcmgt p7.d, p0/z, z2.d, z6.d
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p1.b
+; CHECK-NEXT:    eor p3.b, p0/z, p3.b, p1.b
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p1.b
+; CHECK-NEXT:    mov z7.d, p2/m, z5.d
+; CHECK-NEXT:    fcmgt p2.d, p0/z, z0.d, z6.d
+; CHECK-NEXT:    eor p1.b, p0/z, p5.b, p1.b
+; CHECK-NEXT:    fcmgt p5.d, p0/z, z3.d, z6.d
+; CHECK-NEXT:    mov z4.d, p3/m, z5.d
+; CHECK-NEXT:    sel z6.d, p4, z5.d, z24.d
 ; CHECK-NEXT:    fcmuo p3.d, p0/z, z0.d, z0.d
-; CHECK-NEXT:    sel z5.d, p4, z5.d, z25.d
 ; CHECK-NEXT:    fcmuo p4.d, p0/z, z3.d, z3.d
+; CHECK-NEXT:    sel z5.d, p1, z5.d, z25.d
+; CHECK-NEXT:    fcmuo p1.d, p0/z, z1.d, z1.d
 ; CHECK-NEXT:    fcmuo p0.d, p0/z, z2.d, z2.d
-; CHECK-NEXT:    sel z0.d, p5, z26.d, z4.d
-; CHECK-NEXT:    sel z1.d, p6, z26.d, z6.d
+; CHECK-NEXT:    sel z1.d, p6, z26.d, z7.d
+; CHECK-NEXT:    sel z0.d, p2, z26.d, z4.d
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z2.d, p7, z26.d, z7.d
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z3.d, p1, z26.d, z5.d
+; CHECK-NEXT:    sel z2.d, p5, z26.d, z6.d
 ; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    mov z0.d, p2/m, #0 // =0x0
-; CHECK-NEXT:    mov z1.d, p3/m, #0 // =0x0
+; CHECK-NEXT:    sel z3.d, p7, z26.d, z5.d
+; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    mov z1.d, p1/m, #0 // =0x0
+; CHECK-NEXT:    mov z0.d, p3/m, #0 // =0x0
 ; CHECK-NEXT:    mov z2.d, p4/m, #0 // =0x0
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    mov z3.d, p0/m, #0 // =0x0
-; CHECK-NEXT:    uzp1 z0.s, z1.s, z0.s
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
 ; CHECK-NEXT:    uzp1 z1.s, z3.s, z2.s
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -392,6 +402,7 @@ define <vscale x 4 x i16> @test_signed_v4f64_v4i16(<vscale x 4 x double> %f) {
 ; CHECK-NEXT:    mov z5.d, #32767 // =0x7fff
 ; CHECK-NEXT:    mov z2.d, x8
 ; CHECK-NEXT:    mov x8, #281200098803712 // =0xffc000000000
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    movk x8, #16607, lsl #48
 ; CHECK-NEXT:    movprfx z3, z1
 ; CHECK-NEXT:    fcvtzs z3.d, p0/m, z1.d
@@ -400,10 +411,10 @@ define <vscale x 4 x i16> @test_signed_v4f64_v4i16(<vscale x 4 x double> %f) {
 ; CHECK-NEXT:    fcmge p1.d, p0/z, z1.d, z2.d
 ; CHECK-NEXT:    fcmge p2.d, p0/z, z0.d, z2.d
 ; CHECK-NEXT:    mov z2.d, x8
-; CHECK-NEXT:    fcmgt p3.d, p0/z, z1.d, z2.d
 ; CHECK-NEXT:    fcmgt p4.d, p0/z, z0.d, z2.d
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
+; CHECK-NEXT:    fcmgt p3.d, p0/z, z1.d, z2.d
 ; CHECK-NEXT:    mov z3.d, p1/m, #-32768 // =0xffffffffffff8000
 ; CHECK-NEXT:    fcmuo p1.d, p0/z, z1.d, z1.d
 ; CHECK-NEXT:    fcmuo p0.d, p0/z, z0.d, z0.d
@@ -437,6 +448,7 @@ define <vscale x 8 x i16> @test_signed_v8f64_v8i16(<vscale x 8 x double> %f) {
 ; CHECK-NEXT:    mov z25.d, #32767 // =0x7fff
 ; CHECK-NEXT:    mov z4.d, x8
 ; CHECK-NEXT:    mov x8, #281200098803712 // =0xffc000000000
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    movk x8, #16607, lsl #48
 ; CHECK-NEXT:    movprfx z6, z2
 ; CHECK-NEXT:    fcvtzs z6.d, p0/m, z2.d
@@ -446,32 +458,32 @@ define <vscale x 8 x i16> @test_signed_v8f64_v8i16(<vscale x 8 x double> %f) {
 ; CHECK-NEXT:    movprfx z24, z0
 ; CHECK-NEXT:    fcvtzs z24.d, p0/m, z0.d
 ; CHECK-NEXT:    fcmge p1.d, p0/z, z3.d, z4.d
-; CHECK-NEXT:    fcmge p2.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    fcmge p3.d, p0/z, z1.d, z4.d
-; CHECK-NEXT:    fcmge p4.d, p0/z, z0.d, z4.d
+; CHECK-NEXT:    fcmge p3.d, p0/z, z2.d, z4.d
+; CHECK-NEXT:    fcmge p4.d, p0/z, z1.d, z4.d
+; CHECK-NEXT:    fcmge p5.d, p0/z, z0.d, z4.d
 ; CHECK-NEXT:    movprfx z4, z3
 ; CHECK-NEXT:    fcvtzs z4.d, p0/m, z3.d
-; CHECK-NEXT:    fcmgt p5.d, p0/z, z3.d, z5.d
-; CHECK-NEXT:    fcmgt p6.d, p0/z, z2.d, z5.d
-; CHECK-NEXT:    fcmgt p7.d, p0/z, z1.d, z5.d
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
+; CHECK-NEXT:    fcmgt p6.d, p0/z, z3.d, z5.d
+; CHECK-NEXT:    fcmgt p7.d, p0/z, z2.d, z5.d
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
+; CHECK-NEXT:    eor p3.b, p0/z, p3.b, p2.b
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p2.b
 ; CHECK-NEXT:    mov z4.d, p1/m, #-32768 // =0xffffffffffff8000
 ; CHECK-NEXT:    fcmgt p1.d, p0/z, z0.d, z5.d
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
-; CHECK-NEXT:    mov z6.d, p2/m, #-32768 // =0xffffffffffff8000
-; CHECK-NEXT:    fcmuo p2.d, p0/z, z3.d, z3.d
-; CHECK-NEXT:    mov z7.d, p3/m, #-32768 // =0xffffffffffff8000
+; CHECK-NEXT:    eor p2.b, p0/z, p5.b, p2.b
+; CHECK-NEXT:    fcmgt p5.d, p0/z, z1.d, z5.d
+; CHECK-NEXT:    mov z6.d, p3/m, #-32768 // =0xffffffffffff8000
+; CHECK-NEXT:    mov z7.d, p4/m, #-32768 // =0xffffffffffff8000
 ; CHECK-NEXT:    fcmuo p3.d, p0/z, z2.d, z2.d
-; CHECK-NEXT:    mov z24.d, p4/m, #-32768 // =0xffffffffffff8000
 ; CHECK-NEXT:    fcmuo p4.d, p0/z, z1.d, z1.d
+; CHECK-NEXT:    mov z24.d, p2/m, #-32768 // =0xffffffffffff8000
+; CHECK-NEXT:    fcmuo p2.d, p0/z, z3.d, z3.d
 ; CHECK-NEXT:    fcmuo p0.d, p0/z, z0.d, z0.d
-; CHECK-NEXT:    sel z2.d, p5, z25.d, z4.d
-; CHECK-NEXT:    sel z0.d, p6, z25.d, z6.d
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z1.d, p7, z25.d, z7.d
+; CHECK-NEXT:    sel z2.d, p6, z25.d, z4.d
+; CHECK-NEXT:    sel z0.d, p7, z25.d, z6.d
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    sel z1.d, p5, z25.d, z7.d
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    sel z3.d, p1, z25.d, z24.d
 ; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    mov z2.d, p2/m, #0 // =0x0
@@ -497,13 +509,14 @@ define <vscale x 2 x i64> @test_signed_v2f64_v2i64(<vscale x 2 x double> %f) {
 ; CHECK-NEXT:    mov z2.d, #0x8000000000000000
 ; CHECK-NEXT:    mov z1.d, x8
 ; CHECK-NEXT:    mov x8, #4890909195324358655 // =0x43dfffffffffffff
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    mov z3.d, x8
 ; CHECK-NEXT:    fcmge p1.d, p0/z, z0.d, z1.d
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z0.d
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p2.d, p0/z, z0.d, z3.d
 ; CHECK-NEXT:    mov z3.d, #0x7fffffffffffffff
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
 ; CHECK-NEXT:    fcmuo p0.d, p0/z, z0.d, z0.d
 ; CHECK-NEXT:    mov z1.d, p1/m, z2.d
 ; CHECK-NEXT:    sel z0.d, p2, z3.d, z1.d
@@ -526,6 +539,7 @@ define <vscale x 4 x i64> @test_signed_v4f64_v4i64(<vscale x 4 x double> %f) {
 ; CHECK-NEXT:    mov z6.d, #0x7fffffffffffffff
 ; CHECK-NEXT:    mov z2.d, x8
 ; CHECK-NEXT:    mov x8, #4890909195324358655 // =0x43dfffffffffffff
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    mov z3.d, x8
 ; CHECK-NEXT:    movprfx z4, z0
 ; CHECK-NEXT:    fcvtzs z4.d, p0/m, z0.d
@@ -534,10 +548,10 @@ define <vscale x 4 x i64> @test_signed_v4f64_v4i64(<vscale x 4 x double> %f) {
 ; CHECK-NEXT:    fcmge p1.d, p0/z, z0.d, z2.d
 ; CHECK-NEXT:    fcmge p2.d, p0/z, z1.d, z2.d
 ; CHECK-NEXT:    mov z2.d, #0x8000000000000000
-; CHECK-NEXT:    fcmgt p3.d, p0/z, z0.d, z3.d
 ; CHECK-NEXT:    fcmgt p4.d, p0/z, z1.d, z3.d
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
+; CHECK-NEXT:    fcmgt p3.d, p0/z, z0.d, z3.d
 ; CHECK-NEXT:    sel z3.d, p1, z2.d, z4.d
 ; CHECK-NEXT:    fcmuo p1.d, p0/z, z0.d, z0.d
 ; CHECK-NEXT:    fcmuo p0.d, p0/z, z1.d, z1.d
@@ -573,13 +587,14 @@ define <vscale x 2 x i32> @test_signed_v2f16_v2i32(<vscale x 2 x half> %f) {
 ; CHECK-NEXT:    mov z2.d, #0xffffffff80000000
 ; CHECK-NEXT:    mov z1.h, w8
 ; CHECK-NEXT:    mov w8, #31743 // =0x7bff
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    mov z3.h, w8
 ; CHECK-NEXT:    fcmge p1.h, p0/z, z0.h, z1.h
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z0.h
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p2.h, p0/z, z0.h, z3.h
 ; CHECK-NEXT:    mov z3.d, #0x7fffffff
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
 ; CHECK-NEXT:    fcmuo p0.h, p0/z, z0.h, z0.h
 ; CHECK-NEXT:    mov z1.d, p1/m, z2.d
 ; CHECK-NEXT:    sel z0.d, p2, z3.d, z1.d
@@ -597,13 +612,14 @@ define <vscale x 4 x i32> @test_signed_v4f16_v4i32(<vscale x 4 x half> %f) {
 ; CHECK-NEXT:    mov z2.s, #0x80000000
 ; CHECK-NEXT:    mov z1.h, w8
 ; CHECK-NEXT:    mov w8, #31743 // =0x7bff
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    mov z3.h, w8
 ; CHECK-NEXT:    fcmge p1.h, p0/z, z0.h, z1.h
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzs z1.s, p0/m, z0.h
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p2.h, p0/z, z0.h, z3.h
 ; CHECK-NEXT:    mov z3.s, #0x7fffffff
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
 ; CHECK-NEXT:    fcmuo p0.h, p0/z, z0.h, z0.h
 ; CHECK-NEXT:    mov z1.s, p1/m, z2.s
 ; CHECK-NEXT:    sel z0.s, p2, z3.s, z1.s
@@ -627,6 +643,7 @@ define <vscale x 8 x i32> @test_signed_v8f16_v8i32(<vscale x 8 x half> %f) {
 ; CHECK-NEXT:    mov z2.h, w8
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov w8, #31743 // =0x7bff
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    mov z3.h, w8
 ; CHECK-NEXT:    mov z6.s, #0x7fffffff
 ; CHECK-NEXT:    fcmge p1.h, p0/z, z1.h, z2.h
@@ -636,10 +653,10 @@ define <vscale x 8 x i32> @test_signed_v8f16_v8i32(<vscale x 8 x half> %f) {
 ; CHECK-NEXT:    fcvtzs z4.s, p0/m, z1.h
 ; CHECK-NEXT:    movprfx z5, z0
 ; CHECK-NEXT:    fcvtzs z5.s, p0/m, z0.h
-; CHECK-NEXT:    fcmgt p3.h, p0/z, z1.h, z3.h
 ; CHECK-NEXT:    fcmgt p4.h, p0/z, z0.h, z3.h
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
+; CHECK-NEXT:    fcmgt p3.h, p0/z, z1.h, z3.h
 ; CHECK-NEXT:    sel z3.s, p1, z2.s, z4.s
 ; CHECK-NEXT:    fcmuo p1.h, p0/z, z1.h, z1.h
 ; CHECK-NEXT:    fcmuo p0.h, p0/z, z0.h, z0.h
@@ -663,13 +680,14 @@ define <vscale x 4 x i16> @test_signed_v4f16_v4i16(<vscale x 4 x half> %f) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov z1.h, w8
 ; CHECK-NEXT:    mov w8, #30719 // =0x77ff
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    mov z2.h, w8
 ; CHECK-NEXT:    fcmge p1.h, p0/z, z0.h, z1.h
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzs z1.s, p0/m, z0.h
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p2.h, p0/z, z0.h, z2.h
 ; CHECK-NEXT:    mov z2.s, #32767 // =0x7fff
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
 ; CHECK-NEXT:    fcmuo p0.h, p0/z, z0.h, z0.h
 ; CHECK-NEXT:    mov z1.s, p1/m, #-32768 // =0xffffffffffff8000
 ; CHECK-NEXT:    sel z0.s, p2, z2.s, z1.s
@@ -686,13 +704,14 @@ define <vscale x 8 x i16> @test_signed_v8f16_v8i16(<vscale x 8 x half> %f) {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    mov z1.h, w8
 ; CHECK-NEXT:    mov w8, #30719 // =0x77ff
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    mov z2.h, w8
 ; CHECK-NEXT:    fcmge p1.h, p0/z, z0.h, z1.h
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzs z1.h, p0/m, z0.h
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p2.h, p0/z, z0.h, z2.h
 ; CHECK-NEXT:    mov z2.h, #32767 // =0x7fff
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
 ; CHECK-NEXT:    fcmuo p0.h, p0/z, z0.h, z0.h
 ; CHECK-NEXT:    mov z1.h, p1/m, #-32768 // =0xffffffffffff8000
 ; CHECK-NEXT:    sel z0.h, p2, z2.h, z1.h
@@ -710,13 +729,14 @@ define <vscale x 2 x i64> @test_signed_v2f16_v2i64(<vscale x 2 x half> %f) {
 ; CHECK-NEXT:    mov z2.d, #0x8000000000000000
 ; CHECK-NEXT:    mov z1.h, w8
 ; CHECK-NEXT:    mov w8, #31743 // =0x7bff
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    mov z3.h, w8
 ; CHECK-NEXT:    fcmge p1.h, p0/z, z0.h, z1.h
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z0.h
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p2.h, p0/z, z0.h, z3.h
 ; CHECK-NEXT:    mov z3.d, #0x7fffffffffffffff
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
 ; CHECK-NEXT:    fcmuo p0.h, p0/z, z0.h, z0.h
 ; CHECK-NEXT:    mov z1.d, p1/m, z2.d
 ; CHECK-NEXT:    sel z0.d, p2, z3.d, z1.d
@@ -740,6 +760,7 @@ define <vscale x 4 x i64> @test_signed_v4f16_v4i64(<vscale x 4 x half> %f) {
 ; CHECK-NEXT:    mov z2.h, w8
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov w8, #31743 // =0x7bff
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    mov z3.h, w8
 ; CHECK-NEXT:    mov z6.d, #0x7fffffffffffffff
 ; CHECK-NEXT:    fcmge p1.h, p0/z, z1.h, z2.h
@@ -749,10 +770,10 @@ define <vscale x 4 x i64> @test_signed_v4f16_v4i64(<vscale x 4 x half> %f) {
 ; CHECK-NEXT:    fcvtzs z4.d, p0/m, z1.h
 ; CHECK-NEXT:    movprfx z5, z0
 ; CHECK-NEXT:    fcvtzs z5.d, p0/m, z0.h
-; CHECK-NEXT:    fcmgt p3.h, p0/z, z1.h, z3.h
 ; CHECK-NEXT:    fcmgt p4.h, p0/z, z0.h, z3.h
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
+; CHECK-NEXT:    fcmgt p3.h, p0/z, z1.h, z3.h
 ; CHECK-NEXT:    sel z3.d, p1, z2.d, z4.d
 ; CHECK-NEXT:    fcmuo p1.h, p0/z, z1.h, z1.h
 ; CHECK-NEXT:    fcmuo p0.h, p0/z, z0.h, z0.h
diff --git a/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll b/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll
index ed352ffec339f..29292e0cefc73 100644
--- a/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll
@@ -17,10 +17,11 @@ define <vscale x 2 x i32> @test_signed_v2f32_v2i32(<vscale x 2 x float> %f) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov w8, #1333788671 // =0x4f7fffff
 ; CHECK-NEXT:    mov z1.s, w8
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmge p1.s, p0/z, z0.s, #0.0
 ; CHECK-NEXT:    movprfx z2, z0
 ; CHECK-NEXT:    fcvtzu z2.d, p0/m, z0.s
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p0.s, p0/z, z0.s, z1.s
 ; CHECK-NEXT:    mov z0.d, #0xffffffff
 ; CHECK-NEXT:    mov z2.d, p1/m, #0 // =0x0
@@ -36,10 +37,11 @@ define <vscale x 4 x i32> @test_signed_v4f32_v4i32(<vscale x 4 x float> %f) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov w8, #1333788671 // =0x4f7fffff
 ; CHECK-NEXT:    mov z2.s, w8
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmge p1.s, p0/z, z0.s, #0.0
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzu z1.s, p0/m, z0.s
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p0.s, p0/z, z0.s, z2.s
 ; CHECK-NEXT:    mov z1.s, p1/m, #0 // =0x0
 ; CHECK-NEXT:    mov z1.s, p0/m, #-1 // =0xffffffffffffffff
@@ -54,6 +56,7 @@ define <vscale x 8 x i32> @test_signed_v8f32_v8i32(<vscale x 8 x float> %f) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov w8, #1333788671 // =0x4f7fffff
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    mov z4.s, w8
 ; CHECK-NEXT:    fcmge p1.s, p0/z, z0.s, #0.0
 ; CHECK-NEXT:    fcmge p2.s, p0/z, z1.s, #0.0
@@ -61,9 +64,9 @@ define <vscale x 8 x i32> @test_signed_v8f32_v8i32(<vscale x 8 x float> %f) {
 ; CHECK-NEXT:    fcvtzu z2.s, p0/m, z0.s
 ; CHECK-NEXT:    movprfx z3, z1
 ; CHECK-NEXT:    fcvtzu z3.s, p0/m, z1.s
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
 ; CHECK-NEXT:    fcmgt p3.s, p0/z, z0.s, z4.s
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
 ; CHECK-NEXT:    fcmgt p0.s, p0/z, z1.s, z4.s
 ; CHECK-NEXT:    mov z2.s, p1/m, #0 // =0x0
 ; CHECK-NEXT:    mov z3.s, p2/m, #0 // =0x0
@@ -82,11 +85,12 @@ define <vscale x 4 x i16> @test_signed_v4f32_v4i16(<vscale x 4 x float> %f) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov w8, #65280 // =0xff00
 ; CHECK-NEXT:    movk w8, #18303, lsl #16
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmge p1.s, p0/z, z0.s, #0.0
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    movprfx z2, z0
 ; CHECK-NEXT:    fcvtzu z2.s, p0/m, z0.s
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p0.s, p0/z, z0.s, z1.s
 ; CHECK-NEXT:    mov z0.s, #65535 // =0xffff
 ; CHECK-NEXT:    mov z2.s, p1/m, #0 // =0x0
@@ -102,6 +106,7 @@ define <vscale x 8 x i16> @test_signed_v8f32_v8i16(<vscale x 8 x float> %f) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov w8, #65280 // =0xff00
 ; CHECK-NEXT:    movk w8, #18303, lsl #16
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    fcmge p1.s, p0/z, z1.s, #0.0
 ; CHECK-NEXT:    fcmge p2.s, p0/z, z0.s, #0.0
 ; CHECK-NEXT:    mov z2.s, w8
@@ -109,9 +114,9 @@ define <vscale x 8 x i16> @test_signed_v8f32_v8i16(<vscale x 8 x float> %f) {
 ; CHECK-NEXT:    fcvtzu z3.s, p0/m, z1.s
 ; CHECK-NEXT:    movprfx z4, z0
 ; CHECK-NEXT:    fcvtzu z4.s, p0/m, z0.s
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
 ; CHECK-NEXT:    fcmgt p3.s, p0/z, z1.s, z2.s
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
 ; CHECK-NEXT:    fcmgt p0.s, p0/z, z0.s, z2.s
 ; CHECK-NEXT:    mov z0.s, #65535 // =0xffff
 ; CHECK-NEXT:    mov z3.s, p1/m, #0 // =0x0
@@ -130,10 +135,11 @@ define <vscale x 2 x i64> @test_signed_v2f32_v2i64(<vscale x 2 x float> %f) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov w8, #1602224127 // =0x5f7fffff
 ; CHECK-NEXT:    mov z2.s, w8
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmge p1.s, p0/z, z0.s, #0.0
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzu z1.d, p0/m, z0.s
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p0.s, p0/z, z0.s, z2.s
 ; CHECK-NEXT:    mov z1.d, p1/m, #0 // =0x0
 ; CHECK-NEXT:    mov z1.d, p0/m, #-1 // =0xffffffffffffffff
@@ -151,15 +157,16 @@ define <vscale x 4 x i64> @test_signed_v4f32_v4i64(<vscale x 4 x float> %f) {
 ; CHECK-NEXT:    mov w8, #1602224127 // =0x5f7fffff
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov z4.s, w8
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    fcmge p1.s, p0/z, z2.s, #0.0
 ; CHECK-NEXT:    fcmge p2.s, p0/z, z3.s, #0.0
 ; CHECK-NEXT:    movprfx z0, z2
 ; CHECK-NEXT:    fcvtzu z0.d, p0/m, z2.s
 ; CHECK-NEXT:    movprfx z1, z3
 ; CHECK-NEXT:    fcvtzu z1.d, p0/m, z3.s
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
 ; CHECK-NEXT:    fcmgt p3.s, p0/z, z2.s, z4.s
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
 ; CHECK-NEXT:    fcmgt p0.s, p0/z, z3.s, z4.s
 ; CHECK-NEXT:    mov z0.d, p1/m, #0 // =0x0
 ; CHECK-NEXT:    mov z1.d, p2/m, #0 // =0x0
@@ -186,11 +193,12 @@ define <vscale x 2 x i32> @test_signed_v2f64_v2i32(<vscale x 2 x double> %f) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov x8, #281474974613504 // =0xffffffe00000
 ; CHECK-NEXT:    movk x8, #16879, lsl #48
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmge p1.d, p0/z, z0.d, #0.0
 ; CHECK-NEXT:    mov z1.d, x8
 ; CHECK-NEXT:    movprfx z2, z0
 ; CHECK-NEXT:    fcvtzu z2.d, p0/m, z0.d
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p0.d, p0/z, z0.d, z1.d
 ; CHECK-NEXT:    mov z0.d, #0xffffffff
 ; CHECK-NEXT:    mov z2.d, p1/m, #0 // =0x0
@@ -206,6 +214,7 @@ define <vscale x 4 x i32> @test_signed_v4f64_v4i32(<vscale x 4 x double> %f) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov x8, #281474974613504 // =0xffffffe00000
 ; CHECK-NEXT:    movk x8, #16879, lsl #48
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    fcmge p1.d, p0/z, z1.d, #0.0
 ; CHECK-NEXT:    fcmge p2.d, p0/z, z0.d, #0.0
 ; CHECK-NEXT:    mov z2.d, x8
@@ -213,9 +222,9 @@ define <vscale x 4 x i32> @test_signed_v4f64_v4i32(<vscale x 4 x double> %f) {
 ; CHECK-NEXT:    fcvtzu z3.d, p0/m, z1.d
 ; CHECK-NEXT:    movprfx z4, z0
 ; CHECK-NEXT:    fcvtzu z4.d, p0/m, z0.d
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
 ; CHECK-NEXT:    fcmgt p3.d, p0/z, z1.d, z2.d
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
 ; CHECK-NEXT:    fcmgt p0.d, p0/z, z0.d, z2.d
 ; CHECK-NEXT:    mov z0.d, #0xffffffff
 ; CHECK-NEXT:    mov z3.d, p1/m, #0 // =0x0
@@ -241,10 +250,11 @@ define <vscale x 8 x i32> @test_signed_v8f64_v8i32(<vscale x 8 x double> %f) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov x8, #281474974613504 // =0xffffffe00000
 ; CHECK-NEXT:    movk x8, #16879, lsl #48
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    fcmge p1.d, p0/z, z1.d, #0.0
 ; CHECK-NEXT:    fcmge p2.d, p0/z, z0.d, #0.0
-; CHECK-NEXT:    fcmge p3.d, p0/z, z3.d, #0.0
-; CHECK-NEXT:    fcmge p4.d, p0/z, z2.d, #0.0
+; CHECK-NEXT:    fcmge p4.d, p0/z, z3.d, #0.0
+; CHECK-NEXT:    fcmge p5.d, p0/z, z2.d, #0.0
 ; CHECK-NEXT:    movprfx z5, z1
 ; CHECK-NEXT:    fcvtzu z5.d, p0/m, z1.d
 ; CHECK-NEXT:    mov z4.d, x8
@@ -254,26 +264,26 @@ define <vscale x 8 x i32> @test_signed_v8f64_v8i32(<vscale x 8 x double> %f) {
 ; CHECK-NEXT:    fcvtzu z7.d, p0/m, z3.d
 ; CHECK-NEXT:    movprfx z24, z2
 ; CHECK-NEXT:    fcvtzu z24.d, p0/m, z2.d
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    fcmgt p5.d, p0/z, z1.d, z4.d
-; CHECK-NEXT:    fcmgt p6.d, p0/z, z0.d, z4.d
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
-; CHECK-NEXT:    mov z0.d, #0xffffffff
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    fcmgt p6.d, p0/z, z1.d, z4.d
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p3.b
 ; CHECK-NEXT:    mov z5.d, p1/m, #0 // =0x0
 ; CHECK-NEXT:    fcmgt p1.d, p0/z, z3.d, z4.d
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
+; CHECK-NEXT:    eor p3.b, p0/z, p5.b, p3.b
+; CHECK-NEXT:    fcmgt p5.d, p0/z, z0.d, z4.d
+; CHECK-NEXT:    mov z0.d, #0xffffffff
 ; CHECK-NEXT:    fcmgt p0.d, p0/z, z2.d, z4.d
 ; CHECK-NEXT:    mov z6.d, p2/m, #0 // =0x0
-; CHECK-NEXT:    mov z7.d, p3/m, #0 // =0x0
-; CHECK-NEXT:    mov z24.d, p4/m, #0 // =0x0
-; CHECK-NEXT:    sel z1.d, p5, z0.d, z5.d
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z2.d, p6, z0.d, z6.d
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z3.d, p1, z0.d, z7.d
+; CHECK-NEXT:    mov z7.d, p4/m, #0 // =0x0
+; CHECK-NEXT:    mov z24.d, p3/m, #0 // =0x0
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    sel z1.d, p6, z0.d, z5.d
+; CHECK-NEXT:    sel z2.d, p5, z0.d, z6.d
+; CHECK-NEXT:    sel z3.d, p1, z0.d, z7.d
 ; CHECK-NEXT:    sel z4.d, p0, z0.d, z24.d
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    uzp1 z0.s, z2.s, z1.s
 ; CHECK-NEXT:    uzp1 z1.s, z4.s, z3.s
 ; CHECK-NEXT:    addvl sp, sp, #1
@@ -289,6 +299,7 @@ define <vscale x 4 x i16> @test_signed_v4f64_v4i16(<vscale x 4 x double> %f) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov x8, #281337537757184 // =0xffe000000000
 ; CHECK-NEXT:    movk x8, #16623, lsl #48
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    fcmge p1.d, p0/z, z1.d, #0.0
 ; CHECK-NEXT:    fcmge p2.d, p0/z, z0.d, #0.0
 ; CHECK-NEXT:    mov z2.d, x8
@@ -296,9 +307,9 @@ define <vscale x 4 x i16> @test_signed_v4f64_v4i16(<vscale x 4 x double> %f) {
 ; CHECK-NEXT:    fcvtzu z3.d, p0/m, z1.d
 ; CHECK-NEXT:    movprfx z4, z0
 ; CHECK-NEXT:    fcvtzu z4.d, p0/m, z0.d
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
 ; CHECK-NEXT:    fcmgt p3.d, p0/z, z1.d, z2.d
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
 ; CHECK-NEXT:    fcmgt p0.d, p0/z, z0.d, z2.d
 ; CHECK-NEXT:    mov z0.d, #65535 // =0xffff
 ; CHECK-NEXT:    mov z3.d, p1/m, #0 // =0x0
@@ -324,10 +335,11 @@ define <vscale x 8 x i16> @test_signed_v8f64_v8i16(<vscale x 8 x double> %f) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov x8, #281337537757184 // =0xffe000000000
 ; CHECK-NEXT:    movk x8, #16623, lsl #48
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    fcmge p1.d, p0/z, z3.d, #0.0
 ; CHECK-NEXT:    fcmge p2.d, p0/z, z2.d, #0.0
-; CHECK-NEXT:    fcmge p3.d, p0/z, z1.d, #0.0
-; CHECK-NEXT:    fcmge p4.d, p0/z, z0.d, #0.0
+; CHECK-NEXT:    fcmge p4.d, p0/z, z1.d, #0.0
+; CHECK-NEXT:    fcmge p5.d, p0/z, z0.d, #0.0
 ; CHECK-NEXT:    movprfx z5, z3
 ; CHECK-NEXT:    fcvtzu z5.d, p0/m, z3.d
 ; CHECK-NEXT:    mov z4.d, x8
@@ -337,26 +349,26 @@ define <vscale x 8 x i16> @test_signed_v8f64_v8i16(<vscale x 8 x double> %f) {
 ; CHECK-NEXT:    fcvtzu z7.d, p0/m, z1.d
 ; CHECK-NEXT:    movprfx z24, z0
 ; CHECK-NEXT:    fcvtzu z24.d, p0/m, z0.d
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    fcmgt p5.d, p0/z, z3.d, z4.d
-; CHECK-NEXT:    fcmgt p6.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
-; CHECK-NEXT:    mov z2.d, #65535 // =0xffff
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    fcmgt p6.d, p0/z, z3.d, z4.d
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p3.b
 ; CHECK-NEXT:    mov z5.d, p1/m, #0 // =0x0
 ; CHECK-NEXT:    fcmgt p1.d, p0/z, z1.d, z4.d
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
+; CHECK-NEXT:    eor p3.b, p0/z, p5.b, p3.b
+; CHECK-NEXT:    fcmgt p5.d, p0/z, z2.d, z4.d
+; CHECK-NEXT:    mov z2.d, #65535 // =0xffff
 ; CHECK-NEXT:    fcmgt p0.d, p0/z, z0.d, z4.d
 ; CHECK-NEXT:    mov z6.d, p2/m, #0 // =0x0
-; CHECK-NEXT:    mov z7.d, p3/m, #0 // =0x0
-; CHECK-NEXT:    mov z24.d, p4/m, #0 // =0x0
-; CHECK-NEXT:    sel z0.d, p5, z2.d, z5.d
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z1.d, p6, z2.d, z6.d
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z3.d, p1, z2.d, z7.d
+; CHECK-NEXT:    mov z7.d, p4/m, #0 // =0x0
+; CHECK-NEXT:    mov z24.d, p3/m, #0 // =0x0
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    sel z0.d, p6, z2.d, z5.d
+; CHECK-NEXT:    sel z1.d, p5, z2.d, z6.d
+; CHECK-NEXT:    sel z3.d, p1, z2.d, z7.d
 ; CHECK-NEXT:    sel z2.d, p0, z2.d, z24.d
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    uzp1 z0.s, z1.s, z0.s
 ; CHECK-NEXT:    uzp1 z1.s, z2.s, z3.s
 ; CHECK-NEXT:    uzp1 z0.h, z1.h, z0.h
@@ -373,10 +385,11 @@ define <vscale x 2 x i64> @test_signed_v2f64_v2i64(<vscale x 2 x double> %f) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov x8, #4895412794951729151 // =0x43efffffffffffff
 ; CHECK-NEXT:    mov z2.d, x8
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmge p1.d, p0/z, z0.d, #0.0
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzu z1.d, p0/m, z0.d
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p0.d, p0/z, z0.d, z2.d
 ; CHECK-NEXT:    mov z1.d, p1/m, #0 // =0x0
 ; CHECK-NEXT:    mov z1.d, p0/m, #-1 // =0xffffffffffffffff
@@ -391,6 +404,7 @@ define <vscale x 4 x i64> @test_signed_v4f64_v4i64(<vscale x 4 x double> %f) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov x8, #4895412794951729151 // =0x43efffffffffffff
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    mov z4.d, x8
 ; CHECK-NEXT:    fcmge p1.d, p0/z, z0.d, #0.0
 ; CHECK-NEXT:    fcmge p2.d, p0/z, z1.d, #0.0
@@ -398,9 +412,9 @@ define <vscale x 4 x i64> @test_signed_v4f64_v4i64(<vscale x 4 x double> %f) {
 ; CHECK-NEXT:    fcvtzu z2.d, p0/m, z0.d
 ; CHECK-NEXT:    movprfx z3, z1
 ; CHECK-NEXT:    fcvtzu z3.d, p0/m, z1.d
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
 ; CHECK-NEXT:    fcmgt p3.d, p0/z, z0.d, z4.d
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
 ; CHECK-NEXT:    fcmgt p0.d, p0/z, z1.d, z4.d
 ; CHECK-NEXT:    mov z2.d, p1/m, #0 // =0x0
 ; CHECK-NEXT:    mov z3.d, p2/m, #0 // =0x0
@@ -430,10 +444,11 @@ define <vscale x 2 x i32> @test_signed_v2f16_v2i32(<vscale x 2 x half> %f) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov w8, #31743 // =0x7bff
 ; CHECK-NEXT:    mov z1.h, w8
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmge p1.h, p0/z, z0.h, #0.0
 ; CHECK-NEXT:    movprfx z2, z0
 ; CHECK-NEXT:    fcvtzu z2.d, p0/m, z0.h
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p0.h, p0/z, z0.h, z1.h
 ; CHECK-NEXT:    mov z0.d, #0xffffffff
 ; CHECK-NEXT:    mov z2.d, p1/m, #0 // =0x0
@@ -449,10 +464,11 @@ define <vscale x 4 x i32> @test_signed_v4f16_v4i32(<vscale x 4 x half> %f) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov w8, #31743 // =0x7bff
 ; CHECK-NEXT:    mov z2.h, w8
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmge p1.h, p0/z, z0.h, #0.0
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzu z1.s, p0/m, z0.h
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p0.h, p0/z, z0.h, z2.h
 ; CHECK-NEXT:    mov z1.s, p1/m, #0 // =0x0
 ; CHECK-NEXT:    mov z1.s, p0/m, #-1 // =0xffffffffffffffff
@@ -470,15 +486,16 @@ define <vscale x 8 x i32> @test_signed_v8f16_v8i32(<vscale x 8 x half> %f) {
 ; CHECK-NEXT:    mov w8, #31743 // =0x7bff
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov z4.h, w8
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    fcmge p1.h, p0/z, z2.h, #0.0
 ; CHECK-NEXT:    fcmge p2.h, p0/z, z3.h, #0.0
 ; CHECK-NEXT:    movprfx z0, z2
 ; CHECK-NEXT:    fcvtzu z0.s, p0/m, z2.h
 ; CHECK-NEXT:    movprfx z1, z3
 ; CHECK-NEXT:    fcvtzu z1.s, p0/m, z3.h
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
 ; CHECK-NEXT:    fcmgt p3.h, p0/z, z2.h, z4.h
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
 ; CHECK-NEXT:    fcmgt p0.h, p0/z, z3.h, z4.h
 ; CHECK-NEXT:    mov z0.s, p1/m, #0 // =0x0
 ; CHECK-NEXT:    mov z1.s, p2/m, #0 // =0x0
@@ -495,10 +512,11 @@ define <vscale x 4 x i16> @test_signed_v4f16_v4i16(<vscale x 4 x half> %f) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov w8, #31743 // =0x7bff
 ; CHECK-NEXT:    mov z1.h, w8
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmge p1.h, p0/z, z0.h, #0.0
 ; CHECK-NEXT:    movprfx z2, z0
 ; CHECK-NEXT:    fcvtzu z2.s, p0/m, z0.h
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p0.h, p0/z, z0.h, z1.h
 ; CHECK-NEXT:    mov z0.s, #65535 // =0xffff
 ; CHECK-NEXT:    mov z2.s, p1/m, #0 // =0x0
@@ -514,10 +532,11 @@ define <vscale x 8 x i16> @test_signed_v8f16_v8i16(<vscale x 8 x half> %f) {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    mov w8, #31743 // =0x7bff
 ; CHECK-NEXT:    mov z2.h, w8
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmge p1.h, p0/z, z0.h, #0.0
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzu z1.h, p0/m, z0.h
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p0.h, p0/z, z0.h, z2.h
 ; CHECK-NEXT:    mov z1.h, p1/m, #0 // =0x0
 ; CHECK-NEXT:    mov z1.h, p0/m, #-1 // =0xffffffffffffffff
@@ -533,10 +552,11 @@ define <vscale x 2 x i64> @test_signed_v2f16_v2i64(<vscale x 2 x half> %f) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov w8, #31743 // =0x7bff
 ; CHECK-NEXT:    mov z2.h, w8
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmge p1.h, p0/z, z0.h, #0.0
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzu z1.d, p0/m, z0.h
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p0.h, p0/z, z0.h, z2.h
 ; CHECK-NEXT:    mov z1.d, p1/m, #0 // =0x0
 ; CHECK-NEXT:    mov z1.d, p0/m, #-1 // =0xffffffffffffffff
@@ -554,15 +574,16 @@ define <vscale x 4 x i64> @test_signed_v4f16_v4i64(<vscale x 4 x half> %f) {
 ; CHECK-NEXT:    mov w8, #31743 // =0x7bff
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov z4.h, w8
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    fcmge p1.h, p0/z, z2.h, #0.0
 ; CHECK-NEXT:    fcmge p2.h, p0/z, z3.h, #0.0
 ; CHECK-NEXT:    movprfx z0, z2
 ; CHECK-NEXT:    fcvtzu z0.d, p0/m, z2.h
 ; CHECK-NEXT:    movprfx z1, z3
 ; CHECK-NEXT:    fcvtzu z1.d, p0/m, z3.h
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
 ; CHECK-NEXT:    fcmgt p3.h, p0/z, z2.h, z4.h
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
 ; CHECK-NEXT:    fcmgt p0.h, p0/z, z3.h, z4.h
 ; CHECK-NEXT:    mov z0.d, p1/m, #0 // =0x0
 ; CHECK-NEXT:    mov z1.d, p2/m, #0 // =0x0
diff --git a/llvm/test/CodeGen/AArch64/sve-fptrunc-store.ll b/llvm/test/CodeGen/AArch64/sve-fptrunc-store.ll
index 573958771658c..ba80bf3d6c1b1 100644
--- a/llvm/test/CodeGen/AArch64/sve-fptrunc-store.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fptrunc-store.ll
@@ -7,6 +7,7 @@ define void @fptrunc2_f64_f32(ptr %dst, ptr %src) {
 ; CHECK-NEXT:    ldr z0, [x1]
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fcvt z0.s, p0/m, z0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
@@ -22,6 +23,7 @@ define void @fptrunc2_f64_f16(ptr %dst, ptr %src) {
 ; CHECK-NEXT:    ldr z0, [x1]
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fcvt z0.h, p0/m, z0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
@@ -37,6 +39,7 @@ define void @fptrunc4_f32_f16(ptr %dst, ptr %src) {
 ; CHECK-NEXT:    ldr z0, [x1]
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fcvt z0.h, p0/m, z0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
@@ -49,9 +52,10 @@ entry:
 define void @fptrunc2_f32_f16(ptr %dst, ptr %src) {
 ; CHECK-LABEL: fptrunc2_f32_f16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    ptrue p1.d
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x1]
-; CHECK-NEXT:    fcvt z0.h, p0/m, z0.s
+; CHECK-NEXT:    fcvt z0.h, p1/m, z0.s
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll b/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
index 9f5e0eb9878c2..b3ecec82c124c 100644
--- a/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
+++ b/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
@@ -345,7 +345,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i64_null_with__vec_plus_imm_offsets
 define <vscale x 4 x i32> @masked_gather_nxv4i32_s8_offsets(ptr %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv4i32_s8_offsets:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    sxtb z0.s, p1/m, z0.s
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, sxtw #2]
 ; CHECK-NEXT:    ret
@@ -370,7 +370,7 @@ define <vscale x 4 x i32> @masked_gather_nxv4i32_u8_offsets(ptr %base, <vscale x
 define <vscale x 4 x i32> @masked_gather_nxv4i32_u32s8_offsets(ptr %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv4i32_u32s8_offsets:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    sxtb z0.s, p1/m, z0.s
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, uxtw #2]
 ; CHECK-NEXT:    ret
@@ -421,7 +421,7 @@ define void @masked_scatter_nxv2i64_null_with__vec_plus_imm_offsets(<vscale x 2
 define void @masked_scatter_nxv4i32_s8_offsets(ptr %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask, <vscale x 4 x i32> %data) #0 {
 ; CHECK-LABEL: masked_scatter_nxv4i32_s8_offsets:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    sxtb z0.s, p1/m, z0.s
 ; CHECK-NEXT:    st1w { z1.s }, p0, [x0, z0.s, sxtw #2]
 ; CHECK-NEXT:    ret
@@ -446,7 +446,7 @@ define void @masked_scatter_nxv4i32_u8_offsets(ptr %base, <vscale x 4 x i8> %off
 define void @masked_scatter_nxv4i32_u32s8_offsets(ptr %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask, <vscale x 4 x i32> %data) #0 {
 ; CHECK-LABEL: masked_scatter_nxv4i32_u32s8_offsets:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    sxtb z0.s, p1/m, z0.s
 ; CHECK-NEXT:    st1w { z1.s }, p0, [x0, z0.s, uxtw #2]
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll b/llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll
index 73d043b411696..49726f223b6ae 100644
--- a/llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll
+++ b/llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll
@@ -31,7 +31,7 @@ define <vscale x 2 x i64> @no_dag_combine_sext(<vscale x 2 x i1> %pg,
 ; CHECK-LABEL: no_dag_combine_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z1.d }, p0/z, [z0.d, #16]
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    movprfx z0, z1
 ; CHECK-NEXT:    sxtb z0.d, p0/m, z1.d
 ; CHECK-NEXT:    st1b { z1.d }, p1, [x0]
@@ -76,7 +76,7 @@ define <vscale x 2 x i64> @no_dag_combine_zext(<vscale x 2 x i1> %pg,
 define <vscale x 16 x i8> @narrow_i64_gather_index_i8_zext(ptr %out, ptr %in, <vscale x 16 x i8> %d, i64 %ptr){
 ; CHECK-LABEL: narrow_i64_gather_index_i8_zext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    add x8, x1, x2
 ; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x8, #3, mul vl]
 ; CHECK-NEXT:    ld1b { z1.s }, p0/z, [x8, #2, mul vl]
@@ -102,7 +102,7 @@ define <vscale x 16 x i8> @narrow_i64_gather_index_i8_zext(ptr %out, ptr %in, <v
 define <vscale x 16 x i8> @narrow_i64_gather_index_i8_sext(ptr %out, ptr %in, <vscale x 16 x i8> %d, i64 %ptr){
 ; CHECK-LABEL: narrow_i64_gather_index_i8_sext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    add x8, x1, x2
 ; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x8, #3, mul vl]
 ; CHECK-NEXT:    ld1sb { z1.s }, p0/z, [x8, #2, mul vl]
@@ -128,7 +128,7 @@ define <vscale x 16 x i8> @narrow_i64_gather_index_i8_sext(ptr %out, ptr %in, <v
 define <vscale x 8 x i16> @narrow_i64_gather_index_i16_zext(ptr %out, ptr %in, <vscale x 8 x i16> %d, i64 %ptr){
 ; CHECK-LABEL: narrow_i64_gather_index_i16_zext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    add x8, x1, x2, lsl #1
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x1, x2, lsl #1]
 ; CHECK-NEXT:    ld1h { z1.s }, p0/z, [x8, #1, mul vl]
@@ -148,7 +148,7 @@ define <vscale x 8 x i16> @narrow_i64_gather_index_i16_zext(ptr %out, ptr %in, <
 define <vscale x 8 x i16> @narrow_i64_gather_index_i16_sext(ptr %out, ptr %in, <vscale x 8 x i16> %d, i64 %ptr){
 ; CHECK-LABEL: narrow_i64_gather_index_i16_sext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    add x8, x1, x2, lsl #1
 ; CHECK-NEXT:    ld1sh { z0.s }, p0/z, [x1, x2, lsl #1]
 ; CHECK-NEXT:    ld1sh { z1.s }, p0/z, [x8, #1, mul vl]
@@ -168,7 +168,7 @@ define <vscale x 8 x i16> @narrow_i64_gather_index_i16_sext(ptr %out, ptr %in, <
 define <vscale x 4 x i32> @no_narrow_i64_gather_index_i32(ptr %out, ptr %in, <vscale x 4 x i32> %d, i64 %ptr){
 ; CHECK-LABEL: no_narrow_i64_gather_index_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x1, x2, lsl #2]
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x1, z0.s, uxtw #2]
 ; CHECK-NEXT:    ret
@@ -184,7 +184,7 @@ define <vscale x 4 x i32> @no_narrow_i64_gather_index_i32(ptr %out, ptr %in, <vs
 define <vscale x 2 x i64> @no_narrow_i64_gather_index_i64(ptr %out, ptr %in, <vscale x 2 x i64> %d, i64 %ptr){
 ; CHECK-LABEL: no_narrow_i64_gather_index_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x1, x2, lsl #3]
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x1, z0.d, lsl #3]
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-gep.ll b/llvm/test/CodeGen/AArch64/sve-gep.ll
index e60a3e4bf9fb2..73a1aceb34bf0 100644
--- a/llvm/test/CodeGen/AArch64/sve-gep.ll
+++ b/llvm/test/CodeGen/AArch64/sve-gep.ll
@@ -221,10 +221,11 @@ define <vscale x 2 x ptr> @scalable_of_scalable_2(<vscale x 2 x ptr> %base) {
 define <vscale x 2 x ptr> @scalable_of_scalable_3(<vscale x 2 x ptr> %base, <vscale x 2 x i32> %idx) {
 ; CHECK-LABEL: scalable_of_scalable_3:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    rdvl x8, #1
 ; CHECK-NEXT:    mov z2.d, x8
 ; CHECK-NEXT:    sxtw z1.d, p0/m, z1.d
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mla z0.d, p0/m, z1.d, z2.d
 ; CHECK-NEXT:    ret
   %d = getelementptr <vscale x 2 x i64>, <vscale x 2 x ptr> %base, <vscale x 2 x i32> %idx
diff --git a/llvm/test/CodeGen/AArch64/sve-hadd.ll b/llvm/test/CodeGen/AArch64/sve-hadd.ll
index 978ee4534e5e1..0a77930fa79d5 100644
--- a/llvm/test/CodeGen/AArch64/sve-hadd.ll
+++ b/llvm/test/CodeGen/AArch64/sve-hadd.ll
@@ -74,7 +74,7 @@ entry:
 define <vscale x 2 x i32> @hadds_v2i32(<vscale x 2 x i32> %s0, <vscale x 2 x i32> %s1) {
 ; SVE-LABEL: hadds_v2i32:
 ; SVE:       // %bb.0: // %entry
-; SVE-NEXT:    ptrue p0.d
+; SVE-NEXT:    ptrue p0.b
 ; SVE-NEXT:    sxtw z0.d, p0/m, z0.d
 ; SVE-NEXT:    adr z0.d, [z0.d, z1.d, sxtw]
 ; SVE-NEXT:    asr z0.d, z0.d, #1
@@ -82,9 +82,10 @@ define <vscale x 2 x i32> @hadds_v2i32(<vscale x 2 x i32> %s0, <vscale x 2 x i32
 ;
 ; SVE2-LABEL: hadds_v2i32:
 ; SVE2:       // %bb.0: // %entry
-; SVE2-NEXT:    ptrue p0.d
+; SVE2-NEXT:    ptrue p0.b
 ; SVE2-NEXT:    sxtw z1.d, p0/m, z1.d
 ; SVE2-NEXT:    sxtw z0.d, p0/m, z0.d
+; SVE2-NEXT:    ptrue p0.d
 ; SVE2-NEXT:    shadd z0.d, p0/m, z0.d, z1.d
 ; SVE2-NEXT:    ret
 entry:
@@ -99,7 +100,7 @@ entry:
 define <vscale x 2 x i32> @hadds_v2i32_lsh(<vscale x 2 x i32> %s0, <vscale x 2 x i32> %s1) {
 ; CHECK-LABEL: hadds_v2i32_lsh:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
 ; CHECK-NEXT:    adr z0.d, [z0.d, z1.d, sxtw]
 ; CHECK-NEXT:    lsr z0.d, z0.d, #1
@@ -209,7 +210,7 @@ entry:
 define <vscale x 2 x i16> @hadds_v2i16(<vscale x 2 x i16> %s0, <vscale x 2 x i16> %s1) {
 ; SVE-LABEL: hadds_v2i16:
 ; SVE:       // %bb.0: // %entry
-; SVE-NEXT:    ptrue p0.d
+; SVE-NEXT:    ptrue p0.b
 ; SVE-NEXT:    sxth z1.d, p0/m, z1.d
 ; SVE-NEXT:    sxth z0.d, p0/m, z0.d
 ; SVE-NEXT:    add z0.d, z0.d, z1.d
@@ -218,9 +219,10 @@ define <vscale x 2 x i16> @hadds_v2i16(<vscale x 2 x i16> %s0, <vscale x 2 x i16
 ;
 ; SVE2-LABEL: hadds_v2i16:
 ; SVE2:       // %bb.0: // %entry
-; SVE2-NEXT:    ptrue p0.d
+; SVE2-NEXT:    ptrue p0.b
 ; SVE2-NEXT:    sxth z1.d, p0/m, z1.d
 ; SVE2-NEXT:    sxth z0.d, p0/m, z0.d
+; SVE2-NEXT:    ptrue p0.d
 ; SVE2-NEXT:    shadd z0.d, p0/m, z0.d, z1.d
 ; SVE2-NEXT:    ret
 entry:
@@ -235,7 +237,7 @@ entry:
 define <vscale x 2 x i16> @hadds_v2i16_lsh(<vscale x 2 x i16> %s0, <vscale x 2 x i16> %s1) {
 ; CHECK-LABEL: hadds_v2i16_lsh:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxth z0.d, p0/m, z0.d
 ; CHECK-NEXT:    sxth z1.d, p0/m, z1.d
 ; CHECK-NEXT:    add z0.d, z0.d, z1.d
@@ -279,7 +281,7 @@ entry:
 define <vscale x 4 x i16> @hadds_v4i16(<vscale x 4 x i16> %s0, <vscale x 4 x i16> %s1) {
 ; SVE-LABEL: hadds_v4i16:
 ; SVE:       // %bb.0: // %entry
-; SVE-NEXT:    ptrue p0.s
+; SVE-NEXT:    ptrue p0.b
 ; SVE-NEXT:    sxth z1.s, p0/m, z1.s
 ; SVE-NEXT:    sxth z0.s, p0/m, z0.s
 ; SVE-NEXT:    add z0.s, z0.s, z1.s
@@ -288,9 +290,10 @@ define <vscale x 4 x i16> @hadds_v4i16(<vscale x 4 x i16> %s0, <vscale x 4 x i16
 ;
 ; SVE2-LABEL: hadds_v4i16:
 ; SVE2:       // %bb.0: // %entry
-; SVE2-NEXT:    ptrue p0.s
+; SVE2-NEXT:    ptrue p0.b
 ; SVE2-NEXT:    sxth z1.s, p0/m, z1.s
 ; SVE2-NEXT:    sxth z0.s, p0/m, z0.s
+; SVE2-NEXT:    ptrue p0.s
 ; SVE2-NEXT:    shadd z0.s, p0/m, z0.s, z1.s
 ; SVE2-NEXT:    ret
 entry:
@@ -305,7 +308,7 @@ entry:
 define <vscale x 4 x i16> @hadds_v4i16_lsh(<vscale x 4 x i16> %s0, <vscale x 4 x i16> %s1) {
 ; CHECK-LABEL: hadds_v4i16_lsh:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxth z0.s, p0/m, z0.s
 ; CHECK-NEXT:    sxth z1.s, p0/m, z1.s
 ; CHECK-NEXT:    add z0.s, z0.s, z1.s
@@ -417,7 +420,7 @@ entry:
 define <vscale x 4 x i8> @hadds_v4i8(<vscale x 4 x i8> %s0, <vscale x 4 x i8> %s1) {
 ; SVE-LABEL: hadds_v4i8:
 ; SVE:       // %bb.0: // %entry
-; SVE-NEXT:    ptrue p0.s
+; SVE-NEXT:    ptrue p0.b
 ; SVE-NEXT:    sxtb z1.s, p0/m, z1.s
 ; SVE-NEXT:    sxtb z0.s, p0/m, z0.s
 ; SVE-NEXT:    add z0.s, z0.s, z1.s
@@ -426,9 +429,10 @@ define <vscale x 4 x i8> @hadds_v4i8(<vscale x 4 x i8> %s0, <vscale x 4 x i8> %s
 ;
 ; SVE2-LABEL: hadds_v4i8:
 ; SVE2:       // %bb.0: // %entry
-; SVE2-NEXT:    ptrue p0.s
+; SVE2-NEXT:    ptrue p0.b
 ; SVE2-NEXT:    sxtb z1.s, p0/m, z1.s
 ; SVE2-NEXT:    sxtb z0.s, p0/m, z0.s
+; SVE2-NEXT:    ptrue p0.s
 ; SVE2-NEXT:    shadd z0.s, p0/m, z0.s, z1.s
 ; SVE2-NEXT:    ret
 entry:
@@ -443,7 +447,7 @@ entry:
 define <vscale x 4 x i8> @hadds_v4i8_lsh(<vscale x 4 x i8> %s0, <vscale x 4 x i8> %s1) {
 ; CHECK-LABEL: hadds_v4i8_lsh:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtb z0.s, p0/m, z0.s
 ; CHECK-NEXT:    sxtb z1.s, p0/m, z1.s
 ; CHECK-NEXT:    add z0.s, z0.s, z1.s
@@ -487,7 +491,7 @@ entry:
 define <vscale x 8 x i8> @hadds_v8i8(<vscale x 8 x i8> %s0, <vscale x 8 x i8> %s1) {
 ; SVE-LABEL: hadds_v8i8:
 ; SVE:       // %bb.0: // %entry
-; SVE-NEXT:    ptrue p0.h
+; SVE-NEXT:    ptrue p0.b
 ; SVE-NEXT:    sxtb z1.h, p0/m, z1.h
 ; SVE-NEXT:    sxtb z0.h, p0/m, z0.h
 ; SVE-NEXT:    add z0.h, z0.h, z1.h
@@ -496,9 +500,10 @@ define <vscale x 8 x i8> @hadds_v8i8(<vscale x 8 x i8> %s0, <vscale x 8 x i8> %s
 ;
 ; SVE2-LABEL: hadds_v8i8:
 ; SVE2:       // %bb.0: // %entry
-; SVE2-NEXT:    ptrue p0.h
+; SVE2-NEXT:    ptrue p0.b
 ; SVE2-NEXT:    sxtb z1.h, p0/m, z1.h
 ; SVE2-NEXT:    sxtb z0.h, p0/m, z0.h
+; SVE2-NEXT:    ptrue p0.h
 ; SVE2-NEXT:    shadd z0.h, p0/m, z0.h, z1.h
 ; SVE2-NEXT:    ret
 entry:
@@ -513,7 +518,7 @@ entry:
 define <vscale x 8 x i8> @hadds_v8i8_lsh(<vscale x 8 x i8> %s0, <vscale x 8 x i8> %s1) {
 ; CHECK-LABEL: hadds_v8i8_lsh:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtb z0.h, p0/m, z0.h
 ; CHECK-NEXT:    sxtb z1.h, p0/m, z1.h
 ; CHECK-NEXT:    add z0.h, z0.h, z1.h
@@ -697,7 +702,7 @@ entry:
 define <vscale x 2 x i32> @rhadds_v2i32(<vscale x 2 x i32> %s0, <vscale x 2 x i32> %s1) {
 ; SVE-LABEL: rhadds_v2i32:
 ; SVE:       // %bb.0: // %entry
-; SVE-NEXT:    ptrue p0.d
+; SVE-NEXT:    ptrue p0.b
 ; SVE-NEXT:    mov z2.d, #-1 // =0xffffffffffffffff
 ; SVE-NEXT:    sxtw z0.d, p0/m, z0.d
 ; SVE-NEXT:    sxtw z1.d, p0/m, z1.d
@@ -708,9 +713,10 @@ define <vscale x 2 x i32> @rhadds_v2i32(<vscale x 2 x i32> %s0, <vscale x 2 x i3
 ;
 ; SVE2-LABEL: rhadds_v2i32:
 ; SVE2:       // %bb.0: // %entry
-; SVE2-NEXT:    ptrue p0.d
+; SVE2-NEXT:    ptrue p0.b
 ; SVE2-NEXT:    sxtw z1.d, p0/m, z1.d
 ; SVE2-NEXT:    sxtw z0.d, p0/m, z0.d
+; SVE2-NEXT:    ptrue p0.d
 ; SVE2-NEXT:    srhadd z0.d, p0/m, z0.d, z1.d
 ; SVE2-NEXT:    ret
 entry:
@@ -726,7 +732,7 @@ entry:
 define <vscale x 2 x i32> @rhadds_v2i32_lsh(<vscale x 2 x i32> %s0, <vscale x 2 x i32> %s1) {
 ; CHECK-LABEL: rhadds_v2i32_lsh:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z2.d, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
 ; CHECK-NEXT:    sxtw z1.d, p0/m, z1.d
@@ -847,7 +853,7 @@ entry:
 define <vscale x 2 x i16> @rhadds_v2i16(<vscale x 2 x i16> %s0, <vscale x 2 x i16> %s1) {
 ; SVE-LABEL: rhadds_v2i16:
 ; SVE:       // %bb.0: // %entry
-; SVE-NEXT:    ptrue p0.d
+; SVE-NEXT:    ptrue p0.b
 ; SVE-NEXT:    mov z2.d, #-1 // =0xffffffffffffffff
 ; SVE-NEXT:    sxth z0.d, p0/m, z0.d
 ; SVE-NEXT:    sxth z1.d, p0/m, z1.d
@@ -858,9 +864,10 @@ define <vscale x 2 x i16> @rhadds_v2i16(<vscale x 2 x i16> %s0, <vscale x 2 x i1
 ;
 ; SVE2-LABEL: rhadds_v2i16:
 ; SVE2:       // %bb.0: // %entry
-; SVE2-NEXT:    ptrue p0.d
+; SVE2-NEXT:    ptrue p0.b
 ; SVE2-NEXT:    sxth z1.d, p0/m, z1.d
 ; SVE2-NEXT:    sxth z0.d, p0/m, z0.d
+; SVE2-NEXT:    ptrue p0.d
 ; SVE2-NEXT:    srhadd z0.d, p0/m, z0.d, z1.d
 ; SVE2-NEXT:    ret
 entry:
@@ -876,7 +883,7 @@ entry:
 define <vscale x 2 x i16> @rhadds_v2i16_lsh(<vscale x 2 x i16> %s0, <vscale x 2 x i16> %s1) {
 ; CHECK-LABEL: rhadds_v2i16_lsh:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z2.d, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    sxth z0.d, p0/m, z0.d
 ; CHECK-NEXT:    sxth z1.d, p0/m, z1.d
@@ -926,7 +933,7 @@ entry:
 define <vscale x 4 x i16> @rhadds_v4i16(<vscale x 4 x i16> %s0, <vscale x 4 x i16> %s1) {
 ; SVE-LABEL: rhadds_v4i16:
 ; SVE:       // %bb.0: // %entry
-; SVE-NEXT:    ptrue p0.s
+; SVE-NEXT:    ptrue p0.b
 ; SVE-NEXT:    mov z2.s, #-1 // =0xffffffffffffffff
 ; SVE-NEXT:    sxth z0.s, p0/m, z0.s
 ; SVE-NEXT:    sxth z1.s, p0/m, z1.s
@@ -937,9 +944,10 @@ define <vscale x 4 x i16> @rhadds_v4i16(<vscale x 4 x i16> %s0, <vscale x 4 x i1
 ;
 ; SVE2-LABEL: rhadds_v4i16:
 ; SVE2:       // %bb.0: // %entry
-; SVE2-NEXT:    ptrue p0.s
+; SVE2-NEXT:    ptrue p0.b
 ; SVE2-NEXT:    sxth z1.s, p0/m, z1.s
 ; SVE2-NEXT:    sxth z0.s, p0/m, z0.s
+; SVE2-NEXT:    ptrue p0.s
 ; SVE2-NEXT:    srhadd z0.s, p0/m, z0.s, z1.s
 ; SVE2-NEXT:    ret
 entry:
@@ -955,7 +963,7 @@ entry:
 define <vscale x 4 x i16> @rhadds_v4i16_lsh(<vscale x 4 x i16> %s0, <vscale x 4 x i16> %s1) {
 ; CHECK-LABEL: rhadds_v4i16_lsh:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z2.s, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    sxth z0.s, p0/m, z0.s
 ; CHECK-NEXT:    sxth z1.s, p0/m, z1.s
@@ -1076,7 +1084,7 @@ entry:
 define <vscale x 4 x i8> @rhadds_v4i8(<vscale x 4 x i8> %s0, <vscale x 4 x i8> %s1) {
 ; SVE-LABEL: rhadds_v4i8:
 ; SVE:       // %bb.0: // %entry
-; SVE-NEXT:    ptrue p0.s
+; SVE-NEXT:    ptrue p0.b
 ; SVE-NEXT:    mov z2.s, #-1 // =0xffffffffffffffff
 ; SVE-NEXT:    sxtb z0.s, p0/m, z0.s
 ; SVE-NEXT:    sxtb z1.s, p0/m, z1.s
@@ -1087,9 +1095,10 @@ define <vscale x 4 x i8> @rhadds_v4i8(<vscale x 4 x i8> %s0, <vscale x 4 x i8> %
 ;
 ; SVE2-LABEL: rhadds_v4i8:
 ; SVE2:       // %bb.0: // %entry
-; SVE2-NEXT:    ptrue p0.s
+; SVE2-NEXT:    ptrue p0.b
 ; SVE2-NEXT:    sxtb z1.s, p0/m, z1.s
 ; SVE2-NEXT:    sxtb z0.s, p0/m, z0.s
+; SVE2-NEXT:    ptrue p0.s
 ; SVE2-NEXT:    srhadd z0.s, p0/m, z0.s, z1.s
 ; SVE2-NEXT:    ret
 entry:
@@ -1105,7 +1114,7 @@ entry:
 define <vscale x 4 x i8> @rhadds_v4i8_lsh(<vscale x 4 x i8> %s0, <vscale x 4 x i8> %s1) {
 ; CHECK-LABEL: rhadds_v4i8_lsh:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z2.s, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    sxtb z0.s, p0/m, z0.s
 ; CHECK-NEXT:    sxtb z1.s, p0/m, z1.s
@@ -1155,7 +1164,7 @@ entry:
 define <vscale x 8 x i8> @rhadds_v8i8(<vscale x 8 x i8> %s0, <vscale x 8 x i8> %s1) {
 ; SVE-LABEL: rhadds_v8i8:
 ; SVE:       // %bb.0: // %entry
-; SVE-NEXT:    ptrue p0.h
+; SVE-NEXT:    ptrue p0.b
 ; SVE-NEXT:    mov z2.h, #-1 // =0xffffffffffffffff
 ; SVE-NEXT:    sxtb z0.h, p0/m, z0.h
 ; SVE-NEXT:    sxtb z1.h, p0/m, z1.h
@@ -1166,9 +1175,10 @@ define <vscale x 8 x i8> @rhadds_v8i8(<vscale x 8 x i8> %s0, <vscale x 8 x i8> %
 ;
 ; SVE2-LABEL: rhadds_v8i8:
 ; SVE2:       // %bb.0: // %entry
-; SVE2-NEXT:    ptrue p0.h
+; SVE2-NEXT:    ptrue p0.b
 ; SVE2-NEXT:    sxtb z1.h, p0/m, z1.h
 ; SVE2-NEXT:    sxtb z0.h, p0/m, z0.h
+; SVE2-NEXT:    ptrue p0.h
 ; SVE2-NEXT:    srhadd z0.h, p0/m, z0.h, z1.h
 ; SVE2-NEXT:    ret
 entry:
@@ -1184,7 +1194,7 @@ entry:
 define <vscale x 8 x i8> @rhadds_v8i8_lsh(<vscale x 8 x i8> %s0, <vscale x 8 x i8> %s1) {
 ; CHECK-LABEL: rhadds_v8i8_lsh:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z2.h, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    sxtb z0.h, p0/m, z0.h
 ; CHECK-NEXT:    sxtb z1.h, p0/m, z1.h
diff --git a/llvm/test/CodeGen/AArch64/sve-implicit-zero-filling.ll b/llvm/test/CodeGen/AArch64/sve-implicit-zero-filling.ll
index ebec275c92c52..ebd31c6307c7c 100644
--- a/llvm/test/CodeGen/AArch64/sve-implicit-zero-filling.ll
+++ b/llvm/test/CodeGen/AArch64/sve-implicit-zero-filling.ll
@@ -179,7 +179,7 @@ define <vscale x 2 x i64> @zero_fill_non_zero_index(<vscale x 2 x i1> %pg, <vsca
 ; CHECK-NEXT:    index z1.d, #0, #1
 ; CHECK-NEXT:    uminv d3, p0, z0.d
 ; CHECK-NEXT:    mov z2.d, x8
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    cmpeq p0.d, p1/z, z1.d, z2.d
 ; CHECK-NEXT:    fmov x8, d3
@@ -212,7 +212,7 @@ define <vscale x 2 x i64> @zero_fill_no_zero_upper_lanes(<vscale x 2 x i1> %pg,
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umin z0.d, p0/m, z0.d, z0.d
 ; CHECK-NEXT:    movi v1.2d, #0000000000000000
-; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    ptrue p0.b, vl1
 ; CHECK-NEXT:    fmov x8, d0
 ; CHECK-NEXT:    mov z1.d, p0/m, x8
 ; CHECK-NEXT:    mov z0.d, z1.d
diff --git a/llvm/test/CodeGen/AArch64/sve-insert-element.ll b/llvm/test/CodeGen/AArch64/sve-insert-element.ll
index 7f558e32ae397..fffc0bcaf97dd 100644
--- a/llvm/test/CodeGen/AArch64/sve-insert-element.ll
+++ b/llvm/test/CodeGen/AArch64/sve-insert-element.ll
@@ -15,7 +15,7 @@ define <vscale x 16 x i8> @test_lane0_16xi8(<vscale x 16 x i8> %a) {
 define <vscale x 8 x i16> @test_lane0_8xi16(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: test_lane0_8xi16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h, vl1
+; CHECK-NEXT:    ptrue p0.b, vl1
 ; CHECK-NEXT:    mov w8, #30 // =0x1e
 ; CHECK-NEXT:    mov z0.h, p0/m, w8
 ; CHECK-NEXT:    ret
@@ -26,7 +26,7 @@ define <vscale x 8 x i16> @test_lane0_8xi16(<vscale x 8 x i16> %a) {
 define <vscale x 4 x i32> @test_lane0_4xi32(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: test_lane0_4xi32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s, vl1
+; CHECK-NEXT:    ptrue p0.b, vl1
 ; CHECK-NEXT:    mov w8, #30 // =0x1e
 ; CHECK-NEXT:    mov z0.s, p0/m, w8
 ; CHECK-NEXT:    ret
@@ -37,7 +37,7 @@ define <vscale x 4 x i32> @test_lane0_4xi32(<vscale x 4 x i32> %a) {
 define <vscale x 2 x i64> @test_lane0_2xi64(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: test_lane0_2xi64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    ptrue p0.b, vl1
 ; CHECK-NEXT:    mov w8, #30 // =0x1e
 ; CHECK-NEXT:    mov z0.d, p0/m, x8
 ; CHECK-NEXT:    ret
@@ -49,7 +49,7 @@ define <vscale x 2 x double> @test_lane0_2xf64(<vscale x 2 x double> %a) {
 ; CHECK-LABEL: test_lane0_2xf64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov d1, #1.00000000
-; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    ptrue p0.b, vl1
 ; CHECK-NEXT:    mov z0.d, p0/m, z1.d
 ; CHECK-NEXT:    ret
   %b = insertelement <vscale x 2 x double> %a, double 1.0, i32 0
@@ -60,7 +60,7 @@ define <vscale x 4 x float> @test_lane0_4xf32(<vscale x 4 x float> %a) {
 ; CHECK-LABEL: test_lane0_4xf32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov s1, #1.00000000
-; CHECK-NEXT:    ptrue p0.s, vl1
+; CHECK-NEXT:    ptrue p0.b, vl1
 ; CHECK-NEXT:    mov z0.s, p0/m, z1.s
 ; CHECK-NEXT:    ret
   %b = insertelement <vscale x 4 x float> %a, float 1.0, i32 0
@@ -71,7 +71,7 @@ define <vscale x 8 x half> @test_lane0_8xf16(<vscale x 8 x half> %a) {
 ; CHECK-LABEL: test_lane0_8xf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov h1, #1.00000000
-; CHECK-NEXT:    ptrue p0.h, vl1
+; CHECK-NEXT:    ptrue p0.b, vl1
 ; CHECK-NEXT:    mov z0.h, p0/m, z1.h
 ; CHECK-NEXT:    ret
   %b = insertelement <vscale x 8 x half> %a, half 1.0, i32 0
@@ -81,7 +81,7 @@ define <vscale x 8 x half> @test_lane0_8xf16(<vscale x 8 x half> %a) {
 define <vscale x 8 x bfloat> @test_lane0_8xbf16(<vscale x 8 x bfloat> %a, bfloat %x) {
 ; CHECK-LABEL: test_lane0_8xbf16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h, vl1
+; CHECK-NEXT:    ptrue p0.b, vl1
 ; CHECK-NEXT:    // kill: def $h1 killed $h1 def $z1
 ; CHECK-NEXT:    mov z0.h, p0/m, z1.h
 ; CHECK-NEXT:    ret
@@ -95,7 +95,7 @@ define <vscale x 2 x i64> @test_lane4_2xi64(<vscale x 2 x i64> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #4 // =0x4
 ; CHECK-NEXT:    index z1.d, #0, #1
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z2.d, x8
 ; CHECK-NEXT:    mov w8, #30 // =0x1e
 ; CHECK-NEXT:    cmpeq p0.d, p0/z, z1.d, z2.d
@@ -111,7 +111,7 @@ define <vscale x 8 x half> @test_lane9_8xf16(<vscale x 8 x half> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #9 // =0x9
 ; CHECK-NEXT:    index z1.h, #0, #1
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z2.h, w8
 ; CHECK-NEXT:    cmpeq p0.h, p0/z, z1.h, z2.h
 ; CHECK-NEXT:    fmov h1, #1.00000000
@@ -126,7 +126,7 @@ define <vscale x 8 x bfloat> @test_lane9_8xbf16(<vscale x 8 x bfloat> %a, bfloat
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #9 // =0x9
 ; CHECK-NEXT:    index z2.h, #0, #1
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z3.h, w8
 ; CHECK-NEXT:    cmpeq p0.h, p0/z, z2.h, z3.h
 ; CHECK-NEXT:    mov z0.h, p0/m, h1
@@ -181,7 +181,7 @@ define <vscale x 8 x i16> @test_lane6_undef_8xi16(i16 %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #6 // =0x6
 ; CHECK-NEXT:    index z0.h, #0, #1
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z1.h, w8
 ; CHECK-NEXT:    cmpeq p0.h, p0/z, z0.h, z1.h
 ; CHECK-NEXT:    mov z0.h, p0/m, w0
@@ -331,7 +331,7 @@ define <vscale x 2 x half> @test_insert_with_index_nxv2f16(half %h, i64 %idx) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z1.d, #0, #1
 ; CHECK-NEXT:    mov z2.d, x0
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cmpeq p0.d, p0/z, z1.d, z2.d
 ; CHECK-NEXT:    mov z0.h, p0/m, h0
 ; CHECK-NEXT:    ret
@@ -344,7 +344,7 @@ define <vscale x 4 x half> @test_insert_with_index_nxv4f16(half %h, i64 %idx) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z1.s, #0, #1
 ; CHECK-NEXT:    mov z2.s, w0
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cmpeq p0.s, p0/z, z1.s, z2.s
 ; CHECK-NEXT:    mov z0.h, p0/m, h0
 ; CHECK-NEXT:    ret
@@ -357,7 +357,7 @@ define <vscale x 8 x half> @test_insert_with_index_nxv8f16(half %h, i64 %idx) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z1.h, #0, #1
 ; CHECK-NEXT:    mov z2.h, w0
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cmpeq p0.h, p0/z, z1.h, z2.h
 ; CHECK-NEXT:    mov z0.h, p0/m, h0
 ; CHECK-NEXT:    ret
@@ -370,7 +370,7 @@ define <vscale x 2 x bfloat> @test_insert_with_index_nxv2bf16(bfloat %h, i64 %id
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z1.d, #0, #1
 ; CHECK-NEXT:    mov z2.d, x0
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cmpeq p0.d, p0/z, z1.d, z2.d
 ; CHECK-NEXT:    mov z0.h, p0/m, h0
 ; CHECK-NEXT:    ret
@@ -383,7 +383,7 @@ define <vscale x 4 x bfloat> @test_insert_with_index_nxv4bf16(bfloat %h, i64 %id
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z1.s, #0, #1
 ; CHECK-NEXT:    mov z2.s, w0
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cmpeq p0.s, p0/z, z1.s, z2.s
 ; CHECK-NEXT:    mov z0.h, p0/m, h0
 ; CHECK-NEXT:    ret
@@ -396,7 +396,7 @@ define <vscale x 8 x bfloat> @test_insert_with_index_nxv8bf16(bfloat %h, i64 %id
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z1.h, #0, #1
 ; CHECK-NEXT:    mov z2.h, w0
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cmpeq p0.h, p0/z, z1.h, z2.h
 ; CHECK-NEXT:    mov z0.h, p0/m, h0
 ; CHECK-NEXT:    ret
@@ -409,7 +409,7 @@ define <vscale x 2 x float> @test_insert_with_index_nxv2f32(float %f, i64 %idx)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z1.d, #0, #1
 ; CHECK-NEXT:    mov z2.d, x0
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cmpeq p0.d, p0/z, z1.d, z2.d
 ; CHECK-NEXT:    mov z0.s, p0/m, s0
 ; CHECK-NEXT:    ret
@@ -422,7 +422,7 @@ define <vscale x 4 x float> @test_insert_with_index_nxv4f32(float %f, i64 %idx)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z1.s, #0, #1
 ; CHECK-NEXT:    mov z2.s, w0
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cmpeq p0.s, p0/z, z1.s, z2.s
 ; CHECK-NEXT:    mov z0.s, p0/m, s0
 ; CHECK-NEXT:    ret
@@ -435,7 +435,7 @@ define <vscale x 2 x double> @test_insert_with_index_nxv2f64(double %d, i64 %idx
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z1.d, #0, #1
 ; CHECK-NEXT:    mov z2.d, x0
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cmpeq p0.d, p0/z, z1.d, z2.d
 ; CHECK-NEXT:    mov z0.d, p0/m, d0
 ; CHECK-NEXT:    ret
@@ -448,7 +448,7 @@ define <vscale x 2 x i1> @test_predicate_insert_2xi1_immediate (<vscale x 2 x i1
 ; CHECK-LABEL: test_predicate_insert_2xi1_immediate:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.d, p0/z, #1 // =0x1
-; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    ptrue p0.b, vl1
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
 ; CHECK-NEXT:    mov z0.d, p0/m, x0
 ; CHECK-NEXT:    ptrue p0.d
@@ -464,13 +464,14 @@ define <vscale x 4 x i1> @test_predicate_insert_4xi1_immediate (<vscale x 4 x i1
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #2 // =0x2
 ; CHECK-NEXT:    index z0.s, #0, #1
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    mov z1.s, w8
-; CHECK-NEXT:    cmpeq p2.s, p1/z, z0.s, z1.s
+; CHECK-NEXT:    cmpeq p1.s, p1/z, z0.s, z1.s
 ; CHECK-NEXT:    mov z0.s, p0/z, #1 // =0x1
-; CHECK-NEXT:    mov z0.s, p2/m, w0
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    mov z0.s, p1/m, w0
 ; CHECK-NEXT:    and z0.s, z0.s, #0x1
-; CHECK-NEXT:    cmpne p0.s, p1/z, z0.s, #0
+; CHECK-NEXT:    cmpne p0.s, p0/z, z0.s, #0
 ; CHECK-NEXT:    ret
   %res = insertelement <vscale x 4 x i1> %val, i1 %elt, i32 2
   ret <vscale x 4 x i1> %res
@@ -481,14 +482,15 @@ define <vscale x 8 x i1> @test_predicate_insert_8xi1_immediate (<vscale x 8 x i1
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.h, #0, #1
 ; CHECK-NEXT:    mov w8, w0
-; CHECK-NEXT:    ptrue p1.h
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    mov z1.h, w8
 ; CHECK-NEXT:    mov w8, #1 // =0x1
-; CHECK-NEXT:    cmpeq p2.h, p1/z, z0.h, z1.h
+; CHECK-NEXT:    cmpeq p1.h, p1/z, z0.h, z1.h
 ; CHECK-NEXT:    mov z0.h, p0/z, #1 // =0x1
-; CHECK-NEXT:    mov z0.h, p2/m, w8
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    mov z0.h, p1/m, w8
 ; CHECK-NEXT:    and z0.h, z0.h, #0x1
-; CHECK-NEXT:    cmpne p0.h, p1/z, z0.h, #0
+; CHECK-NEXT:    cmpne p0.h, p0/z, z0.h, #0
 ; CHECK-NEXT:    ret
   %res = insertelement <vscale x 8 x i1> %val, i1 1, i32 %idx
   ret <vscale x 8 x i1> %res
@@ -518,14 +520,15 @@ define <vscale x 2 x i1> @test_predicate_insert_2xi1(<vscale x 2 x i1> %val, i1
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    mov w8, w1
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    mov z1.d, x8
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
-; CHECK-NEXT:    cmpeq p2.d, p1/z, z0.d, z1.d
+; CHECK-NEXT:    cmpeq p1.d, p1/z, z0.d, z1.d
 ; CHECK-NEXT:    mov z0.d, p0/z, #1 // =0x1
-; CHECK-NEXT:    mov z0.d, p2/m, x0
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    mov z0.d, p1/m, x0
 ; CHECK-NEXT:    and z0.d, z0.d, #0x1
-; CHECK-NEXT:    cmpne p0.d, p1/z, z0.d, #0
+; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
 ; CHECK-NEXT:    ret
   %res = insertelement <vscale x 2 x i1> %val, i1 %elt, i32 %idx
   ret <vscale x 2 x i1> %res
@@ -536,13 +539,14 @@ define <vscale x 4 x i1> @test_predicate_insert_4xi1(<vscale x 4 x i1> %val, i1
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.s, #0, #1
 ; CHECK-NEXT:    mov w8, w1
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    mov z1.s, w8
-; CHECK-NEXT:    cmpeq p2.s, p1/z, z0.s, z1.s
+; CHECK-NEXT:    cmpeq p1.s, p1/z, z0.s, z1.s
 ; CHECK-NEXT:    mov z0.s, p0/z, #1 // =0x1
-; CHECK-NEXT:    mov z0.s, p2/m, w0
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    mov z0.s, p1/m, w0
 ; CHECK-NEXT:    and z0.s, z0.s, #0x1
-; CHECK-NEXT:    cmpne p0.s, p1/z, z0.s, #0
+; CHECK-NEXT:    cmpne p0.s, p0/z, z0.s, #0
 ; CHECK-NEXT:    ret
   %res = insertelement <vscale x 4 x i1> %val, i1 %elt, i32 %idx
   ret <vscale x 4 x i1> %res
@@ -552,13 +556,14 @@ define <vscale x 8 x i1> @test_predicate_insert_8xi1(<vscale x 8 x i1> %val, i1
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z0.h, #0, #1
 ; CHECK-NEXT:    mov w8, w1
-; CHECK-NEXT:    ptrue p1.h
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    mov z1.h, w8
-; CHECK-NEXT:    cmpeq p2.h, p1/z, z0.h, z1.h
+; CHECK-NEXT:    cmpeq p1.h, p1/z, z0.h, z1.h
 ; CHECK-NEXT:    mov z0.h, p0/z, #1 // =0x1
-; CHECK-NEXT:    mov z0.h, p2/m, w0
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    mov z0.h, p1/m, w0
 ; CHECK-NEXT:    and z0.h, z0.h, #0x1
-; CHECK-NEXT:    cmpne p0.h, p1/z, z0.h, #0
+; CHECK-NEXT:    cmpne p0.h, p0/z, z0.h, #0
 ; CHECK-NEXT:    ret
   %res = insertelement <vscale x 8 x i1> %val, i1 %elt, i32 %idx
   ret <vscale x 8 x i1> %res
diff --git a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
index 14948647c2f8d..71d6f331227a6 100644
--- a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
@@ -321,7 +321,7 @@ define <vscale x 8 x half> @insert_nxv8f16_nxv2f16(<vscale x 8 x half> %vec, <vs
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    str z0, [sp]
 ; CHECK-NEXT:    st1h { z1.d }, p0, [sp, #1, mul vl]
 ; CHECK-NEXT:    ldr z0, [sp]
@@ -499,7 +499,7 @@ define <vscale x 4 x bfloat> @insert_nxv4bf16_v4bf16(<vscale x 4 x bfloat> %sv0,
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    addpl x8, sp, #4
 ; CHECK-NEXT:    st1h { z0.s }, p0, [sp, #1, mul vl]
 ; CHECK-NEXT:    str d1, [x8]
@@ -645,7 +645,7 @@ define <vscale x 16 x i1> @insert_nxv16i1_nxv4i1_into_poison(<vscale x 4 x i1> %
 define <vscale x 2 x i1> @insert_nxv2i1_v8i1_const_true_into_undef() vscale_range(4,8) {
 ; CHECK-LABEL: insert_nxv2i1_v8i1_const_true_into_undef:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ret
   %v0 = call <vscale x 2 x i1> @llvm.vector.insert.nxv2i1.v8i1 (<vscale x 2 x i1> poison, <8 x i1> splat (i1 true), i64 0)
   ret <vscale x 2 x i1> %v0
@@ -654,7 +654,7 @@ define <vscale x 2 x i1> @insert_nxv2i1_v8i1_const_true_into_undef() vscale_rang
 define <vscale x 4 x i1> @insert_nxv4i1_v16i1_const_true_into_undef() vscale_range(4,8) {
 ; CHECK-LABEL: insert_nxv4i1_v16i1_const_true_into_undef:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ret
   %v0 = call <vscale x 4 x i1> @llvm.vector.insert.nxv4i1.v16i1 (<vscale x 4 x i1> poison, <16 x i1> splat (i1 true), i64 0)
   ret <vscale x 4 x i1> %v0
@@ -663,7 +663,7 @@ define <vscale x 4 x i1> @insert_nxv4i1_v16i1_const_true_into_undef() vscale_ran
 define <vscale x 8 x i1> @insert_nxv8i1_v32i1_const_true_into_undef() vscale_range(4,8) {
 ; CHECK-LABEL: insert_nxv8i1_v32i1_const_true_into_undef:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ret
   %v0 = call <vscale x 8 x i1> @llvm.vector.insert.nxv8i1.v32i1 (<vscale x 8 x i1> poison, <32 x i1> splat (i1 true), i64 0)
   ret <vscale x 8 x i1> %v0
diff --git a/llvm/test/CodeGen/AArch64/sve-int-arith.ll b/llvm/test/CodeGen/AArch64/sve-int-arith.ll
index c59b1d430ff4f..108714e175425 100644
--- a/llvm/test/CodeGen/AArch64/sve-int-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-arith.ll
@@ -142,8 +142,9 @@ define <vscale x 2 x i64> @abs_nxv2i64(<vscale x 2 x i64> %a) {
 define <vscale x 4 x i16> @abs_nxv4i16(<vscale x 4 x i16> %a) {
 ; CHECK-LABEL: abs_nxv4i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxth z0.s, p0/m, z0.s
+; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    abs z0.s, p0/m, z0.s
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i16> @llvm.abs.nxv4i16(<vscale x 4 x i16> %a, i1 false)
diff --git a/llvm/test/CodeGen/AArch64/sve-int-pred-reduce.ll b/llvm/test/CodeGen/AArch64/sve-int-pred-reduce.ll
index 9cff3b3056e0c..de58a25af8ab9 100644
--- a/llvm/test/CodeGen/AArch64/sve-int-pred-reduce.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-pred-reduce.ll
@@ -50,10 +50,11 @@ define i1 @reduce_and_nxv2i1(<vscale x 2 x i1> %vec) {
 define i1 @reduce_and_nxv1i1(<vscale x 1 x i1> %vec) {
 ; CHECK-LABEL: reduce_and_nxv1i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.d
-; CHECK-NEXT:    punpklo p2.h, p1.b
-; CHECK-NEXT:    eor p0.b, p1/z, p0.b, p2.b
-; CHECK-NEXT:    ptest p2, p0.b
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    ptrue p2.d
+; CHECK-NEXT:    punpklo p1.h, p1.b
+; CHECK-NEXT:    eor p0.b, p2/z, p0.b, p1.b
+; CHECK-NEXT:    ptest p1, p0.b
 ; CHECK-NEXT:    cset w0, eq
 ; CHECK-NEXT:    ret
   %res = call i1 @llvm.vector.reduce.and.i1.nxv1i1(<vscale x 1 x i1> %vec)
@@ -108,7 +109,7 @@ define i1 @reduce_or_nxv2i1(<vscale x 2 x i1> %vec) {
 define i1 @reduce_or_nxv1i1(<vscale x 1 x i1> %vec) {
 ; CHECK-LABEL: reduce_or_nxv1i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    punpklo p1.h, p1.b
 ; CHECK-NEXT:    ptest p1, p0.b
 ; CHECK-NEXT:    cset w0, ne
@@ -166,7 +167,7 @@ define i1 @reduce_xor_nxv2i1(<vscale x 2 x i1> %vec) {
 define i1 @reduce_xor_nxv1i1(<vscale x 1 x i1> %vec) {
 ; CHECK-LABEL: reduce_xor_nxv1i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    punpklo p1.h, p1.b
 ; CHECK-NEXT:    cntp x8, p1, p0.d
 ; CHECK-NEXT:    and w0, w8, #0x1
@@ -224,10 +225,11 @@ define i1 @reduce_smax_nxv2i1(<vscale x 2 x i1> %vec) {
 define i1 @reduce_smax_nxv1i1(<vscale x 1 x i1> %vec) {
 ; CHECK-LABEL: reduce_smax_nxv1i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.d
-; CHECK-NEXT:    punpklo p2.h, p1.b
-; CHECK-NEXT:    eor p0.b, p1/z, p0.b, p2.b
-; CHECK-NEXT:    ptest p2, p0.b
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    ptrue p2.d
+; CHECK-NEXT:    punpklo p1.h, p1.b
+; CHECK-NEXT:    eor p0.b, p2/z, p0.b, p1.b
+; CHECK-NEXT:    ptest p1, p0.b
 ; CHECK-NEXT:    cset w0, eq
 ; CHECK-NEXT:    ret
   %res = call i1 @llvm.vector.reduce.smax.i1.nxv1i1(<vscale x 1 x i1> %vec)
@@ -282,7 +284,7 @@ define i1 @reduce_smin_nxv2i1(<vscale x 2 x i1> %vec) {
 define i1 @reduce_smin_nxv1i1(<vscale x 1 x i1> %vec) {
 ; CHECK-LABEL: reduce_smin_nxv1i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    punpklo p1.h, p1.b
 ; CHECK-NEXT:    ptest p1, p0.b
 ; CHECK-NEXT:    cset w0, ne
@@ -339,7 +341,7 @@ define i1 @reduce_umax_nxv2i1(<vscale x 2 x i1> %vec) {
 define i1 @reduce_umax_nxv1i1(<vscale x 1 x i1> %vec) {
 ; CHECK-LABEL: reduce_umax_nxv1i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    punpklo p1.h, p1.b
 ; CHECK-NEXT:    ptest p1, p0.b
 ; CHECK-NEXT:    cset w0, ne
@@ -386,10 +388,11 @@ define i1 @reduce_umin_nxv4i1(<vscale x 4 x i1> %vec) {
 define i1 @reduce_umin_nxv1i1(<vscale x 1 x i1> %vec) {
 ; CHECK-LABEL: reduce_umin_nxv1i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.d
-; CHECK-NEXT:    punpklo p2.h, p1.b
-; CHECK-NEXT:    eor p0.b, p1/z, p0.b, p2.b
-; CHECK-NEXT:    ptest p2, p0.b
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    ptrue p2.d
+; CHECK-NEXT:    punpklo p1.h, p1.b
+; CHECK-NEXT:    eor p0.b, p2/z, p0.b, p1.b
+; CHECK-NEXT:    ptest p1, p0.b
 ; CHECK-NEXT:    cset w0, eq
 ; CHECK-NEXT:    ret
   %res = call i1 @llvm.vector.reduce.umin.i1.nxv1i1(<vscale x 1 x i1> %vec)
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares-with-imm.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares-with-imm.ll
index bb72f0506690b..6afd9e9443506 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares-with-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares-with-imm.ll
@@ -895,7 +895,7 @@ define <vscale x 16 x i1> @wide_cmphs_b(<vscale x 16 x i1> %pg, <vscale x 16 x i
 define <vscale x 8 x i1> @ir_cmphs_h(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: ir_cmphs_h:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ret
   %out = icmp uge <vscale x 8 x i16> %a, zeroinitializer
   ret <vscale x 8 x i1> %out
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldst-ext.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldst-ext.ll
index 4153f0be611a1..9958f2ddc0372 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldst-ext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldst-ext.ll
@@ -8,7 +8,7 @@
 define <vscale x 16 x i32> @ld1b_i8_sext_i32(ptr %base) {
 ; CHECK-LABEL: ld1b_i8_sext_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ld1sb { z1.s }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ld1sb { z2.s }, p0/z, [x0, #2, mul vl]
@@ -22,7 +22,7 @@ define <vscale x 16 x i32> @ld1b_i8_sext_i32(ptr %base) {
 define <vscale x 16 x i32> @ld1b_i8_zext_i32(ptr %base) {
 ; CHECK-LABEL: ld1b_i8_zext_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ld1b { z1.s }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ld1b { z2.s }, p0/z, [x0, #2, mul vl]
@@ -36,7 +36,7 @@ define <vscale x 16 x i32> @ld1b_i8_zext_i32(ptr %base) {
 define <vscale x 16 x i64> @ld1b_i8_sext(ptr %base) {
 ; CHECK-LABEL: ld1b_i8_sext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1sb { z1.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ld1sb { z2.d }, p0/z, [x0, #2, mul vl]
@@ -54,7 +54,7 @@ define <vscale x 16 x i64> @ld1b_i8_sext(ptr %base) {
 define <vscale x 16 x i64> @ld1b_i8_zext(ptr %base) {
 ; CHECK-LABEL: ld1b_i8_zext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1b { z1.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ld1b { z2.d }, p0/z, [x0, #2, mul vl]
@@ -76,7 +76,7 @@ define <vscale x 16 x i64> @ld1b_i8_zext(ptr %base) {
 define <vscale x 8 x i64> @ld1h_i16_sext(ptr %base) {
 ; CHECK-LABEL: ld1h_i16_sext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1sh { z1.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ld1sh { z2.d }, p0/z, [x0, #2, mul vl]
@@ -90,7 +90,7 @@ define <vscale x 8 x i64> @ld1h_i16_sext(ptr %base) {
 define <vscale x 8 x i64> @ld1h_i16_zext(ptr %base) {
 ; CHECK-LABEL: ld1h_i16_zext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1h { z1.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ld1h { z2.d }, p0/z, [x0, #2, mul vl]
@@ -108,7 +108,7 @@ define <vscale x 8 x i64> @ld1h_i16_zext(ptr %base) {
 define <vscale x 4 x i64> @ld1w_i32_sext(ptr %base) {
 ; CHECK-LABEL: ld1w_i32_sext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1sw { z1.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
@@ -120,7 +120,7 @@ define <vscale x 4 x i64> @ld1w_i32_sext(ptr %base) {
 define <vscale x 4 x i64> @ld1w_i32_zext(ptr %base) {
 ; CHECK-LABEL: ld1w_i32_zext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1w { z1.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
@@ -135,7 +135,7 @@ define <vscale x 4 x i64> @ld1w_i32_zext(ptr %base) {
 define <vscale x 4 x i64> @zload_4i8_4i64(ptr %a) {
 ; CHECK-LABEL: zload_4i8_4i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1b { z1.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
@@ -147,7 +147,7 @@ define <vscale x 4 x i64> @zload_4i8_4i64(ptr %a) {
 define <vscale x 4 x i64> @zload_4i16_4i64(ptr %a) {
 ; CHECK-LABEL: zload_4i16_4i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1h { z1.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
@@ -159,7 +159,7 @@ define <vscale x 4 x i64> @zload_4i16_4i64(ptr %a) {
 define <vscale x 8 x i32> @zload_8i8_8i32(ptr %a) {
 ; CHECK-LABEL: zload_8i8_8i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ld1b { z1.s }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
@@ -171,7 +171,7 @@ define <vscale x 8 x i32> @zload_8i8_8i32(ptr %a) {
 define <vscale x 8 x i64> @zload_8i8_8i64(ptr %a) {
 ; CHECK-LABEL: zload_8i8_8i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1b { z1.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ld1b { z2.d }, p0/z, [x0, #2, mul vl]
@@ -185,7 +185,7 @@ define <vscale x 8 x i64> @zload_8i8_8i64(ptr %a) {
 define <vscale x 4 x i64> @sload_4i8_4i64(ptr %a) {
 ; CHECK-LABEL: sload_4i8_4i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1sb { z1.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
@@ -197,7 +197,7 @@ define <vscale x 4 x i64> @sload_4i8_4i64(ptr %a) {
 define <vscale x 4 x i64> @sload_4i16_4i64(ptr %a) {
 ; CHECK-LABEL: sload_4i16_4i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1sh { z1.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
@@ -209,7 +209,7 @@ define <vscale x 4 x i64> @sload_4i16_4i64(ptr %a) {
 define <vscale x 8 x i32> @sload_8i8_8i32(ptr %a) {
 ; CHECK-LABEL: sload_8i8_8i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ld1sb { z1.s }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
@@ -221,7 +221,7 @@ define <vscale x 8 x i32> @sload_8i8_8i32(ptr %a) {
 define <vscale x 8 x i64> @sload_8i8_8i64(ptr %a) {
 ; CHECK-LABEL: sload_8i8_8i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1sb { z1.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ld1sb { z2.d }, p0/z, [x0, #2, mul vl]
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-loads.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-loads.ll
index 723b217cf15a3..de4826177cf03 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-loads.ll
@@ -57,7 +57,7 @@ define <vscale x 16 x i8> @ld1rqb_i8_imm_upper_bound(<vscale x 16 x i1> %pred, p
 define <vscale x 16 x i8> @ld1rqb_i8_imm_out_of_lower_bound(<vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqb_i8_imm_out_of_lower_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #-129
+; CHECK-NEXT:    mov x8, #-129 // =0xffffffffffffff7f
 ; CHECK-NEXT:    ld1rqb { z0.b }, p0/z, [x0, x8]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 -129
@@ -68,7 +68,7 @@ define <vscale x 16 x i8> @ld1rqb_i8_imm_out_of_lower_bound(<vscale x 16 x i1> %
 define <vscale x 16 x i8> @ld1rqb_i8_imm_out_of_upper_bound(<vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqb_i8_imm_out_of_upper_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #113
+; CHECK-NEXT:    mov w8, #113 // =0x71
 ; CHECK-NEXT:    ld1rqb { z0.b }, p0/z, [x0, x8]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 113
@@ -196,7 +196,7 @@ define <vscale x 8 x bfloat> @ld1rqh_bf16_scalar(<vscale x 8 x i1> %pred, ptr %a
 define <vscale x 8 x i16> @ld1rqh_i16_imm_dupqlane(<vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqh_i16_imm_dupqlane:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, #-16]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds <8 x i16>, ptr %addr, i16 -1
@@ -209,7 +209,7 @@ define <vscale x 8 x i16> @ld1rqh_i16_imm_dupqlane(<vscale x 8 x i1> %pred, ptr
 define <vscale x 8 x i16> @ld1rqh_i16_scalar_dupqlane(<vscale x 8 x i1> %pred, ptr %addr, i64 %idx) {
 ; CHECK-LABEL: ld1rqh_i16_scalar_dupqlane:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i16, ptr %addr, i64 %idx
@@ -222,7 +222,7 @@ define <vscale x 8 x i16> @ld1rqh_i16_scalar_dupqlane(<vscale x 8 x i1> %pred, p
 define <vscale x 8 x half> @ld1rqh_f16_imm_dupqlane(<vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqh_f16_imm_dupqlane:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, #-16]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds <8 x half>, ptr %addr, i16 -1
@@ -235,7 +235,7 @@ define <vscale x 8 x half> @ld1rqh_f16_imm_dupqlane(<vscale x 8 x i1> %pred, ptr
 define <vscale x 8 x half> @ld1rqh_f16_scalar_dupqlane(<vscale x 8 x i1> %pred, ptr %addr, i64 %idx) {
 ; CHECK-LABEL: ld1rqh_f16_scalar_dupqlane:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds half, ptr %addr, i64 %idx
@@ -248,7 +248,7 @@ define <vscale x 8 x half> @ld1rqh_f16_scalar_dupqlane(<vscale x 8 x i1> %pred,
 define <vscale x 8 x bfloat> @ld1rqh_bf16_imm_dupqlane(<vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqh_bf16_imm_dupqlane:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, #-16]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds <8 x bfloat>, ptr %addr, i16 -1
@@ -261,7 +261,7 @@ define <vscale x 8 x bfloat> @ld1rqh_bf16_imm_dupqlane(<vscale x 8 x i1> %pred,
 define <vscale x 8 x bfloat> @ld1rqh_bf16_scalar_dupqlane(<vscale x 8 x i1> %pred, ptr %addr, i64 %idx) {
 ; CHECK-LABEL: ld1rqh_bf16_scalar_dupqlane:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds bfloat, ptr %addr, i64 %idx
@@ -336,7 +336,7 @@ define <vscale x 4 x float> @ld1rqw_f32_scalar(<vscale x 4 x i1> %pred, ptr %bas
 define <vscale x 4 x i32> @ld1rqw_i32_imm_dupqlane(<vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqw_i32_imm_dupqlane:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0, #16]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds <4 x i32>, ptr %addr, i32 1
@@ -349,7 +349,7 @@ define <vscale x 4 x i32> @ld1rqw_i32_imm_dupqlane(<vscale x 4 x i1> %pred, ptr
 define <vscale x 4 x i32> @ld1rqw_i32_scalar_dupqlane(<vscale x 4 x i1> %pred, ptr %addr, i64 %idx) {
 ; CHECK-LABEL: ld1rqw_i32_scalar_dupqlane:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i32, ptr %addr, i64 %idx
@@ -362,7 +362,7 @@ define <vscale x 4 x i32> @ld1rqw_i32_scalar_dupqlane(<vscale x 4 x i1> %pred, p
 define <vscale x 4 x float> @ld1rqw_f32_imm_dupqlane(<vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqw_f32_imm_dupqlane:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0, #16]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds <4 x float>, ptr %addr, i32 1
@@ -375,7 +375,7 @@ define <vscale x 4 x float> @ld1rqw_f32_imm_dupqlane(<vscale x 4 x i1> %pred, pt
 define <vscale x 4 x float> @ld1rqw_f32_scalar_dupqlane(<vscale x 4 x i1> %pred, ptr %addr, i64 %idx) {
 ; CHECK-LABEL: ld1rqw_f32_scalar_dupqlane:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds float, ptr %addr, i64 %idx
@@ -450,7 +450,7 @@ define <vscale x 2 x double> @ld1rqd_f64_scalar(<vscale x 2 x i1> %pred, ptr %ba
 define <vscale x 2 x i64> @ld1rqd_i64_imm_dupqlane(<vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqd_i64_imm_dupqlane:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0, #16]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds <2 x i64>, ptr %addr, i64 1
@@ -463,7 +463,7 @@ define <vscale x 2 x i64> @ld1rqd_i64_imm_dupqlane(<vscale x 2 x i1> %pred, ptr
 define <vscale x 2 x i64> @ld1rqd_i64_scalar_dupqlane(<vscale x 2 x i1> %pred, ptr %addr, i64 %idx) {
 ; CHECK-LABEL: ld1rqd_i64_scalar_dupqlane:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i64, ptr %addr, i64 %idx
@@ -476,7 +476,7 @@ define <vscale x 2 x i64> @ld1rqd_i64_scalar_dupqlane(<vscale x 2 x i1> %pred, p
 define <vscale x 2 x double> @ld1rqd_f64_imm_dupqlane(<vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqd_f64_imm_dupqlane:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0, #16]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds <2 x double>, ptr %addr, i64 1
@@ -489,7 +489,7 @@ define <vscale x 2 x double> @ld1rqd_f64_imm_dupqlane(<vscale x 2 x i1> %pred, p
 define <vscale x 2 x double> @ld1rqd_f64_scalar_dupqlane(<vscale x 2 x i1> %pred, ptr %addr, i64 %idx) {
 ; CHECK-LABEL: ld1rqd_f64_scalar_dupqlane:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds double, ptr %addr, i64 %idx
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-reinterpret-no-streaming.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-reinterpret-no-streaming.ll
index 3986a5a79d57d..1c1a864329046 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-reinterpret-no-streaming.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-reinterpret-no-streaming.ll
@@ -7,7 +7,7 @@
 define <vscale x 16 x i1> @reinterpret_bool_from_splat() {
 ; CHECK-LABEL: reinterpret_bool_from_splat:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ret
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> splat(i1 true))
   ret <vscale x 16 x i1> %out
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-reinterpret.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-reinterpret.ll
index 5127fa7e93480..049a3797aa77f 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-reinterpret.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-reinterpret.ll
@@ -17,7 +17,7 @@ define <vscale x 16 x i1> @reinterpret_bool_from_b(<vscale x 16 x i1> %pg) {
 define <vscale x 16 x i1> @reinterpret_bool_from_h(<vscale x 8 x i1> %pg) {
 ; CHECK-LABEL: reinterpret_bool_from_h:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.h
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    and p0.b, p0/z, p0.b, p1.b
 ; CHECK-NEXT:    ret
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %pg)
@@ -27,7 +27,7 @@ define <vscale x 16 x i1> @reinterpret_bool_from_h(<vscale x 8 x i1> %pg) {
 define <vscale x 16 x i1> @reinterpret_bool_from_s(<vscale x 4 x i1> %pg) {
 ; CHECK-LABEL: reinterpret_bool_from_s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    and p0.b, p0/z, p0.b, p1.b
 ; CHECK-NEXT:    ret
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %pg)
@@ -37,7 +37,7 @@ define <vscale x 16 x i1> @reinterpret_bool_from_s(<vscale x 4 x i1> %pg) {
 define <vscale x 16 x i1> @reinterpret_bool_from_d(<vscale x 2 x i1> %pg) {
 ; CHECK-LABEL: reinterpret_bool_from_d:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    and p0.b, p0/z, p0.b, p1.b
 ; CHECK-NEXT:    ret
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %pg)
@@ -47,7 +47,7 @@ define <vscale x 16 x i1> @reinterpret_bool_from_d(<vscale x 2 x i1> %pg) {
 define <vscale x 16 x i1> @reinterpret_bool_from_q(<vscale x 1 x i1> %arg) {
 ; CHECK-LABEL: reinterpret_bool_from_q:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    punpklo p1.h, p1.b
 ; CHECK-NEXT:    and p0.b, p0/z, p0.b, p1.b
 ; CHECK-NEXT:    ret
@@ -142,7 +142,7 @@ define <vscale x 16 x i1> @reinterpret_cmpgt(<vscale x 8 x i1> %p, <vscale x 8 x
 define <vscale x 16 x i1> @chained_reinterpret() {
 ; CHECK-LABEL: chained_reinterpret:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ret
   %in = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
   %cast2 = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %in)
diff --git a/llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll b/llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll
index 8b631199b0594..07f0200b108b0 100644
--- a/llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll
@@ -22,7 +22,7 @@ define <vscale x 4 x i32> @test_post_ld1_insert(ptr %a, ptr %ptr, i64 %inc) {
 define <vscale x 2 x double> @test_post_ld1_dup(ptr %a, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_post_ld1_dup:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    add x8, x0, x2, lsl #3
 ; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    str x8, [x1]
@@ -41,11 +41,12 @@ define void @test_post_ld1_int_fixed(ptr %data, i64 %idx, ptr %addr, ptr %res_pt
 ; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov z1.d, x8
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    ldr x8, [x0]
-; CHECK-NEXT:    ptrue p2.d, vl1
 ; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x2]
+; CHECK-NEXT:    ptrue p2.b, vl1
 ; CHECK-NEXT:    ldr x9, [x0, x1, lsl #3]
-; CHECK-NEXT:    cmpeq p1.d, p0/z, z0.d, z1.d
+; CHECK-NEXT:    cmpeq p1.d, p1/z, z0.d, z1.d
 ; CHECK-NEXT:    mov z0.d, z2.d
 ; CHECK-NEXT:    mov z0.d, p2/m, x8
 ; CHECK-NEXT:    mov z2.d, p1/m, x9
@@ -68,17 +69,18 @@ define void @test_post_ld1_double_fixed(ptr %data, i64 %idx, ptr %addr, ptr %res
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #2 // =0x2
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z1.d, x8
-; CHECK-NEXT:    ptrue p2.d, vl1
+; CHECK-NEXT:    ptrue p1.d
 ; CHECK-NEXT:    ldr d2, [x0, x1, lsl #3]
-; CHECK-NEXT:    cmpeq p1.d, p0/z, z0.d, z1.d
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x2]
+; CHECK-NEXT:    ptrue p2.b, vl1
+; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, z1.d
+; CHECK-NEXT:    ld1d { z0.d }, p1/z, [x2]
 ; CHECK-NEXT:    ldr d1, [x0]
 ; CHECK-NEXT:    sel z1.d, p2, z1.d, z0.d
-; CHECK-NEXT:    mov z0.d, p1/m, d2
+; CHECK-NEXT:    mov z0.d, p0/m, d2
 ; CHECK-NEXT:    fadd z0.d, z1.d, z0.d
-; CHECK-NEXT:    st1d { z0.d }, p0, [x3]
+; CHECK-NEXT:    st1d { z0.d }, p1, [x3]
 ; CHECK-NEXT:    ret
   %A = load <4 x double>, ptr %addr
   %ld1 = load double, ptr %data
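
The expectation updates above, and in the addressing-mode and ld1r tests that follow, all show the same effect: an all-true governing predicate, whatever the element size of the operation it feeds, is now materialized as the canonical "ptrue p.b" form (which is all-true at every element width), so identical ptrues become visible to CSE. A minimal sketch of the payoff, assuming reg+reg addressing so that predicated ld1w/ld1d are selected (hypothetical function, not part of this patch): previously the two loads below would each materialize their own ptrue (.s and .d); with the canonical .b form they can share a single predicate register.

; Hypothetical example (not from this patch): two element widths, one ptrue.
define void @share_one_ptrue(ptr %addr, i64 %off, ptr %out32, ptr %out64) {
  %p32 = getelementptr inbounds i32, ptr %addr, i64 %off
  %v32 = load <vscale x 4 x i32>, ptr %p32  ; lowers to ld1w { z.s }, wants an all-true predicate
  %p64 = getelementptr inbounds i64, ptr %addr, i64 %off
  %v64 = load <vscale x 2 x i64>, ptr %p64  ; lowers to ld1d { z.d }, wants an all-true predicate
  store <vscale x 4 x i32> %v32, ptr %out32
  store <vscale x 2 x i64> %v64, ptr %out64
  ret void
}
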
diff --git a/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-imm.ll b/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-imm.ll
index 523fdea6b2231..017cec81aa171 100644
--- a/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-imm.ll
@@ -92,10 +92,9 @@ define <vscale x 2 x i64> @ld1d_inbound(ptr %a) {
 define void @load_nxv6f16(ptr %a) {
 ; CHECK-LABEL: load_nxv6f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, #2, mul vl]
-; CHECK-NEXT:    ld1h { z0.s }, p1/z, [x0]
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %val = load volatile <vscale x 6 x half>, ptr %a
   ret void
@@ -104,7 +103,7 @@ define void @load_nxv6f16(ptr %a) {
 define void @load_nxv6f32(ptr %a) {
 ; CHECK-LABEL: load_nxv6f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, #2, mul vl]
 ; CHECK-NEXT:    ldr z0, [x0]
 ; CHECK-NEXT:    ret
@@ -115,7 +114,7 @@ define void @load_nxv6f32(ptr %a) {
 define void @load_nxv12f16(ptr %a) {
 ; CHECK-LABEL: load_nxv12f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, #2, mul vl]
 ; CHECK-NEXT:    ldr z0, [x0]
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-reg.ll b/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-reg.ll
index 3f31917b125b7..a2eba2d877cd8 100644
--- a/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-reg.ll
+++ b/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-reg.ll
@@ -50,7 +50,7 @@ define <vscale x 2 x i64> @ld1_nxv16i8_bitcast_to_i64(ptr %addr, i64 %off) {
 define <vscale x 8 x i16> @ld1_nxv8i16_zext8(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv8i16_zext8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
@@ -62,7 +62,7 @@ define <vscale x 8 x i16> @ld1_nxv8i16_zext8(ptr %addr, i64 %off) {
 define <vscale x 4 x i32> @ld1_nxv4i32_zext8(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv4i32_zext8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
@@ -74,7 +74,7 @@ define <vscale x 4 x i32> @ld1_nxv4i32_zext8(ptr %addr, i64 %off) {
 define <vscale x 2 x i64> @ld1_nxv2i64_zext8(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv2i64_zext8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
@@ -86,7 +86,7 @@ define <vscale x 2 x i64> @ld1_nxv2i64_zext8(ptr %addr, i64 %off) {
 define <vscale x 8 x i16> @ld1_nxv8i16_sext8(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv8i16_sext8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1sb { z0.h }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
@@ -98,7 +98,7 @@ define <vscale x 8 x i16> @ld1_nxv8i16_sext8(ptr %addr, i64 %off) {
 define <vscale x 4 x i32> @ld1_nxv4i32_sext8(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv4i32_sext8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
@@ -110,7 +110,7 @@ define <vscale x 4 x i32> @ld1_nxv4i32_sext8(ptr %addr, i64 %off) {
 define <vscale x 2 x i64> @ld1_nxv2i64_sext8(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv2i64_sext8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
@@ -124,7 +124,7 @@ define <vscale x 2 x i64> @ld1_nxv2i64_sext8(ptr %addr, i64 %off) {
 define <vscale x 8 x i16> @ld1_nxv8i16(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv8i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i16, ptr %addr, i64 %off
@@ -135,7 +135,7 @@ define <vscale x 8 x i16> @ld1_nxv8i16(ptr %addr, i64 %off) {
 define <vscale x 4 x i32> @ld1_nxv4i32_zext16(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv4i32_zext16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i16, ptr %addr, i64 %off
@@ -147,7 +147,7 @@ define <vscale x 4 x i32> @ld1_nxv4i32_zext16(ptr %addr, i64 %off) {
 define <vscale x 2 x i64> @ld1_nxv2i64_zext16(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv2i64_zext16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i16, ptr %addr, i64 %off
@@ -159,7 +159,7 @@ define <vscale x 2 x i64> @ld1_nxv2i64_zext16(ptr %addr, i64 %off) {
 define <vscale x 4 x i32> @ld1_nxv4i32_sext16(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv4i32_sext16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1sh { z0.s }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i16, ptr %addr, i64 %off
@@ -171,7 +171,7 @@ define <vscale x 4 x i32> @ld1_nxv4i32_sext16(ptr %addr, i64 %off) {
 define <vscale x 2 x i64> @ld1_nxv2i64_sext16(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv2i64_sext16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i16, ptr %addr, i64 %off
@@ -183,7 +183,7 @@ define <vscale x 2 x i64> @ld1_nxv2i64_sext16(ptr %addr, i64 %off) {
 define <vscale x 8 x half> @ld1_nxv8f16(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv8f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds half, ptr %addr, i64 %off
@@ -194,7 +194,7 @@ define <vscale x 8 x half> @ld1_nxv8f16(ptr %addr, i64 %off) {
 define <vscale x 8 x bfloat> @ld1_nxv8bf16(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv8bf16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds bfloat, ptr %addr, i64 %off
@@ -205,7 +205,7 @@ define <vscale x 8 x bfloat> @ld1_nxv8bf16(ptr %addr, i64 %off) {
 define <vscale x 4 x half> @ld1_nxv4f16(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv4f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds half, ptr %addr, i64 %off
@@ -216,7 +216,7 @@ define <vscale x 4 x half> @ld1_nxv4f16(ptr %addr, i64 %off) {
 define <vscale x 4 x bfloat> @ld1_nxv4bf16(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv4bf16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds bfloat, ptr %addr, i64 %off
@@ -227,7 +227,7 @@ define <vscale x 4 x bfloat> @ld1_nxv4bf16(ptr %addr, i64 %off) {
 define <vscale x 2 x half> @ld1_nxv2f16(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv2f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds half, ptr %addr, i64 %off
@@ -238,7 +238,7 @@ define <vscale x 2 x half> @ld1_nxv2f16(ptr %addr, i64 %off) {
 define <vscale x 2 x bfloat> @ld1_nxv2bf16(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv2bf16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds bfloat, ptr %addr, i64 %off
@@ -251,7 +251,7 @@ define <vscale x 2 x bfloat> @ld1_nxv2bf16(ptr %addr, i64 %off) {
 define <vscale x 4 x i32> @ld1_nxv4i32(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv4i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i32, ptr %addr, i64 %off
@@ -262,7 +262,7 @@ define <vscale x 4 x i32> @ld1_nxv4i32(ptr %addr, i64 %off) {
 define <vscale x 2 x i64> @ld1_nxv2i64_zext32(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv2i64_zext32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i32, ptr %addr, i64 %off
@@ -274,7 +274,7 @@ define <vscale x 2 x i64> @ld1_nxv2i64_zext32(ptr %addr, i64 %off) {
 define <vscale x 2 x i64> @ld1_nxv2i64_sext32(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv2i64_sext32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i32, ptr %addr, i64 %off
@@ -286,7 +286,7 @@ define <vscale x 2 x i64> @ld1_nxv2i64_sext32(ptr %addr, i64 %off) {
 define <vscale x 4 x float> @ld1_nxv4f32(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv4f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds float, ptr %addr, i64 %off
@@ -297,7 +297,7 @@ define <vscale x 4 x float> @ld1_nxv4f32(ptr %addr, i64 %off) {
 define <vscale x 2 x float> @ld1_nxv2f32(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds float, ptr %addr, i64 %off
@@ -310,7 +310,7 @@ define <vscale x 2 x float> @ld1_nxv2f32(ptr %addr, i64 %off) {
 define <vscale x 2 x i64> @ld1_nxv2i64(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv2i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i64, ptr %addr, i64 %off
@@ -321,7 +321,7 @@ define <vscale x 2 x i64> @ld1_nxv2i64(ptr %addr, i64 %off) {
 define <vscale x 2 x double> @ld1_nxv2f64(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds double, ptr %addr, i64 %off
diff --git a/llvm/test/CodeGen/AArch64/sve-ld1r.ll b/llvm/test/CodeGen/AArch64/sve-ld1r.ll
index a6d7d17fd9eef..be20d90ca0ea3 100644
--- a/llvm/test/CodeGen/AArch64/sve-ld1r.ll
+++ b/llvm/test/CodeGen/AArch64/sve-ld1r.ll
@@ -94,7 +94,7 @@ define <vscale x 16 x i8> @ld1rb_gep_out_of_range_down(ptr %valp) {
 define <vscale x 8 x i16> @ld1rb_i8_i16_zext(ptr %valp) {
 ; CHECK-LABEL: ld1rb_i8_i16_zext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rb { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %val = load i8, ptr %valp
@@ -107,7 +107,7 @@ define <vscale x 8 x i16> @ld1rb_i8_i16_zext(ptr %valp) {
 define <vscale x 8 x i16> @ld1rb_i8_i16_sext(ptr %valp) {
 ; CHECK-LABEL: ld1rb_i8_i16_sext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rsb { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %val = load i8, ptr %valp
@@ -120,7 +120,7 @@ define <vscale x 8 x i16> @ld1rb_i8_i16_sext(ptr %valp) {
 define <vscale x 4 x i32> @ld1rb_i8_i32_zext(ptr %valp) {
 ; CHECK-LABEL: ld1rb_i8_i32_zext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rb { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %val = load i8, ptr %valp
@@ -133,7 +133,7 @@ define <vscale x 4 x i32> @ld1rb_i8_i32_zext(ptr %valp) {
 define <vscale x 4 x i32> @ld1rb_i8_i32_sext(ptr %valp) {
 ; CHECK-LABEL: ld1rb_i8_i32_sext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rsb { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %val = load i8, ptr %valp
@@ -146,7 +146,7 @@ define <vscale x 4 x i32> @ld1rb_i8_i32_sext(ptr %valp) {
 define <vscale x 2 x i64> @ld1rb_i8_i64_zext(ptr %valp) {
 ; CHECK-LABEL: ld1rb_i8_i64_zext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rb { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %val = load i8, ptr %valp
@@ -159,7 +159,7 @@ define <vscale x 2 x i64> @ld1rb_i8_i64_zext(ptr %valp) {
 define <vscale x 2 x i64> @ld1rb_i8_i64_sext(ptr %valp) {
 ; CHECK-LABEL: ld1rb_i8_i64_sext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rsb { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %val = load i8, ptr %valp
@@ -172,7 +172,7 @@ define <vscale x 2 x i64> @ld1rb_i8_i64_sext(ptr %valp) {
 define <vscale x 8 x i16> @ld1rh(ptr %valp) {
 ; CHECK-LABEL: ld1rh:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %val = load i16, ptr %valp
@@ -184,7 +184,7 @@ define <vscale x 8 x i16> @ld1rh(ptr %valp) {
 define <vscale x 8 x i16> @ld1rh_gep(ptr %valp) {
 ; CHECK-LABEL: ld1rh_gep:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x0, #126]
 ; CHECK-NEXT:    ret
   %valp2 = getelementptr i16, ptr %valp, i32 63
@@ -197,7 +197,7 @@ define <vscale x 8 x i16> @ld1rh_gep(ptr %valp) {
 define <vscale x 8 x i16> @ld1rh_gep_out_of_range_up(ptr %valp) {
 ; CHECK-LABEL: ld1rh_gep_out_of_range_up:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    add x8, x0, #128
 ; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -211,7 +211,7 @@ define <vscale x 8 x i16> @ld1rh_gep_out_of_range_up(ptr %valp) {
 define <vscale x 8 x i16> @ld1rh_gep_out_of_range_down(ptr %valp) {
 ; CHECK-LABEL: ld1rh_gep_out_of_range_down:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sub x8, x0, #2
 ; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -225,7 +225,7 @@ define <vscale x 8 x i16> @ld1rh_gep_out_of_range_down(ptr %valp) {
 define <vscale x 4 x i32> @ld1rh_i16_i32_zext(ptr %valp) {
 ; CHECK-LABEL: ld1rh_i16_i32_zext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rh { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %val = load i16, ptr %valp
@@ -238,7 +238,7 @@ define <vscale x 4 x i32> @ld1rh_i16_i32_zext(ptr %valp) {
 define <vscale x 4 x i32> @ld1rh_i16_i32_sext(ptr %valp) {
 ; CHECK-LABEL: ld1rh_i16_i32_sext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rsh { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %val = load i16, ptr %valp
@@ -251,7 +251,7 @@ define <vscale x 4 x i32> @ld1rh_i16_i32_sext(ptr %valp) {
 define <vscale x 2 x i64> @ld1rh_i16_i64_zext(ptr %valp) {
 ; CHECK-LABEL: ld1rh_i16_i64_zext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rh { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %val = load i16, ptr %valp
@@ -264,7 +264,7 @@ define <vscale x 2 x i64> @ld1rh_i16_i64_zext(ptr %valp) {
 define <vscale x 2 x i64> @ld1rh_i16_i64_sext(ptr %valp) {
 ; CHECK-LABEL: ld1rh_i16_i64_sext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rsh { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %val = load i16, ptr %valp
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @ld1rh_i16_i64_sext(ptr %valp) {
 define <vscale x 4 x i32> @ld1rw(ptr %valp) {
 ; CHECK-LABEL: ld1rw:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %val = load i32, ptr %valp
@@ -289,7 +289,7 @@ define <vscale x 4 x i32> @ld1rw(ptr %valp) {
 define <vscale x 4 x i32> @ld1rw_gep(ptr %valp) {
 ; CHECK-LABEL: ld1rw_gep:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x0, #252]
 ; CHECK-NEXT:    ret
   %valp2 = getelementptr i32, ptr %valp, i32 63
@@ -302,7 +302,7 @@ define <vscale x 4 x i32> @ld1rw_gep(ptr %valp) {
 define <vscale x 4 x i32> @ld1rw_gep_out_of_range_up(ptr %valp) {
 ; CHECK-LABEL: ld1rw_gep_out_of_range_up:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    add x8, x0, #256
 ; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -316,7 +316,7 @@ define <vscale x 4 x i32> @ld1rw_gep_out_of_range_up(ptr %valp) {
 define <vscale x 4 x i32> @ld1rw_gep_out_of_range_down(ptr %valp) {
 ; CHECK-LABEL: ld1rw_gep_out_of_range_down:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sub x8, x0, #4
 ; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -330,7 +330,7 @@ define <vscale x 4 x i32> @ld1rw_gep_out_of_range_down(ptr %valp) {
 define <vscale x 2 x i64> @ld1rw_i32_i64_zext(ptr %valp) {
 ; CHECK-LABEL: ld1rw_i32_i64_zext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rw { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %val = load i32, ptr %valp
@@ -343,7 +343,7 @@ define <vscale x 2 x i64> @ld1rw_i32_i64_zext(ptr %valp) {
 define <vscale x 2 x i64> @ld1rw_i32_i64_sext(ptr %valp) {
 ; CHECK-LABEL: ld1rw_i32_i64_sext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rsw { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %val = load i32, ptr %valp
@@ -356,7 +356,7 @@ define <vscale x 2 x i64> @ld1rw_i32_i64_sext(ptr %valp) {
 define <vscale x 2 x i64> @ld1rd(ptr %valp) {
 ; CHECK-LABEL: ld1rd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %val = load i64, ptr %valp
@@ -368,7 +368,7 @@ define <vscale x 2 x i64> @ld1rd(ptr %valp) {
 define <vscale x 2 x i64> @ld1rd_gep(ptr %valp) {
 ; CHECK-LABEL: ld1rd_gep:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x0, #504]
 ; CHECK-NEXT:    ret
   %valp2 = getelementptr i64, ptr %valp, i32 63
@@ -381,7 +381,7 @@ define <vscale x 2 x i64> @ld1rd_gep(ptr %valp) {
 define <vscale x 2 x i64> @ld1rd_gep_out_of_range_up(ptr %valp) {
 ; CHECK-LABEL: ld1rd_gep_out_of_range_up:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    add x8, x0, #512
 ; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -395,7 +395,7 @@ define <vscale x 2 x i64> @ld1rd_gep_out_of_range_up(ptr %valp) {
 define <vscale x 2 x i64> @ld1rd_gep_out_of_range_down(ptr %valp) {
 ; CHECK-LABEL: ld1rd_gep_out_of_range_down:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sub x8, x0, #8
 ; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -409,7 +409,7 @@ define <vscale x 2 x i64> @ld1rd_gep_out_of_range_down(ptr %valp) {
 define <vscale x 8 x half> @ld1rh_half(ptr %valp) {
 ; CHECK-LD1R-LABEL: ld1rh_half:
 ; CHECK-LD1R:       // %bb.0:
-; CHECK-LD1R-NEXT:    ptrue p0.h
+; CHECK-LD1R-NEXT:    ptrue p0.b
 ; CHECK-LD1R-NEXT:    ld1rh { z0.h }, p0/z, [x0]
 ; CHECK-LD1R-NEXT:    ret
 ;
@@ -439,7 +439,7 @@ define <vscale x 8 x half> @ld1rh_half_neoverse(ptr %valp) #1 {
 define <vscale x 8 x half> @ld1rh_half_gep(ptr %valp) {
 ; CHECK-LD1R-LABEL: ld1rh_half_gep:
 ; CHECK-LD1R:       // %bb.0:
-; CHECK-LD1R-NEXT:    ptrue p0.h
+; CHECK-LD1R-NEXT:    ptrue p0.b
 ; CHECK-LD1R-NEXT:    ld1rh { z0.h }, p0/z, [x0, #126]
 ; CHECK-LD1R-NEXT:    ret
 ;
@@ -458,7 +458,7 @@ define <vscale x 8 x half> @ld1rh_half_gep(ptr %valp) {
 define <vscale x 8 x half> @ld1rh_half_gep_out_of_range_up(ptr %valp) {
 ; CHECK-LD1R-LABEL: ld1rh_half_gep_out_of_range_up:
 ; CHECK-LD1R:       // %bb.0:
-; CHECK-LD1R-NEXT:    ptrue p0.h
+; CHECK-LD1R-NEXT:    ptrue p0.b
 ; CHECK-LD1R-NEXT:    add x8, x0, #128
 ; CHECK-LD1R-NEXT:    ld1rh { z0.h }, p0/z, [x8]
 ; CHECK-LD1R-NEXT:    ret
@@ -478,7 +478,7 @@ define <vscale x 8 x half> @ld1rh_half_gep_out_of_range_up(ptr %valp) {
 define <vscale x 8 x half> @ld1rh_half_gep_out_of_range_down(ptr %valp) {
 ; CHECK-LD1R-LABEL: ld1rh_half_gep_out_of_range_down:
 ; CHECK-LD1R:       // %bb.0:
-; CHECK-LD1R-NEXT:    ptrue p0.h
+; CHECK-LD1R-NEXT:    ptrue p0.b
 ; CHECK-LD1R-NEXT:    sub x8, x0, #2
 ; CHECK-LD1R-NEXT:    ld1rh { z0.h }, p0/z, [x8]
 ; CHECK-LD1R-NEXT:    ret
@@ -498,7 +498,7 @@ define <vscale x 8 x half> @ld1rh_half_gep_out_of_range_down(ptr %valp) {
 define <vscale x 4 x half> @ld1rh_half_unpacked4(ptr %valp) {
 ; CHECK-LD1R-LABEL: ld1rh_half_unpacked4:
 ; CHECK-LD1R:       // %bb.0:
-; CHECK-LD1R-NEXT:    ptrue p0.s
+; CHECK-LD1R-NEXT:    ptrue p0.b
 ; CHECK-LD1R-NEXT:    ld1rh { z0.s }, p0/z, [x0]
 ; CHECK-LD1R-NEXT:    ret
 ;
@@ -516,7 +516,7 @@ define <vscale x 4 x half> @ld1rh_half_unpacked4(ptr %valp) {
 define <vscale x 4 x half> @ld1rh_half_unpacked4_gep(ptr %valp) {
 ; CHECK-LD1R-LABEL: ld1rh_half_unpacked4_gep:
 ; CHECK-LD1R:       // %bb.0:
-; CHECK-LD1R-NEXT:    ptrue p0.s
+; CHECK-LD1R-NEXT:    ptrue p0.b
 ; CHECK-LD1R-NEXT:    ld1rh { z0.s }, p0/z, [x0, #126]
 ; CHECK-LD1R-NEXT:    ret
 ;
@@ -535,7 +535,7 @@ define <vscale x 4 x half> @ld1rh_half_unpacked4_gep(ptr %valp) {
 define <vscale x 4 x half> @ld1rh_half_unpacked4_gep_out_of_range_up(ptr %valp) {
 ; CHECK-LD1R-LABEL: ld1rh_half_unpacked4_gep_out_of_range_up:
 ; CHECK-LD1R:       // %bb.0:
-; CHECK-LD1R-NEXT:    ptrue p0.s
+; CHECK-LD1R-NEXT:    ptrue p0.b
 ; CHECK-LD1R-NEXT:    add x8, x0, #128
 ; CHECK-LD1R-NEXT:    ld1rh { z0.s }, p0/z, [x8]
 ; CHECK-LD1R-NEXT:    ret
@@ -555,7 +555,7 @@ define <vscale x 4 x half> @ld1rh_half_unpacked4_gep_out_of_range_up(ptr %valp)
 define <vscale x 4 x half> @ld1rh_half_unpacked4_gep_out_of_range_down(ptr %valp) {
 ; CHECK-LD1R-LABEL: ld1rh_half_unpacked4_gep_out_of_range_down:
 ; CHECK-LD1R:       // %bb.0:
-; CHECK-LD1R-NEXT:    ptrue p0.s
+; CHECK-LD1R-NEXT:    ptrue p0.b
 ; CHECK-LD1R-NEXT:    sub x8, x0, #2
 ; CHECK-LD1R-NEXT:    ld1rh { z0.s }, p0/z, [x8]
 ; CHECK-LD1R-NEXT:    ret
@@ -575,7 +575,7 @@ define <vscale x 4 x half> @ld1rh_half_unpacked4_gep_out_of_range_down(ptr %valp
 define <vscale x 2 x half> @ld1rh_half_unpacked2(ptr %valp) {
 ; CHECK-LD1R-LABEL: ld1rh_half_unpacked2:
 ; CHECK-LD1R:       // %bb.0:
-; CHECK-LD1R-NEXT:    ptrue p0.d
+; CHECK-LD1R-NEXT:    ptrue p0.b
 ; CHECK-LD1R-NEXT:    ld1rh { z0.d }, p0/z, [x0]
 ; CHECK-LD1R-NEXT:    ret
 ;
@@ -593,7 +593,7 @@ define <vscale x 2 x half> @ld1rh_half_unpacked2(ptr %valp) {
 define <vscale x 2 x half> @ld1rh_half_unpacked2_gep(ptr %valp) {
 ; CHECK-LD1R-LABEL: ld1rh_half_unpacked2_gep:
 ; CHECK-LD1R:       // %bb.0:
-; CHECK-LD1R-NEXT:    ptrue p0.d
+; CHECK-LD1R-NEXT:    ptrue p0.b
 ; CHECK-LD1R-NEXT:    ld1rh { z0.d }, p0/z, [x0, #126]
 ; CHECK-LD1R-NEXT:    ret
 ;
@@ -612,7 +612,7 @@ define <vscale x 2 x half> @ld1rh_half_unpacked2_gep(ptr %valp) {
 define <vscale x 2 x half> @ld1rh_half_unpacked2_gep_out_of_range_up(ptr %valp) {
 ; CHECK-LD1R-LABEL: ld1rh_half_unpacked2_gep_out_of_range_up:
 ; CHECK-LD1R:       // %bb.0:
-; CHECK-LD1R-NEXT:    ptrue p0.d
+; CHECK-LD1R-NEXT:    ptrue p0.b
 ; CHECK-LD1R-NEXT:    add x8, x0, #128
 ; CHECK-LD1R-NEXT:    ld1rh { z0.d }, p0/z, [x8]
 ; CHECK-LD1R-NEXT:    ret
@@ -632,7 +632,7 @@ define <vscale x 2 x half> @ld1rh_half_unpacked2_gep_out_of_range_up(ptr %valp)
 define <vscale x 2 x half> @ld1rh_half_unpacked2_gep_out_of_range_down(ptr %valp) {
 ; CHECK-LD1R-LABEL: ld1rh_half_unpacked2_gep_out_of_range_down:
 ; CHECK-LD1R:       // %bb.0:
-; CHECK-LD1R-NEXT:    ptrue p0.d
+; CHECK-LD1R-NEXT:    ptrue p0.b
 ; CHECK-LD1R-NEXT:    sub x8, x0, #2
 ; CHECK-LD1R-NEXT:    ld1rh { z0.d }, p0/z, [x8]
 ; CHECK-LD1R-NEXT:    ret
@@ -652,7 +652,7 @@ define <vscale x 2 x half> @ld1rh_half_unpacked2_gep_out_of_range_down(ptr %valp
 define <vscale x 4 x float> @ld1rw_float(ptr %valp) {
 ; CHECK-LD1R-LABEL: ld1rw_float:
 ; CHECK-LD1R:       // %bb.0:
-; CHECK-LD1R-NEXT:    ptrue p0.s
+; CHECK-LD1R-NEXT:    ptrue p0.b
 ; CHECK-LD1R-NEXT:    ld1rw { z0.s }, p0/z, [x0]
 ; CHECK-LD1R-NEXT:    ret
 ;
@@ -670,7 +670,7 @@ define <vscale x 4 x float> @ld1rw_float(ptr %valp) {
 define <vscale x 4 x float> @ld1rw_float_gep(ptr %valp) {
 ; CHECK-LD1R-LABEL: ld1rw_float_gep:
 ; CHECK-LD1R:       // %bb.0:
-; CHECK-LD1R-NEXT:    ptrue p0.s
+; CHECK-LD1R-NEXT:    ptrue p0.b
 ; CHECK-LD1R-NEXT:    ld1rw { z0.s }, p0/z, [x0, #252]
 ; CHECK-LD1R-NEXT:    ret
 ;
@@ -689,7 +689,7 @@ define <vscale x 4 x float> @ld1rw_float_gep(ptr %valp) {
 define <vscale x 4 x float> @ld1rw_float_gep_out_of_range_up(ptr %valp) {
 ; CHECK-LD1R-LABEL: ld1rw_float_gep_out_of_range_up:
 ; CHECK-LD1R:       // %bb.0:
-; CHECK-LD1R-NEXT:    ptrue p0.s
+; CHECK-LD1R-NEXT:    ptrue p0.b
 ; CHECK-LD1R-NEXT:    add x8, x0, #256
 ; CHECK-LD1R-NEXT:    ld1rw { z0.s }, p0/z, [x8]
 ; CHECK-LD1R-NEXT:    ret
@@ -709,7 +709,7 @@ define <vscale x 4 x float> @ld1rw_float_gep_out_of_range_up(ptr %valp) {
 define <vscale x 4 x float> @ld1rw_float_gep_out_of_range_down(ptr %valp) {
 ; CHECK-LD1R-LABEL: ld1rw_float_gep_out_of_range_down:
 ; CHECK-LD1R:       // %bb.0:
-; CHECK-LD1R-NEXT:    ptrue p0.s
+; CHECK-LD1R-NEXT:    ptrue p0.b
 ; CHECK-LD1R-NEXT:    sub x8, x0, #4
 ; CHECK-LD1R-NEXT:    ld1rw { z0.s }, p0/z, [x8]
 ; CHECK-LD1R-NEXT:    ret
@@ -729,7 +729,7 @@ define <vscale x 4 x float> @ld1rw_float_gep_out_of_range_down(ptr %valp) {
 define <vscale x 2 x float> @ld1rw_float_unpacked2(ptr %valp) {
 ; CHECK-LD1R-LABEL: ld1rw_float_unpacked2:
 ; CHECK-LD1R:       // %bb.0:
-; CHECK-LD1R-NEXT:    ptrue p0.d
+; CHECK-LD1R-NEXT:    ptrue p0.b
 ; CHECK-LD1R-NEXT:    ld1rw { z0.d }, p0/z, [x0]
 ; CHECK-LD1R-NEXT:    ret
 ;
@@ -747,7 +747,7 @@ define <vscale x 2 x float> @ld1rw_float_unpacked2(ptr %valp) {
 define <vscale x 2 x float> @ld1rw_float_unpacked2_gep(ptr %valp) {
 ; CHECK-LD1R-LABEL: ld1rw_float_unpacked2_gep:
 ; CHECK-LD1R:       // %bb.0:
-; CHECK-LD1R-NEXT:    ptrue p0.d
+; CHECK-LD1R-NEXT:    ptrue p0.b
 ; CHECK-LD1R-NEXT:    ld1rw { z0.d }, p0/z, [x0, #252]
 ; CHECK-LD1R-NEXT:    ret
 ;
@@ -766,7 +766,7 @@ define <vscale x 2 x float> @ld1rw_float_unpacked2_gep(ptr %valp) {
 define <vscale x 2 x float> @ld1rw_float_unpacked2_gep_out_of_range_up(ptr %valp) {
 ; CHECK-LD1R-LABEL: ld1rw_float_unpacked2_gep_out_of_range_up:
 ; CHECK-LD1R:       // %bb.0:
-; CHECK-LD1R-NEXT:    ptrue p0.d
+; CHECK-LD1R-NEXT:    ptrue p0.b
 ; CHECK-LD1R-NEXT:    add x8, x0, #256
 ; CHECK-LD1R-NEXT:    ld1rw { z0.d }, p0/z, [x8]
 ; CHECK-LD1R-NEXT:    ret
@@ -786,7 +786,7 @@ define <vscale x 2 x float> @ld1rw_float_unpacked2_gep_out_of_range_up(ptr %valp
 define <vscale x 2 x float> @ld1rw_float_unpacked2_gep_out_of_range_down(ptr %valp) {
 ; CHECK-LD1R-LABEL: ld1rw_float_unpacked2_gep_out_of_range_down:
 ; CHECK-LD1R:       // %bb.0:
-; CHECK-LD1R-NEXT:    ptrue p0.d
+; CHECK-LD1R-NEXT:    ptrue p0.b
 ; CHECK-LD1R-NEXT:    sub x8, x0, #4
 ; CHECK-LD1R-NEXT:    ld1rw { z0.d }, p0/z, [x8]
 ; CHECK-LD1R-NEXT:    ret
@@ -806,7 +806,7 @@ define <vscale x 2 x float> @ld1rw_float_unpacked2_gep_out_of_range_down(ptr %va
 define <vscale x 2 x double> @ld1rd_double(ptr %valp) {
 ; CHECK-LD1R-LABEL: ld1rd_double:
 ; CHECK-LD1R:       // %bb.0:
-; CHECK-LD1R-NEXT:    ptrue p0.d
+; CHECK-LD1R-NEXT:    ptrue p0.b
 ; CHECK-LD1R-NEXT:    ld1rd { z0.d }, p0/z, [x0]
 ; CHECK-LD1R-NEXT:    ret
 ;
@@ -824,7 +824,7 @@ define <vscale x 2 x double> @ld1rd_double(ptr %valp) {
 define <vscale x 2 x double> @ld1rd_double_gep(ptr %valp) {
 ; CHECK-LD1R-LABEL: ld1rd_double_gep:
 ; CHECK-LD1R:       // %bb.0:
-; CHECK-LD1R-NEXT:    ptrue p0.d
+; CHECK-LD1R-NEXT:    ptrue p0.b
 ; CHECK-LD1R-NEXT:    ld1rd { z0.d }, p0/z, [x0, #504]
 ; CHECK-LD1R-NEXT:    ret
 ;
@@ -843,7 +843,7 @@ define <vscale x 2 x double> @ld1rd_double_gep(ptr %valp) {
 define <vscale x 2 x double> @ld1rd_double_gep_out_of_range_up(ptr %valp) {
 ; CHECK-LD1R-LABEL: ld1rd_double_gep_out_of_range_up:
 ; CHECK-LD1R:       // %bb.0:
-; CHECK-LD1R-NEXT:    ptrue p0.d
+; CHECK-LD1R-NEXT:    ptrue p0.b
 ; CHECK-LD1R-NEXT:    add x8, x0, #512
 ; CHECK-LD1R-NEXT:    ld1rd { z0.d }, p0/z, [x8]
 ; CHECK-LD1R-NEXT:    ret
@@ -863,7 +863,7 @@ define <vscale x 2 x double> @ld1rd_double_gep_out_of_range_up(ptr %valp) {
 define <vscale x 2 x double> @ld1rd_double_gep_out_of_range_down(ptr %valp) {
 ; CHECK-LD1R-LABEL: ld1rd_double_gep_out_of_range_down:
 ; CHECK-LD1R:       // %bb.0:
-; CHECK-LD1R-NEXT:    ptrue p0.d
+; CHECK-LD1R-NEXT:    ptrue p0.b
 ; CHECK-LD1R-NEXT:    sub x8, x0, #8
 ; CHECK-LD1R-NEXT:    ld1rd { z0.d }, p0/z, [x8]
 ; CHECK-LD1R-NEXT:    ret
@@ -883,7 +883,7 @@ define <vscale x 2 x double> @ld1rd_double_gep_out_of_range_down(ptr %valp) {
 define <vscale x 2 x double> @dupq_ld1rqd_f64(ptr %a) {
 ; CHECK-LABEL: dupq_ld1rqd_f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %1 = load <2 x double>, ptr %a
@@ -895,7 +895,7 @@ define <vscale x 2 x double> @dupq_ld1rqd_f64(ptr %a) {
 define <vscale x 4 x float> @dupq_ld1rqw_f32(ptr %a) {
 ; CHECK-LABEL: dupq_ld1rqw_f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %1 = load <4 x float>, ptr %a
@@ -907,7 +907,7 @@ define <vscale x 4 x float> @dupq_ld1rqw_f32(ptr %a) {
 define <vscale x 8 x half> @dupq_ld1rqh_f16(ptr %a) {
 ; CHECK-LABEL: dupq_ld1rqh_f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %1 = load <8 x half>, ptr %a
@@ -919,7 +919,7 @@ define <vscale x 8 x half> @dupq_ld1rqh_f16(ptr %a) {
 define <vscale x 8 x bfloat> @dupq_ld1rqh_bf16(ptr %a) #0 {
 ; CHECK-LABEL: dupq_ld1rqh_bf16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %1 = load <8 x bfloat>, ptr %a
@@ -931,7 +931,7 @@ define <vscale x 8 x bfloat> @dupq_ld1rqh_bf16(ptr %a) #0 {
 define <vscale x 2 x i64> @dupq_ld1rqd_i64(ptr %a) #0 {
 ; CHECK-LABEL: dupq_ld1rqd_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %1 = load <2 x i64>, ptr %a
@@ -943,7 +943,7 @@ define <vscale x 2 x i64> @dupq_ld1rqd_i64(ptr %a) #0 {
 define <vscale x 4 x i32> @dupq_ld1rqw_i32(ptr %a) #0 {
 ; CHECK-LABEL: dupq_ld1rqw_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %1 = load <4 x i32>, ptr %a
@@ -955,7 +955,7 @@ define <vscale x 4 x i32> @dupq_ld1rqw_i32(ptr %a) #0 {
 define <vscale x 8 x i16> @dupq_ld1rqw_i16(ptr %a) #0 {
 ; CHECK-LABEL: dupq_ld1rqw_i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %1 = load <8 x i16>, ptr %a
@@ -1414,7 +1414,7 @@ define <vscale x 2 x double> @negtest_dup_ld1rd_double_passthru_nxv2f64(<vscale
 define ptr @avoid_preindex_load(ptr %src, ptr %out) {
 ; CHECK-LABEL: avoid_preindex_load:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rsb { z0.d }, p0/z, [x0, #1]
 ; CHECK-NEXT:    add x0, x0, #1
 ; CHECK-NEXT:    str z0, [x1]
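
In the llrint tests below a second effect of the canonicalization shows up: the predicate inversion that used to print as "not p1.b, p0/z, p1.b" (an alias of eor whose all-true operand is the governing predicate itself) now takes its all-true operand from a separately materialized "ptrue p.b" register, so an explicit eor is printed and the predicate register assignments and spill code shift around it. A minimal IR sketch of the kind of inversion involved, assuming the splat constant syntax for the all-true mask (hypothetical function, not part of this patch):

; Hypothetical example (not from this patch): invert a compare result.
; Codegen implements the xor-with-all-true as a predicate EOR, whose
; all-true operand is now read from a canonical ptrue p.b register.
define <vscale x 2 x i1> @invert_cmp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
  %cmp = fcmp oge <vscale x 2 x double> %a, %b
  %inv = xor <vscale x 2 x i1> %cmp, splat (i1 true)
  ret <vscale x 2 x i1> %inv
}
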
diff --git a/llvm/test/CodeGen/AArch64/sve-llrint.ll b/llvm/test/CodeGen/AArch64/sve-llrint.ll
index 16e0e0c4661b6..7b1a7c3daa83a 100644
--- a/llvm/test/CodeGen/AArch64/sve-llrint.ll
+++ b/llvm/test/CodeGen/AArch64/sve-llrint.ll
@@ -9,14 +9,15 @@ define <vscale x 1 x i64> @llrint_v1i64_v1f16(<vscale x 1 x half> %x) {
 ; CHECK-NEXT:    mov z2.d, #0x8000000000000000
 ; CHECK-NEXT:    mov z1.h, w8
 ; CHECK-NEXT:    mov w8, #31743 // =0x7bff
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    frintx z0.h, p0/m, z0.h
 ; CHECK-NEXT:    mov z3.h, w8
 ; CHECK-NEXT:    fcmge p1.h, p0/z, z0.h, z1.h
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z0.h
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p2.h, p0/z, z0.h, z3.h
 ; CHECK-NEXT:    mov z3.d, #0x7fffffffffffffff
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
 ; CHECK-NEXT:    fcmuo p0.h, p0/z, z0.h, z0.h
 ; CHECK-NEXT:    mov z1.d, p1/m, z2.d
 ; CHECK-NEXT:    sel z0.d, p2, z3.d, z1.d
@@ -35,14 +36,15 @@ define <vscale x 2 x i64> @llrint_v1i64_v2f16(<vscale x 2 x half> %x) {
 ; CHECK-NEXT:    mov z2.d, #0x8000000000000000
 ; CHECK-NEXT:    mov z1.h, w8
 ; CHECK-NEXT:    mov w8, #31743 // =0x7bff
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    frintx z0.h, p0/m, z0.h
 ; CHECK-NEXT:    mov z3.h, w8
 ; CHECK-NEXT:    fcmge p1.h, p0/z, z0.h, z1.h
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z0.h
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p2.h, p0/z, z0.h, z3.h
 ; CHECK-NEXT:    mov z3.d, #0x7fffffffffffffff
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
 ; CHECK-NEXT:    fcmuo p0.h, p0/z, z0.h, z0.h
 ; CHECK-NEXT:    mov z1.d, p1/m, z2.d
 ; CHECK-NEXT:    sel z0.d, p2, z3.d, z1.d
@@ -67,6 +69,7 @@ define <vscale x 4 x i64> @llrint_v4i64_v4f16(<vscale x 4 x half> %x) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov z2.h, w8
 ; CHECK-NEXT:    mov w8, #31743 // =0x7bff
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    mov z3.h, w8
 ; CHECK-NEXT:    mov z6.d, #0x7fffffffffffffff
 ; CHECK-NEXT:    frintx z1.h, p0/m, z1.h
@@ -78,10 +81,10 @@ define <vscale x 4 x i64> @llrint_v4i64_v4f16(<vscale x 4 x half> %x) {
 ; CHECK-NEXT:    fcvtzs z4.d, p0/m, z1.h
 ; CHECK-NEXT:    movprfx z5, z0
 ; CHECK-NEXT:    fcvtzs z5.d, p0/m, z0.h
-; CHECK-NEXT:    fcmgt p3.h, p0/z, z1.h, z3.h
 ; CHECK-NEXT:    fcmgt p4.h, p0/z, z0.h, z3.h
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
+; CHECK-NEXT:    fcmgt p3.h, p0/z, z1.h, z3.h
 ; CHECK-NEXT:    sel z3.d, p1, z2.d, z4.d
 ; CHECK-NEXT:    fcmuo p1.h, p0/z, z1.h, z1.h
 ; CHECK-NEXT:    fcmuo p0.h, p0/z, z0.h, z0.h
@@ -116,6 +119,7 @@ define <vscale x 8 x i64> @llrint_v8i64_v8f16(<vscale x 8 x half> %x) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov z4.h, w8
 ; CHECK-NEXT:    mov w8, #31743 // =0x7bff
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    mov z6.h, w8
 ; CHECK-NEXT:    mov z26.d, #0x7fffffffffffffff
 ; CHECK-NEXT:    uunpklo z2.d, z1.s
@@ -130,8 +134,8 @@ define <vscale x 8 x i64> @llrint_v8i64_v8f16(<vscale x 8 x half> %x) {
 ; CHECK-NEXT:    mov z0.d, #0x8000000000000000
 ; CHECK-NEXT:    fcmge p1.h, p0/z, z2.h, z4.h
 ; CHECK-NEXT:    fcmge p2.h, p0/z, z1.h, z4.h
-; CHECK-NEXT:    fcmge p3.h, p0/z, z3.h, z4.h
-; CHECK-NEXT:    fcmge p4.h, p0/z, z5.h, z4.h
+; CHECK-NEXT:    fcmge p4.h, p0/z, z3.h, z4.h
+; CHECK-NEXT:    fcmge p5.h, p0/z, z5.h, z4.h
 ; CHECK-NEXT:    movprfx z4, z2
 ; CHECK-NEXT:    fcvtzs z4.d, p0/m, z2.h
 ; CHECK-NEXT:    movprfx z7, z1
@@ -140,27 +144,27 @@ define <vscale x 8 x i64> @llrint_v8i64_v8f16(<vscale x 8 x half> %x) {
 ; CHECK-NEXT:    fcvtzs z24.d, p0/m, z3.h
 ; CHECK-NEXT:    movprfx z25, z5
 ; CHECK-NEXT:    fcvtzs z25.d, p0/m, z5.h
-; CHECK-NEXT:    fcmgt p7.h, p0/z, z3.h, z6.h
-; CHECK-NEXT:    fcmgt p5.h, p0/z, z2.h, z6.h
-; CHECK-NEXT:    fcmgt p6.h, p0/z, z1.h, z6.h
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
+; CHECK-NEXT:    fcmgt p6.h, p0/z, z2.h, z6.h
+; CHECK-NEXT:    fcmgt p7.h, p0/z, z1.h, z6.h
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p3.b
 ; CHECK-NEXT:    mov z4.d, p1/m, z0.d
 ; CHECK-NEXT:    fcmgt p1.h, p0/z, z5.h, z6.h
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
+; CHECK-NEXT:    eor p3.b, p0/z, p5.b, p3.b
+; CHECK-NEXT:    fcmgt p5.h, p0/z, z3.h, z6.h
 ; CHECK-NEXT:    sel z6.d, p2, z0.d, z7.d
+; CHECK-NEXT:    sel z7.d, p4, z0.d, z24.d
+; CHECK-NEXT:    fcmuo p4.h, p0/z, z3.h, z3.h
 ; CHECK-NEXT:    fcmuo p2.h, p0/z, z2.h, z2.h
-; CHECK-NEXT:    sel z7.d, p3, z0.d, z24.d
+; CHECK-NEXT:    sel z24.d, p3, z0.d, z25.d
 ; CHECK-NEXT:    fcmuo p3.h, p0/z, z1.h, z1.h
-; CHECK-NEXT:    sel z24.d, p4, z0.d, z25.d
-; CHECK-NEXT:    fcmuo p4.h, p0/z, z3.h, z3.h
 ; CHECK-NEXT:    fcmuo p0.h, p0/z, z5.h, z5.h
-; CHECK-NEXT:    sel z0.d, p5, z26.d, z4.d
-; CHECK-NEXT:    sel z1.d, p6, z26.d, z6.d
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z2.d, p7, z26.d, z7.d
+; CHECK-NEXT:    sel z0.d, p6, z26.d, z4.d
+; CHECK-NEXT:    sel z1.d, p7, z26.d, z6.d
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    sel z2.d, p5, z26.d, z7.d
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    sel z3.d, p1, z26.d, z24.d
 ; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    mov z0.d, p2/m, #0 // =0x0
@@ -180,7 +184,7 @@ define <vscale x 16 x i64> @llrint_v16i64_v16f16(<vscale x 16 x half> %x) {
 ; CHECK-LABEL: llrint_v16i64_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-3
+; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    str p10, [sp, #1, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Folded Spill
@@ -188,124 +192,122 @@ define <vscale x 16 x i64> @llrint_v16i64_v16f16(<vscale x 16 x half> %x) {
 ; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT:    str z9, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z8, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT:    str z8, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK-NEXT:    .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
 ; CHECK-NEXT:    uunpklo z2.s, z0.h
 ; CHECK-NEXT:    uunpkhi z0.s, z0.h
 ; CHECK-NEXT:    mov w8, #64511 // =0xfbff
 ; CHECK-NEXT:    uunpklo z4.s, z1.h
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    uunpkhi z1.s, z1.h
-; CHECK-NEXT:    mov z5.h, w8
+; CHECK-NEXT:    mov z6.h, w8
 ; CHECK-NEXT:    mov w8, #31743 // =0x7bff
-; CHECK-NEXT:    mov z25.d, #0x8000000000000000
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    mov z24.d, #0x8000000000000000
 ; CHECK-NEXT:    mov z27.h, w8
-; CHECK-NEXT:    mov z7.d, #0x7fffffffffffffff
+; CHECK-NEXT:    mov z26.d, #0x7fffffffffffffff
+; CHECK-NEXT:    uunpklo z5.d, z0.s
 ; CHECK-NEXT:    uunpklo z3.d, z2.s
 ; CHECK-NEXT:    uunpkhi z2.d, z2.s
-; CHECK-NEXT:    uunpklo z6.d, z0.s
 ; CHECK-NEXT:    uunpkhi z0.d, z0.s
-; CHECK-NEXT:    uunpklo z24.d, z4.s
+; CHECK-NEXT:    uunpklo z7.d, z4.s
 ; CHECK-NEXT:    uunpkhi z4.d, z4.s
-; CHECK-NEXT:    uunpklo z26.d, z1.s
+; CHECK-NEXT:    uunpklo z25.d, z1.s
 ; CHECK-NEXT:    uunpkhi z1.d, z1.s
-; CHECK-NEXT:    frintx z2.h, p0/m, z2.h
+; CHECK-NEXT:    frintx z5.h, p0/m, z5.h
 ; CHECK-NEXT:    frintx z3.h, p0/m, z3.h
-; CHECK-NEXT:    frintx z6.h, p0/m, z6.h
+; CHECK-NEXT:    frintx z2.h, p0/m, z2.h
 ; CHECK-NEXT:    movprfx z28, z0
 ; CHECK-NEXT:    frintx z28.h, p0/m, z0.h
 ; CHECK-NEXT:    movprfx z29, z4
 ; CHECK-NEXT:    frintx z29.h, p0/m, z4.h
-; CHECK-NEXT:    frintx z24.h, p0/m, z24.h
+; CHECK-NEXT:    frintx z7.h, p0/m, z7.h
 ; CHECK-NEXT:    movprfx z30, z1
 ; CHECK-NEXT:    frintx z30.h, p0/m, z1.h
-; CHECK-NEXT:    frintx z26.h, p0/m, z26.h
-; CHECK-NEXT:    fcmge p5.h, p0/z, z2.h, z5.h
-; CHECK-NEXT:    fcmge p2.h, p0/z, z3.h, z5.h
-; CHECK-NEXT:    movprfx z1, z2
-; CHECK-NEXT:    fcvtzs z1.d, p0/m, z2.h
+; CHECK-NEXT:    frintx z25.h, p0/m, z25.h
+; CHECK-NEXT:    fcmge p5.h, p0/z, z5.h, z6.h
+; CHECK-NEXT:    fcmge p2.h, p0/z, z3.h, z6.h
+; CHECK-NEXT:    fcmge p4.h, p0/z, z2.h, z6.h
+; CHECK-NEXT:    movprfx z4, z5
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z5.h
 ; CHECK-NEXT:    movprfx z0, z3
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z3.h
-; CHECK-NEXT:    fcmge p6.h, p0/z, z6.h, z5.h
+; CHECK-NEXT:    movprfx z1, z2
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z2.h
+; CHECK-NEXT:    fcmge p7.h, p0/z, z28.h, z6.h
+; CHECK-NEXT:    fcmge p8.h, p0/z, z7.h, z6.h
+; CHECK-NEXT:    fcmge p9.h, p0/z, z29.h, z6.h
 ; CHECK-NEXT:    fcmgt p3.h, p0/z, z3.h, z27.h
-; CHECK-NEXT:    fcmuo p1.h, p0/z, z3.h, z3.h
-; CHECK-NEXT:    fcmge p7.h, p0/z, z28.h, z5.h
-; CHECK-NEXT:    movprfx z3, z6
-; CHECK-NEXT:    fcvtzs z3.d, p0/m, z6.h
-; CHECK-NEXT:    fcmge p8.h, p0/z, z24.h, z5.h
-; CHECK-NEXT:    fcmgt p4.h, p0/z, z2.h, z27.h
-; CHECK-NEXT:    fcmge p9.h, p0/z, z26.h, z5.h
-; CHECK-NEXT:    not p5.b, p0/z, p5.b
-; CHECK-NEXT:    movprfx z4, z24
-; CHECK-NEXT:    fcvtzs z4.d, p0/m, z24.h
-; CHECK-NEXT:    fcmge p10.h, p0/z, z30.h, z5.h
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
-; CHECK-NEXT:    movprfx z31, z26
-; CHECK-NEXT:    fcvtzs z31.d, p0/m, z26.h
+; CHECK-NEXT:    fcmge p10.h, p0/z, z30.h, z6.h
+; CHECK-NEXT:    movprfx z31, z7
+; CHECK-NEXT:    fcvtzs z31.d, p0/m, z7.h
+; CHECK-NEXT:    eor p5.b, p0/z, p5.b, p1.b
 ; CHECK-NEXT:    movprfx z8, z30
 ; CHECK-NEXT:    fcvtzs z8.d, p0/m, z30.h
-; CHECK-NEXT:    mov z1.d, p5/m, z25.d
-; CHECK-NEXT:    fcmge p5.h, p0/z, z29.h, z5.h
-; CHECK-NEXT:    not p6.b, p0/z, p6.b
-; CHECK-NEXT:    mov z0.d, p2/m, z25.d
-; CHECK-NEXT:    fcmuo p2.h, p0/z, z2.h, z2.h
-; CHECK-NEXT:    movprfx z2, z28
-; CHECK-NEXT:    fcvtzs z2.d, p0/m, z28.h
-; CHECK-NEXT:    movprfx z5, z29
-; CHECK-NEXT:    fcvtzs z5.d, p0/m, z29.h
-; CHECK-NEXT:    not p7.b, p0/z, p7.b
-; CHECK-NEXT:    mov z3.d, p6/m, z25.d
-; CHECK-NEXT:    not p6.b, p0/z, p8.b
-; CHECK-NEXT:    fcmgt p8.h, p0/z, z6.h, z27.h
-; CHECK-NEXT:    mov z1.d, p4/m, z7.d
-; CHECK-NEXT:    not p5.b, p0/z, p5.b
-; CHECK-NEXT:    mov z0.d, p3/m, z7.d
-; CHECK-NEXT:    fcmgt p3.h, p0/z, z29.h, z27.h
-; CHECK-NEXT:    sel z9.d, p7, z25.d, z2.d
-; CHECK-NEXT:    not p7.b, p0/z, p9.b
-; CHECK-NEXT:    mov z4.d, p6/m, z25.d
-; CHECK-NEXT:    not p6.b, p0/z, p10.b
+; CHECK-NEXT:    eor p6.b, p0/z, p2.b, p1.b
+; CHECK-NEXT:    fcmuo p2.h, p0/z, z3.h, z3.h
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p1.b
+; CHECK-NEXT:    sel z3.d, p5, z24.d, z4.d
+; CHECK-NEXT:    fcmge p5.h, p0/z, z25.h, z6.h
+; CHECK-NEXT:    mov z0.d, p6/m, z24.d
+; CHECK-NEXT:    fcmgt p6.h, p0/z, z2.h, z27.h
+; CHECK-NEXT:    movprfx z4, z28
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z28.h
+; CHECK-NEXT:    mov z1.d, p4/m, z24.d
+; CHECK-NEXT:    fcmuo p4.h, p0/z, z2.h, z2.h
+; CHECK-NEXT:    movprfx z2, z29
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z29.h
+; CHECK-NEXT:    movprfx z6, z25
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z25.h
+; CHECK-NEXT:    eor p7.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    eor p8.b, p0/z, p8.b, p1.b
+; CHECK-NEXT:    mov z0.d, p3/m, z26.d
+; CHECK-NEXT:    fcmgt p3.h, p0/z, z25.h, z27.h
+; CHECK-NEXT:    eor p9.b, p0/z, p9.b, p1.b
+; CHECK-NEXT:    mov z4.d, p7/m, z24.d
+; CHECK-NEXT:    fcmgt p7.h, p0/z, z5.h, z27.h
+; CHECK-NEXT:    eor p5.b, p0/z, p5.b, p1.b
+; CHECK-NEXT:    mov z31.d, p8/m, z24.d
+; CHECK-NEXT:    fcmuo p8.h, p0/z, z5.h, z5.h
+; CHECK-NEXT:    eor p1.b, p0/z, p10.b, p1.b
 ; CHECK-NEXT:    fcmgt p10.h, p0/z, z28.h, z27.h
-; CHECK-NEXT:    mov z5.d, p5/m, z25.d
-; CHECK-NEXT:    fcmgt p5.h, p0/z, z24.h, z27.h
-; CHECK-NEXT:    fcmuo p9.h, p0/z, z6.h, z6.h
-; CHECK-NEXT:    sel z6.d, p7, z25.d, z31.d
-; CHECK-NEXT:    sel z25.d, p6, z25.d, z8.d
-; CHECK-NEXT:    ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    fcmgt p6.h, p0/z, z26.h, z27.h
-; CHECK-NEXT:    fcmgt p7.h, p0/z, z30.h, z27.h
-; CHECK-NEXT:    fcmuo p4.h, p0/z, z28.h, z28.h
-; CHECK-NEXT:    sel z2.d, p8, z7.d, z3.d
-; CHECK-NEXT:    sel z3.d, p10, z7.d, z9.d
-; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    fcmuo p8.h, p0/z, z29.h, z29.h
-; CHECK-NEXT:    mov z4.d, p5/m, z7.d
-; CHECK-NEXT:    fcmuo p5.h, p0/z, z24.h, z24.h
-; CHECK-NEXT:    fcmuo p10.h, p0/z, z26.h, z26.h
-; CHECK-NEXT:    mov z5.d, p3/m, z7.d
-; CHECK-NEXT:    mov z6.d, p6/m, z7.d
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    sel z5.d, p9, z24.d, z2.d
+; CHECK-NEXT:    fcmgt p9.h, p0/z, z7.h, z27.h
+; CHECK-NEXT:    mov z6.d, p5/m, z24.d
+; CHECK-NEXT:    fcmgt p5.h, p0/z, z30.h, z27.h
+; CHECK-NEXT:    sel z24.d, p1, z24.d, z8.d
+; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    fcmgt p1.h, p0/z, z29.h, z27.h
+; CHECK-NEXT:    mov z1.d, p6/m, z26.d
+; CHECK-NEXT:    fcmuo p6.h, p0/z, z28.h, z28.h
+; CHECK-NEXT:    sel z2.d, p7, z26.d, z3.d
+; CHECK-NEXT:    sel z3.d, p10, z26.d, z4.d
+; CHECK-NEXT:    fcmuo p7.h, p0/z, z7.h, z7.h
+; CHECK-NEXT:    fcmuo p10.h, p0/z, z25.h, z25.h
+; CHECK-NEXT:    sel z4.d, p9, z26.d, z31.d
+; CHECK-NEXT:    fcmuo p9.h, p0/z, z29.h, z29.h
+; CHECK-NEXT:    mov z6.d, p3/m, z26.d
+; CHECK-NEXT:    mov z5.d, p1/m, z26.d
 ; CHECK-NEXT:    fcmuo p0.h, p0/z, z30.h, z30.h
-; CHECK-NEXT:    sel z7.d, p7, z7.d, z25.d
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    mov z2.d, p9/m, #0 // =0x0
-; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    mov z3.d, p4/m, #0 // =0x0
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    mov z4.d, p5/m, #0 // =0x0
-; CHECK-NEXT:    mov z5.d, p8/m, #0 // =0x0
+; CHECK-NEXT:    sel z7.d, p5, z26.d, z24.d
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    mov z1.d, p4/m, #0 // =0x0
+; CHECK-NEXT:    mov z2.d, p8/m, #0 // =0x0
 ; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    mov z3.d, p6/m, #0 // =0x0
+; CHECK-NEXT:    mov z4.d, p7/m, #0 // =0x0
+; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    mov z5.d, p9/m, #0 // =0x0
 ; CHECK-NEXT:    mov z6.d, p10/m, #0 // =0x0
-; CHECK-NEXT:    mov z0.d, p1/m, #0 // =0x0
 ; CHECK-NEXT:    ldr p10, [sp, #1, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    mov z1.d, p2/m, #0 // =0x0
+; CHECK-NEXT:    mov z0.d, p2/m, #0 // =0x0
 ; CHECK-NEXT:    mov z7.d, p0/m, #0 // =0x0
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    addvl sp, sp, #3
+; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
   %a = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f16(<vscale x 16 x half> %x)
@@ -355,225 +357,227 @@ define <vscale x 32 x i64> @llrint_v32i64_v32f16(<vscale x 32 x half> %x) {
 ; CHECK-NEXT:    uunpkhi z5.s, z0.h
 ; CHECK-NEXT:    mov w9, #64511 // =0xfbff
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    uunpklo z6.s, z1.h
-; CHECK-NEXT:    mov z26.h, w9
-; CHECK-NEXT:    uunpkhi z25.s, z1.h
+; CHECK-NEXT:    uunpklo z24.s, z1.h
+; CHECK-NEXT:    uunpkhi z28.s, z1.h
+; CHECK-NEXT:    mov z27.h, w9
 ; CHECK-NEXT:    mov w9, #31743 // =0x7bff
-; CHECK-NEXT:    mov z27.d, #0x8000000000000000
+; CHECK-NEXT:    mov z16.d, z3.d
 ; CHECK-NEXT:    uunpklo z31.s, z2.h
-; CHECK-NEXT:    uunpkhi z12.s, z2.h
-; CHECK-NEXT:    mov z17.d, z3.d
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    mov z7.d, #0x8000000000000000
 ; CHECK-NEXT:    uunpklo z0.d, z4.s
+; CHECK-NEXT:    uunpklo z6.d, z5.s
 ; CHECK-NEXT:    uunpkhi z4.d, z4.s
-; CHECK-NEXT:    uunpklo z7.d, z5.s
-; CHECK-NEXT:    uunpkhi z24.d, z5.s
-; CHECK-NEXT:    uunpklo z28.d, z6.s
-; CHECK-NEXT:    uunpkhi z29.d, z6.s
-; CHECK-NEXT:    uunpklo z8.d, z25.s
-; CHECK-NEXT:    uunpkhi z9.d, z25.s
-; CHECK-NEXT:    uunpklo z16.s, z17.h
-; CHECK-NEXT:    uunpklo z11.d, z31.s
-; CHECK-NEXT:    uunpkhi z14.d, z31.s
-; CHECK-NEXT:    uunpkhi z17.s, z17.h
-; CHECK-NEXT:    movprfx z30, z4
-; CHECK-NEXT:    frintx z30.h, p0/m, z4.h
-; CHECK-NEXT:    movprfx z4, z7
-; CHECK-NEXT:    frintx z4.h, p0/m, z7.h
-; CHECK-NEXT:    frintx z0.h, p0/m, z0.h
-; CHECK-NEXT:    movprfx z6, z24
-; CHECK-NEXT:    frintx z6.h, p0/m, z24.h
-; CHECK-NEXT:    movprfx z7, z28
-; CHECK-NEXT:    frintx z7.h, p0/m, z28.h
-; CHECK-NEXT:    movprfx z25, z29
-; CHECK-NEXT:    frintx z25.h, p0/m, z29.h
-; CHECK-NEXT:    movprfx z3, z9
-; CHECK-NEXT:    frintx z3.h, p0/m, z9.h
-; CHECK-NEXT:    mov z5.h, w9
-; CHECK-NEXT:    movprfx z31, z11
-; CHECK-NEXT:    frintx z31.h, p0/m, z11.h
-; CHECK-NEXT:    movprfx z9, z14
-; CHECK-NEXT:    frintx z9.h, p0/m, z14.h
-; CHECK-NEXT:    fcmge p1.h, p0/z, z0.h, z26.h
-; CHECK-NEXT:    fcmge p4.h, p0/z, z4.h, z26.h
-; CHECK-NEXT:    movprfx z24, z0
-; CHECK-NEXT:    fcvtzs z24.d, p0/m, z0.h
-; CHECK-NEXT:    fcmge p2.h, p0/z, z30.h, z26.h
-; CHECK-NEXT:    movprfx z29, z4
-; CHECK-NEXT:    fcvtzs z29.d, p0/m, z4.h
-; CHECK-NEXT:    fcmge p6.h, p0/z, z6.h, z26.h
-; CHECK-NEXT:    movprfx z28, z30
-; CHECK-NEXT:    fcvtzs z28.d, p0/m, z30.h
-; CHECK-NEXT:    movprfx z10, z6
-; CHECK-NEXT:    fcvtzs z10.d, p0/m, z6.h
-; CHECK-NEXT:    str z0, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    fcmge p3.h, p0/z, z7.h, z26.h
-; CHECK-NEXT:    movprfx z13, z7
-; CHECK-NEXT:    fcvtzs z13.d, p0/m, z7.h
-; CHECK-NEXT:    movprfx z15, z25
-; CHECK-NEXT:    fcvtzs z15.d, p0/m, z25.h
-; CHECK-NEXT:    not p5.b, p0/z, p1.b
-; CHECK-NEXT:    movprfx z18, z3
-; CHECK-NEXT:    fcvtzs z18.d, p0/m, z3.h
+; CHECK-NEXT:    uunpkhi z25.d, z5.s
+; CHECK-NEXT:    uunpklo z26.d, z24.s
+; CHECK-NEXT:    uunpkhi z24.d, z24.s
+; CHECK-NEXT:    uunpklo z9.d, z28.s
+; CHECK-NEXT:    uunpkhi z10.d, z28.s
+; CHECK-NEXT:    uunpkhi z12.s, z2.h
+; CHECK-NEXT:    uunpkhi z13.d, z31.s
+; CHECK-NEXT:    movprfx z29, z0
+; CHECK-NEXT:    frintx z29.h, p0/m, z0.h
+; CHECK-NEXT:    movprfx z5, z6
+; CHECK-NEXT:    frintx z5.h, p0/m, z6.h
+; CHECK-NEXT:    movprfx z3, z4
+; CHECK-NEXT:    frintx z3.h, p0/m, z4.h
+; CHECK-NEXT:    movprfx z4, z25
+; CHECK-NEXT:    frintx z4.h, p0/m, z25.h
+; CHECK-NEXT:    movprfx z25, z26
+; CHECK-NEXT:    frintx z25.h, p0/m, z26.h
+; CHECK-NEXT:    movprfx z26, z24
+; CHECK-NEXT:    frintx z26.h, p0/m, z24.h
+; CHECK-NEXT:    mov z6.h, w9
+; CHECK-NEXT:    movprfx z28, z9
+; CHECK-NEXT:    frintx z28.h, p0/m, z9.h
+; CHECK-NEXT:    uunpklo z9.d, z31.s
+; CHECK-NEXT:    uunpklo z15.d, z12.s
+; CHECK-NEXT:    uunpkhi z12.d, z12.s
+; CHECK-NEXT:    fcmge p2.h, p0/z, z29.h, z27.h
+; CHECK-NEXT:    fcmge p4.h, p0/z, z5.h, z27.h
+; CHECK-NEXT:    str z29, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    fcmge p3.h, p0/z, z3.h, z27.h
+; CHECK-NEXT:    fcvtzs z29.d, p0/m, z29.h
+; CHECK-NEXT:    movprfx z8, z5
+; CHECK-NEXT:    fcvtzs z8.d, p0/m, z5.h
+; CHECK-NEXT:    movprfx z30, z3
+; CHECK-NEXT:    fcvtzs z30.d, p0/m, z3.h
+; CHECK-NEXT:    fcmge p7.h, p0/z, z4.h, z27.h
+; CHECK-NEXT:    str z3, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    fcmge p8.h, p0/z, z25.h, z27.h
+; CHECK-NEXT:    movprfx z11, z25
+; CHECK-NEXT:    fcvtzs z11.d, p0/m, z25.h
+; CHECK-NEXT:    movprfx z14, z26
+; CHECK-NEXT:    fcvtzs z14.d, p0/m, z26.h
+; CHECK-NEXT:    eor p5.b, p0/z, p2.b, p1.b
+; CHECK-NEXT:    movprfx z31, z9
+; CHECK-NEXT:    frintx z31.h, p0/m, z9.h
+; CHECK-NEXT:    movprfx z9, z13
+; CHECK-NEXT:    frintx z9.h, p0/m, z13.h
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p1.b
+; CHECK-NEXT:    frintx z12.h, p0/m, z12.h
+; CHECK-NEXT:    fcmgt p2.h, p0/z, z5.h, z6.h
+; CHECK-NEXT:    eor p6.b, p0/z, p3.b, p1.b
+; CHECK-NEXT:    fcmgt p3.h, p0/z, z3.h, z6.h
+; CHECK-NEXT:    sel z0.d, p5, z7.d, z29.d
+; CHECK-NEXT:    sel z29.d, p4, z7.d, z8.d
+; CHECK-NEXT:    movprfx z8, z4
+; CHECK-NEXT:    fcvtzs z8.d, p0/m, z4.h
+; CHECK-NEXT:    fcmge p5.h, p0/z, z26.h, z27.h
+; CHECK-NEXT:    movprfx z3, z10
+; CHECK-NEXT:    frintx z3.h, p0/m, z10.h
+; CHECK-NEXT:    uunpklo z10.s, z16.h
+; CHECK-NEXT:    uunpkhi z16.s, z16.h
+; CHECK-NEXT:    mov z30.d, p6/m, z7.d
+; CHECK-NEXT:    eor p6.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    fcmge p7.h, p0/z, z9.h, z27.h
+; CHECK-NEXT:    eor p4.b, p0/z, p8.b, p1.b
 ; CHECK-NEXT:    movprfx z20, z31
 ; CHECK-NEXT:    fcvtzs z20.d, p0/m, z31.h
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
 ; CHECK-NEXT:    movprfx z21, z9
 ; CHECK-NEXT:    fcvtzs z21.d, p0/m, z9.h
-; CHECK-NEXT:    fcmgt p1.h, p0/z, z30.h, z5.h
-; CHECK-NEXT:    sel z0.d, p5, z27.d, z24.d
-; CHECK-NEXT:    not p7.b, p0/z, p2.b
-; CHECK-NEXT:    fcmgt p2.h, p0/z, z4.h, z5.h
-; CHECK-NEXT:    mov z29.d, p4/m, z27.d
-; CHECK-NEXT:    fcmge p4.h, p0/z, z25.h, z26.h
-; CHECK-NEXT:    not p5.b, p0/z, p6.b
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
-; CHECK-NEXT:    fcmge p6.h, p0/z, z9.h, z26.h
-; CHECK-NEXT:    fcmgt p9.h, p0/z, z6.h, z5.h
-; CHECK-NEXT:    str z0, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    sel z0.d, p7, z27.d, z28.d
-; CHECK-NEXT:    movprfx z28, z8
-; CHECK-NEXT:    frintx z28.h, p0/m, z8.h
-; CHECK-NEXT:    sel z8.d, p5, z27.d, z10.d
-; CHECK-NEXT:    uunpklo z10.d, z12.s
-; CHECK-NEXT:    uunpkhi z12.d, z12.s
-; CHECK-NEXT:    not p5.b, p0/z, p4.b
-; CHECK-NEXT:    sel z11.d, p3, z27.d, z13.d
-; CHECK-NEXT:    uunpklo z13.d, z16.s
-; CHECK-NEXT:    fcmge p3.h, p0/z, z3.h, z26.h
-; CHECK-NEXT:    not p6.b, p0/z, p6.b
-; CHECK-NEXT:    str z0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT:    sel z24.d, p5, z27.d, z15.d
-; CHECK-NEXT:    uunpkhi z15.d, z16.s
+; CHECK-NEXT:    mov z8.d, p6/m, z7.d
+; CHECK-NEXT:    eor p6.b, p0/z, p5.b, p1.b
+; CHECK-NEXT:    fcmge p5.h, p0/z, z28.h, z27.h
+; CHECK-NEXT:    uunpklo z13.d, z10.s
+; CHECK-NEXT:    uunpkhi z17.d, z10.s
+; CHECK-NEXT:    movprfx z10, z15
+; CHECK-NEXT:    frintx z10.h, p0/m, z15.h
+; CHECK-NEXT:    uunpklo z15.d, z16.s
+; CHECK-NEXT:    uunpkhi z16.d, z16.s
+; CHECK-NEXT:    movprfx z18, z3
+; CHECK-NEXT:    fcvtzs z18.d, p0/m, z3.h
+; CHECK-NEXT:    mov z11.d, p4/m, z7.d
+; CHECK-NEXT:    sel z24.d, p6, z7.d, z14.d
 ; CHECK-NEXT:    movprfx z14, z28
 ; CHECK-NEXT:    fcvtzs z14.d, p0/m, z28.h
-; CHECK-NEXT:    frintx z10.h, p0/m, z10.h
-; CHECK-NEXT:    uunpklo z16.d, z17.s
-; CHECK-NEXT:    frintx z12.h, p0/m, z12.h
-; CHECK-NEXT:    uunpkhi z17.d, z17.s
+; CHECK-NEXT:    fcmge p4.h, p0/z, z3.h, z27.h
+; CHECK-NEXT:    fcmge p6.h, p0/z, z31.h, z27.h
+; CHECK-NEXT:    str z0, [sp, #2, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    movprfx z19, z13
 ; CHECK-NEXT:    frintx z19.h, p0/m, z13.h
-; CHECK-NEXT:    fcmge p4.h, p0/z, z28.h, z26.h
-; CHECK-NEXT:    fcmge p5.h, p0/z, z31.h, z26.h
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
+; CHECK-NEXT:    frintx z17.h, p0/m, z17.h
+; CHECK-NEXT:    fcmge p8.h, p0/z, z10.h, z27.h
 ; CHECK-NEXT:    frintx z15.h, p0/m, z15.h
-; CHECK-NEXT:    fcmge p7.h, p0/z, z10.h, z26.h
+; CHECK-NEXT:    fcmge p9.h, p0/z, z12.h, z27.h
 ; CHECK-NEXT:    frintx z16.h, p0/m, z16.h
-; CHECK-NEXT:    fcmge p8.h, p0/z, z12.h, z26.h
-; CHECK-NEXT:    frintx z17.h, p0/m, z17.h
-; CHECK-NEXT:    movprfx z23, z19
-; CHECK-NEXT:    fcvtzs z23.d, p0/m, z19.h
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
-; CHECK-NEXT:    not p5.b, p0/z, p5.b
-; CHECK-NEXT:    sel z13.d, p3, z27.d, z18.d
-; CHECK-NEXT:    fcmge p3.h, p0/z, z19.h, z26.h
-; CHECK-NEXT:    movprfx z0, z15
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z15.h
-; CHECK-NEXT:    sel z22.d, p4, z27.d, z14.d
-; CHECK-NEXT:    sel z18.d, p6, z27.d, z21.d
-; CHECK-NEXT:    movprfx z21, z12
-; CHECK-NEXT:    fcvtzs z21.d, p0/m, z12.h
-; CHECK-NEXT:    movprfx z1, z16
-; CHECK-NEXT:    fcvtzs z1.d, p0/m, z16.h
-; CHECK-NEXT:    sel z14.d, p5, z27.d, z20.d
-; CHECK-NEXT:    fcmge p4.h, p0/z, z15.h, z26.h
+; CHECK-NEXT:    eor p5.b, p0/z, p5.b, p1.b
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p1.b
+; CHECK-NEXT:    eor p6.b, p0/z, p6.b, p1.b
+; CHECK-NEXT:    sel z22.d, p5, z7.d, z14.d
+; CHECK-NEXT:    fcmge p5.h, p0/z, z17.h, z27.h
+; CHECK-NEXT:    eor p7.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    sel z13.d, p4, z7.d, z18.d
+; CHECK-NEXT:    movprfx z2, z16
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z16.h
+; CHECK-NEXT:    fcmge p4.h, p0/z, z19.h, z27.h
+; CHECK-NEXT:    sel z14.d, p6, z7.d, z20.d
+; CHECK-NEXT:    movprfx z1, z15
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z15.h
+; CHECK-NEXT:    sel z18.d, p7, z7.d, z21.d
+; CHECK-NEXT:    eor p6.b, p0/z, p8.b, p1.b
 ; CHECK-NEXT:    movprfx z20, z10
 ; CHECK-NEXT:    fcvtzs z20.d, p0/m, z10.h
-; CHECK-NEXT:    movprfx z2, z17
-; CHECK-NEXT:    fcvtzs z2.d, p0/m, z17.h
-; CHECK-NEXT:    not p5.b, p0/z, p7.b
-; CHECK-NEXT:    fcmge p6.h, p0/z, z16.h, z26.h
-; CHECK-NEXT:    not p7.b, p0/z, p8.b
-; CHECK-NEXT:    fcmge p8.h, p0/z, z17.h, z26.h
-; CHECK-NEXT:    mov z26.d, #0x7fffffffffffffff
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
-; CHECK-NEXT:    mov z20.d, p5/m, z27.d
-; CHECK-NEXT:    mov z21.d, p7/m, z27.d
-; CHECK-NEXT:    not p5.b, p0/z, p6.b
-; CHECK-NEXT:    mov z23.d, p3/m, z27.d
-; CHECK-NEXT:    fcmgt p3.h, p0/z, z17.h, z5.h
-; CHECK-NEXT:    not p6.b, p0/z, p8.b
-; CHECK-NEXT:    mov z0.d, p4/m, z27.d
-; CHECK-NEXT:    fcmgt p4.h, p0/z, z16.h, z5.h
-; CHECK-NEXT:    mov z1.d, p5/m, z27.d
-; CHECK-NEXT:    fcmuo p5.h, p0/z, z16.h, z16.h
-; CHECK-NEXT:    mov z29.d, p2/m, z26.d
-; CHECK-NEXT:    mov z2.d, p6/m, z27.d
-; CHECK-NEXT:    ldr z27, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    fcmgt p6.h, p0/z, z7.h, z5.h
-; CHECK-NEXT:    fcmgt p2.h, p0/z, z12.h, z5.h
-; CHECK-NEXT:    fcmuo p8.h, p0/z, z17.h, z17.h
-; CHECK-NEXT:    fcmgt p7.h, p0/z, z28.h, z5.h
-; CHECK-NEXT:    mov z1.d, p4/m, z26.d
-; CHECK-NEXT:    fcmuo p4.h, p0/z, z15.h, z15.h
-; CHECK-NEXT:    mov z8.d, p9/m, z26.d
-; CHECK-NEXT:    mov z27.d, p1/m, z26.d
-; CHECK-NEXT:    fcmgt p1.h, p0/z, z15.h, z5.h
-; CHECK-NEXT:    mov z2.d, p3/m, z26.d
-; CHECK-NEXT:    fcmgt p3.h, p0/z, z19.h, z5.h
-; CHECK-NEXT:    mov z11.d, p6/m, z26.d
+; CHECK-NEXT:    fcmge p7.h, p0/z, z15.h, z27.h
+; CHECK-NEXT:    eor p8.b, p0/z, p9.b, p1.b
+; CHECK-NEXT:    movprfx z21, z12
+; CHECK-NEXT:    fcvtzs z21.d, p0/m, z12.h
+; CHECK-NEXT:    fcmge p9.h, p0/z, z16.h, z27.h
+; CHECK-NEXT:    movprfx z23, z19
+; CHECK-NEXT:    fcvtzs z23.d, p0/m, z19.h
+; CHECK-NEXT:    movprfx z0, z17
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z17.h
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p1.b
+; CHECK-NEXT:    mov z27.d, #0x7fffffffffffffff
+; CHECK-NEXT:    eor p5.b, p0/z, p5.b, p1.b
+; CHECK-NEXT:    mov z20.d, p6/m, z7.d
+; CHECK-NEXT:    fcmgt p6.h, p0/z, z4.h, z6.h
+; CHECK-NEXT:    eor p7.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    mov z21.d, p8/m, z7.d
+; CHECK-NEXT:    fcmgt p8.h, p0/z, z28.h, z6.h
+; CHECK-NEXT:    eor p1.b, p0/z, p9.b, p1.b
+; CHECK-NEXT:    mov z23.d, p4/m, z7.d
+; CHECK-NEXT:    mov z0.d, p5/m, z7.d
+; CHECK-NEXT:    mov z1.d, p7/m, z7.d
+; CHECK-NEXT:    fcmgt p4.h, p0/z, z16.h, z6.h
+; CHECK-NEXT:    fcmuo p5.h, p0/z, z15.h, z15.h
+; CHECK-NEXT:    mov z2.d, p1/m, z7.d
+; CHECK-NEXT:    fcmgt p1.h, p0/z, z15.h, z6.h
+; CHECK-NEXT:    sel z7.d, p3, z27.d, z30.d
+; CHECK-NEXT:    fcmgt p3.h, p0/z, z17.h, z6.h
+; CHECK-NEXT:    fcmuo p9.h, p0/z, z16.h, z16.h
+; CHECK-NEXT:    mov z29.d, p2/m, z27.d
+; CHECK-NEXT:    fcmgt p2.h, p0/z, z12.h, z6.h
+; CHECK-NEXT:    sel z30.d, p6, z27.d, z8.d
 ; CHECK-NEXT:    fcmuo p6.h, p0/z, z19.h, z19.h
+; CHECK-NEXT:    mov z2.d, p4/m, z27.d
+; CHECK-NEXT:    fcmuo p4.h, p0/z, z17.h, z17.h
+; CHECK-NEXT:    fcmgt p7.h, p0/z, z25.h, z6.h
+; CHECK-NEXT:    mov z1.d, p1/m, z27.d
+; CHECK-NEXT:    fcmgt p1.h, p0/z, z19.h, z6.h
+; CHECK-NEXT:    mov z0.d, p3/m, z27.d
+; CHECK-NEXT:    fcmgt p3.h, p0/z, z10.h, z6.h
+; CHECK-NEXT:    mov z2.d, p9/m, #0 // =0x0
+; CHECK-NEXT:    sel z15.d, p2, z27.d, z21.d
+; CHECK-NEXT:    fcmuo p2.h, p0/z, z10.h, z10.h
 ; CHECK-NEXT:    mov z1.d, p5/m, #0 // =0x0
-; CHECK-NEXT:    fcmgt p5.h, p0/z, z9.h, z5.h
-; CHECK-NEXT:    sel z15.d, p2, z26.d, z21.d
-; CHECK-NEXT:    fcmuo p2.h, p0/z, z12.h, z12.h
-; CHECK-NEXT:    mov z2.d, p8/m, #0 // =0x0
-; CHECK-NEXT:    sel z16.d, p7, z26.d, z22.d
-; CHECK-NEXT:    mov z0.d, p1/m, z26.d
-; CHECK-NEXT:    fcmgt p1.h, p0/z, z10.h, z5.h
-; CHECK-NEXT:    str z1, [x8, #14, mul vl]
-; CHECK-NEXT:    sel z17.d, p3, z26.d, z23.d
-; CHECK-NEXT:    fcmuo p3.h, p0/z, z10.h, z10.h
-; CHECK-NEXT:    str z2, [x8, #15, mul vl]
-; CHECK-NEXT:    sel z2.d, p5, z26.d, z18.d
-; CHECK-NEXT:    fcmuo p5.h, p0/z, z9.h, z9.h
+; CHECK-NEXT:    fcmgt p5.h, p0/z, z9.h, z6.h
+; CHECK-NEXT:    sel z8.d, p7, z27.d, z11.d
+; CHECK-NEXT:    sel z16.d, p1, z27.d, z23.d
+; CHECK-NEXT:    fcmuo p1.h, p0/z, z12.h, z12.h
 ; CHECK-NEXT:    mov z0.d, p4/m, #0 // =0x0
-; CHECK-NEXT:    fcmgt p4.h, p0/z, z3.h, z5.h
-; CHECK-NEXT:    mov z15.d, p2/m, #0 // =0x0
-; CHECK-NEXT:    sel z1.d, p1, z26.d, z20.d
-; CHECK-NEXT:    fcmgt p1.h, p0/z, z31.h, z5.h
-; CHECK-NEXT:    mov z17.d, p6/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p2.h, p0/z, z31.h, z31.h
+; CHECK-NEXT:    str z2, [x8, #15, mul vl]
+; CHECK-NEXT:    fcmgt p4.h, p0/z, z3.h, z6.h
+; CHECK-NEXT:    sel z11.d, p8, z27.d, z22.d
+; CHECK-NEXT:    str z1, [x8, #14, mul vl]
+; CHECK-NEXT:    sel z1.d, p3, z27.d, z20.d
+; CHECK-NEXT:    fcmgt p3.h, p0/z, z31.h, z6.h
 ; CHECK-NEXT:    str z0, [x8, #13, mul vl]
+; CHECK-NEXT:    sel z2.d, p5, z27.d, z18.d
+; CHECK-NEXT:    fcmuo p5.h, p0/z, z9.h, z9.h
+; CHECK-NEXT:    mov z15.d, p1/m, #0 // =0x0
+; CHECK-NEXT:    fcmuo p1.h, p0/z, z31.h, z31.h
+; CHECK-NEXT:    mov z16.d, p6/m, #0 // =0x0
+; CHECK-NEXT:    mov z1.d, p2/m, #0 // =0x0
+; CHECK-NEXT:    fcmgt p2.h, p0/z, z26.h, z6.h
+; CHECK-NEXT:    sel z0.d, p3, z27.d, z14.d
+; CHECK-NEXT:    fcmuo p3.h, p0/z, z3.h, z3.h
+; CHECK-NEXT:    sel z3.d, p4, z27.d, z13.d
+; CHECK-NEXT:    str z16, [x8, #12, mul vl]
 ; CHECK-NEXT:    mov z2.d, p5/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p5.h, p0/z, z25.h, z25.h
-; CHECK-NEXT:    str z17, [x8, #12, mul vl]
-; CHECK-NEXT:    mov z1.d, p3/m, #0 // =0x0
-; CHECK-NEXT:    fcmgt p3.h, p0/z, z25.h, z5.h
+; CHECK-NEXT:    fcmuo p5.h, p0/z, z26.h, z26.h
 ; CHECK-NEXT:    str z15, [x8, #11, mul vl]
-; CHECK-NEXT:    sel z0.d, p1, z26.d, z14.d
-; CHECK-NEXT:    fcmuo p1.h, p0/z, z3.h, z3.h
-; CHECK-NEXT:    sel z3.d, p4, z26.d, z13.d
 ; CHECK-NEXT:    fcmuo p4.h, p0/z, z28.h, z28.h
 ; CHECK-NEXT:    str z1, [x8, #10, mul vl]
-; CHECK-NEXT:    sel z1.d, p3, z26.d, z24.d
-; CHECK-NEXT:    fcmuo p3.h, p0/z, z7.h, z7.h
-; CHECK-NEXT:    ldr z7, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    mov z0.d, p1/m, #0 // =0x0
+; CHECK-NEXT:    sel z1.d, p2, z27.d, z24.d
+; CHECK-NEXT:    ldr z24, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    str z2, [x8, #9, mul vl]
-; CHECK-NEXT:    mov z0.d, p2/m, #0 // =0x0
-; CHECK-NEXT:    mov z3.d, p1/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p1.h, p0/z, z6.h, z6.h
-; CHECK-NEXT:    mov z16.d, p4/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p4.h, p0/z, z4.h, z4.h
-; CHECK-NEXT:    fcmgt p2.h, p0/z, z7.h, z5.h
-; CHECK-NEXT:    mov z1.d, p5/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p5.h, p0/z, z30.h, z30.h
+; CHECK-NEXT:    mov z3.d, p3/m, #0 // =0x0
+; CHECK-NEXT:    fcmuo p2.h, p0/z, z25.h, z25.h
+; CHECK-NEXT:    fcmuo p3.h, p0/z, z4.h, z4.h
 ; CHECK-NEXT:    str z0, [x8, #8, mul vl]
-; CHECK-NEXT:    fcmuo p0.h, p0/z, z7.h, z7.h
-; CHECK-NEXT:    mov z11.d, p3/m, #0 // =0x0
+; CHECK-NEXT:    mov z1.d, p5/m, #0 // =0x0
+; CHECK-NEXT:    mov z11.d, p4/m, #0 // =0x0
+; CHECK-NEXT:    ldr z0, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    str z3, [x8, #7, mul vl]
+; CHECK-NEXT:    fcmgt p1.h, p0/z, z24.h, z6.h
+; CHECK-NEXT:    fcmuo p4.h, p0/z, z5.h, z5.h
+; CHECK-NEXT:    mov z8.d, p2/m, #0 // =0x0
+; CHECK-NEXT:    mov z30.d, p3/m, #0 // =0x0
+; CHECK-NEXT:    fcmuo p5.h, p0/z, z0.h, z0.h
 ; CHECK-NEXT:    ldr z0, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    str z16, [x8, #6, mul vl]
-; CHECK-NEXT:    mov z8.d, p1/m, #0 // =0x0
+; CHECK-NEXT:    str z11, [x8, #6, mul vl]
 ; CHECK-NEXT:    str z1, [x8, #5, mul vl]
+; CHECK-NEXT:    fcmuo p0.h, p0/z, z24.h, z24.h
+; CHECK-NEXT:    str z8, [x8, #4, mul vl]
 ; CHECK-NEXT:    mov z29.d, p4/m, #0 // =0x0
-; CHECK-NEXT:    mov z27.d, p5/m, #0 // =0x0
-; CHECK-NEXT:    str z11, [x8, #4, mul vl]
-; CHECK-NEXT:    str z8, [x8, #3, mul vl]
-; CHECK-NEXT:    mov z0.d, p2/m, z26.d
+; CHECK-NEXT:    str z30, [x8, #3, mul vl]
+; CHECK-NEXT:    mov z0.d, p1/m, z27.d
+; CHECK-NEXT:    mov z7.d, p5/m, #0 // =0x0
 ; CHECK-NEXT:    str z29, [x8, #2, mul vl]
-; CHECK-NEXT:    str z27, [x8, #1, mul vl]
 ; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
+; CHECK-NEXT:    str z7, [x8, #1, mul vl]
 ; CHECK-NEXT:    str z0, [x8]
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
@@ -614,14 +618,15 @@ define <vscale x 1 x i64> @llrint_v1i64_v1f32(<vscale x 1 x float> %x) {
 ; CHECK-NEXT:    mov z2.d, #0x8000000000000000
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    mov w8, #1593835519 // =0x5effffff
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    frintx z0.s, p0/m, z0.s
 ; CHECK-NEXT:    mov z3.s, w8
 ; CHECK-NEXT:    fcmge p1.s, p0/z, z0.s, z1.s
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z0.s
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p2.s, p0/z, z0.s, z3.s
 ; CHECK-NEXT:    mov z3.d, #0x7fffffffffffffff
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
 ; CHECK-NEXT:    fcmuo p0.s, p0/z, z0.s, z0.s
 ; CHECK-NEXT:    mov z1.d, p1/m, z2.d
 ; CHECK-NEXT:    sel z0.d, p2, z3.d, z1.d
@@ -640,14 +645,15 @@ define <vscale x 2 x i64> @llrint_v2i64_v2f32(<vscale x 2 x float> %x) {
 ; CHECK-NEXT:    mov z2.d, #0x8000000000000000
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    mov w8, #1593835519 // =0x5effffff
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    frintx z0.s, p0/m, z0.s
 ; CHECK-NEXT:    mov z3.s, w8
 ; CHECK-NEXT:    fcmge p1.s, p0/z, z0.s, z1.s
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z0.s
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p2.s, p0/z, z0.s, z3.s
 ; CHECK-NEXT:    mov z3.d, #0x7fffffffffffffff
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
 ; CHECK-NEXT:    fcmuo p0.s, p0/z, z0.s, z0.s
 ; CHECK-NEXT:    mov z1.d, p1/m, z2.d
 ; CHECK-NEXT:    sel z0.d, p2, z3.d, z1.d
@@ -672,6 +678,7 @@ define <vscale x 4 x i64> @llrint_v4i64_v4f32(<vscale x 4 x float> %x) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov z2.s, w8
 ; CHECK-NEXT:    mov w8, #1593835519 // =0x5effffff
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    mov z3.s, w8
 ; CHECK-NEXT:    mov z6.d, #0x7fffffffffffffff
 ; CHECK-NEXT:    frintx z1.s, p0/m, z1.s
@@ -683,10 +690,10 @@ define <vscale x 4 x i64> @llrint_v4i64_v4f32(<vscale x 4 x float> %x) {
 ; CHECK-NEXT:    fcvtzs z4.d, p0/m, z1.s
 ; CHECK-NEXT:    movprfx z5, z0
 ; CHECK-NEXT:    fcvtzs z5.d, p0/m, z0.s
-; CHECK-NEXT:    fcmgt p3.s, p0/z, z1.s, z3.s
 ; CHECK-NEXT:    fcmgt p4.s, p0/z, z0.s, z3.s
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
+; CHECK-NEXT:    fcmgt p3.s, p0/z, z1.s, z3.s
 ; CHECK-NEXT:    sel z3.d, p1, z2.d, z4.d
 ; CHECK-NEXT:    fcmuo p1.s, p0/z, z1.s, z1.s
 ; CHECK-NEXT:    fcmuo p0.s, p0/z, z0.s, z0.s
@@ -719,10 +726,11 @@ define <vscale x 8 x i64> @llrint_v8i64_v8f32(<vscale x 8 x float> %x) {
 ; CHECK-NEXT:    uunpkhi z0.d, z0.s
 ; CHECK-NEXT:    mov w8, #-553648128 // =0xdf000000
 ; CHECK-NEXT:    uunpklo z3.d, z1.s
-; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    uunpkhi z1.d, z1.s
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov z4.s, w8
 ; CHECK-NEXT:    mov w8, #1593835519 // =0x5effffff
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    mov z5.d, #0x8000000000000000
 ; CHECK-NEXT:    mov z6.s, w8
 ; CHECK-NEXT:    mov z26.d, #0x7fffffffffffffff
@@ -734,35 +742,35 @@ define <vscale x 8 x i64> @llrint_v8i64_v8f32(<vscale x 8 x float> %x) {
 ; CHECK-NEXT:    fcmge p2.s, p0/z, z0.s, z4.s
 ; CHECK-NEXT:    movprfx z7, z0
 ; CHECK-NEXT:    fcvtzs z7.d, p0/m, z0.s
-; CHECK-NEXT:    fcmge p3.s, p0/z, z3.s, z4.s
-; CHECK-NEXT:    fcmge p4.s, p0/z, z1.s, z4.s
+; CHECK-NEXT:    fcmge p4.s, p0/z, z3.s, z4.s
+; CHECK-NEXT:    fcmge p5.s, p0/z, z1.s, z4.s
 ; CHECK-NEXT:    movprfx z4, z2
 ; CHECK-NEXT:    fcvtzs z4.d, p0/m, z2.s
 ; CHECK-NEXT:    movprfx z24, z3
 ; CHECK-NEXT:    fcvtzs z24.d, p0/m, z3.s
 ; CHECK-NEXT:    movprfx z25, z1
 ; CHECK-NEXT:    fcvtzs z25.d, p0/m, z1.s
-; CHECK-NEXT:    fcmgt p7.s, p0/z, z3.s, z6.s
-; CHECK-NEXT:    fcmgt p5.s, p0/z, z2.s, z6.s
-; CHECK-NEXT:    fcmgt p6.s, p0/z, z0.s, z6.s
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
+; CHECK-NEXT:    fcmgt p6.s, p0/z, z2.s, z6.s
+; CHECK-NEXT:    fcmgt p7.s, p0/z, z0.s, z6.s
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p3.b
 ; CHECK-NEXT:    mov z4.d, p1/m, z5.d
 ; CHECK-NEXT:    fcmgt p1.s, p0/z, z1.s, z6.s
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
+; CHECK-NEXT:    eor p3.b, p0/z, p5.b, p3.b
+; CHECK-NEXT:    fcmgt p5.s, p0/z, z3.s, z6.s
 ; CHECK-NEXT:    sel z6.d, p2, z5.d, z7.d
+; CHECK-NEXT:    sel z7.d, p4, z5.d, z24.d
+; CHECK-NEXT:    fcmuo p4.s, p0/z, z3.s, z3.s
 ; CHECK-NEXT:    fcmuo p2.s, p0/z, z2.s, z2.s
-; CHECK-NEXT:    sel z7.d, p3, z5.d, z24.d
+; CHECK-NEXT:    sel z5.d, p3, z5.d, z25.d
 ; CHECK-NEXT:    fcmuo p3.s, p0/z, z0.s, z0.s
-; CHECK-NEXT:    sel z5.d, p4, z5.d, z25.d
-; CHECK-NEXT:    fcmuo p4.s, p0/z, z3.s, z3.s
 ; CHECK-NEXT:    fcmuo p0.s, p0/z, z1.s, z1.s
-; CHECK-NEXT:    sel z0.d, p5, z26.d, z4.d
-; CHECK-NEXT:    sel z1.d, p6, z26.d, z6.d
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z2.d, p7, z26.d, z7.d
+; CHECK-NEXT:    sel z0.d, p6, z26.d, z4.d
+; CHECK-NEXT:    sel z1.d, p7, z26.d, z6.d
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    sel z2.d, p5, z26.d, z7.d
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    sel z3.d, p1, z26.d, z5.d
 ; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    mov z0.d, p2/m, #0 // =0x0
@@ -782,7 +790,7 @@ define <vscale x 16 x i64> @llrint_v16i64_v16f32(<vscale x 16 x float> %x) {
 ; CHECK-LABEL: llrint_v16i64_v16f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
+; CHECK-NEXT:    addvl sp, sp, #-4
 ; CHECK-NEXT:    str p10, [sp, #1, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Folded Spill
@@ -790,119 +798,125 @@ define <vscale x 16 x i64> @llrint_v16i64_v16f32(<vscale x 16 x float> %x) {
 ; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT:    str z8, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT:    str z10, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z8, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
+; CHECK-NEXT:    .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
+; CHECK-NEXT:    .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
 ; CHECK-NEXT:    uunpklo z4.d, z0.s
-; CHECK-NEXT:    uunpkhi z0.d, z0.s
+; CHECK-NEXT:    uunpkhi z6.d, z0.s
 ; CHECK-NEXT:    mov w8, #-553648128 // =0xdf000000
-; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    uunpklo z7.d, z1.s
-; CHECK-NEXT:    uunpkhi z1.d, z1.s
-; CHECK-NEXT:    uunpklo z24.d, z2.s
-; CHECK-NEXT:    uunpkhi z2.d, z2.s
-; CHECK-NEXT:    uunpklo z25.d, z3.s
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    uunpkhi z24.d, z1.s
+; CHECK-NEXT:    uunpkhi z25.d, z2.s
+; CHECK-NEXT:    uunpklo z26.d, z3.s
 ; CHECK-NEXT:    uunpkhi z3.d, z3.s
-; CHECK-NEXT:    mov z26.d, #0x7fffffffffffffff
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    mov z0.d, #0x8000000000000000
 ; CHECK-NEXT:    movprfx z5, z4
 ; CHECK-NEXT:    frintx z5.s, p0/m, z4.s
-; CHECK-NEXT:    movprfx z6, z0
-; CHECK-NEXT:    frintx z6.s, p0/m, z0.s
+; CHECK-NEXT:    movprfx z1, z6
+; CHECK-NEXT:    frintx z1.s, p0/m, z6.s
+; CHECK-NEXT:    uunpklo z6.d, z2.s
+; CHECK-NEXT:    movprfx z2, z7
+; CHECK-NEXT:    frintx z2.s, p0/m, z7.s
 ; CHECK-NEXT:    mov z4.s, w8
-; CHECK-NEXT:    frintx z7.s, p0/m, z7.s
-; CHECK-NEXT:    movprfx z28, z1
-; CHECK-NEXT:    frintx z28.s, p0/m, z1.s
 ; CHECK-NEXT:    mov w8, #1593835519 // =0x5effffff
-; CHECK-NEXT:    mov z0.d, #0x8000000000000000
 ; CHECK-NEXT:    frintx z24.s, p0/m, z24.s
-; CHECK-NEXT:    movprfx z29, z2
-; CHECK-NEXT:    frintx z29.s, p0/m, z2.s
 ; CHECK-NEXT:    frintx z25.s, p0/m, z25.s
-; CHECK-NEXT:    movprfx z30, z3
-; CHECK-NEXT:    frintx z30.s, p0/m, z3.s
+; CHECK-NEXT:    movprfx z31, z3
+; CHECK-NEXT:    frintx z31.s, p0/m, z3.s
+; CHECK-NEXT:    frintx z26.s, p0/m, z26.s
 ; CHECK-NEXT:    mov z27.s, w8
-; CHECK-NEXT:    fcmge p1.s, p0/z, z5.s, z4.s
-; CHECK-NEXT:    fcmge p2.s, p0/z, z6.s, z4.s
-; CHECK-NEXT:    movprfx z1, z5
-; CHECK-NEXT:    fcvtzs z1.d, p0/m, z5.s
-; CHECK-NEXT:    movprfx z2, z6
-; CHECK-NEXT:    fcvtzs z2.d, p0/m, z6.s
-; CHECK-NEXT:    fcmge p5.s, p0/z, z7.s, z4.s
-; CHECK-NEXT:    fcmge p6.s, p0/z, z28.s, z4.s
-; CHECK-NEXT:    movprfx z3, z7
-; CHECK-NEXT:    fcvtzs z3.d, p0/m, z7.s
-; CHECK-NEXT:    fcmge p8.s, p0/z, z29.s, z4.s
-; CHECK-NEXT:    fcmgt p3.s, p0/z, z5.s, z27.s
-; CHECK-NEXT:    fcmgt p7.s, p0/z, z6.s, z27.s
+; CHECK-NEXT:    mov z7.d, #0x7fffffffffffffff
+; CHECK-NEXT:    fcmge p2.s, p0/z, z5.s, z4.s
+; CHECK-NEXT:    movprfx z28, z5
+; CHECK-NEXT:    fcvtzs z28.d, p0/m, z5.s
+; CHECK-NEXT:    movprfx z29, z1
+; CHECK-NEXT:    fcvtzs z29.d, p0/m, z1.s
+; CHECK-NEXT:    fcmge p5.s, p0/z, z2.s, z4.s
+; CHECK-NEXT:    fcmge p4.s, p0/z, z1.s, z4.s
+; CHECK-NEXT:    movprfx z30, z2
+; CHECK-NEXT:    fcvtzs z30.d, p0/m, z2.s
+; CHECK-NEXT:    frintx z6.s, p0/m, z6.s
+; CHECK-NEXT:    fcmge p6.s, p0/z, z24.s, z4.s
+; CHECK-NEXT:    movprfx z8, z31
+; CHECK-NEXT:    fcvtzs z8.d, p0/m, z31.s
 ; CHECK-NEXT:    fcmge p9.s, p0/z, z25.s, z4.s
-; CHECK-NEXT:    movprfx z31, z25
-; CHECK-NEXT:    fcvtzs z31.d, p0/m, z25.s
-; CHECK-NEXT:    not p4.b, p0/z, p1.b
-; CHECK-NEXT:    fcmuo p1.s, p0/z, z5.s, z5.s
-; CHECK-NEXT:    movprfx z5, z28
-; CHECK-NEXT:    fcvtzs z5.d, p0/m, z28.s
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
-; CHECK-NEXT:    fcmge p10.s, p0/z, z30.s, z4.s
-; CHECK-NEXT:    movprfx z8, z30
-; CHECK-NEXT:    fcvtzs z8.d, p0/m, z30.s
-; CHECK-NEXT:    mov z1.d, p4/m, z0.d
-; CHECK-NEXT:    fcmge p4.s, p0/z, z24.s, z4.s
-; CHECK-NEXT:    movprfx z4, z29
-; CHECK-NEXT:    fcvtzs z4.d, p0/m, z29.s
-; CHECK-NEXT:    mov z2.d, p2/m, z0.d
-; CHECK-NEXT:    fcmuo p2.s, p0/z, z6.s, z6.s
-; CHECK-NEXT:    movprfx z6, z24
-; CHECK-NEXT:    fcvtzs z6.d, p0/m, z24.s
-; CHECK-NEXT:    not p5.b, p0/z, p5.b
-; CHECK-NEXT:    not p6.b, p0/z, p6.b
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
-; CHECK-NEXT:    mov z3.d, p5/m, z0.d
-; CHECK-NEXT:    not p5.b, p0/z, p8.b
-; CHECK-NEXT:    mov z5.d, p6/m, z0.d
-; CHECK-NEXT:    fcmgt p8.s, p0/z, z7.s, z27.s
-; CHECK-NEXT:    not p6.b, p0/z, p9.b
-; CHECK-NEXT:    mov z6.d, p4/m, z0.d
-; CHECK-NEXT:    fcmuo p9.s, p0/z, z7.s, z7.s
-; CHECK-NEXT:    not p4.b, p0/z, p10.b
-; CHECK-NEXT:    fcmgt p10.s, p0/z, z28.s, z27.s
-; CHECK-NEXT:    sel z7.d, p5, z0.d, z4.d
-; CHECK-NEXT:    fcmgt p5.s, p0/z, z24.s, z27.s
-; CHECK-NEXT:    mov z31.d, p6/m, z0.d
-; CHECK-NEXT:    fcmgt p6.s, p0/z, z30.s, z27.s
-; CHECK-NEXT:    mov z8.d, p4/m, z0.d
-; CHECK-NEXT:    sel z0.d, p3, z26.d, z1.d
-; CHECK-NEXT:    fcmgt p3.s, p0/z, z29.s, z27.s
-; CHECK-NEXT:    fcmgt p4.s, p0/z, z25.s, z27.s
-; CHECK-NEXT:    sel z1.d, p7, z26.d, z2.d
-; CHECK-NEXT:    fcmuo p7.s, p0/z, z28.s, z28.s
-; CHECK-NEXT:    sel z2.d, p8, z26.d, z3.d
-; CHECK-NEXT:    sel z3.d, p10, z26.d, z5.d
-; CHECK-NEXT:    fcmuo p8.s, p0/z, z29.s, z29.s
-; CHECK-NEXT:    sel z4.d, p5, z26.d, z6.d
-; CHECK-NEXT:    fcmuo p5.s, p0/z, z24.s, z24.s
-; CHECK-NEXT:    fcmuo p10.s, p0/z, z25.s, z25.s
-; CHECK-NEXT:    sel z5.d, p3, z26.d, z7.d
-; CHECK-NEXT:    fcmuo p0.s, p0/z, z30.s, z30.s
-; CHECK-NEXT:    sel z7.d, p6, z26.d, z8.d
-; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z6.d, p4, z26.d, z31.d
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    mov z2.d, p9/m, #0 // =0x0
-; CHECK-NEXT:    mov z3.d, p7/m, #0 // =0x0
-; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    mov z4.d, p5/m, #0 // =0x0
-; CHECK-NEXT:    mov z5.d, p8/m, #0 // =0x0
+; CHECK-NEXT:    fcmgt p3.s, p0/z, z5.s, z27.s
+; CHECK-NEXT:    fcmge p10.s, p0/z, z31.s, z4.s
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p1.b
+; CHECK-NEXT:    fcmgt p8.s, p0/z, z1.s, z27.s
+; CHECK-NEXT:    eor p5.b, p0/z, p5.b, p1.b
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p1.b
+; CHECK-NEXT:    mov z28.d, p2/m, z0.d
+; CHECK-NEXT:    fcmuo p2.s, p0/z, z5.s, z5.s
+; CHECK-NEXT:    fcmge p7.s, p0/z, z6.s, z4.s
+; CHECK-NEXT:    sel z5.d, p5, z0.d, z30.d
+; CHECK-NEXT:    movprfx z30, z6
+; CHECK-NEXT:    fcvtzs z30.d, p0/m, z6.s
+; CHECK-NEXT:    fcmge p5.s, p0/z, z26.s, z4.s
+; CHECK-NEXT:    movprfx z4, z26
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z26.s
+; CHECK-NEXT:    sel z3.d, p4, z0.d, z29.d
+; CHECK-NEXT:    movprfx z29, z24
+; CHECK-NEXT:    fcvtzs z29.d, p0/m, z24.s
+; CHECK-NEXT:    fcmuo p4.s, p0/z, z1.s, z1.s
+; CHECK-NEXT:    movprfx z1, z25
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z25.s
+; CHECK-NEXT:    eor p6.b, p0/z, p6.b, p1.b
+; CHECK-NEXT:    eor p9.b, p0/z, p9.b, p1.b
+; CHECK-NEXT:    eor p7.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    eor p5.b, p0/z, p5.b, p1.b
+; CHECK-NEXT:    mov z29.d, p6/m, z0.d
+; CHECK-NEXT:    fcmgt p6.s, p0/z, z2.s, z27.s
+; CHECK-NEXT:    eor p1.b, p0/z, p10.b, p1.b
+; CHECK-NEXT:    fcmgt p10.s, p0/z, z24.s, z27.s
+; CHECK-NEXT:    sel z9.d, p9, z0.d, z1.d
+; CHECK-NEXT:    fcmgt p9.s, p0/z, z6.s, z27.s
+; CHECK-NEXT:    mov z30.d, p7/m, z0.d
+; CHECK-NEXT:    sel z10.d, p5, z0.d, z4.d
+; CHECK-NEXT:    mov z8.d, p1/m, z0.d
+; CHECK-NEXT:    sel z0.d, p3, z7.d, z28.d
+; CHECK-NEXT:    fcmgt p1.s, p0/z, z25.s, z27.s
+; CHECK-NEXT:    fcmgt p3.s, p0/z, z26.s, z27.s
+; CHECK-NEXT:    fcmgt p5.s, p0/z, z31.s, z27.s
+; CHECK-NEXT:    fcmuo p7.s, p0/z, z2.s, z2.s
+; CHECK-NEXT:    sel z1.d, p8, z7.d, z3.d
+; CHECK-NEXT:    fcmuo p8.s, p0/z, z24.s, z24.s
+; CHECK-NEXT:    sel z2.d, p6, z7.d, z5.d
+; CHECK-NEXT:    sel z3.d, p10, z7.d, z29.d
+; CHECK-NEXT:    sel z4.d, p9, z7.d, z30.d
+; CHECK-NEXT:    fcmuo p6.s, p0/z, z6.s, z6.s
+; CHECK-NEXT:    fcmuo p9.s, p0/z, z25.s, z25.s
+; CHECK-NEXT:    fcmuo p10.s, p0/z, z26.s, z26.s
+; CHECK-NEXT:    sel z5.d, p1, z7.d, z9.d
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    sel z6.d, p3, z7.d, z10.d
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    fcmuo p0.s, p0/z, z31.s, z31.s
+; CHECK-NEXT:    sel z7.d, p5, z7.d, z8.d
+; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    mov z1.d, p4/m, #0 // =0x0
+; CHECK-NEXT:    mov z2.d, p7/m, #0 // =0x0
+; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    mov z3.d, p8/m, #0 // =0x0
+; CHECK-NEXT:    mov z4.d, p6/m, #0 // =0x0
 ; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    mov z5.d, p9/m, #0 // =0x0
 ; CHECK-NEXT:    mov z6.d, p10/m, #0 // =0x0
-; CHECK-NEXT:    mov z0.d, p1/m, #0 // =0x0
 ; CHECK-NEXT:    ldr p10, [sp, #1, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    mov z1.d, p2/m, #0 // =0x0
+; CHECK-NEXT:    mov z0.d, p2/m, #0 // =0x0
 ; CHECK-NEXT:    mov z7.d, p0/m, #0 // =0x0
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    addvl sp, sp, #2
+; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #4
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
   %a = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f32(<vscale x 16 x float> %x)
@@ -949,222 +963,221 @@ define <vscale x 32 x i64> @llrint_v32i64_v32f32(<vscale x 32 x float> %x) {
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
 ; CHECK-NEXT:    uunpklo z24.d, z0.s
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    uunpkhi z25.d, z0.s
 ; CHECK-NEXT:    mov w9, #-553648128 // =0xdf000000
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    uunpklo z26.d, z1.s
-; CHECK-NEXT:    uunpkhi z25.d, z0.s
-; CHECK-NEXT:    uunpkhi z28.d, z1.s
-; CHECK-NEXT:    mov z29.s, w9
+; CHECK-NEXT:    uunpkhi z27.d, z1.s
+; CHECK-NEXT:    mov z28.s, w9
 ; CHECK-NEXT:    mov w9, #1593835519 // =0x5effffff
-; CHECK-NEXT:    mov z17.d, z5.d
-; CHECK-NEXT:    mov z27.d, #0x8000000000000000
-; CHECK-NEXT:    uunpkhi z30.d, z2.s
-; CHECK-NEXT:    uunpklo z8.d, z3.s
-; CHECK-NEXT:    movprfx z0, z24
-; CHECK-NEXT:    frintx z0.s, p0/m, z24.s
-; CHECK-NEXT:    uunpkhi z9.d, z3.s
-; CHECK-NEXT:    uunpkhi z14.d, z4.s
+; CHECK-NEXT:    mov z16.d, z5.d
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    uunpkhi z29.d, z2.s
+; CHECK-NEXT:    mov z30.d, #0x8000000000000000
+; CHECK-NEXT:    movprfx z31, z24
+; CHECK-NEXT:    frintx z31.s, p0/m, z24.s
+; CHECK-NEXT:    movprfx z0, z25
+; CHECK-NEXT:    frintx z0.s, p0/m, z25.s
+; CHECK-NEXT:    mov z25.s, w9
 ; CHECK-NEXT:    movprfx z24, z26
 ; CHECK-NEXT:    frintx z24.s, p0/m, z26.s
-; CHECK-NEXT:    movprfx z1, z25
-; CHECK-NEXT:    frintx z1.s, p0/m, z25.s
-; CHECK-NEXT:    movprfx z5, z28
-; CHECK-NEXT:    frintx z5.s, p0/m, z28.s
 ; CHECK-NEXT:    uunpklo z26.d, z2.s
-; CHECK-NEXT:    uunpklo z16.d, z17.s
-; CHECK-NEXT:    mov z25.s, w9
-; CHECK-NEXT:    movprfx z28, z30
-; CHECK-NEXT:    frintx z28.s, p0/m, z30.s
-; CHECK-NEXT:    movprfx z30, z8
-; CHECK-NEXT:    frintx z30.s, p0/m, z8.s
-; CHECK-NEXT:    fcmge p1.s, p0/z, z0.s, z29.s
-; CHECK-NEXT:    movprfx z31, z0
-; CHECK-NEXT:    fcvtzs z31.d, p0/m, z0.s
-; CHECK-NEXT:    str z0, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    fcmge p2.s, p0/z, z1.s, z29.s
-; CHECK-NEXT:    fcmge p3.s, p0/z, z24.s, z29.s
-; CHECK-NEXT:    fcmge p5.s, p0/z, z5.s, z29.s
+; CHECK-NEXT:    movprfx z5, z27
+; CHECK-NEXT:    frintx z5.s, p0/m, z27.s
+; CHECK-NEXT:    uunpklo z9.d, z3.s
+; CHECK-NEXT:    uunpkhi z11.d, z3.s
+; CHECK-NEXT:    uunpkhi z13.d, z4.s
+; CHECK-NEXT:    movprfx z27, z29
+; CHECK-NEXT:    frintx z27.s, p0/m, z29.s
+; CHECK-NEXT:    uunpklo z15.d, z16.s
+; CHECK-NEXT:    uunpkhi z17.d, z6.s
+; CHECK-NEXT:    fcmge p2.s, p0/z, z31.s, z28.s
+; CHECK-NEXT:    str z31, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    movprfx z8, z0
+; CHECK-NEXT:    fcvtzs z8.d, p0/m, z0.s
+; CHECK-NEXT:    fcmge p3.s, p0/z, z0.s, z28.s
+; CHECK-NEXT:    fcvtzs z31.d, p0/m, z31.s
+; CHECK-NEXT:    movprfx z10, z24
+; CHECK-NEXT:    fcvtzs z10.d, p0/m, z24.s
+; CHECK-NEXT:    fcmge p4.s, p0/z, z24.s, z28.s
 ; CHECK-NEXT:    frintx z26.s, p0/m, z26.s
-; CHECK-NEXT:    movprfx z10, z1
-; CHECK-NEXT:    fcvtzs z10.d, p0/m, z1.s
-; CHECK-NEXT:    movprfx z11, z24
-; CHECK-NEXT:    fcvtzs z11.d, p0/m, z24.s
+; CHECK-NEXT:    str z0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    fcmge p6.s, p0/z, z5.s, z28.s
 ; CHECK-NEXT:    movprfx z12, z5
 ; CHECK-NEXT:    fcvtzs z12.d, p0/m, z5.s
-; CHECK-NEXT:    movprfx z15, z28
-; CHECK-NEXT:    fcvtzs z15.d, p0/m, z28.s
-; CHECK-NEXT:    str z1, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    not p4.b, p0/z, p1.b
-; CHECK-NEXT:    fcmgt p1.s, p0/z, z1.s, z25.s
-; CHECK-NEXT:    fcmgt p9.s, p0/z, z5.s, z25.s
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
-; CHECK-NEXT:    sel z0.d, p4, z27.d, z31.d
-; CHECK-NEXT:    fcmge p4.s, p0/z, z26.s, z29.s
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
-; CHECK-NEXT:    not p5.b, p0/z, p5.b
-; CHECK-NEXT:    movprfx z13, z26
-; CHECK-NEXT:    fcvtzs z13.d, p0/m, z26.s
-; CHECK-NEXT:    sel z31.d, p2, z27.d, z10.d
-; CHECK-NEXT:    uunpklo z10.d, z4.s
-; CHECK-NEXT:    sel z8.d, p3, z27.d, z11.d
-; CHECK-NEXT:    fcmge p3.s, p0/z, z28.s, z29.s
-; CHECK-NEXT:    sel z11.d, p5, z27.d, z12.d
-; CHECK-NEXT:    movprfx z4, z9
-; CHECK-NEXT:    frintx z4.s, p0/m, z9.s
-; CHECK-NEXT:    str z0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT:    not p5.b, p0/z, p4.b
-; CHECK-NEXT:    fcmge p4.s, p0/z, z30.s, z29.s
-; CHECK-NEXT:    fcmgt p2.s, p0/z, z24.s, z25.s
-; CHECK-NEXT:    sel z12.d, p5, z27.d, z13.d
-; CHECK-NEXT:    uunpkhi z13.d, z17.s
-; CHECK-NEXT:    movprfx z9, z10
-; CHECK-NEXT:    frintx z9.s, p0/m, z10.s
-; CHECK-NEXT:    movprfx z10, z14
-; CHECK-NEXT:    frintx z10.s, p0/m, z14.s
-; CHECK-NEXT:    uunpkhi z17.d, z6.s
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
-; CHECK-NEXT:    uunpklo z14.d, z6.s
-; CHECK-NEXT:    movprfx z6, z16
-; CHECK-NEXT:    frintx z6.s, p0/m, z16.s
-; CHECK-NEXT:    uunpklo z16.d, z7.s
-; CHECK-NEXT:    uunpkhi z7.d, z7.s
-; CHECK-NEXT:    sel z3.d, p3, z27.d, z15.d
-; CHECK-NEXT:    fcmge p3.s, p0/z, z4.s, z29.s
-; CHECK-NEXT:    frintx z13.s, p0/m, z13.s
-; CHECK-NEXT:    movprfx z15, z30
-; CHECK-NEXT:    fcvtzs z15.d, p0/m, z30.s
-; CHECK-NEXT:    fcmge p5.s, p0/z, z9.s, z29.s
-; CHECK-NEXT:    fcmge p6.s, p0/z, z10.s, z29.s
+; CHECK-NEXT:    movprfx z29, z9
+; CHECK-NEXT:    frintx z29.s, p0/m, z9.s
+; CHECK-NEXT:    eor p5.b, p0/z, p2.b, p1.b
+; CHECK-NEXT:    fcmgt p2.s, p0/z, z0.s, z25.s
+; CHECK-NEXT:    uunpklo z9.d, z4.s
+; CHECK-NEXT:    eor p3.b, p0/z, p3.b, p1.b
+; CHECK-NEXT:    movprfx z4, z11
+; CHECK-NEXT:    frintx z4.s, p0/m, z11.s
+; CHECK-NEXT:    movprfx z14, z27
+; CHECK-NEXT:    fcvtzs z14.d, p0/m, z27.s
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p1.b
+; CHECK-NEXT:    sel z0.d, p5, z30.d, z31.d
+; CHECK-NEXT:    fcmge p5.s, p0/z, z26.s, z28.s
+; CHECK-NEXT:    sel z31.d, p3, z30.d, z8.d
+; CHECK-NEXT:    eor p6.b, p0/z, p6.b, p1.b
 ; CHECK-NEXT:    frintx z17.s, p0/m, z17.s
+; CHECK-NEXT:    sel z8.d, p4, z30.d, z10.d
+; CHECK-NEXT:    movprfx z10, z26
+; CHECK-NEXT:    fcvtzs z10.d, p0/m, z26.s
+; CHECK-NEXT:    fcmge p4.s, p0/z, z27.s, z28.s
+; CHECK-NEXT:    sel z11.d, p6, z30.d, z12.d
 ; CHECK-NEXT:    movprfx z18, z4
 ; CHECK-NEXT:    fcvtzs z18.d, p0/m, z4.s
+; CHECK-NEXT:    str z0, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    eor p6.b, p0/z, p5.b, p1.b
+; CHECK-NEXT:    frintx z9.s, p0/m, z9.s
+; CHECK-NEXT:    fcmge p5.s, p0/z, z29.s, z28.s
+; CHECK-NEXT:    movprfx z0, z17
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z17.s
+; CHECK-NEXT:    fcmgt p3.s, p0/z, z24.s, z25.s
+; CHECK-NEXT:    sel z12.d, p6, z30.d, z10.d
+; CHECK-NEXT:    movprfx z10, z13
+; CHECK-NEXT:    frintx z10.s, p0/m, z13.s
+; CHECK-NEXT:    uunpkhi z13.d, z16.s
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p1.b
+; CHECK-NEXT:    movprfx z16, z29
+; CHECK-NEXT:    fcvtzs z16.d, p0/m, z29.s
+; CHECK-NEXT:    fcmge p6.s, p0/z, z9.s, z28.s
+; CHECK-NEXT:    eor p5.b, p0/z, p5.b, p1.b
+; CHECK-NEXT:    sel z3.d, p4, z30.d, z14.d
+; CHECK-NEXT:    uunpklo z14.d, z6.s
+; CHECK-NEXT:    movprfx z6, z15
+; CHECK-NEXT:    frintx z6.s, p0/m, z15.s
+; CHECK-NEXT:    uunpklo z15.d, z7.s
+; CHECK-NEXT:    frintx z13.s, p0/m, z13.s
+; CHECK-NEXT:    uunpkhi z7.d, z7.s
+; CHECK-NEXT:    fcmge p4.s, p0/z, z4.s, z28.s
+; CHECK-NEXT:    fcmge p7.s, p0/z, z10.s, z28.s
 ; CHECK-NEXT:    movprfx z20, z10
 ; CHECK-NEXT:    fcvtzs z20.d, p0/m, z10.s
-; CHECK-NEXT:    frintx z16.s, p0/m, z16.s
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
+; CHECK-NEXT:    eor p6.b, p0/z, p6.b, p1.b
+; CHECK-NEXT:    mov z16.d, p5/m, z30.d
+; CHECK-NEXT:    fcmge p5.s, p0/z, z17.s, z28.s
 ; CHECK-NEXT:    movprfx z19, z14
 ; CHECK-NEXT:    frintx z19.s, p0/m, z14.s
 ; CHECK-NEXT:    movprfx z14, z9
 ; CHECK-NEXT:    fcvtzs z14.d, p0/m, z9.s
-; CHECK-NEXT:    fcmge p7.s, p0/z, z6.s, z29.s
-; CHECK-NEXT:    fcmge p8.s, p0/z, z13.s, z29.s
+; CHECK-NEXT:    fcmge p8.s, p0/z, z6.s, z28.s
+; CHECK-NEXT:    frintx z15.s, p0/m, z15.s
+; CHECK-NEXT:    fcmge p9.s, p0/z, z13.s, z28.s
 ; CHECK-NEXT:    movprfx z21, z7
 ; CHECK-NEXT:    frintx z21.s, p0/m, z7.s
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
-; CHECK-NEXT:    not p6.b, p0/z, p6.b
-; CHECK-NEXT:    mov z15.d, p4/m, z27.d
-; CHECK-NEXT:    fcmge p4.s, p0/z, z17.s, z29.s
-; CHECK-NEXT:    not p5.b, p0/z, p5.b
-; CHECK-NEXT:    sel z7.d, p3, z27.d, z18.d
-; CHECK-NEXT:    movprfx z0, z17
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z17.s
-; CHECK-NEXT:    sel z18.d, p6, z27.d, z20.d
-; CHECK-NEXT:    movprfx z20, z6
-; CHECK-NEXT:    fcvtzs z20.d, p0/m, z6.s
-; CHECK-NEXT:    fcmge p6.s, p0/z, z16.s, z29.s
-; CHECK-NEXT:    fcmge p3.s, p0/z, z19.s, z29.s
-; CHECK-NEXT:    mov z14.d, p5/m, z27.d
-; CHECK-NEXT:    not p5.b, p0/z, p7.b
-; CHECK-NEXT:    not p7.b, p0/z, p8.b
-; CHECK-NEXT:    fcmge p8.s, p0/z, z21.s, z29.s
-; CHECK-NEXT:    movprfx z1, z16
-; CHECK-NEXT:    fcvtzs z1.d, p0/m, z16.s
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p1.b
 ; CHECK-NEXT:    movprfx z22, z13
 ; CHECK-NEXT:    fcvtzs z22.d, p0/m, z13.s
-; CHECK-NEXT:    movprfx z23, z19
-; CHECK-NEXT:    fcvtzs z23.d, p0/m, z19.s
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
+; CHECK-NEXT:    eor p7.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    sel z7.d, p4, z30.d, z18.d
+; CHECK-NEXT:    fcmge p4.s, p0/z, z19.s, z28.s
+; CHECK-NEXT:    mov z14.d, p6/m, z30.d
+; CHECK-NEXT:    sel z18.d, p7, z30.d, z20.d
+; CHECK-NEXT:    eor p6.b, p0/z, p8.b, p1.b
+; CHECK-NEXT:    fcmge p7.s, p0/z, z15.s, z28.s
+; CHECK-NEXT:    eor p8.b, p0/z, p9.b, p1.b
+; CHECK-NEXT:    fcmge p9.s, p0/z, z21.s, z28.s
 ; CHECK-NEXT:    movprfx z2, z21
 ; CHECK-NEXT:    fcvtzs z2.d, p0/m, z21.s
-; CHECK-NEXT:    mov z29.d, #0x7fffffffffffffff
-; CHECK-NEXT:    mov z20.d, p5/m, z27.d
-; CHECK-NEXT:    not p5.b, p0/z, p6.b
-; CHECK-NEXT:    mov z0.d, p4/m, z27.d
-; CHECK-NEXT:    fcmgt p4.s, p0/z, z16.s, z25.s
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
-; CHECK-NEXT:    not p6.b, p0/z, p8.b
-; CHECK-NEXT:    mov z1.d, p5/m, z27.d
-; CHECK-NEXT:    mov z22.d, p7/m, z27.d
-; CHECK-NEXT:    mov z23.d, p3/m, z27.d
-; CHECK-NEXT:    fcmgt p3.s, p0/z, z21.s, z25.s
-; CHECK-NEXT:    fcmuo p5.s, p0/z, z16.s, z16.s
-; CHECK-NEXT:    mov z2.d, p6/m, z27.d
-; CHECK-NEXT:    sel z27.d, p1, z29.d, z31.d
-; CHECK-NEXT:    fcmgt p1.s, p0/z, z17.s, z25.s
-; CHECK-NEXT:    mov z1.d, p4/m, z29.d
-; CHECK-NEXT:    fcmgt p6.s, p0/z, z26.s, z25.s
-; CHECK-NEXT:    fcmgt p7.s, p0/z, z30.s, z25.s
-; CHECK-NEXT:    sel z31.d, p2, z29.d, z8.d
-; CHECK-NEXT:    fcmgt p2.s, p0/z, z13.s, z25.s
-; CHECK-NEXT:    fcmuo p8.s, p0/z, z21.s, z21.s
-; CHECK-NEXT:    mov z2.d, p3/m, z29.d
+; CHECK-NEXT:    movprfx z1, z15
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z15.s
+; CHECK-NEXT:    movprfx z20, z6
+; CHECK-NEXT:    fcvtzs z20.d, p0/m, z6.s
+; CHECK-NEXT:    movprfx z23, z19
+; CHECK-NEXT:    fcvtzs z23.d, p0/m, z19.s
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p1.b
+; CHECK-NEXT:    mov z28.d, #0x7fffffffffffffff
+; CHECK-NEXT:    mov z22.d, p8/m, z30.d
+; CHECK-NEXT:    eor p5.b, p0/z, p5.b, p1.b
+; CHECK-NEXT:    fcmgt p8.s, p0/z, z29.s, z25.s
+; CHECK-NEXT:    eor p7.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    eor p1.b, p0/z, p9.b, p1.b
+; CHECK-NEXT:    mov z20.d, p6/m, z30.d
+; CHECK-NEXT:    mov z23.d, p4/m, z30.d
+; CHECK-NEXT:    mov z1.d, p7/m, z30.d
+; CHECK-NEXT:    mov z0.d, p5/m, z30.d
+; CHECK-NEXT:    fcmgt p4.s, p0/z, z21.s, z25.s
+; CHECK-NEXT:    mov z2.d, p1/m, z30.d
+; CHECK-NEXT:    fcmgt p1.s, p0/z, z15.s, z25.s
+; CHECK-NEXT:    sel z30.d, p2, z28.d, z31.d
+; CHECK-NEXT:    fcmgt p2.s, p0/z, z17.s, z25.s
+; CHECK-NEXT:    fcmuo p5.s, p0/z, z15.s, z15.s
+; CHECK-NEXT:    fcmgt p6.s, p0/z, z5.s, z25.s
+; CHECK-NEXT:    fcmgt p7.s, p0/z, z26.s, z25.s
+; CHECK-NEXT:    fcmuo p9.s, p0/z, z21.s, z21.s
+; CHECK-NEXT:    sel z31.d, p3, z28.d, z8.d
+; CHECK-NEXT:    fcmgt p3.s, p0/z, z13.s, z25.s
+; CHECK-NEXT:    mov z2.d, p4/m, z28.d
 ; CHECK-NEXT:    fcmuo p4.s, p0/z, z17.s, z17.s
-; CHECK-NEXT:    fcmgt p3.s, p0/z, z19.s, z25.s
-; CHECK-NEXT:    mov z0.d, p1/m, z29.d
-; CHECK-NEXT:    fcmgt p1.s, p0/z, z6.s, z25.s
+; CHECK-NEXT:    mov z1.d, p1/m, z28.d
+; CHECK-NEXT:    fcmgt p1.s, p0/z, z19.s, z25.s
+; CHECK-NEXT:    mov z0.d, p2/m, z28.d
+; CHECK-NEXT:    fcmgt p2.s, p0/z, z6.s, z25.s
+; CHECK-NEXT:    sel z8.d, p6, z28.d, z11.d
+; CHECK-NEXT:    sel z11.d, p7, z28.d, z12.d
+; CHECK-NEXT:    sel z12.d, p8, z28.d, z16.d
+; CHECK-NEXT:    mov z2.d, p9/m, #0 // =0x0
 ; CHECK-NEXT:    mov z1.d, p5/m, #0 // =0x0
-; CHECK-NEXT:    sel z8.d, p9, z29.d, z11.d
-; CHECK-NEXT:    sel z11.d, p6, z29.d, z12.d
-; CHECK-NEXT:    sel z12.d, p7, z29.d, z15.d
 ; CHECK-NEXT:    fcmgt p5.s, p0/z, z10.s, z25.s
-; CHECK-NEXT:    sel z15.d, p2, z29.d, z22.d
-; CHECK-NEXT:    fcmuo p2.s, p0/z, z13.s, z13.s
-; CHECK-NEXT:    str z1, [x8, #14, mul vl]
-; CHECK-NEXT:    mov z2.d, p8/m, #0 // =0x0
+; CHECK-NEXT:    sel z15.d, p3, z28.d, z22.d
+; CHECK-NEXT:    sel z16.d, p1, z28.d, z23.d
+; CHECK-NEXT:    fcmuo p1.s, p0/z, z13.s, z13.s
 ; CHECK-NEXT:    mov z0.d, p4/m, #0 // =0x0
-; CHECK-NEXT:    sel z1.d, p1, z29.d, z20.d
-; CHECK-NEXT:    fcmgt p1.s, p0/z, z9.s, z25.s
+; CHECK-NEXT:    str z2, [x8, #15, mul vl]
 ; CHECK-NEXT:    fcmuo p6.s, p0/z, z19.s, z19.s
-; CHECK-NEXT:    sel z16.d, p3, z29.d, z23.d
 ; CHECK-NEXT:    fcmuo p3.s, p0/z, z6.s, z6.s
+; CHECK-NEXT:    str z1, [x8, #14, mul vl]
+; CHECK-NEXT:    sel z1.d, p2, z28.d, z20.d
+; CHECK-NEXT:    fcmgt p2.s, p0/z, z9.s, z25.s
+; CHECK-NEXT:    str z0, [x8, #13, mul vl]
+; CHECK-NEXT:    sel z2.d, p5, z28.d, z18.d
 ; CHECK-NEXT:    fcmgt p4.s, p0/z, z4.s, z25.s
-; CHECK-NEXT:    str z2, [x8, #15, mul vl]
-; CHECK-NEXT:    sel z2.d, p5, z29.d, z18.d
 ; CHECK-NEXT:    fcmuo p5.s, p0/z, z10.s, z10.s
-; CHECK-NEXT:    str z0, [x8, #13, mul vl]
-; CHECK-NEXT:    mov z15.d, p2/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p2.s, p0/z, z9.s, z9.s
-; CHECK-NEXT:    sel z0.d, p1, z29.d, z14.d
+; CHECK-NEXT:    mov z15.d, p1/m, #0 // =0x0
+; CHECK-NEXT:    fcmuo p1.s, p0/z, z9.s, z9.s
 ; CHECK-NEXT:    mov z16.d, p6/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p1.s, p0/z, z4.s, z4.s
 ; CHECK-NEXT:    mov z1.d, p3/m, #0 // =0x0
-; CHECK-NEXT:    fcmgt p3.s, p0/z, z28.s, z25.s
-; CHECK-NEXT:    sel z4.d, p4, z29.d, z7.d
+; CHECK-NEXT:    fcmgt p3.s, p0/z, z27.s, z25.s
+; CHECK-NEXT:    sel z0.d, p2, z28.d, z14.d
+; CHECK-NEXT:    fcmuo p2.s, p0/z, z4.s, z4.s
 ; CHECK-NEXT:    str z15, [x8, #11, mul vl]
-; CHECK-NEXT:    mov z2.d, p5/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p5.s, p0/z, z28.s, z28.s
+; CHECK-NEXT:    sel z4.d, p4, z28.d, z7.d
+; CHECK-NEXT:    fcmuo p4.s, p0/z, z29.s, z29.s
 ; CHECK-NEXT:    str z16, [x8, #12, mul vl]
-; CHECK-NEXT:    mov z0.d, p2/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p4.s, p0/z, z30.s, z30.s
+; CHECK-NEXT:    mov z2.d, p5/m, #0 // =0x0
+; CHECK-NEXT:    fcmuo p5.s, p0/z, z27.s, z27.s
 ; CHECK-NEXT:    str z1, [x8, #10, mul vl]
-; CHECK-NEXT:    mov z4.d, p1/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p1.s, p0/z, z5.s, z5.s
-; CHECK-NEXT:    sel z1.d, p3, z29.d, z3.d
+; CHECK-NEXT:    mov z0.d, p1/m, #0 // =0x0
+; CHECK-NEXT:    sel z1.d, p3, z28.d, z3.d
 ; CHECK-NEXT:    ldr z3, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    str z2, [x8, #9, mul vl]
-; CHECK-NEXT:    str z0, [x8, #8, mul vl]
+; CHECK-NEXT:    mov z4.d, p2/m, #0 // =0x0
 ; CHECK-NEXT:    fcmuo p3.s, p0/z, z26.s, z26.s
-; CHECK-NEXT:    ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    str z4, [x8, #7, mul vl]
+; CHECK-NEXT:    str z2, [x8, #9, mul vl]
+; CHECK-NEXT:    fcmuo p2.s, p0/z, z5.s, z5.s
 ; CHECK-NEXT:    mov z12.d, p4/m, #0 // =0x0
-; CHECK-NEXT:    fcmgt p2.s, p0/z, z3.s, z25.s
+; CHECK-NEXT:    str z0, [x8, #8, mul vl]
 ; CHECK-NEXT:    mov z1.d, p5/m, #0 // =0x0
 ; CHECK-NEXT:    fcmuo p4.s, p0/z, z24.s, z24.s
-; CHECK-NEXT:    mov z8.d, p1/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p5.s, p0/z, z0.s, z0.s
 ; CHECK-NEXT:    ldr z0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    str z4, [x8, #7, mul vl]
+; CHECK-NEXT:    fcmgt p1.s, p0/z, z3.s, z25.s
+; CHECK-NEXT:    mov z11.d, p3/m, #0 // =0x0
+; CHECK-NEXT:    mov z8.d, p2/m, #0 // =0x0
+; CHECK-NEXT:    fcmuo p5.s, p0/z, z0.s, z0.s
+; CHECK-NEXT:    ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    str z12, [x8, #6, mul vl]
 ; CHECK-NEXT:    str z1, [x8, #5, mul vl]
 ; CHECK-NEXT:    fcmuo p0.s, p0/z, z3.s, z3.s
-; CHECK-NEXT:    mov z11.d, p3/m, #0 // =0x0
-; CHECK-NEXT:    str z8, [x8, #3, mul vl]
 ; CHECK-NEXT:    mov z31.d, p4/m, #0 // =0x0
-; CHECK-NEXT:    mov z0.d, p2/m, z29.d
 ; CHECK-NEXT:    str z11, [x8, #4, mul vl]
-; CHECK-NEXT:    mov z27.d, p5/m, #0 // =0x0
+; CHECK-NEXT:    str z8, [x8, #3, mul vl]
+; CHECK-NEXT:    mov z0.d, p1/m, z28.d
 ; CHECK-NEXT:    str z31, [x8, #2, mul vl]
+; CHECK-NEXT:    mov z30.d, p5/m, #0 // =0x0
 ; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
-; CHECK-NEXT:    str z27, [x8, #1, mul vl]
+; CHECK-NEXT:    str z30, [x8, #1, mul vl]
 ; CHECK-NEXT:    str z0, [x8]
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
@@ -1205,14 +1218,15 @@ define <vscale x 1 x i64> @llrint_v1i64_v1f64(<vscale x 1 x double> %x) {
 ; CHECK-NEXT:    mov z2.d, #0x8000000000000000
 ; CHECK-NEXT:    mov z1.d, x8
 ; CHECK-NEXT:    mov x8, #4890909195324358655 // =0x43dfffffffffffff
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
 ; CHECK-NEXT:    mov z3.d, x8
 ; CHECK-NEXT:    fcmge p1.d, p0/z, z0.d, z1.d
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z0.d
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p2.d, p0/z, z0.d, z3.d
 ; CHECK-NEXT:    mov z3.d, #0x7fffffffffffffff
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
 ; CHECK-NEXT:    fcmuo p0.d, p0/z, z0.d, z0.d
 ; CHECK-NEXT:    mov z1.d, p1/m, z2.d
 ; CHECK-NEXT:    sel z0.d, p2, z3.d, z1.d
@@ -1231,14 +1245,15 @@ define <vscale x 2 x i64> @llrint_v2i64_v2f64(<vscale x 2 x double> %x) {
 ; CHECK-NEXT:    mov z2.d, #0x8000000000000000
 ; CHECK-NEXT:    mov z1.d, x8
 ; CHECK-NEXT:    mov x8, #4890909195324358655 // =0x43dfffffffffffff
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
 ; CHECK-NEXT:    mov z3.d, x8
 ; CHECK-NEXT:    fcmge p1.d, p0/z, z0.d, z1.d
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z0.d
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p2.d, p0/z, z0.d, z3.d
 ; CHECK-NEXT:    mov z3.d, #0x7fffffffffffffff
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
 ; CHECK-NEXT:    fcmuo p0.d, p0/z, z0.d, z0.d
 ; CHECK-NEXT:    mov z1.d, p1/m, z2.d
 ; CHECK-NEXT:    sel z0.d, p2, z3.d, z1.d
@@ -1262,6 +1277,7 @@ define <vscale x 4 x i64> @llrint_v4i64_v4f64(<vscale x 4 x double> %x) {
 ; CHECK-NEXT:    mov z6.d, #0x7fffffffffffffff
 ; CHECK-NEXT:    mov z2.d, x8
 ; CHECK-NEXT:    mov x8, #4890909195324358655 // =0x43dfffffffffffff
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
 ; CHECK-NEXT:    frintx z1.d, p0/m, z1.d
 ; CHECK-NEXT:    mov z3.d, x8
@@ -1272,10 +1288,10 @@ define <vscale x 4 x i64> @llrint_v4i64_v4f64(<vscale x 4 x double> %x) {
 ; CHECK-NEXT:    fcvtzs z4.d, p0/m, z0.d
 ; CHECK-NEXT:    movprfx z5, z1
 ; CHECK-NEXT:    fcvtzs z5.d, p0/m, z1.d
-; CHECK-NEXT:    fcmgt p3.d, p0/z, z0.d, z3.d
 ; CHECK-NEXT:    fcmgt p4.d, p0/z, z1.d, z3.d
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
+; CHECK-NEXT:    fcmgt p3.d, p0/z, z0.d, z3.d
 ; CHECK-NEXT:    sel z3.d, p1, z2.d, z4.d
 ; CHECK-NEXT:    fcmuo p1.d, p0/z, z0.d, z0.d
 ; CHECK-NEXT:    fcmuo p0.d, p0/z, z1.d, z1.d
@@ -1309,51 +1325,52 @@ define <vscale x 8 x i64> @llrint_v8i64_v8f64(<vscale x 8 x double> %x) {
 ; CHECK-NEXT:    mov z5.d, #0x8000000000000000
 ; CHECK-NEXT:    mov z4.d, x8
 ; CHECK-NEXT:    mov x8, #4890909195324358655 // =0x43dfffffffffffff
-; CHECK-NEXT:    mov z26.d, #0x7fffffffffffffff
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
-; CHECK-NEXT:    frintx z1.d, p0/m, z1.d
 ; CHECK-NEXT:    frintx z2.d, p0/m, z2.d
+; CHECK-NEXT:    frintx z1.d, p0/m, z1.d
 ; CHECK-NEXT:    frintx z3.d, p0/m, z3.d
 ; CHECK-NEXT:    mov z6.d, x8
+; CHECK-NEXT:    mov z26.d, #0x7fffffffffffffff
 ; CHECK-NEXT:    fcmge p1.d, p0/z, z0.d, z4.d
-; CHECK-NEXT:    fcmge p2.d, p0/z, z1.d, z4.d
-; CHECK-NEXT:    fcmge p3.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    fcmge p4.d, p0/z, z3.d, z4.d
-; CHECK-NEXT:    movprfx z4, z0
-; CHECK-NEXT:    fcvtzs z4.d, p0/m, z0.d
-; CHECK-NEXT:    movprfx z7, z1
-; CHECK-NEXT:    fcvtzs z7.d, p0/m, z1.d
+; CHECK-NEXT:    fcmge p4.d, p0/z, z2.d, z4.d
+; CHECK-NEXT:    fcmge p3.d, p0/z, z1.d, z4.d
+; CHECK-NEXT:    fcmge p5.d, p0/z, z3.d, z4.d
+; CHECK-NEXT:    movprfx z7, z0
+; CHECK-NEXT:    fcvtzs z7.d, p0/m, z0.d
 ; CHECK-NEXT:    movprfx z24, z2
 ; CHECK-NEXT:    fcvtzs z24.d, p0/m, z2.d
+; CHECK-NEXT:    movprfx z4, z1
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z1.d
 ; CHECK-NEXT:    movprfx z25, z3
 ; CHECK-NEXT:    fcvtzs z25.d, p0/m, z3.d
-; CHECK-NEXT:    fcmgt p7.d, p0/z, z2.d, z6.d
-; CHECK-NEXT:    fcmgt p5.d, p0/z, z0.d, z6.d
-; CHECK-NEXT:    fcmgt p6.d, p0/z, z1.d, z6.d
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
-; CHECK-NEXT:    mov z4.d, p1/m, z5.d
-; CHECK-NEXT:    fcmgt p1.d, p0/z, z3.d, z6.d
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
-; CHECK-NEXT:    sel z6.d, p2, z5.d, z7.d
-; CHECK-NEXT:    fcmuo p2.d, p0/z, z0.d, z0.d
-; CHECK-NEXT:    sel z7.d, p3, z5.d, z24.d
-; CHECK-NEXT:    fcmuo p3.d, p0/z, z1.d, z1.d
-; CHECK-NEXT:    sel z5.d, p4, z5.d, z25.d
+; CHECK-NEXT:    fcmgt p6.d, p0/z, z0.d, z6.d
+; CHECK-NEXT:    fcmgt p7.d, p0/z, z3.d, z6.d
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p2.b
+; CHECK-NEXT:    eor p3.b, p0/z, p3.b, p2.b
+; CHECK-NEXT:    mov z7.d, p1/m, z5.d
+; CHECK-NEXT:    fcmgt p1.d, p0/z, z1.d, z6.d
+; CHECK-NEXT:    eor p2.b, p0/z, p5.b, p2.b
+; CHECK-NEXT:    fcmgt p5.d, p0/z, z2.d, z6.d
+; CHECK-NEXT:    sel z6.d, p4, z5.d, z24.d
+; CHECK-NEXT:    mov z4.d, p3/m, z5.d
 ; CHECK-NEXT:    fcmuo p4.d, p0/z, z2.d, z2.d
+; CHECK-NEXT:    fcmuo p3.d, p0/z, z1.d, z1.d
+; CHECK-NEXT:    sel z5.d, p2, z5.d, z25.d
+; CHECK-NEXT:    fcmuo p2.d, p0/z, z0.d, z0.d
 ; CHECK-NEXT:    fcmuo p0.d, p0/z, z3.d, z3.d
-; CHECK-NEXT:    sel z0.d, p5, z26.d, z4.d
-; CHECK-NEXT:    sel z1.d, p6, z26.d, z6.d
+; CHECK-NEXT:    sel z0.d, p6, z26.d, z7.d
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z2.d, p7, z26.d, z7.d
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z3.d, p1, z26.d, z5.d
+; CHECK-NEXT:    sel z2.d, p5, z26.d, z6.d
+; CHECK-NEXT:    sel z1.d, p1, z26.d, z4.d
 ; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    sel z3.d, p7, z26.d, z5.d
+; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    mov z0.d, p2/m, #0 // =0x0
-; CHECK-NEXT:    mov z1.d, p3/m, #0 // =0x0
 ; CHECK-NEXT:    mov z2.d, p4/m, #0 // =0x0
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    mov z1.d, p3/m, #0 // =0x0
 ; CHECK-NEXT:    mov z3.d, p0/m, #0 // =0x0
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1382,101 +1399,103 @@ define <vscale x 16 x i64> @llrint_v16f64(<vscale x 16 x double> %x) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov x8, #-4332462841530417152 // =0xc3e0000000000000
 ; CHECK-NEXT:    mov z24.d, #0x7fffffffffffffff
-; CHECK-NEXT:    mov z25.d, x8
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    movprfx z28, z0
+; CHECK-NEXT:    frintx z28.d, p0/m, z0.d
+; CHECK-NEXT:    mov z0.d, x8
+; CHECK-NEXT:    movprfx z26, z2
+; CHECK-NEXT:    frintx z26.d, p0/m, z2.d
+; CHECK-NEXT:    movprfx z25, z1
+; CHECK-NEXT:    frintx z25.d, p0/m, z1.d
+; CHECK-NEXT:    mov z2.d, #0x8000000000000000
 ; CHECK-NEXT:    mov x8, #4890909195324358655 // =0x43dfffffffffffff
-; CHECK-NEXT:    movprfx z26, z0
-; CHECK-NEXT:    frintx z26.d, p0/m, z0.d
-; CHECK-NEXT:    movprfx z27, z1
-; CHECK-NEXT:    frintx z27.d, p0/m, z1.d
-; CHECK-NEXT:    frintx z2.d, p0/m, z2.d
-; CHECK-NEXT:    mov z0.d, #0x8000000000000000
-; CHECK-NEXT:    mov z1.d, x8
 ; CHECK-NEXT:    frintx z3.d, p0/m, z3.d
-; CHECK-NEXT:    movprfx z28, z4
-; CHECK-NEXT:    frintx z28.d, p0/m, z4.d
+; CHECK-NEXT:    movprfx z27, z4
+; CHECK-NEXT:    frintx z27.d, p0/m, z4.d
 ; CHECK-NEXT:    frintx z5.d, p0/m, z5.d
+; CHECK-NEXT:    mov z1.d, x8
 ; CHECK-NEXT:    frintx z6.d, p0/m, z6.d
 ; CHECK-NEXT:    frintx z7.d, p0/m, z7.d
-; CHECK-NEXT:    fcmge p1.d, p0/z, z26.d, z25.d
-; CHECK-NEXT:    fcmge p2.d, p0/z, z27.d, z25.d
-; CHECK-NEXT:    movprfx z4, z26
-; CHECK-NEXT:    fcvtzs z4.d, p0/m, z26.d
-; CHECK-NEXT:    fcmge p5.d, p0/z, z2.d, z25.d
-; CHECK-NEXT:    movprfx z29, z27
-; CHECK-NEXT:    fcvtzs z29.d, p0/m, z27.d
-; CHECK-NEXT:    fcmgt p3.d, p0/z, z26.d, z1.d
-; CHECK-NEXT:    fcmge p6.d, p0/z, z3.d, z25.d
-; CHECK-NEXT:    fcmge p8.d, p0/z, z5.d, z25.d
-; CHECK-NEXT:    fcmgt p7.d, p0/z, z27.d, z1.d
-; CHECK-NEXT:    fcmge p9.d, p0/z, z6.d, z25.d
-; CHECK-NEXT:    movprfx z30, z28
-; CHECK-NEXT:    fcvtzs z30.d, p0/m, z28.d
-; CHECK-NEXT:    fcmge p10.d, p0/z, z7.d, z25.d
-; CHECK-NEXT:    not p4.b, p0/z, p1.b
-; CHECK-NEXT:    fcmuo p1.d, p0/z, z26.d, z26.d
-; CHECK-NEXT:    movprfx z26, z2
-; CHECK-NEXT:    fcvtzs z26.d, p0/m, z2.d
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
-; CHECK-NEXT:    movprfx z31, z6
-; CHECK-NEXT:    fcvtzs z31.d, p0/m, z6.d
+; CHECK-NEXT:    fcmge p3.d, p0/z, z28.d, z0.d
+; CHECK-NEXT:    movprfx z4, z28
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z28.d
+; CHECK-NEXT:    fcmge p6.d, p0/z, z26.d, z0.d
+; CHECK-NEXT:    fcmge p5.d, p0/z, z25.d, z0.d
+; CHECK-NEXT:    movprfx z29, z25
+; CHECK-NEXT:    fcvtzs z29.d, p0/m, z25.d
+; CHECK-NEXT:    fcmuo p2.d, p0/z, z28.d, z28.d
+; CHECK-NEXT:    fcmgt p4.d, p0/z, z28.d, z1.d
+; CHECK-NEXT:    movprfx z28, z26
+; CHECK-NEXT:    fcvtzs z28.d, p0/m, z26.d
+; CHECK-NEXT:    fcmge p7.d, p0/z, z3.d, z0.d
+; CHECK-NEXT:    fcmge p8.d, p0/z, z5.d, z0.d
+; CHECK-NEXT:    fcmge p9.d, p0/z, z6.d, z0.d
+; CHECK-NEXT:    fcmge p10.d, p0/z, z7.d, z0.d
+; CHECK-NEXT:    eor p3.b, p0/z, p3.b, p1.b
+; CHECK-NEXT:    movprfx z30, z3
+; CHECK-NEXT:    fcvtzs z30.d, p0/m, z3.d
+; CHECK-NEXT:    movprfx z31, z27
+; CHECK-NEXT:    fcvtzs z31.d, p0/m, z27.d
+; CHECK-NEXT:    eor p5.b, p0/z, p5.b, p1.b
 ; CHECK-NEXT:    movprfx z8, z7
 ; CHECK-NEXT:    fcvtzs z8.d, p0/m, z7.d
-; CHECK-NEXT:    mov z4.d, p4/m, z0.d
-; CHECK-NEXT:    fcmge p4.d, p0/z, z28.d, z25.d
-; CHECK-NEXT:    not p5.b, p0/z, p5.b
-; CHECK-NEXT:    mov z29.d, p2/m, z0.d
-; CHECK-NEXT:    fcmuo p2.d, p0/z, z27.d, z27.d
-; CHECK-NEXT:    movprfx z27, z3
-; CHECK-NEXT:    fcvtzs z27.d, p0/m, z3.d
-; CHECK-NEXT:    sel z25.d, p5, z0.d, z26.d
-; CHECK-NEXT:    movprfx z26, z5
-; CHECK-NEXT:    fcvtzs z26.d, p0/m, z5.d
-; CHECK-NEXT:    not p6.b, p0/z, p6.b
-; CHECK-NEXT:    not p5.b, p0/z, p8.b
-; CHECK-NEXT:    fcmgt p8.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
-; CHECK-NEXT:    mov z27.d, p6/m, z0.d
-; CHECK-NEXT:    not p6.b, p0/z, p9.b
-; CHECK-NEXT:    fcmuo p9.d, p0/z, z2.d, z2.d
-; CHECK-NEXT:    mov z30.d, p4/m, z0.d
-; CHECK-NEXT:    not p4.b, p0/z, p10.b
+; CHECK-NEXT:    mov z4.d, p3/m, z2.d
+; CHECK-NEXT:    eor p3.b, p0/z, p6.b, p1.b
+; CHECK-NEXT:    fcmge p6.d, p0/z, z27.d, z0.d
+; CHECK-NEXT:    movprfx z0, z5
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z5.d
+; CHECK-NEXT:    mov z29.d, p5/m, z2.d
+; CHECK-NEXT:    fcmgt p5.d, p0/z, z25.d, z1.d
+; CHECK-NEXT:    mov z28.d, p3/m, z2.d
+; CHECK-NEXT:    eor p7.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    fcmuo p3.d, p0/z, z25.d, z25.d
+; CHECK-NEXT:    eor p8.b, p0/z, p8.b, p1.b
+; CHECK-NEXT:    movprfx z25, z6
+; CHECK-NEXT:    fcvtzs z25.d, p0/m, z6.d
+; CHECK-NEXT:    eor p6.b, p0/z, p6.b, p1.b
+; CHECK-NEXT:    mov z30.d, p7/m, z2.d
+; CHECK-NEXT:    fcmgt p7.d, p0/z, z26.d, z1.d
+; CHECK-NEXT:    eor p9.b, p0/z, p9.b, p1.b
+; CHECK-NEXT:    eor p1.b, p0/z, p10.b, p1.b
+; CHECK-NEXT:    mov z31.d, p6/m, z2.d
+; CHECK-NEXT:    fcmuo p6.d, p0/z, z26.d, z26.d
 ; CHECK-NEXT:    fcmgt p10.d, p0/z, z3.d, z1.d
-; CHECK-NEXT:    mov z26.d, p5/m, z0.d
-; CHECK-NEXT:    fcmgt p5.d, p0/z, z28.d, z1.d
-; CHECK-NEXT:    mov z31.d, p6/m, z0.d
-; CHECK-NEXT:    mov z8.d, p4/m, z0.d
-; CHECK-NEXT:    sel z0.d, p3, z24.d, z4.d
-; CHECK-NEXT:    fcmgt p3.d, p0/z, z5.d, z1.d
+; CHECK-NEXT:    sel z26.d, p8, z2.d, z0.d
+; CHECK-NEXT:    fcmgt p8.d, p0/z, z27.d, z1.d
+; CHECK-NEXT:    mov z8.d, p1/m, z2.d
+; CHECK-NEXT:    sel z0.d, p4, z24.d, z4.d
+; CHECK-NEXT:    fcmgt p1.d, p0/z, z5.d, z1.d
 ; CHECK-NEXT:    fcmgt p4.d, p0/z, z6.d, z1.d
-; CHECK-NEXT:    fcmgt p6.d, p0/z, z7.d, z1.d
-; CHECK-NEXT:    sel z1.d, p7, z24.d, z29.d
-; CHECK-NEXT:    fcmuo p7.d, p0/z, z3.d, z3.d
-; CHECK-NEXT:    sel z2.d, p8, z24.d, z25.d
-; CHECK-NEXT:    sel z3.d, p10, z24.d, z27.d
-; CHECK-NEXT:    sel z4.d, p5, z24.d, z30.d
-; CHECK-NEXT:    fcmuo p5.d, p0/z, z28.d, z28.d
+; CHECK-NEXT:    mov z25.d, p9/m, z2.d
+; CHECK-NEXT:    fcmgt p9.d, p0/z, z7.d, z1.d
+; CHECK-NEXT:    sel z1.d, p5, z24.d, z29.d
+; CHECK-NEXT:    fcmuo p5.d, p0/z, z3.d, z3.d
+; CHECK-NEXT:    sel z2.d, p7, z24.d, z28.d
+; CHECK-NEXT:    sel z3.d, p10, z24.d, z30.d
+; CHECK-NEXT:    sel z4.d, p8, z24.d, z31.d
+; CHECK-NEXT:    fcmuo p7.d, p0/z, z27.d, z27.d
 ; CHECK-NEXT:    fcmuo p8.d, p0/z, z5.d, z5.d
 ; CHECK-NEXT:    fcmuo p10.d, p0/z, z6.d, z6.d
-; CHECK-NEXT:    sel z5.d, p3, z24.d, z26.d
-; CHECK-NEXT:    fcmuo p0.d, p0/z, z7.d, z7.d
-; CHECK-NEXT:    sel z6.d, p4, z24.d, z31.d
+; CHECK-NEXT:    sel z5.d, p1, z24.d, z26.d
+; CHECK-NEXT:    sel z6.d, p4, z24.d, z25.d
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z7.d, p6, z24.d, z8.d
+; CHECK-NEXT:    fcmuo p0.d, p0/z, z7.d, z7.d
+; CHECK-NEXT:    sel z7.d, p9, z24.d, z8.d
 ; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    mov z2.d, p9/m, #0 // =0x0
 ; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    mov z3.d, p7/m, #0 // =0x0
-; CHECK-NEXT:    mov z4.d, p5/m, #0 // =0x0
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    mov z2.d, p6/m, #0 // =0x0
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    mov z3.d, p5/m, #0 // =0x0
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    mov z4.d, p7/m, #0 // =0x0
 ; CHECK-NEXT:    mov z5.d, p8/m, #0 // =0x0
+; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    mov z6.d, p10/m, #0 // =0x0
+; CHECK-NEXT:    mov z0.d, p2/m, #0 // =0x0
 ; CHECK-NEXT:    ldr p10, [sp, #1, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    mov z0.d, p1/m, #0 // =0x0
-; CHECK-NEXT:    mov z1.d, p2/m, #0 // =0x0
-; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    mov z1.d, p3/m, #0 // =0x0
 ; CHECK-NEXT:    mov z7.d, p0/m, #0 // =0x0
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -1530,213 +1549,213 @@ define <vscale x 32 x i64> @llrint_v32f64(<vscale x 32 x double> %x) {
 ; CHECK-NEXT:    ldr z24, [x0, #6, mul vl]
 ; CHECK-NEXT:    ldr z1, [x0, #1, mul vl]
 ; CHECK-NEXT:    mov z7.d, x9
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    mov z26.d, #0x8000000000000000
-; CHECK-NEXT:    ldr z3, [x0, #3, mul vl]
-; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
+; CHECK-NEXT:    movprfx z12, z0
+; CHECK-NEXT:    frintx z12.d, p0/m, z0.d
 ; CHECK-NEXT:    movprfx z30, z2
 ; CHECK-NEXT:    frintx z30.d, p0/m, z2.d
-; CHECK-NEXT:    ldr z6, [x0, #5, mul vl]
+; CHECK-NEXT:    ldr z4, [x0, #3, mul vl]
 ; CHECK-NEXT:    movprfx z25, z24
 ; CHECK-NEXT:    frintx z25.d, p0/m, z24.d
-; CHECK-NEXT:    movprfx z12, z1
-; CHECK-NEXT:    frintx z12.d, p0/m, z1.d
+; CHECK-NEXT:    movprfx z11, z1
+; CHECK-NEXT:    frintx z11.d, p0/m, z1.d
+; CHECK-NEXT:    ldr z6, [x0, #5, mul vl]
 ; CHECK-NEXT:    ldr z5, [x0, #4, mul vl]
-; CHECK-NEXT:    frintx z3.d, p0/m, z3.d
+; CHECK-NEXT:    ldr z9, [x0, #15, mul vl]
+; CHECK-NEXT:    ldr z8, [x0, #7, mul vl]
+; CHECK-NEXT:    frintx z4.d, p0/m, z4.d
 ; CHECK-NEXT:    mov x9, #4890909195324358655 // =0x43dfffffffffffff
-; CHECK-NEXT:    frintx z6.d, p0/m, z6.d
-; CHECK-NEXT:    mov z4.d, x9
-; CHECK-NEXT:    fcmge p3.d, p0/z, z0.d, z7.d
-; CHECK-NEXT:    movprfx z24, z0
-; CHECK-NEXT:    fcvtzs z24.d, p0/m, z0.d
+; CHECK-NEXT:    fcmge p3.d, p0/z, z12.d, z7.d
+; CHECK-NEXT:    movprfx z24, z12
+; CHECK-NEXT:    fcvtzs z24.d, p0/m, z12.d
 ; CHECK-NEXT:    fcmge p5.d, p0/z, z30.d, z7.d
 ; CHECK-NEXT:    movprfx z28, z30
 ; CHECK-NEXT:    fcvtzs z28.d, p0/m, z30.d
-; CHECK-NEXT:    str z0, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    fcmge p4.d, p0/z, z11.d, z7.d
 ; CHECK-NEXT:    frintx z5.d, p0/m, z5.d
-; CHECK-NEXT:    fcmge p4.d, p0/z, z12.d, z7.d
-; CHECK-NEXT:    ldr z8, [x0, #7, mul vl]
-; CHECK-NEXT:    ldr z9, [x0, #15, mul vl]
-; CHECK-NEXT:    movprfx z27, z12
-; CHECK-NEXT:    fcvtzs z27.d, p0/m, z12.d
-; CHECK-NEXT:    fcmge p6.d, p0/z, z3.d, z7.d
-; CHECK-NEXT:    fcmge p9.d, p0/z, z6.d, z7.d
-; CHECK-NEXT:    not p7.b, p0/z, p3.b
-; CHECK-NEXT:    movprfx z31, z3
-; CHECK-NEXT:    fcvtzs z31.d, p0/m, z3.d
-; CHECK-NEXT:    movprfx z15, z6
-; CHECK-NEXT:    fcvtzs z15.d, p0/m, z6.d
-; CHECK-NEXT:    not p5.b, p0/z, p5.b
-; CHECK-NEXT:    fcmge p8.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    movprfx z13, z5
-; CHECK-NEXT:    fcvtzs z13.d, p0/m, z5.d
-; CHECK-NEXT:    sel z0.d, p7, z26.d, z24.d
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
+; CHECK-NEXT:    frintx z6.d, p0/m, z6.d
+; CHECK-NEXT:    movprfx z27, z11
+; CHECK-NEXT:    fcvtzs z27.d, p0/m, z11.d
 ; CHECK-NEXT:    movprfx z17, z25
 ; CHECK-NEXT:    fcvtzs z17.d, p0/m, z25.d
-; CHECK-NEXT:    not p3.b, p0/z, p6.b
-; CHECK-NEXT:    fcmge p6.d, p0/z, z25.d, z7.d
+; CHECK-NEXT:    fcmge p6.d, p0/z, z4.d, z7.d
+; CHECK-NEXT:    movprfx z31, z4
+; CHECK-NEXT:    fcvtzs z31.d, p0/m, z4.d
 ; CHECK-NEXT:    movprfx z22, z9
 ; CHECK-NEXT:    frintx z22.d, p0/m, z9.d
-; CHECK-NEXT:    sel z29.d, p4, z26.d, z27.d
+; CHECK-NEXT:    eor p3.b, p0/z, p3.b, p1.b
+; CHECK-NEXT:    mov z3.d, x9
+; CHECK-NEXT:    eor p5.b, p0/z, p5.b, p1.b
+; CHECK-NEXT:    fcmge p7.d, p0/z, z5.d, z7.d
+; CHECK-NEXT:    movprfx z10, z5
+; CHECK-NEXT:    fcvtzs z10.d, p0/m, z5.d
+; CHECK-NEXT:    sel z0.d, p3, z26.d, z24.d
+; CHECK-NEXT:    eor p8.b, p0/z, p4.b, p1.b
+; CHECK-NEXT:    fcmge p9.d, p0/z, z6.d, z7.d
+; CHECK-NEXT:    movprfx z14, z6
+; CHECK-NEXT:    fcvtzs z14.d, p0/m, z6.d
+; CHECK-NEXT:    eor p4.b, p0/z, p6.b, p1.b
+; CHECK-NEXT:    movprfx z2, z22
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z22.d
+; CHECK-NEXT:    sel z29.d, p8, z26.d, z27.d
 ; CHECK-NEXT:    movprfx z27, z8
 ; CHECK-NEXT:    frintx z27.d, p0/m, z8.d
-; CHECK-NEXT:    fcmgt p1.d, p0/z, z12.d, z4.d
-; CHECK-NEXT:    str z0, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    fcmgt p2.d, p0/z, z11.d, z3.d
+; CHECK-NEXT:    str z0, [sp, #2, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    sel z0.d, p5, z26.d, z28.d
-; CHECK-NEXT:    not p4.b, p0/z, p8.b
-; CHECK-NEXT:    ldr z10, [x0, #8, mul vl]
-; CHECK-NEXT:    not p5.b, p0/z, p9.b
-; CHECK-NEXT:    sel z24.d, p3, z26.d, z31.d
-; CHECK-NEXT:    not p3.b, p0/z, p6.b
-; CHECK-NEXT:    movprfx z2, z22
-; CHECK-NEXT:    fcvtzs z2.d, p0/m, z22.d
-; CHECK-NEXT:    fcmgt p2.d, p0/z, z30.d, z4.d
-; CHECK-NEXT:    str z0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT:    fcmge p7.d, p0/z, z27.d, z7.d
-; CHECK-NEXT:    sel z31.d, p5, z26.d, z15.d
-; CHECK-NEXT:    ldr z11, [x0, #9, mul vl]
-; CHECK-NEXT:    movprfx z28, z10
-; CHECK-NEXT:    frintx z28.d, p0/m, z10.d
-; CHECK-NEXT:    ldr z10, [x0, #10, mul vl]
-; CHECK-NEXT:    ldr z18, [x0, #11, mul vl]
+; CHECK-NEXT:    eor p5.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    ldr z13, [x0, #14, mul vl]
+; CHECK-NEXT:    eor p6.b, p0/z, p9.b, p1.b
+; CHECK-NEXT:    fcmge p7.d, p0/z, z25.d, z7.d
+; CHECK-NEXT:    sel z24.d, p4, z26.d, z31.d
+; CHECK-NEXT:    sel z31.d, p5, z26.d, z10.d
+; CHECK-NEXT:    fcmgt p3.d, p0/z, z30.d, z3.d
+; CHECK-NEXT:    str z0, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    fcmge p8.d, p0/z, z27.d, z7.d
+; CHECK-NEXT:    sel z0.d, p6, z26.d, z14.d
+; CHECK-NEXT:    ldr z28, [x0, #8, mul vl]
+; CHECK-NEXT:    ldr z8, [x0, #9, mul vl]
+; CHECK-NEXT:    ldr z18, [x0, #10, mul vl]
+; CHECK-NEXT:    ldr z19, [x0, #11, mul vl]
 ; CHECK-NEXT:    ldr z16, [x0, #13, mul vl]
-; CHECK-NEXT:    ldr z14, [x0, #14, mul vl]
-; CHECK-NEXT:    ldr z19, [x0, #12, mul vl]
-; CHECK-NEXT:    mov z17.d, p3/m, z26.d
-; CHECK-NEXT:    fcmgt p9.d, p0/z, z3.d, z4.d
-; CHECK-NEXT:    movprfx z8, z11
-; CHECK-NEXT:    frintx z8.d, p0/m, z11.d
-; CHECK-NEXT:    sel z11.d, p4, z26.d, z13.d
-; CHECK-NEXT:    frintx z10.d, p0/m, z10.d
-; CHECK-NEXT:    movprfx z13, z18
-; CHECK-NEXT:    frintx z13.d, p0/m, z18.d
-; CHECK-NEXT:    fcmge p5.d, p0/z, z28.d, z7.d
+; CHECK-NEXT:    ldr z15, [x0, #12, mul vl]
+; CHECK-NEXT:    eor p4.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    movprfx z21, z13
+; CHECK-NEXT:    frintx z21.d, p0/m, z13.d
+; CHECK-NEXT:    str z0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    frintx z28.d, p0/m, z28.d
+; CHECK-NEXT:    frintx z8.d, p0/m, z8.d
+; CHECK-NEXT:    movprfx z10, z18
+; CHECK-NEXT:    frintx z10.d, p0/m, z18.d
+; CHECK-NEXT:    movprfx z14, z19
+; CHECK-NEXT:    frintx z14.d, p0/m, z19.d
 ; CHECK-NEXT:    movprfx z18, z27
 ; CHECK-NEXT:    fcvtzs z18.d, p0/m, z27.d
+; CHECK-NEXT:    eor p5.b, p0/z, p8.b, p1.b
+; CHECK-NEXT:    frintx z15.d, p0/m, z15.d
 ; CHECK-NEXT:    frintx z16.d, p0/m, z16.d
-; CHECK-NEXT:    movprfx z15, z19
-; CHECK-NEXT:    frintx z15.d, p0/m, z19.d
+; CHECK-NEXT:    mov z17.d, p4/m, z26.d
+; CHECK-NEXT:    movprfx z1, z21
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z21.d
+; CHECK-NEXT:    fcmge p6.d, p0/z, z28.d, z7.d
+; CHECK-NEXT:    fcmge p7.d, p0/z, z8.d, z7.d
 ; CHECK-NEXT:    movprfx z19, z28
 ; CHECK-NEXT:    fcvtzs z19.d, p0/m, z28.d
-; CHECK-NEXT:    movprfx z21, z14
-; CHECK-NEXT:    frintx z21.d, p0/m, z14.d
-; CHECK-NEXT:    not p4.b, p0/z, p7.b
-; CHECK-NEXT:    fcmge p6.d, p0/z, z8.d, z7.d
 ; CHECK-NEXT:    movprfx z20, z8
 ; CHECK-NEXT:    fcvtzs z20.d, p0/m, z8.d
-; CHECK-NEXT:    fcmge p7.d, p0/z, z10.d, z7.d
-; CHECK-NEXT:    fcmge p8.d, p0/z, z13.d, z7.d
-; CHECK-NEXT:    not p5.b, p0/z, p5.b
-; CHECK-NEXT:    sel z9.d, p4, z26.d, z18.d
-; CHECK-NEXT:    fcmge p4.d, p0/z, z16.d, z7.d
-; CHECK-NEXT:    fcmge p3.d, p0/z, z15.d, z7.d
+; CHECK-NEXT:    fcmge p8.d, p0/z, z10.d, z7.d
+; CHECK-NEXT:    fcmge p9.d, p0/z, z14.d, z7.d
+; CHECK-NEXT:    sel z9.d, p5, z26.d, z18.d
+; CHECK-NEXT:    fcmge p4.d, p0/z, z15.d, z7.d
+; CHECK-NEXT:    fcmge p5.d, p0/z, z16.d, z7.d
+; CHECK-NEXT:    movprfx z23, z15
+; CHECK-NEXT:    fcvtzs z23.d, p0/m, z15.d
 ; CHECK-NEXT:    movprfx z0, z16
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z16.d
-; CHECK-NEXT:    sel z14.d, p5, z26.d, z19.d
+; CHECK-NEXT:    eor p6.b, p0/z, p6.b, p1.b
+; CHECK-NEXT:    eor p7.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    sel z13.d, p6, z26.d, z19.d
+; CHECK-NEXT:    eor p6.b, p0/z, p8.b, p1.b
 ; CHECK-NEXT:    movprfx z19, z10
 ; CHECK-NEXT:    fcvtzs z19.d, p0/m, z10.d
-; CHECK-NEXT:    movprfx z1, z21
-; CHECK-NEXT:    fcvtzs z1.d, p0/m, z21.d
-; CHECK-NEXT:    not p6.b, p0/z, p6.b
-; CHECK-NEXT:    movprfx z23, z15
-; CHECK-NEXT:    fcvtzs z23.d, p0/m, z15.d
-; CHECK-NEXT:    not p5.b, p0/z, p7.b
-; CHECK-NEXT:    sel z18.d, p6, z26.d, z20.d
-; CHECK-NEXT:    fcmge p6.d, p0/z, z21.d, z7.d
-; CHECK-NEXT:    not p7.b, p0/z, p8.b
-; CHECK-NEXT:    fcmge p8.d, p0/z, z22.d, z7.d
-; CHECK-NEXT:    movprfx z20, z13
-; CHECK-NEXT:    fcvtzs z20.d, p0/m, z13.d
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
+; CHECK-NEXT:    sel z18.d, p7, z26.d, z20.d
+; CHECK-NEXT:    fcmge p7.d, p0/z, z21.d, z7.d
+; CHECK-NEXT:    eor p8.b, p0/z, p9.b, p1.b
+; CHECK-NEXT:    fcmge p9.d, p0/z, z22.d, z7.d
+; CHECK-NEXT:    movprfx z20, z14
+; CHECK-NEXT:    fcvtzs z20.d, p0/m, z14.d
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p1.b
+; CHECK-NEXT:    eor p5.b, p0/z, p5.b, p1.b
 ; CHECK-NEXT:    mov z7.d, #0x7fffffffffffffff
-; CHECK-NEXT:    mov z19.d, p5/m, z26.d
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
-; CHECK-NEXT:    mov z0.d, p4/m, z26.d
-; CHECK-NEXT:    fcmgt p4.d, p0/z, z21.d, z4.d
-; CHECK-NEXT:    not p5.b, p0/z, p6.b
-; CHECK-NEXT:    mov z23.d, p3/m, z26.d
-; CHECK-NEXT:    fcmgt p3.d, p0/z, z22.d, z4.d
-; CHECK-NEXT:    not p6.b, p0/z, p8.b
-; CHECK-NEXT:    mov z20.d, p7/m, z26.d
-; CHECK-NEXT:    fcmuo p8.d, p0/z, z22.d, z22.d
-; CHECK-NEXT:    mov z1.d, p5/m, z26.d
+; CHECK-NEXT:    mov z19.d, p6/m, z26.d
+; CHECK-NEXT:    mov z23.d, p4/m, z26.d
+; CHECK-NEXT:    fcmgt p4.d, p0/z, z22.d, z3.d
+; CHECK-NEXT:    eor p7.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    mov z0.d, p5/m, z26.d
 ; CHECK-NEXT:    fcmuo p5.d, p0/z, z21.d, z21.d
-; CHECK-NEXT:    fcmgt p7.d, p0/z, z25.d, z4.d
-; CHECK-NEXT:    mov z2.d, p6/m, z26.d
-; CHECK-NEXT:    sel z26.d, p1, z7.d, z29.d
-; CHECK-NEXT:    fcmgt p1.d, p0/z, z16.d, z4.d
-; CHECK-NEXT:    ldr z29, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    fcmgt p6.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    mov z24.d, p9/m, z7.d
-; CHECK-NEXT:    mov z1.d, p4/m, z7.d
+; CHECK-NEXT:    eor p1.b, p0/z, p9.b, p1.b
+; CHECK-NEXT:    mov z20.d, p8/m, z26.d
+; CHECK-NEXT:    fcmgt p6.d, p0/z, z4.d, z3.d
+; CHECK-NEXT:    mov z1.d, p7/m, z26.d
+; CHECK-NEXT:    fcmuo p9.d, p0/z, z22.d, z22.d
+; CHECK-NEXT:    fcmgt p8.d, p0/z, z25.d, z3.d
+; CHECK-NEXT:    mov z2.d, p1/m, z26.d
+; CHECK-NEXT:    fcmgt p1.d, p0/z, z21.d, z3.d
+; CHECK-NEXT:    sel z26.d, p2, z7.d, z29.d
+; CHECK-NEXT:    fcmgt p2.d, p0/z, z16.d, z3.d
+; CHECK-NEXT:    ldr z29, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    fcmgt p7.d, p0/z, z5.d, z3.d
+; CHECK-NEXT:    mov z24.d, p6/m, z7.d
+; CHECK-NEXT:    fcmuo p6.d, p0/z, z15.d, z15.d
+; CHECK-NEXT:    mov z2.d, p4/m, z7.d
 ; CHECK-NEXT:    fcmuo p4.d, p0/z, z16.d, z16.d
-; CHECK-NEXT:    mov z2.d, p3/m, z7.d
-; CHECK-NEXT:    fcmgt p3.d, p0/z, z15.d, z4.d
-; CHECK-NEXT:    mov z17.d, p7/m, z7.d
-; CHECK-NEXT:    mov z29.d, p2/m, z7.d
-; CHECK-NEXT:    fcmgt p2.d, p0/z, z13.d, z4.d
-; CHECK-NEXT:    mov z0.d, p1/m, z7.d
-; CHECK-NEXT:    fcmgt p1.d, p0/z, z10.d, z4.d
+; CHECK-NEXT:    mov z17.d, p8/m, z7.d
+; CHECK-NEXT:    mov z1.d, p1/m, z7.d
+; CHECK-NEXT:    mov z29.d, p3/m, z7.d
+; CHECK-NEXT:    fcmgt p3.d, p0/z, z14.d, z3.d
+; CHECK-NEXT:    fcmgt p1.d, p0/z, z15.d, z3.d
+; CHECK-NEXT:    mov z0.d, p2/m, z7.d
+; CHECK-NEXT:    fcmgt p2.d, p0/z, z10.d, z3.d
+; CHECK-NEXT:    mov z2.d, p9/m, #0 // =0x0
+; CHECK-NEXT:    mov z31.d, p7/m, z7.d
 ; CHECK-NEXT:    mov z1.d, p5/m, #0 // =0x0
-; CHECK-NEXT:    mov z11.d, p6/m, z7.d
-; CHECK-NEXT:    fcmuo p6.d, p0/z, z15.d, z15.d
-; CHECK-NEXT:    fcmgt p5.d, p0/z, z8.d, z4.d
-; CHECK-NEXT:    mov z2.d, p8/m, #0 // =0x0
-; CHECK-NEXT:    sel z16.d, p3, z7.d, z23.d
+; CHECK-NEXT:    fcmgt p5.d, p0/z, z8.d, z3.d
+; CHECK-NEXT:    sel z15.d, p3, z7.d, z20.d
 ; CHECK-NEXT:    fcmuo p3.d, p0/z, z10.d, z10.d
 ; CHECK-NEXT:    mov z0.d, p4/m, #0 // =0x0
-; CHECK-NEXT:    sel z15.d, p2, z7.d, z20.d
-; CHECK-NEXT:    fcmuo p2.d, p0/z, z13.d, z13.d
+; CHECK-NEXT:    sel z16.d, p1, z7.d, z23.d
+; CHECK-NEXT:    fcmuo p1.d, p0/z, z14.d, z14.d
+; CHECK-NEXT:    fcmgt p4.d, p0/z, z27.d, z3.d
 ; CHECK-NEXT:    str z1, [x8, #14, mul vl]
-; CHECK-NEXT:    sel z1.d, p1, z7.d, z19.d
-; CHECK-NEXT:    fcmgt p1.d, p0/z, z28.d, z4.d
-; CHECK-NEXT:    fcmgt p4.d, p0/z, z27.d, z4.d
+; CHECK-NEXT:    sel z1.d, p2, z7.d, z19.d
+; CHECK-NEXT:    fcmgt p2.d, p0/z, z28.d, z3.d
 ; CHECK-NEXT:    str z2, [x8, #15, mul vl]
 ; CHECK-NEXT:    sel z2.d, p5, z7.d, z18.d
-; CHECK-NEXT:    mov z16.d, p6/m, #0 // =0x0
 ; CHECK-NEXT:    fcmuo p5.d, p0/z, z8.d, z8.d
+; CHECK-NEXT:    mov z16.d, p6/m, #0 // =0x0
 ; CHECK-NEXT:    str z0, [x8, #13, mul vl]
-; CHECK-NEXT:    mov z15.d, p2/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p2.d, p0/z, z28.d, z28.d
+; CHECK-NEXT:    mov z15.d, p1/m, #0 // =0x0
+; CHECK-NEXT:    fcmuo p1.d, p0/z, z28.d, z28.d
 ; CHECK-NEXT:    mov z1.d, p3/m, #0 // =0x0
-; CHECK-NEXT:    fcmgt p3.d, p0/z, z6.d, z4.d
-; CHECK-NEXT:    sel z0.d, p1, z7.d, z14.d
-; CHECK-NEXT:    fcmuo p1.d, p0/z, z27.d, z27.d
+; CHECK-NEXT:    sel z0.d, p2, z7.d, z13.d
+; CHECK-NEXT:    fcmuo p2.d, p0/z, z27.d, z27.d
 ; CHECK-NEXT:    sel z27.d, p4, z7.d, z9.d
+; CHECK-NEXT:    fcmgt p3.d, p0/z, z6.d, z3.d
 ; CHECK-NEXT:    str z16, [x8, #12, mul vl]
-; CHECK-NEXT:    fcmuo p4.d, p0/z, z25.d, z25.d
-; CHECK-NEXT:    str z15, [x8, #11, mul vl]
 ; CHECK-NEXT:    mov z2.d, p5/m, #0 // =0x0
+; CHECK-NEXT:    str z15, [x8, #11, mul vl]
+; CHECK-NEXT:    fcmuo p4.d, p0/z, z25.d, z25.d
 ; CHECK-NEXT:    fcmuo p5.d, p0/z, z6.d, z6.d
 ; CHECK-NEXT:    str z1, [x8, #10, mul vl]
-; CHECK-NEXT:    mov z0.d, p2/m, #0 // =0x0
-; CHECK-NEXT:    sel z1.d, p3, z7.d, z31.d
-; CHECK-NEXT:    fcmuo p3.d, p0/z, z5.d, z5.d
-; CHECK-NEXT:    ldr z5, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    mov z27.d, p1/m, #0 // =0x0
+; CHECK-NEXT:    mov z0.d, p1/m, #0 // =0x0
+; CHECK-NEXT:    fcmgt p1.d, p0/z, z12.d, z3.d
+; CHECK-NEXT:    ldr z1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    mov z27.d, p2/m, #0 // =0x0
 ; CHECK-NEXT:    str z2, [x8, #9, mul vl]
-; CHECK-NEXT:    fcmuo p1.d, p0/z, z3.d, z3.d
+; CHECK-NEXT:    fcmuo p2.d, p0/z, z4.d, z4.d
 ; CHECK-NEXT:    str z0, [x8, #8, mul vl]
 ; CHECK-NEXT:    mov z17.d, p4/m, #0 // =0x0
 ; CHECK-NEXT:    fcmuo p4.d, p0/z, z30.d, z30.d
-; CHECK-NEXT:    fcmgt p2.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    mov z1.d, p5/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p5.d, p0/z, z12.d, z12.d
+; CHECK-NEXT:    mov z1.d, p3/m, z7.d
 ; CHECK-NEXT:    str z27, [x8, #7, mul vl]
-; CHECK-NEXT:    fcmuo p0.d, p0/z, z5.d, z5.d
-; CHECK-NEXT:    mov z11.d, p3/m, #0 // =0x0
-; CHECK-NEXT:    ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    mov z24.d, p1/m, #0 // =0x0
+; CHECK-NEXT:    fcmuo p3.d, p0/z, z5.d, z5.d
+; CHECK-NEXT:    ldr z0, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    mov z24.d, p2/m, #0 // =0x0
 ; CHECK-NEXT:    str z17, [x8, #6, mul vl]
+; CHECK-NEXT:    mov z1.d, p5/m, #0 // =0x0
+; CHECK-NEXT:    fcmuo p5.d, p0/z, z11.d, z11.d
+; CHECK-NEXT:    fcmuo p0.d, p0/z, z12.d, z12.d
+; CHECK-NEXT:    mov z0.d, p1/m, z7.d
+; CHECK-NEXT:    mov z31.d, p3/m, #0 // =0x0
 ; CHECK-NEXT:    mov z29.d, p4/m, #0 // =0x0
+; CHECK-NEXT:    str z24, [x8, #3, mul vl]
 ; CHECK-NEXT:    str z1, [x8, #5, mul vl]
 ; CHECK-NEXT:    mov z26.d, p5/m, #0 // =0x0
-; CHECK-NEXT:    str z11, [x8, #4, mul vl]
-; CHECK-NEXT:    mov z0.d, p2/m, z7.d
-; CHECK-NEXT:    str z24, [x8, #3, mul vl]
+; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
+; CHECK-NEXT:    str z31, [x8, #4, mul vl]
 ; CHECK-NEXT:    str z29, [x8, #2, mul vl]
 ; CHECK-NEXT:    str z26, [x8, #1, mul vl]
-; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
 ; CHECK-NEXT:    str z0, [x8]
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/sve-load-store-strict-align.ll b/llvm/test/CodeGen/AArch64/sve-load-store-strict-align.ll
index 03b08ff437599..f376952eb1771 100644
--- a/llvm/test/CodeGen/AArch64/sve-load-store-strict-align.ll
+++ b/llvm/test/CodeGen/AArch64/sve-load-store-strict-align.ll
@@ -29,7 +29,7 @@ define void @nxv8i16(ptr %ldptr, ptr %stptr) {
 ;
 ; CHECK-LABEL: nxv8i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x1]
 ; CHECK-NEXT:    ret
@@ -47,7 +47,7 @@ define void @nxv4i32(ptr %ldptr, ptr %stptr) {
 ;
 ; CHECK-LABEL: nxv4i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x1]
 ; CHECK-NEXT:    ret
@@ -65,7 +65,7 @@ define void @nxv2i64(ptr %ldptr, ptr %stptr) {
 ;
 ; CHECK-LABEL: nxv2i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x1]
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-lrint.ll b/llvm/test/CodeGen/AArch64/sve-lrint.ll
index 908ba2392a437..0b70e0e1b22e2 100644
--- a/llvm/test/CodeGen/AArch64/sve-lrint.ll
+++ b/llvm/test/CodeGen/AArch64/sve-lrint.ll
@@ -10,14 +10,15 @@ define <vscale x 1 x iXLen> @lrint_v1f16(<vscale x 1 x half> %x) {
 ; CHECK-NEXT:    mov z2.d, #0x8000000000000000
 ; CHECK-NEXT:    mov z1.h, w8
 ; CHECK-NEXT:    mov w8, #31743 // =0x7bff
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    frintx z0.h, p0/m, z0.h
 ; CHECK-NEXT:    mov z3.h, w8
 ; CHECK-NEXT:    fcmge p1.h, p0/z, z0.h, z1.h
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z0.h
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p2.h, p0/z, z0.h, z3.h
 ; CHECK-NEXT:    mov z3.d, #0x7fffffffffffffff
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
 ; CHECK-NEXT:    fcmuo p0.h, p0/z, z0.h, z0.h
 ; CHECK-NEXT:    mov z1.d, p1/m, z2.d
 ; CHECK-NEXT:    sel z0.d, p2, z3.d, z1.d
@@ -36,14 +37,15 @@ define <vscale x 2 x iXLen> @lrint_v2f16(<vscale x 2 x half> %x) {
 ; CHECK-NEXT:    mov z2.d, #0x8000000000000000
 ; CHECK-NEXT:    mov z1.h, w8
 ; CHECK-NEXT:    mov w8, #31743 // =0x7bff
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    frintx z0.h, p0/m, z0.h
 ; CHECK-NEXT:    mov z3.h, w8
 ; CHECK-NEXT:    fcmge p1.h, p0/z, z0.h, z1.h
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z0.h
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p2.h, p0/z, z0.h, z3.h
 ; CHECK-NEXT:    mov z3.d, #0x7fffffffffffffff
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
 ; CHECK-NEXT:    fcmuo p0.h, p0/z, z0.h, z0.h
 ; CHECK-NEXT:    mov z1.d, p1/m, z2.d
 ; CHECK-NEXT:    sel z0.d, p2, z3.d, z1.d
@@ -68,6 +70,7 @@ define <vscale x 4 x iXLen> @lrint_v4f16(<vscale x 4 x half> %x) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov z2.h, w8
 ; CHECK-NEXT:    mov w8, #31743 // =0x7bff
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    mov z3.h, w8
 ; CHECK-NEXT:    mov z6.d, #0x7fffffffffffffff
 ; CHECK-NEXT:    frintx z1.h, p0/m, z1.h
@@ -79,10 +82,10 @@ define <vscale x 4 x iXLen> @lrint_v4f16(<vscale x 4 x half> %x) {
 ; CHECK-NEXT:    fcvtzs z4.d, p0/m, z1.h
 ; CHECK-NEXT:    movprfx z5, z0
 ; CHECK-NEXT:    fcvtzs z5.d, p0/m, z0.h
-; CHECK-NEXT:    fcmgt p3.h, p0/z, z1.h, z3.h
 ; CHECK-NEXT:    fcmgt p4.h, p0/z, z0.h, z3.h
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
+; CHECK-NEXT:    fcmgt p3.h, p0/z, z1.h, z3.h
 ; CHECK-NEXT:    sel z3.d, p1, z2.d, z4.d
 ; CHECK-NEXT:    fcmuo p1.h, p0/z, z1.h, z1.h
 ; CHECK-NEXT:    fcmuo p0.h, p0/z, z0.h, z0.h
@@ -117,6 +120,7 @@ define <vscale x 8 x iXLen> @lrint_v8f16(<vscale x 8 x half> %x) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov z4.h, w8
 ; CHECK-NEXT:    mov w8, #31743 // =0x7bff
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    mov z6.h, w8
 ; CHECK-NEXT:    mov z26.d, #0x7fffffffffffffff
 ; CHECK-NEXT:    uunpklo z2.d, z1.s
@@ -131,8 +135,8 @@ define <vscale x 8 x iXLen> @lrint_v8f16(<vscale x 8 x half> %x) {
 ; CHECK-NEXT:    mov z0.d, #0x8000000000000000
 ; CHECK-NEXT:    fcmge p1.h, p0/z, z2.h, z4.h
 ; CHECK-NEXT:    fcmge p2.h, p0/z, z1.h, z4.h
-; CHECK-NEXT:    fcmge p3.h, p0/z, z3.h, z4.h
-; CHECK-NEXT:    fcmge p4.h, p0/z, z5.h, z4.h
+; CHECK-NEXT:    fcmge p4.h, p0/z, z3.h, z4.h
+; CHECK-NEXT:    fcmge p5.h, p0/z, z5.h, z4.h
 ; CHECK-NEXT:    movprfx z4, z2
 ; CHECK-NEXT:    fcvtzs z4.d, p0/m, z2.h
 ; CHECK-NEXT:    movprfx z7, z1
@@ -141,27 +145,27 @@ define <vscale x 8 x iXLen> @lrint_v8f16(<vscale x 8 x half> %x) {
 ; CHECK-NEXT:    fcvtzs z24.d, p0/m, z3.h
 ; CHECK-NEXT:    movprfx z25, z5
 ; CHECK-NEXT:    fcvtzs z25.d, p0/m, z5.h
-; CHECK-NEXT:    fcmgt p7.h, p0/z, z3.h, z6.h
-; CHECK-NEXT:    fcmgt p5.h, p0/z, z2.h, z6.h
-; CHECK-NEXT:    fcmgt p6.h, p0/z, z1.h, z6.h
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
+; CHECK-NEXT:    fcmgt p6.h, p0/z, z2.h, z6.h
+; CHECK-NEXT:    fcmgt p7.h, p0/z, z1.h, z6.h
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p3.b
 ; CHECK-NEXT:    mov z4.d, p1/m, z0.d
 ; CHECK-NEXT:    fcmgt p1.h, p0/z, z5.h, z6.h
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
+; CHECK-NEXT:    eor p3.b, p0/z, p5.b, p3.b
+; CHECK-NEXT:    fcmgt p5.h, p0/z, z3.h, z6.h
 ; CHECK-NEXT:    sel z6.d, p2, z0.d, z7.d
+; CHECK-NEXT:    sel z7.d, p4, z0.d, z24.d
+; CHECK-NEXT:    fcmuo p4.h, p0/z, z3.h, z3.h
 ; CHECK-NEXT:    fcmuo p2.h, p0/z, z2.h, z2.h
-; CHECK-NEXT:    sel z7.d, p3, z0.d, z24.d
+; CHECK-NEXT:    sel z24.d, p3, z0.d, z25.d
 ; CHECK-NEXT:    fcmuo p3.h, p0/z, z1.h, z1.h
-; CHECK-NEXT:    sel z24.d, p4, z0.d, z25.d
-; CHECK-NEXT:    fcmuo p4.h, p0/z, z3.h, z3.h
 ; CHECK-NEXT:    fcmuo p0.h, p0/z, z5.h, z5.h
-; CHECK-NEXT:    sel z0.d, p5, z26.d, z4.d
-; CHECK-NEXT:    sel z1.d, p6, z26.d, z6.d
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z2.d, p7, z26.d, z7.d
+; CHECK-NEXT:    sel z0.d, p6, z26.d, z4.d
+; CHECK-NEXT:    sel z1.d, p7, z26.d, z6.d
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    sel z2.d, p5, z26.d, z7.d
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    sel z3.d, p1, z26.d, z24.d
 ; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    mov z0.d, p2/m, #0 // =0x0
@@ -181,7 +185,7 @@ define <vscale x 16 x iXLen> @lrint_v16f16(<vscale x 16 x half> %x) {
 ; CHECK-LABEL: lrint_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-3
+; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    str p10, [sp, #1, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Folded Spill
@@ -189,124 +193,122 @@ define <vscale x 16 x iXLen> @lrint_v16f16(<vscale x 16 x half> %x) {
 ; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT:    str z9, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z8, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT:    str z8, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK-NEXT:    .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
 ; CHECK-NEXT:    uunpklo z2.s, z0.h
 ; CHECK-NEXT:    uunpkhi z0.s, z0.h
 ; CHECK-NEXT:    mov w8, #64511 // =0xfbff
 ; CHECK-NEXT:    uunpklo z4.s, z1.h
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    uunpkhi z1.s, z1.h
-; CHECK-NEXT:    mov z5.h, w8
+; CHECK-NEXT:    mov z6.h, w8
 ; CHECK-NEXT:    mov w8, #31743 // =0x7bff
-; CHECK-NEXT:    mov z25.d, #0x8000000000000000
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    mov z24.d, #0x8000000000000000
 ; CHECK-NEXT:    mov z27.h, w8
-; CHECK-NEXT:    mov z7.d, #0x7fffffffffffffff
+; CHECK-NEXT:    mov z26.d, #0x7fffffffffffffff
+; CHECK-NEXT:    uunpklo z5.d, z0.s
 ; CHECK-NEXT:    uunpklo z3.d, z2.s
 ; CHECK-NEXT:    uunpkhi z2.d, z2.s
-; CHECK-NEXT:    uunpklo z6.d, z0.s
 ; CHECK-NEXT:    uunpkhi z0.d, z0.s
-; CHECK-NEXT:    uunpklo z24.d, z4.s
+; CHECK-NEXT:    uunpklo z7.d, z4.s
 ; CHECK-NEXT:    uunpkhi z4.d, z4.s
-; CHECK-NEXT:    uunpklo z26.d, z1.s
+; CHECK-NEXT:    uunpklo z25.d, z1.s
 ; CHECK-NEXT:    uunpkhi z1.d, z1.s
-; CHECK-NEXT:    frintx z2.h, p0/m, z2.h
+; CHECK-NEXT:    frintx z5.h, p0/m, z5.h
 ; CHECK-NEXT:    frintx z3.h, p0/m, z3.h
-; CHECK-NEXT:    frintx z6.h, p0/m, z6.h
+; CHECK-NEXT:    frintx z2.h, p0/m, z2.h
 ; CHECK-NEXT:    movprfx z28, z0
 ; CHECK-NEXT:    frintx z28.h, p0/m, z0.h
 ; CHECK-NEXT:    movprfx z29, z4
 ; CHECK-NEXT:    frintx z29.h, p0/m, z4.h
-; CHECK-NEXT:    frintx z24.h, p0/m, z24.h
+; CHECK-NEXT:    frintx z7.h, p0/m, z7.h
 ; CHECK-NEXT:    movprfx z30, z1
 ; CHECK-NEXT:    frintx z30.h, p0/m, z1.h
-; CHECK-NEXT:    frintx z26.h, p0/m, z26.h
-; CHECK-NEXT:    fcmge p5.h, p0/z, z2.h, z5.h
-; CHECK-NEXT:    fcmge p2.h, p0/z, z3.h, z5.h
-; CHECK-NEXT:    movprfx z1, z2
-; CHECK-NEXT:    fcvtzs z1.d, p0/m, z2.h
+; CHECK-NEXT:    frintx z25.h, p0/m, z25.h
+; CHECK-NEXT:    fcmge p5.h, p0/z, z5.h, z6.h
+; CHECK-NEXT:    fcmge p2.h, p0/z, z3.h, z6.h
+; CHECK-NEXT:    fcmge p4.h, p0/z, z2.h, z6.h
+; CHECK-NEXT:    movprfx z4, z5
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z5.h
 ; CHECK-NEXT:    movprfx z0, z3
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z3.h
-; CHECK-NEXT:    fcmge p6.h, p0/z, z6.h, z5.h
+; CHECK-NEXT:    movprfx z1, z2
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z2.h
+; CHECK-NEXT:    fcmge p7.h, p0/z, z28.h, z6.h
+; CHECK-NEXT:    fcmge p8.h, p0/z, z7.h, z6.h
+; CHECK-NEXT:    fcmge p9.h, p0/z, z29.h, z6.h
 ; CHECK-NEXT:    fcmgt p3.h, p0/z, z3.h, z27.h
-; CHECK-NEXT:    fcmuo p1.h, p0/z, z3.h, z3.h
-; CHECK-NEXT:    fcmge p7.h, p0/z, z28.h, z5.h
-; CHECK-NEXT:    movprfx z3, z6
-; CHECK-NEXT:    fcvtzs z3.d, p0/m, z6.h
-; CHECK-NEXT:    fcmge p8.h, p0/z, z24.h, z5.h
-; CHECK-NEXT:    fcmgt p4.h, p0/z, z2.h, z27.h
-; CHECK-NEXT:    fcmge p9.h, p0/z, z26.h, z5.h
-; CHECK-NEXT:    not p5.b, p0/z, p5.b
-; CHECK-NEXT:    movprfx z4, z24
-; CHECK-NEXT:    fcvtzs z4.d, p0/m, z24.h
-; CHECK-NEXT:    fcmge p10.h, p0/z, z30.h, z5.h
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
-; CHECK-NEXT:    movprfx z31, z26
-; CHECK-NEXT:    fcvtzs z31.d, p0/m, z26.h
+; CHECK-NEXT:    fcmge p10.h, p0/z, z30.h, z6.h
+; CHECK-NEXT:    movprfx z31, z7
+; CHECK-NEXT:    fcvtzs z31.d, p0/m, z7.h
+; CHECK-NEXT:    eor p5.b, p0/z, p5.b, p1.b
 ; CHECK-NEXT:    movprfx z8, z30
 ; CHECK-NEXT:    fcvtzs z8.d, p0/m, z30.h
-; CHECK-NEXT:    mov z1.d, p5/m, z25.d
-; CHECK-NEXT:    fcmge p5.h, p0/z, z29.h, z5.h
-; CHECK-NEXT:    not p6.b, p0/z, p6.b
-; CHECK-NEXT:    mov z0.d, p2/m, z25.d
-; CHECK-NEXT:    fcmuo p2.h, p0/z, z2.h, z2.h
-; CHECK-NEXT:    movprfx z2, z28
-; CHECK-NEXT:    fcvtzs z2.d, p0/m, z28.h
-; CHECK-NEXT:    movprfx z5, z29
-; CHECK-NEXT:    fcvtzs z5.d, p0/m, z29.h
-; CHECK-NEXT:    not p7.b, p0/z, p7.b
-; CHECK-NEXT:    mov z3.d, p6/m, z25.d
-; CHECK-NEXT:    not p6.b, p0/z, p8.b
-; CHECK-NEXT:    fcmgt p8.h, p0/z, z6.h, z27.h
-; CHECK-NEXT:    mov z1.d, p4/m, z7.d
-; CHECK-NEXT:    not p5.b, p0/z, p5.b
-; CHECK-NEXT:    mov z0.d, p3/m, z7.d
-; CHECK-NEXT:    fcmgt p3.h, p0/z, z29.h, z27.h
-; CHECK-NEXT:    sel z9.d, p7, z25.d, z2.d
-; CHECK-NEXT:    not p7.b, p0/z, p9.b
-; CHECK-NEXT:    mov z4.d, p6/m, z25.d
-; CHECK-NEXT:    not p6.b, p0/z, p10.b
+; CHECK-NEXT:    eor p6.b, p0/z, p2.b, p1.b
+; CHECK-NEXT:    fcmuo p2.h, p0/z, z3.h, z3.h
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p1.b
+; CHECK-NEXT:    sel z3.d, p5, z24.d, z4.d
+; CHECK-NEXT:    fcmge p5.h, p0/z, z25.h, z6.h
+; CHECK-NEXT:    mov z0.d, p6/m, z24.d
+; CHECK-NEXT:    fcmgt p6.h, p0/z, z2.h, z27.h
+; CHECK-NEXT:    movprfx z4, z28
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z28.h
+; CHECK-NEXT:    mov z1.d, p4/m, z24.d
+; CHECK-NEXT:    fcmuo p4.h, p0/z, z2.h, z2.h
+; CHECK-NEXT:    movprfx z2, z29
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z29.h
+; CHECK-NEXT:    movprfx z6, z25
+; CHECK-NEXT:    fcvtzs z6.d, p0/m, z25.h
+; CHECK-NEXT:    eor p7.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    eor p8.b, p0/z, p8.b, p1.b
+; CHECK-NEXT:    mov z0.d, p3/m, z26.d
+; CHECK-NEXT:    fcmgt p3.h, p0/z, z25.h, z27.h
+; CHECK-NEXT:    eor p9.b, p0/z, p9.b, p1.b
+; CHECK-NEXT:    mov z4.d, p7/m, z24.d
+; CHECK-NEXT:    fcmgt p7.h, p0/z, z5.h, z27.h
+; CHECK-NEXT:    eor p5.b, p0/z, p5.b, p1.b
+; CHECK-NEXT:    mov z31.d, p8/m, z24.d
+; CHECK-NEXT:    fcmuo p8.h, p0/z, z5.h, z5.h
+; CHECK-NEXT:    eor p1.b, p0/z, p10.b, p1.b
 ; CHECK-NEXT:    fcmgt p10.h, p0/z, z28.h, z27.h
-; CHECK-NEXT:    mov z5.d, p5/m, z25.d
-; CHECK-NEXT:    fcmgt p5.h, p0/z, z24.h, z27.h
-; CHECK-NEXT:    fcmuo p9.h, p0/z, z6.h, z6.h
-; CHECK-NEXT:    sel z6.d, p7, z25.d, z31.d
-; CHECK-NEXT:    sel z25.d, p6, z25.d, z8.d
-; CHECK-NEXT:    ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    fcmgt p6.h, p0/z, z26.h, z27.h
-; CHECK-NEXT:    fcmgt p7.h, p0/z, z30.h, z27.h
-; CHECK-NEXT:    fcmuo p4.h, p0/z, z28.h, z28.h
-; CHECK-NEXT:    sel z2.d, p8, z7.d, z3.d
-; CHECK-NEXT:    sel z3.d, p10, z7.d, z9.d
-; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    fcmuo p8.h, p0/z, z29.h, z29.h
-; CHECK-NEXT:    mov z4.d, p5/m, z7.d
-; CHECK-NEXT:    fcmuo p5.h, p0/z, z24.h, z24.h
-; CHECK-NEXT:    fcmuo p10.h, p0/z, z26.h, z26.h
-; CHECK-NEXT:    mov z5.d, p3/m, z7.d
-; CHECK-NEXT:    mov z6.d, p6/m, z7.d
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    sel z5.d, p9, z24.d, z2.d
+; CHECK-NEXT:    fcmgt p9.h, p0/z, z7.h, z27.h
+; CHECK-NEXT:    mov z6.d, p5/m, z24.d
+; CHECK-NEXT:    fcmgt p5.h, p0/z, z30.h, z27.h
+; CHECK-NEXT:    sel z24.d, p1, z24.d, z8.d
+; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    fcmgt p1.h, p0/z, z29.h, z27.h
+; CHECK-NEXT:    mov z1.d, p6/m, z26.d
+; CHECK-NEXT:    fcmuo p6.h, p0/z, z28.h, z28.h
+; CHECK-NEXT:    sel z2.d, p7, z26.d, z3.d
+; CHECK-NEXT:    sel z3.d, p10, z26.d, z4.d
+; CHECK-NEXT:    fcmuo p7.h, p0/z, z7.h, z7.h
+; CHECK-NEXT:    fcmuo p10.h, p0/z, z25.h, z25.h
+; CHECK-NEXT:    sel z4.d, p9, z26.d, z31.d
+; CHECK-NEXT:    fcmuo p9.h, p0/z, z29.h, z29.h
+; CHECK-NEXT:    mov z6.d, p3/m, z26.d
+; CHECK-NEXT:    mov z5.d, p1/m, z26.d
 ; CHECK-NEXT:    fcmuo p0.h, p0/z, z30.h, z30.h
-; CHECK-NEXT:    sel z7.d, p7, z7.d, z25.d
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    mov z2.d, p9/m, #0 // =0x0
-; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    mov z3.d, p4/m, #0 // =0x0
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    mov z4.d, p5/m, #0 // =0x0
-; CHECK-NEXT:    mov z5.d, p8/m, #0 // =0x0
+; CHECK-NEXT:    sel z7.d, p5, z26.d, z24.d
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    mov z1.d, p4/m, #0 // =0x0
+; CHECK-NEXT:    mov z2.d, p8/m, #0 // =0x0
 ; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    mov z3.d, p6/m, #0 // =0x0
+; CHECK-NEXT:    mov z4.d, p7/m, #0 // =0x0
+; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    mov z5.d, p9/m, #0 // =0x0
 ; CHECK-NEXT:    mov z6.d, p10/m, #0 // =0x0
-; CHECK-NEXT:    mov z0.d, p1/m, #0 // =0x0
 ; CHECK-NEXT:    ldr p10, [sp, #1, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    mov z1.d, p2/m, #0 // =0x0
+; CHECK-NEXT:    mov z0.d, p2/m, #0 // =0x0
 ; CHECK-NEXT:    mov z7.d, p0/m, #0 // =0x0
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    addvl sp, sp, #3
+; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
   %a = call <vscale x 16 x iXLen> @llvm.lrint.nxv16iXLen.nxv16f16(<vscale x 16 x half> %x)
@@ -356,225 +358,227 @@ define <vscale x 32 x iXLen> @lrint_v32f16(<vscale x 32 x half> %x) {
 ; CHECK-NEXT:    uunpkhi z5.s, z0.h
 ; CHECK-NEXT:    mov w9, #64511 // =0xfbff
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    uunpklo z6.s, z1.h
-; CHECK-NEXT:    mov z26.h, w9
-; CHECK-NEXT:    uunpkhi z25.s, z1.h
+; CHECK-NEXT:    uunpklo z24.s, z1.h
+; CHECK-NEXT:    uunpkhi z28.s, z1.h
+; CHECK-NEXT:    mov z27.h, w9
 ; CHECK-NEXT:    mov w9, #31743 // =0x7bff
-; CHECK-NEXT:    mov z27.d, #0x8000000000000000
+; CHECK-NEXT:    mov z16.d, z3.d
 ; CHECK-NEXT:    uunpklo z31.s, z2.h
-; CHECK-NEXT:    uunpkhi z12.s, z2.h
-; CHECK-NEXT:    mov z17.d, z3.d
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    mov z7.d, #0x8000000000000000
 ; CHECK-NEXT:    uunpklo z0.d, z4.s
+; CHECK-NEXT:    uunpklo z6.d, z5.s
 ; CHECK-NEXT:    uunpkhi z4.d, z4.s
-; CHECK-NEXT:    uunpklo z7.d, z5.s
-; CHECK-NEXT:    uunpkhi z24.d, z5.s
-; CHECK-NEXT:    uunpklo z28.d, z6.s
-; CHECK-NEXT:    uunpkhi z29.d, z6.s
-; CHECK-NEXT:    uunpklo z8.d, z25.s
-; CHECK-NEXT:    uunpkhi z9.d, z25.s
-; CHECK-NEXT:    uunpklo z16.s, z17.h
-; CHECK-NEXT:    uunpklo z11.d, z31.s
-; CHECK-NEXT:    uunpkhi z14.d, z31.s
-; CHECK-NEXT:    uunpkhi z17.s, z17.h
-; CHECK-NEXT:    movprfx z30, z4
-; CHECK-NEXT:    frintx z30.h, p0/m, z4.h
-; CHECK-NEXT:    movprfx z4, z7
-; CHECK-NEXT:    frintx z4.h, p0/m, z7.h
-; CHECK-NEXT:    frintx z0.h, p0/m, z0.h
-; CHECK-NEXT:    movprfx z6, z24
-; CHECK-NEXT:    frintx z6.h, p0/m, z24.h
-; CHECK-NEXT:    movprfx z7, z28
-; CHECK-NEXT:    frintx z7.h, p0/m, z28.h
-; CHECK-NEXT:    movprfx z25, z29
-; CHECK-NEXT:    frintx z25.h, p0/m, z29.h
-; CHECK-NEXT:    movprfx z3, z9
-; CHECK-NEXT:    frintx z3.h, p0/m, z9.h
-; CHECK-NEXT:    mov z5.h, w9
-; CHECK-NEXT:    movprfx z31, z11
-; CHECK-NEXT:    frintx z31.h, p0/m, z11.h
-; CHECK-NEXT:    movprfx z9, z14
-; CHECK-NEXT:    frintx z9.h, p0/m, z14.h
-; CHECK-NEXT:    fcmge p1.h, p0/z, z0.h, z26.h
-; CHECK-NEXT:    fcmge p4.h, p0/z, z4.h, z26.h
-; CHECK-NEXT:    movprfx z24, z0
-; CHECK-NEXT:    fcvtzs z24.d, p0/m, z0.h
-; CHECK-NEXT:    fcmge p2.h, p0/z, z30.h, z26.h
-; CHECK-NEXT:    movprfx z29, z4
-; CHECK-NEXT:    fcvtzs z29.d, p0/m, z4.h
-; CHECK-NEXT:    fcmge p6.h, p0/z, z6.h, z26.h
-; CHECK-NEXT:    movprfx z28, z30
-; CHECK-NEXT:    fcvtzs z28.d, p0/m, z30.h
-; CHECK-NEXT:    movprfx z10, z6
-; CHECK-NEXT:    fcvtzs z10.d, p0/m, z6.h
-; CHECK-NEXT:    str z0, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    fcmge p3.h, p0/z, z7.h, z26.h
-; CHECK-NEXT:    movprfx z13, z7
-; CHECK-NEXT:    fcvtzs z13.d, p0/m, z7.h
-; CHECK-NEXT:    movprfx z15, z25
-; CHECK-NEXT:    fcvtzs z15.d, p0/m, z25.h
-; CHECK-NEXT:    not p5.b, p0/z, p1.b
-; CHECK-NEXT:    movprfx z18, z3
-; CHECK-NEXT:    fcvtzs z18.d, p0/m, z3.h
+; CHECK-NEXT:    uunpkhi z25.d, z5.s
+; CHECK-NEXT:    uunpklo z26.d, z24.s
+; CHECK-NEXT:    uunpkhi z24.d, z24.s
+; CHECK-NEXT:    uunpklo z9.d, z28.s
+; CHECK-NEXT:    uunpkhi z10.d, z28.s
+; CHECK-NEXT:    uunpkhi z12.s, z2.h
+; CHECK-NEXT:    uunpkhi z13.d, z31.s
+; CHECK-NEXT:    movprfx z29, z0
+; CHECK-NEXT:    frintx z29.h, p0/m, z0.h
+; CHECK-NEXT:    movprfx z5, z6
+; CHECK-NEXT:    frintx z5.h, p0/m, z6.h
+; CHECK-NEXT:    movprfx z3, z4
+; CHECK-NEXT:    frintx z3.h, p0/m, z4.h
+; CHECK-NEXT:    movprfx z4, z25
+; CHECK-NEXT:    frintx z4.h, p0/m, z25.h
+; CHECK-NEXT:    movprfx z25, z26
+; CHECK-NEXT:    frintx z25.h, p0/m, z26.h
+; CHECK-NEXT:    movprfx z26, z24
+; CHECK-NEXT:    frintx z26.h, p0/m, z24.h
+; CHECK-NEXT:    mov z6.h, w9
+; CHECK-NEXT:    movprfx z28, z9
+; CHECK-NEXT:    frintx z28.h, p0/m, z9.h
+; CHECK-NEXT:    uunpklo z9.d, z31.s
+; CHECK-NEXT:    uunpklo z15.d, z12.s
+; CHECK-NEXT:    uunpkhi z12.d, z12.s
+; CHECK-NEXT:    fcmge p2.h, p0/z, z29.h, z27.h
+; CHECK-NEXT:    fcmge p4.h, p0/z, z5.h, z27.h
+; CHECK-NEXT:    str z29, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    fcmge p3.h, p0/z, z3.h, z27.h
+; CHECK-NEXT:    fcvtzs z29.d, p0/m, z29.h
+; CHECK-NEXT:    movprfx z8, z5
+; CHECK-NEXT:    fcvtzs z8.d, p0/m, z5.h
+; CHECK-NEXT:    movprfx z30, z3
+; CHECK-NEXT:    fcvtzs z30.d, p0/m, z3.h
+; CHECK-NEXT:    fcmge p7.h, p0/z, z4.h, z27.h
+; CHECK-NEXT:    str z3, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    fcmge p8.h, p0/z, z25.h, z27.h
+; CHECK-NEXT:    movprfx z11, z25
+; CHECK-NEXT:    fcvtzs z11.d, p0/m, z25.h
+; CHECK-NEXT:    movprfx z14, z26
+; CHECK-NEXT:    fcvtzs z14.d, p0/m, z26.h
+; CHECK-NEXT:    eor p5.b, p0/z, p2.b, p1.b
+; CHECK-NEXT:    movprfx z31, z9
+; CHECK-NEXT:    frintx z31.h, p0/m, z9.h
+; CHECK-NEXT:    movprfx z9, z13
+; CHECK-NEXT:    frintx z9.h, p0/m, z13.h
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p1.b
+; CHECK-NEXT:    frintx z12.h, p0/m, z12.h
+; CHECK-NEXT:    fcmgt p2.h, p0/z, z5.h, z6.h
+; CHECK-NEXT:    eor p6.b, p0/z, p3.b, p1.b
+; CHECK-NEXT:    fcmgt p3.h, p0/z, z3.h, z6.h
+; CHECK-NEXT:    sel z0.d, p5, z7.d, z29.d
+; CHECK-NEXT:    sel z29.d, p4, z7.d, z8.d
+; CHECK-NEXT:    movprfx z8, z4
+; CHECK-NEXT:    fcvtzs z8.d, p0/m, z4.h
+; CHECK-NEXT:    fcmge p5.h, p0/z, z26.h, z27.h
+; CHECK-NEXT:    movprfx z3, z10
+; CHECK-NEXT:    frintx z3.h, p0/m, z10.h
+; CHECK-NEXT:    uunpklo z10.s, z16.h
+; CHECK-NEXT:    uunpkhi z16.s, z16.h
+; CHECK-NEXT:    mov z30.d, p6/m, z7.d
+; CHECK-NEXT:    eor p6.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    fcmge p7.h, p0/z, z9.h, z27.h
+; CHECK-NEXT:    eor p4.b, p0/z, p8.b, p1.b
 ; CHECK-NEXT:    movprfx z20, z31
 ; CHECK-NEXT:    fcvtzs z20.d, p0/m, z31.h
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
 ; CHECK-NEXT:    movprfx z21, z9
 ; CHECK-NEXT:    fcvtzs z21.d, p0/m, z9.h
-; CHECK-NEXT:    fcmgt p1.h, p0/z, z30.h, z5.h
-; CHECK-NEXT:    sel z0.d, p5, z27.d, z24.d
-; CHECK-NEXT:    not p7.b, p0/z, p2.b
-; CHECK-NEXT:    fcmgt p2.h, p0/z, z4.h, z5.h
-; CHECK-NEXT:    mov z29.d, p4/m, z27.d
-; CHECK-NEXT:    fcmge p4.h, p0/z, z25.h, z26.h
-; CHECK-NEXT:    not p5.b, p0/z, p6.b
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
-; CHECK-NEXT:    fcmge p6.h, p0/z, z9.h, z26.h
-; CHECK-NEXT:    fcmgt p9.h, p0/z, z6.h, z5.h
-; CHECK-NEXT:    str z0, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    sel z0.d, p7, z27.d, z28.d
-; CHECK-NEXT:    movprfx z28, z8
-; CHECK-NEXT:    frintx z28.h, p0/m, z8.h
-; CHECK-NEXT:    sel z8.d, p5, z27.d, z10.d
-; CHECK-NEXT:    uunpklo z10.d, z12.s
-; CHECK-NEXT:    uunpkhi z12.d, z12.s
-; CHECK-NEXT:    not p5.b, p0/z, p4.b
-; CHECK-NEXT:    sel z11.d, p3, z27.d, z13.d
-; CHECK-NEXT:    uunpklo z13.d, z16.s
-; CHECK-NEXT:    fcmge p3.h, p0/z, z3.h, z26.h
-; CHECK-NEXT:    not p6.b, p0/z, p6.b
-; CHECK-NEXT:    str z0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT:    sel z24.d, p5, z27.d, z15.d
-; CHECK-NEXT:    uunpkhi z15.d, z16.s
+; CHECK-NEXT:    mov z8.d, p6/m, z7.d
+; CHECK-NEXT:    eor p6.b, p0/z, p5.b, p1.b
+; CHECK-NEXT:    fcmge p5.h, p0/z, z28.h, z27.h
+; CHECK-NEXT:    uunpklo z13.d, z10.s
+; CHECK-NEXT:    uunpkhi z17.d, z10.s
+; CHECK-NEXT:    movprfx z10, z15
+; CHECK-NEXT:    frintx z10.h, p0/m, z15.h
+; CHECK-NEXT:    uunpklo z15.d, z16.s
+; CHECK-NEXT:    uunpkhi z16.d, z16.s
+; CHECK-NEXT:    movprfx z18, z3
+; CHECK-NEXT:    fcvtzs z18.d, p0/m, z3.h
+; CHECK-NEXT:    mov z11.d, p4/m, z7.d
+; CHECK-NEXT:    sel z24.d, p6, z7.d, z14.d
 ; CHECK-NEXT:    movprfx z14, z28
 ; CHECK-NEXT:    fcvtzs z14.d, p0/m, z28.h
-; CHECK-NEXT:    frintx z10.h, p0/m, z10.h
-; CHECK-NEXT:    uunpklo z16.d, z17.s
-; CHECK-NEXT:    frintx z12.h, p0/m, z12.h
-; CHECK-NEXT:    uunpkhi z17.d, z17.s
+; CHECK-NEXT:    fcmge p4.h, p0/z, z3.h, z27.h
+; CHECK-NEXT:    fcmge p6.h, p0/z, z31.h, z27.h
+; CHECK-NEXT:    str z0, [sp, #2, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    movprfx z19, z13
 ; CHECK-NEXT:    frintx z19.h, p0/m, z13.h
-; CHECK-NEXT:    fcmge p4.h, p0/z, z28.h, z26.h
-; CHECK-NEXT:    fcmge p5.h, p0/z, z31.h, z26.h
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
+; CHECK-NEXT:    frintx z17.h, p0/m, z17.h
+; CHECK-NEXT:    fcmge p8.h, p0/z, z10.h, z27.h
 ; CHECK-NEXT:    frintx z15.h, p0/m, z15.h
-; CHECK-NEXT:    fcmge p7.h, p0/z, z10.h, z26.h
+; CHECK-NEXT:    fcmge p9.h, p0/z, z12.h, z27.h
 ; CHECK-NEXT:    frintx z16.h, p0/m, z16.h
-; CHECK-NEXT:    fcmge p8.h, p0/z, z12.h, z26.h
-; CHECK-NEXT:    frintx z17.h, p0/m, z17.h
-; CHECK-NEXT:    movprfx z23, z19
-; CHECK-NEXT:    fcvtzs z23.d, p0/m, z19.h
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
-; CHECK-NEXT:    not p5.b, p0/z, p5.b
-; CHECK-NEXT:    sel z13.d, p3, z27.d, z18.d
-; CHECK-NEXT:    fcmge p3.h, p0/z, z19.h, z26.h
-; CHECK-NEXT:    movprfx z0, z15
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z15.h
-; CHECK-NEXT:    sel z22.d, p4, z27.d, z14.d
-; CHECK-NEXT:    sel z18.d, p6, z27.d, z21.d
-; CHECK-NEXT:    movprfx z21, z12
-; CHECK-NEXT:    fcvtzs z21.d, p0/m, z12.h
-; CHECK-NEXT:    movprfx z1, z16
-; CHECK-NEXT:    fcvtzs z1.d, p0/m, z16.h
-; CHECK-NEXT:    sel z14.d, p5, z27.d, z20.d
-; CHECK-NEXT:    fcmge p4.h, p0/z, z15.h, z26.h
+; CHECK-NEXT:    eor p5.b, p0/z, p5.b, p1.b
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p1.b
+; CHECK-NEXT:    eor p6.b, p0/z, p6.b, p1.b
+; CHECK-NEXT:    sel z22.d, p5, z7.d, z14.d
+; CHECK-NEXT:    fcmge p5.h, p0/z, z17.h, z27.h
+; CHECK-NEXT:    eor p7.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    sel z13.d, p4, z7.d, z18.d
+; CHECK-NEXT:    movprfx z2, z16
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z16.h
+; CHECK-NEXT:    fcmge p4.h, p0/z, z19.h, z27.h
+; CHECK-NEXT:    sel z14.d, p6, z7.d, z20.d
+; CHECK-NEXT:    movprfx z1, z15
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z15.h
+; CHECK-NEXT:    sel z18.d, p7, z7.d, z21.d
+; CHECK-NEXT:    eor p6.b, p0/z, p8.b, p1.b
 ; CHECK-NEXT:    movprfx z20, z10
 ; CHECK-NEXT:    fcvtzs z20.d, p0/m, z10.h
-; CHECK-NEXT:    movprfx z2, z17
-; CHECK-NEXT:    fcvtzs z2.d, p0/m, z17.h
-; CHECK-NEXT:    not p5.b, p0/z, p7.b
-; CHECK-NEXT:    fcmge p6.h, p0/z, z16.h, z26.h
-; CHECK-NEXT:    not p7.b, p0/z, p8.b
-; CHECK-NEXT:    fcmge p8.h, p0/z, z17.h, z26.h
-; CHECK-NEXT:    mov z26.d, #0x7fffffffffffffff
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
-; CHECK-NEXT:    mov z20.d, p5/m, z27.d
-; CHECK-NEXT:    mov z21.d, p7/m, z27.d
-; CHECK-NEXT:    not p5.b, p0/z, p6.b
-; CHECK-NEXT:    mov z23.d, p3/m, z27.d
-; CHECK-NEXT:    fcmgt p3.h, p0/z, z17.h, z5.h
-; CHECK-NEXT:    not p6.b, p0/z, p8.b
-; CHECK-NEXT:    mov z0.d, p4/m, z27.d
-; CHECK-NEXT:    fcmgt p4.h, p0/z, z16.h, z5.h
-; CHECK-NEXT:    mov z1.d, p5/m, z27.d
-; CHECK-NEXT:    fcmuo p5.h, p0/z, z16.h, z16.h
-; CHECK-NEXT:    mov z29.d, p2/m, z26.d
-; CHECK-NEXT:    mov z2.d, p6/m, z27.d
-; CHECK-NEXT:    ldr z27, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    fcmgt p6.h, p0/z, z7.h, z5.h
-; CHECK-NEXT:    fcmgt p2.h, p0/z, z12.h, z5.h
-; CHECK-NEXT:    fcmuo p8.h, p0/z, z17.h, z17.h
-; CHECK-NEXT:    fcmgt p7.h, p0/z, z28.h, z5.h
-; CHECK-NEXT:    mov z1.d, p4/m, z26.d
-; CHECK-NEXT:    fcmuo p4.h, p0/z, z15.h, z15.h
-; CHECK-NEXT:    mov z8.d, p9/m, z26.d
-; CHECK-NEXT:    mov z27.d, p1/m, z26.d
-; CHECK-NEXT:    fcmgt p1.h, p0/z, z15.h, z5.h
-; CHECK-NEXT:    mov z2.d, p3/m, z26.d
-; CHECK-NEXT:    fcmgt p3.h, p0/z, z19.h, z5.h
-; CHECK-NEXT:    mov z11.d, p6/m, z26.d
+; CHECK-NEXT:    fcmge p7.h, p0/z, z15.h, z27.h
+; CHECK-NEXT:    eor p8.b, p0/z, p9.b, p1.b
+; CHECK-NEXT:    movprfx z21, z12
+; CHECK-NEXT:    fcvtzs z21.d, p0/m, z12.h
+; CHECK-NEXT:    fcmge p9.h, p0/z, z16.h, z27.h
+; CHECK-NEXT:    movprfx z23, z19
+; CHECK-NEXT:    fcvtzs z23.d, p0/m, z19.h
+; CHECK-NEXT:    movprfx z0, z17
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z17.h
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p1.b
+; CHECK-NEXT:    mov z27.d, #0x7fffffffffffffff
+; CHECK-NEXT:    eor p5.b, p0/z, p5.b, p1.b
+; CHECK-NEXT:    mov z20.d, p6/m, z7.d
+; CHECK-NEXT:    fcmgt p6.h, p0/z, z4.h, z6.h
+; CHECK-NEXT:    eor p7.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    mov z21.d, p8/m, z7.d
+; CHECK-NEXT:    fcmgt p8.h, p0/z, z28.h, z6.h
+; CHECK-NEXT:    eor p1.b, p0/z, p9.b, p1.b
+; CHECK-NEXT:    mov z23.d, p4/m, z7.d
+; CHECK-NEXT:    mov z0.d, p5/m, z7.d
+; CHECK-NEXT:    mov z1.d, p7/m, z7.d
+; CHECK-NEXT:    fcmgt p4.h, p0/z, z16.h, z6.h
+; CHECK-NEXT:    fcmuo p5.h, p0/z, z15.h, z15.h
+; CHECK-NEXT:    mov z2.d, p1/m, z7.d
+; CHECK-NEXT:    fcmgt p1.h, p0/z, z15.h, z6.h
+; CHECK-NEXT:    sel z7.d, p3, z27.d, z30.d
+; CHECK-NEXT:    fcmgt p3.h, p0/z, z17.h, z6.h
+; CHECK-NEXT:    fcmuo p9.h, p0/z, z16.h, z16.h
+; CHECK-NEXT:    mov z29.d, p2/m, z27.d
+; CHECK-NEXT:    fcmgt p2.h, p0/z, z12.h, z6.h
+; CHECK-NEXT:    sel z30.d, p6, z27.d, z8.d
 ; CHECK-NEXT:    fcmuo p6.h, p0/z, z19.h, z19.h
+; CHECK-NEXT:    mov z2.d, p4/m, z27.d
+; CHECK-NEXT:    fcmuo p4.h, p0/z, z17.h, z17.h
+; CHECK-NEXT:    fcmgt p7.h, p0/z, z25.h, z6.h
+; CHECK-NEXT:    mov z1.d, p1/m, z27.d
+; CHECK-NEXT:    fcmgt p1.h, p0/z, z19.h, z6.h
+; CHECK-NEXT:    mov z0.d, p3/m, z27.d
+; CHECK-NEXT:    fcmgt p3.h, p0/z, z10.h, z6.h
+; CHECK-NEXT:    mov z2.d, p9/m, #0 // =0x0
+; CHECK-NEXT:    sel z15.d, p2, z27.d, z21.d
+; CHECK-NEXT:    fcmuo p2.h, p0/z, z10.h, z10.h
 ; CHECK-NEXT:    mov z1.d, p5/m, #0 // =0x0
-; CHECK-NEXT:    fcmgt p5.h, p0/z, z9.h, z5.h
-; CHECK-NEXT:    sel z15.d, p2, z26.d, z21.d
-; CHECK-NEXT:    fcmuo p2.h, p0/z, z12.h, z12.h
-; CHECK-NEXT:    mov z2.d, p8/m, #0 // =0x0
-; CHECK-NEXT:    sel z16.d, p7, z26.d, z22.d
-; CHECK-NEXT:    mov z0.d, p1/m, z26.d
-; CHECK-NEXT:    fcmgt p1.h, p0/z, z10.h, z5.h
-; CHECK-NEXT:    str z1, [x8, #14, mul vl]
-; CHECK-NEXT:    sel z17.d, p3, z26.d, z23.d
-; CHECK-NEXT:    fcmuo p3.h, p0/z, z10.h, z10.h
-; CHECK-NEXT:    str z2, [x8, #15, mul vl]
-; CHECK-NEXT:    sel z2.d, p5, z26.d, z18.d
-; CHECK-NEXT:    fcmuo p5.h, p0/z, z9.h, z9.h
+; CHECK-NEXT:    fcmgt p5.h, p0/z, z9.h, z6.h
+; CHECK-NEXT:    sel z8.d, p7, z27.d, z11.d
+; CHECK-NEXT:    sel z16.d, p1, z27.d, z23.d
+; CHECK-NEXT:    fcmuo p1.h, p0/z, z12.h, z12.h
 ; CHECK-NEXT:    mov z0.d, p4/m, #0 // =0x0
-; CHECK-NEXT:    fcmgt p4.h, p0/z, z3.h, z5.h
-; CHECK-NEXT:    mov z15.d, p2/m, #0 // =0x0
-; CHECK-NEXT:    sel z1.d, p1, z26.d, z20.d
-; CHECK-NEXT:    fcmgt p1.h, p0/z, z31.h, z5.h
-; CHECK-NEXT:    mov z17.d, p6/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p2.h, p0/z, z31.h, z31.h
+; CHECK-NEXT:    str z2, [x8, #15, mul vl]
+; CHECK-NEXT:    fcmgt p4.h, p0/z, z3.h, z6.h
+; CHECK-NEXT:    sel z11.d, p8, z27.d, z22.d
+; CHECK-NEXT:    str z1, [x8, #14, mul vl]
+; CHECK-NEXT:    sel z1.d, p3, z27.d, z20.d
+; CHECK-NEXT:    fcmgt p3.h, p0/z, z31.h, z6.h
 ; CHECK-NEXT:    str z0, [x8, #13, mul vl]
+; CHECK-NEXT:    sel z2.d, p5, z27.d, z18.d
+; CHECK-NEXT:    fcmuo p5.h, p0/z, z9.h, z9.h
+; CHECK-NEXT:    mov z15.d, p1/m, #0 // =0x0
+; CHECK-NEXT:    fcmuo p1.h, p0/z, z31.h, z31.h
+; CHECK-NEXT:    mov z16.d, p6/m, #0 // =0x0
+; CHECK-NEXT:    mov z1.d, p2/m, #0 // =0x0
+; CHECK-NEXT:    fcmgt p2.h, p0/z, z26.h, z6.h
+; CHECK-NEXT:    sel z0.d, p3, z27.d, z14.d
+; CHECK-NEXT:    fcmuo p3.h, p0/z, z3.h, z3.h
+; CHECK-NEXT:    sel z3.d, p4, z27.d, z13.d
+; CHECK-NEXT:    str z16, [x8, #12, mul vl]
 ; CHECK-NEXT:    mov z2.d, p5/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p5.h, p0/z, z25.h, z25.h
-; CHECK-NEXT:    str z17, [x8, #12, mul vl]
-; CHECK-NEXT:    mov z1.d, p3/m, #0 // =0x0
-; CHECK-NEXT:    fcmgt p3.h, p0/z, z25.h, z5.h
+; CHECK-NEXT:    fcmuo p5.h, p0/z, z26.h, z26.h
 ; CHECK-NEXT:    str z15, [x8, #11, mul vl]
-; CHECK-NEXT:    sel z0.d, p1, z26.d, z14.d
-; CHECK-NEXT:    fcmuo p1.h, p0/z, z3.h, z3.h
-; CHECK-NEXT:    sel z3.d, p4, z26.d, z13.d
 ; CHECK-NEXT:    fcmuo p4.h, p0/z, z28.h, z28.h
 ; CHECK-NEXT:    str z1, [x8, #10, mul vl]
-; CHECK-NEXT:    sel z1.d, p3, z26.d, z24.d
-; CHECK-NEXT:    fcmuo p3.h, p0/z, z7.h, z7.h
-; CHECK-NEXT:    ldr z7, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    mov z0.d, p1/m, #0 // =0x0
+; CHECK-NEXT:    sel z1.d, p2, z27.d, z24.d
+; CHECK-NEXT:    ldr z24, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    str z2, [x8, #9, mul vl]
-; CHECK-NEXT:    mov z0.d, p2/m, #0 // =0x0
-; CHECK-NEXT:    mov z3.d, p1/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p1.h, p0/z, z6.h, z6.h
-; CHECK-NEXT:    mov z16.d, p4/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p4.h, p0/z, z4.h, z4.h
-; CHECK-NEXT:    fcmgt p2.h, p0/z, z7.h, z5.h
-; CHECK-NEXT:    mov z1.d, p5/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p5.h, p0/z, z30.h, z30.h
+; CHECK-NEXT:    mov z3.d, p3/m, #0 // =0x0
+; CHECK-NEXT:    fcmuo p2.h, p0/z, z25.h, z25.h
+; CHECK-NEXT:    fcmuo p3.h, p0/z, z4.h, z4.h
 ; CHECK-NEXT:    str z0, [x8, #8, mul vl]
-; CHECK-NEXT:    fcmuo p0.h, p0/z, z7.h, z7.h
-; CHECK-NEXT:    mov z11.d, p3/m, #0 // =0x0
+; CHECK-NEXT:    mov z1.d, p5/m, #0 // =0x0
+; CHECK-NEXT:    mov z11.d, p4/m, #0 // =0x0
+; CHECK-NEXT:    ldr z0, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    str z3, [x8, #7, mul vl]
+; CHECK-NEXT:    fcmgt p1.h, p0/z, z24.h, z6.h
+; CHECK-NEXT:    fcmuo p4.h, p0/z, z5.h, z5.h
+; CHECK-NEXT:    mov z8.d, p2/m, #0 // =0x0
+; CHECK-NEXT:    mov z30.d, p3/m, #0 // =0x0
+; CHECK-NEXT:    fcmuo p5.h, p0/z, z0.h, z0.h
 ; CHECK-NEXT:    ldr z0, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    str z16, [x8, #6, mul vl]
-; CHECK-NEXT:    mov z8.d, p1/m, #0 // =0x0
+; CHECK-NEXT:    str z11, [x8, #6, mul vl]
 ; CHECK-NEXT:    str z1, [x8, #5, mul vl]
+; CHECK-NEXT:    fcmuo p0.h, p0/z, z24.h, z24.h
+; CHECK-NEXT:    str z8, [x8, #4, mul vl]
 ; CHECK-NEXT:    mov z29.d, p4/m, #0 // =0x0
-; CHECK-NEXT:    mov z27.d, p5/m, #0 // =0x0
-; CHECK-NEXT:    str z11, [x8, #4, mul vl]
-; CHECK-NEXT:    str z8, [x8, #3, mul vl]
-; CHECK-NEXT:    mov z0.d, p2/m, z26.d
+; CHECK-NEXT:    str z30, [x8, #3, mul vl]
+; CHECK-NEXT:    mov z0.d, p1/m, z27.d
+; CHECK-NEXT:    mov z7.d, p5/m, #0 // =0x0
 ; CHECK-NEXT:    str z29, [x8, #2, mul vl]
-; CHECK-NEXT:    str z27, [x8, #1, mul vl]
 ; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
+; CHECK-NEXT:    str z7, [x8, #1, mul vl]
 ; CHECK-NEXT:    str z0, [x8]
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
@@ -615,14 +619,15 @@ define <vscale x 1 x iXLen> @lrint_v1f32(<vscale x 1 x float> %x) {
 ; CHECK-NEXT:    mov z2.d, #0x8000000000000000
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    mov w8, #1593835519 // =0x5effffff
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    frintx z0.s, p0/m, z0.s
 ; CHECK-NEXT:    mov z3.s, w8
 ; CHECK-NEXT:    fcmge p1.s, p0/z, z0.s, z1.s
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z0.s
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p2.s, p0/z, z0.s, z3.s
 ; CHECK-NEXT:    mov z3.d, #0x7fffffffffffffff
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
 ; CHECK-NEXT:    fcmuo p0.s, p0/z, z0.s, z0.s
 ; CHECK-NEXT:    mov z1.d, p1/m, z2.d
 ; CHECK-NEXT:    sel z0.d, p2, z3.d, z1.d
@@ -641,14 +646,15 @@ define <vscale x 2 x iXLen> @lrint_v2f32(<vscale x 2 x float> %x) {
 ; CHECK-NEXT:    mov z2.d, #0x8000000000000000
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    mov w8, #1593835519 // =0x5effffff
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    frintx z0.s, p0/m, z0.s
 ; CHECK-NEXT:    mov z3.s, w8
 ; CHECK-NEXT:    fcmge p1.s, p0/z, z0.s, z1.s
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z0.s
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p2.s, p0/z, z0.s, z3.s
 ; CHECK-NEXT:    mov z3.d, #0x7fffffffffffffff
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
 ; CHECK-NEXT:    fcmuo p0.s, p0/z, z0.s, z0.s
 ; CHECK-NEXT:    mov z1.d, p1/m, z2.d
 ; CHECK-NEXT:    sel z0.d, p2, z3.d, z1.d
@@ -673,6 +679,7 @@ define <vscale x 4 x iXLen> @lrint_v4f32(<vscale x 4 x float> %x) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov z2.s, w8
 ; CHECK-NEXT:    mov w8, #1593835519 // =0x5effffff
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    mov z3.s, w8
 ; CHECK-NEXT:    mov z6.d, #0x7fffffffffffffff
 ; CHECK-NEXT:    frintx z1.s, p0/m, z1.s
@@ -684,10 +691,10 @@ define <vscale x 4 x iXLen> @lrint_v4f32(<vscale x 4 x float> %x) {
 ; CHECK-NEXT:    fcvtzs z4.d, p0/m, z1.s
 ; CHECK-NEXT:    movprfx z5, z0
 ; CHECK-NEXT:    fcvtzs z5.d, p0/m, z0.s
-; CHECK-NEXT:    fcmgt p3.s, p0/z, z1.s, z3.s
 ; CHECK-NEXT:    fcmgt p4.s, p0/z, z0.s, z3.s
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
+; CHECK-NEXT:    fcmgt p3.s, p0/z, z1.s, z3.s
 ; CHECK-NEXT:    sel z3.d, p1, z2.d, z4.d
 ; CHECK-NEXT:    fcmuo p1.s, p0/z, z1.s, z1.s
 ; CHECK-NEXT:    fcmuo p0.s, p0/z, z0.s, z0.s
@@ -720,10 +727,11 @@ define <vscale x 8 x iXLen> @lrint_v8f32(<vscale x 8 x float> %x) {
 ; CHECK-NEXT:    uunpkhi z0.d, z0.s
 ; CHECK-NEXT:    mov w8, #-553648128 // =0xdf000000
 ; CHECK-NEXT:    uunpklo z3.d, z1.s
-; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    uunpkhi z1.d, z1.s
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov z4.s, w8
 ; CHECK-NEXT:    mov w8, #1593835519 // =0x5effffff
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    mov z5.d, #0x8000000000000000
 ; CHECK-NEXT:    mov z6.s, w8
 ; CHECK-NEXT:    mov z26.d, #0x7fffffffffffffff
@@ -735,35 +743,35 @@ define <vscale x 8 x iXLen> @lrint_v8f32(<vscale x 8 x float> %x) {
 ; CHECK-NEXT:    fcmge p2.s, p0/z, z0.s, z4.s
 ; CHECK-NEXT:    movprfx z7, z0
 ; CHECK-NEXT:    fcvtzs z7.d, p0/m, z0.s
-; CHECK-NEXT:    fcmge p3.s, p0/z, z3.s, z4.s
-; CHECK-NEXT:    fcmge p4.s, p0/z, z1.s, z4.s
+; CHECK-NEXT:    fcmge p4.s, p0/z, z3.s, z4.s
+; CHECK-NEXT:    fcmge p5.s, p0/z, z1.s, z4.s
 ; CHECK-NEXT:    movprfx z4, z2
 ; CHECK-NEXT:    fcvtzs z4.d, p0/m, z2.s
 ; CHECK-NEXT:    movprfx z24, z3
 ; CHECK-NEXT:    fcvtzs z24.d, p0/m, z3.s
 ; CHECK-NEXT:    movprfx z25, z1
 ; CHECK-NEXT:    fcvtzs z25.d, p0/m, z1.s
-; CHECK-NEXT:    fcmgt p7.s, p0/z, z3.s, z6.s
-; CHECK-NEXT:    fcmgt p5.s, p0/z, z2.s, z6.s
-; CHECK-NEXT:    fcmgt p6.s, p0/z, z0.s, z6.s
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
+; CHECK-NEXT:    fcmgt p6.s, p0/z, z2.s, z6.s
+; CHECK-NEXT:    fcmgt p7.s, p0/z, z0.s, z6.s
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p3.b
 ; CHECK-NEXT:    mov z4.d, p1/m, z5.d
 ; CHECK-NEXT:    fcmgt p1.s, p0/z, z1.s, z6.s
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
+; CHECK-NEXT:    eor p3.b, p0/z, p5.b, p3.b
+; CHECK-NEXT:    fcmgt p5.s, p0/z, z3.s, z6.s
 ; CHECK-NEXT:    sel z6.d, p2, z5.d, z7.d
+; CHECK-NEXT:    sel z7.d, p4, z5.d, z24.d
+; CHECK-NEXT:    fcmuo p4.s, p0/z, z3.s, z3.s
 ; CHECK-NEXT:    fcmuo p2.s, p0/z, z2.s, z2.s
-; CHECK-NEXT:    sel z7.d, p3, z5.d, z24.d
+; CHECK-NEXT:    sel z5.d, p3, z5.d, z25.d
 ; CHECK-NEXT:    fcmuo p3.s, p0/z, z0.s, z0.s
-; CHECK-NEXT:    sel z5.d, p4, z5.d, z25.d
-; CHECK-NEXT:    fcmuo p4.s, p0/z, z3.s, z3.s
 ; CHECK-NEXT:    fcmuo p0.s, p0/z, z1.s, z1.s
-; CHECK-NEXT:    sel z0.d, p5, z26.d, z4.d
-; CHECK-NEXT:    sel z1.d, p6, z26.d, z6.d
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z2.d, p7, z26.d, z7.d
+; CHECK-NEXT:    sel z0.d, p6, z26.d, z4.d
+; CHECK-NEXT:    sel z1.d, p7, z26.d, z6.d
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    sel z2.d, p5, z26.d, z7.d
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    sel z3.d, p1, z26.d, z5.d
 ; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    mov z0.d, p2/m, #0 // =0x0
@@ -783,7 +791,7 @@ define <vscale x 16 x iXLen> @lrint_v16f32(<vscale x 16 x float> %x) {
 ; CHECK-LABEL: lrint_v16f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
+; CHECK-NEXT:    addvl sp, sp, #-4
 ; CHECK-NEXT:    str p10, [sp, #1, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Folded Spill
@@ -791,119 +799,125 @@ define <vscale x 16 x iXLen> @lrint_v16f32(<vscale x 16 x float> %x) {
 ; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT:    str z8, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT:    str z10, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z8, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
+; CHECK-NEXT:    .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
+; CHECK-NEXT:    .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
 ; CHECK-NEXT:    uunpklo z4.d, z0.s
-; CHECK-NEXT:    uunpkhi z0.d, z0.s
+; CHECK-NEXT:    uunpkhi z6.d, z0.s
 ; CHECK-NEXT:    mov w8, #-553648128 // =0xdf000000
-; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    uunpklo z7.d, z1.s
-; CHECK-NEXT:    uunpkhi z1.d, z1.s
-; CHECK-NEXT:    uunpklo z24.d, z2.s
-; CHECK-NEXT:    uunpkhi z2.d, z2.s
-; CHECK-NEXT:    uunpklo z25.d, z3.s
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    uunpkhi z24.d, z1.s
+; CHECK-NEXT:    uunpkhi z25.d, z2.s
+; CHECK-NEXT:    uunpklo z26.d, z3.s
 ; CHECK-NEXT:    uunpkhi z3.d, z3.s
-; CHECK-NEXT:    mov z26.d, #0x7fffffffffffffff
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    mov z0.d, #0x8000000000000000
 ; CHECK-NEXT:    movprfx z5, z4
 ; CHECK-NEXT:    frintx z5.s, p0/m, z4.s
-; CHECK-NEXT:    movprfx z6, z0
-; CHECK-NEXT:    frintx z6.s, p0/m, z0.s
+; CHECK-NEXT:    movprfx z1, z6
+; CHECK-NEXT:    frintx z1.s, p0/m, z6.s
+; CHECK-NEXT:    uunpklo z6.d, z2.s
+; CHECK-NEXT:    movprfx z2, z7
+; CHECK-NEXT:    frintx z2.s, p0/m, z7.s
 ; CHECK-NEXT:    mov z4.s, w8
-; CHECK-NEXT:    frintx z7.s, p0/m, z7.s
-; CHECK-NEXT:    movprfx z28, z1
-; CHECK-NEXT:    frintx z28.s, p0/m, z1.s
 ; CHECK-NEXT:    mov w8, #1593835519 // =0x5effffff
-; CHECK-NEXT:    mov z0.d, #0x8000000000000000
 ; CHECK-NEXT:    frintx z24.s, p0/m, z24.s
-; CHECK-NEXT:    movprfx z29, z2
-; CHECK-NEXT:    frintx z29.s, p0/m, z2.s
 ; CHECK-NEXT:    frintx z25.s, p0/m, z25.s
-; CHECK-NEXT:    movprfx z30, z3
-; CHECK-NEXT:    frintx z30.s, p0/m, z3.s
+; CHECK-NEXT:    movprfx z31, z3
+; CHECK-NEXT:    frintx z31.s, p0/m, z3.s
+; CHECK-NEXT:    frintx z26.s, p0/m, z26.s
 ; CHECK-NEXT:    mov z27.s, w8
-; CHECK-NEXT:    fcmge p1.s, p0/z, z5.s, z4.s
-; CHECK-NEXT:    fcmge p2.s, p0/z, z6.s, z4.s
-; CHECK-NEXT:    movprfx z1, z5
-; CHECK-NEXT:    fcvtzs z1.d, p0/m, z5.s
-; CHECK-NEXT:    movprfx z2, z6
-; CHECK-NEXT:    fcvtzs z2.d, p0/m, z6.s
-; CHECK-NEXT:    fcmge p5.s, p0/z, z7.s, z4.s
-; CHECK-NEXT:    fcmge p6.s, p0/z, z28.s, z4.s
-; CHECK-NEXT:    movprfx z3, z7
-; CHECK-NEXT:    fcvtzs z3.d, p0/m, z7.s
-; CHECK-NEXT:    fcmge p8.s, p0/z, z29.s, z4.s
-; CHECK-NEXT:    fcmgt p3.s, p0/z, z5.s, z27.s
-; CHECK-NEXT:    fcmgt p7.s, p0/z, z6.s, z27.s
+; CHECK-NEXT:    mov z7.d, #0x7fffffffffffffff
+; CHECK-NEXT:    fcmge p2.s, p0/z, z5.s, z4.s
+; CHECK-NEXT:    movprfx z28, z5
+; CHECK-NEXT:    fcvtzs z28.d, p0/m, z5.s
+; CHECK-NEXT:    movprfx z29, z1
+; CHECK-NEXT:    fcvtzs z29.d, p0/m, z1.s
+; CHECK-NEXT:    fcmge p5.s, p0/z, z2.s, z4.s
+; CHECK-NEXT:    fcmge p4.s, p0/z, z1.s, z4.s
+; CHECK-NEXT:    movprfx z30, z2
+; CHECK-NEXT:    fcvtzs z30.d, p0/m, z2.s
+; CHECK-NEXT:    frintx z6.s, p0/m, z6.s
+; CHECK-NEXT:    fcmge p6.s, p0/z, z24.s, z4.s
+; CHECK-NEXT:    movprfx z8, z31
+; CHECK-NEXT:    fcvtzs z8.d, p0/m, z31.s
 ; CHECK-NEXT:    fcmge p9.s, p0/z, z25.s, z4.s
-; CHECK-NEXT:    movprfx z31, z25
-; CHECK-NEXT:    fcvtzs z31.d, p0/m, z25.s
-; CHECK-NEXT:    not p4.b, p0/z, p1.b
-; CHECK-NEXT:    fcmuo p1.s, p0/z, z5.s, z5.s
-; CHECK-NEXT:    movprfx z5, z28
-; CHECK-NEXT:    fcvtzs z5.d, p0/m, z28.s
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
-; CHECK-NEXT:    fcmge p10.s, p0/z, z30.s, z4.s
-; CHECK-NEXT:    movprfx z8, z30
-; CHECK-NEXT:    fcvtzs z8.d, p0/m, z30.s
-; CHECK-NEXT:    mov z1.d, p4/m, z0.d
-; CHECK-NEXT:    fcmge p4.s, p0/z, z24.s, z4.s
-; CHECK-NEXT:    movprfx z4, z29
-; CHECK-NEXT:    fcvtzs z4.d, p0/m, z29.s
-; CHECK-NEXT:    mov z2.d, p2/m, z0.d
-; CHECK-NEXT:    fcmuo p2.s, p0/z, z6.s, z6.s
-; CHECK-NEXT:    movprfx z6, z24
-; CHECK-NEXT:    fcvtzs z6.d, p0/m, z24.s
-; CHECK-NEXT:    not p5.b, p0/z, p5.b
-; CHECK-NEXT:    not p6.b, p0/z, p6.b
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
-; CHECK-NEXT:    mov z3.d, p5/m, z0.d
-; CHECK-NEXT:    not p5.b, p0/z, p8.b
-; CHECK-NEXT:    mov z5.d, p6/m, z0.d
-; CHECK-NEXT:    fcmgt p8.s, p0/z, z7.s, z27.s
-; CHECK-NEXT:    not p6.b, p0/z, p9.b
-; CHECK-NEXT:    mov z6.d, p4/m, z0.d
-; CHECK-NEXT:    fcmuo p9.s, p0/z, z7.s, z7.s
-; CHECK-NEXT:    not p4.b, p0/z, p10.b
-; CHECK-NEXT:    fcmgt p10.s, p0/z, z28.s, z27.s
-; CHECK-NEXT:    sel z7.d, p5, z0.d, z4.d
-; CHECK-NEXT:    fcmgt p5.s, p0/z, z24.s, z27.s
-; CHECK-NEXT:    mov z31.d, p6/m, z0.d
-; CHECK-NEXT:    fcmgt p6.s, p0/z, z30.s, z27.s
-; CHECK-NEXT:    mov z8.d, p4/m, z0.d
-; CHECK-NEXT:    sel z0.d, p3, z26.d, z1.d
-; CHECK-NEXT:    fcmgt p3.s, p0/z, z29.s, z27.s
-; CHECK-NEXT:    fcmgt p4.s, p0/z, z25.s, z27.s
-; CHECK-NEXT:    sel z1.d, p7, z26.d, z2.d
-; CHECK-NEXT:    fcmuo p7.s, p0/z, z28.s, z28.s
-; CHECK-NEXT:    sel z2.d, p8, z26.d, z3.d
-; CHECK-NEXT:    sel z3.d, p10, z26.d, z5.d
-; CHECK-NEXT:    fcmuo p8.s, p0/z, z29.s, z29.s
-; CHECK-NEXT:    sel z4.d, p5, z26.d, z6.d
-; CHECK-NEXT:    fcmuo p5.s, p0/z, z24.s, z24.s
-; CHECK-NEXT:    fcmuo p10.s, p0/z, z25.s, z25.s
-; CHECK-NEXT:    sel z5.d, p3, z26.d, z7.d
-; CHECK-NEXT:    fcmuo p0.s, p0/z, z30.s, z30.s
-; CHECK-NEXT:    sel z7.d, p6, z26.d, z8.d
-; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z6.d, p4, z26.d, z31.d
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    mov z2.d, p9/m, #0 // =0x0
-; CHECK-NEXT:    mov z3.d, p7/m, #0 // =0x0
-; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    mov z4.d, p5/m, #0 // =0x0
-; CHECK-NEXT:    mov z5.d, p8/m, #0 // =0x0
+; CHECK-NEXT:    fcmgt p3.s, p0/z, z5.s, z27.s
+; CHECK-NEXT:    fcmge p10.s, p0/z, z31.s, z4.s
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p1.b
+; CHECK-NEXT:    fcmgt p8.s, p0/z, z1.s, z27.s
+; CHECK-NEXT:    eor p5.b, p0/z, p5.b, p1.b
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p1.b
+; CHECK-NEXT:    mov z28.d, p2/m, z0.d
+; CHECK-NEXT:    fcmuo p2.s, p0/z, z5.s, z5.s
+; CHECK-NEXT:    fcmge p7.s, p0/z, z6.s, z4.s
+; CHECK-NEXT:    sel z5.d, p5, z0.d, z30.d
+; CHECK-NEXT:    movprfx z30, z6
+; CHECK-NEXT:    fcvtzs z30.d, p0/m, z6.s
+; CHECK-NEXT:    fcmge p5.s, p0/z, z26.s, z4.s
+; CHECK-NEXT:    movprfx z4, z26
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z26.s
+; CHECK-NEXT:    sel z3.d, p4, z0.d, z29.d
+; CHECK-NEXT:    movprfx z29, z24
+; CHECK-NEXT:    fcvtzs z29.d, p0/m, z24.s
+; CHECK-NEXT:    fcmuo p4.s, p0/z, z1.s, z1.s
+; CHECK-NEXT:    movprfx z1, z25
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z25.s
+; CHECK-NEXT:    eor p6.b, p0/z, p6.b, p1.b
+; CHECK-NEXT:    eor p9.b, p0/z, p9.b, p1.b
+; CHECK-NEXT:    eor p7.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    eor p5.b, p0/z, p5.b, p1.b
+; CHECK-NEXT:    mov z29.d, p6/m, z0.d
+; CHECK-NEXT:    fcmgt p6.s, p0/z, z2.s, z27.s
+; CHECK-NEXT:    eor p1.b, p0/z, p10.b, p1.b
+; CHECK-NEXT:    fcmgt p10.s, p0/z, z24.s, z27.s
+; CHECK-NEXT:    sel z9.d, p9, z0.d, z1.d
+; CHECK-NEXT:    fcmgt p9.s, p0/z, z6.s, z27.s
+; CHECK-NEXT:    mov z30.d, p7/m, z0.d
+; CHECK-NEXT:    sel z10.d, p5, z0.d, z4.d
+; CHECK-NEXT:    mov z8.d, p1/m, z0.d
+; CHECK-NEXT:    sel z0.d, p3, z7.d, z28.d
+; CHECK-NEXT:    fcmgt p1.s, p0/z, z25.s, z27.s
+; CHECK-NEXT:    fcmgt p3.s, p0/z, z26.s, z27.s
+; CHECK-NEXT:    fcmgt p5.s, p0/z, z31.s, z27.s
+; CHECK-NEXT:    fcmuo p7.s, p0/z, z2.s, z2.s
+; CHECK-NEXT:    sel z1.d, p8, z7.d, z3.d
+; CHECK-NEXT:    fcmuo p8.s, p0/z, z24.s, z24.s
+; CHECK-NEXT:    sel z2.d, p6, z7.d, z5.d
+; CHECK-NEXT:    sel z3.d, p10, z7.d, z29.d
+; CHECK-NEXT:    sel z4.d, p9, z7.d, z30.d
+; CHECK-NEXT:    fcmuo p6.s, p0/z, z6.s, z6.s
+; CHECK-NEXT:    fcmuo p9.s, p0/z, z25.s, z25.s
+; CHECK-NEXT:    fcmuo p10.s, p0/z, z26.s, z26.s
+; CHECK-NEXT:    sel z5.d, p1, z7.d, z9.d
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    sel z6.d, p3, z7.d, z10.d
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    fcmuo p0.s, p0/z, z31.s, z31.s
+; CHECK-NEXT:    sel z7.d, p5, z7.d, z8.d
+; CHECK-NEXT:    ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    mov z1.d, p4/m, #0 // =0x0
+; CHECK-NEXT:    mov z2.d, p7/m, #0 // =0x0
+; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    mov z3.d, p8/m, #0 // =0x0
+; CHECK-NEXT:    mov z4.d, p6/m, #0 // =0x0
 ; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    mov z5.d, p9/m, #0 // =0x0
 ; CHECK-NEXT:    mov z6.d, p10/m, #0 // =0x0
-; CHECK-NEXT:    mov z0.d, p1/m, #0 // =0x0
 ; CHECK-NEXT:    ldr p10, [sp, #1, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    mov z1.d, p2/m, #0 // =0x0
+; CHECK-NEXT:    mov z0.d, p2/m, #0 // =0x0
 ; CHECK-NEXT:    mov z7.d, p0/m, #0 // =0x0
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    addvl sp, sp, #2
+; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #4
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
   %a = call <vscale x 16 x iXLen> @llvm.lrint.nxv16iXLen.nxv16f32(<vscale x 16 x float> %x)
@@ -950,222 +964,221 @@ define <vscale x 32 x iXLen> @lrint_v32f32(<vscale x 32 x float> %x) {
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
 ; CHECK-NEXT:    uunpklo z24.d, z0.s
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    uunpkhi z25.d, z0.s
 ; CHECK-NEXT:    mov w9, #-553648128 // =0xdf000000
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    uunpklo z26.d, z1.s
-; CHECK-NEXT:    uunpkhi z25.d, z0.s
-; CHECK-NEXT:    uunpkhi z28.d, z1.s
-; CHECK-NEXT:    mov z29.s, w9
+; CHECK-NEXT:    uunpkhi z27.d, z1.s
+; CHECK-NEXT:    mov z28.s, w9
 ; CHECK-NEXT:    mov w9, #1593835519 // =0x5effffff
-; CHECK-NEXT:    mov z17.d, z5.d
-; CHECK-NEXT:    mov z27.d, #0x8000000000000000
-; CHECK-NEXT:    uunpkhi z30.d, z2.s
-; CHECK-NEXT:    uunpklo z8.d, z3.s
-; CHECK-NEXT:    movprfx z0, z24
-; CHECK-NEXT:    frintx z0.s, p0/m, z24.s
-; CHECK-NEXT:    uunpkhi z9.d, z3.s
-; CHECK-NEXT:    uunpkhi z14.d, z4.s
+; CHECK-NEXT:    mov z16.d, z5.d
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    uunpkhi z29.d, z2.s
+; CHECK-NEXT:    mov z30.d, #0x8000000000000000
+; CHECK-NEXT:    movprfx z31, z24
+; CHECK-NEXT:    frintx z31.s, p0/m, z24.s
+; CHECK-NEXT:    movprfx z0, z25
+; CHECK-NEXT:    frintx z0.s, p0/m, z25.s
+; CHECK-NEXT:    mov z25.s, w9
 ; CHECK-NEXT:    movprfx z24, z26
 ; CHECK-NEXT:    frintx z24.s, p0/m, z26.s
-; CHECK-NEXT:    movprfx z1, z25
-; CHECK-NEXT:    frintx z1.s, p0/m, z25.s
-; CHECK-NEXT:    movprfx z5, z28
-; CHECK-NEXT:    frintx z5.s, p0/m, z28.s
 ; CHECK-NEXT:    uunpklo z26.d, z2.s
-; CHECK-NEXT:    uunpklo z16.d, z17.s
-; CHECK-NEXT:    mov z25.s, w9
-; CHECK-NEXT:    movprfx z28, z30
-; CHECK-NEXT:    frintx z28.s, p0/m, z30.s
-; CHECK-NEXT:    movprfx z30, z8
-; CHECK-NEXT:    frintx z30.s, p0/m, z8.s
-; CHECK-NEXT:    fcmge p1.s, p0/z, z0.s, z29.s
-; CHECK-NEXT:    movprfx z31, z0
-; CHECK-NEXT:    fcvtzs z31.d, p0/m, z0.s
-; CHECK-NEXT:    str z0, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    fcmge p2.s, p0/z, z1.s, z29.s
-; CHECK-NEXT:    fcmge p3.s, p0/z, z24.s, z29.s
-; CHECK-NEXT:    fcmge p5.s, p0/z, z5.s, z29.s
+; CHECK-NEXT:    movprfx z5, z27
+; CHECK-NEXT:    frintx z5.s, p0/m, z27.s
+; CHECK-NEXT:    uunpklo z9.d, z3.s
+; CHECK-NEXT:    uunpkhi z11.d, z3.s
+; CHECK-NEXT:    uunpkhi z13.d, z4.s
+; CHECK-NEXT:    movprfx z27, z29
+; CHECK-NEXT:    frintx z27.s, p0/m, z29.s
+; CHECK-NEXT:    uunpklo z15.d, z16.s
+; CHECK-NEXT:    uunpkhi z17.d, z6.s
+; CHECK-NEXT:    fcmge p2.s, p0/z, z31.s, z28.s
+; CHECK-NEXT:    str z31, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    movprfx z8, z0
+; CHECK-NEXT:    fcvtzs z8.d, p0/m, z0.s
+; CHECK-NEXT:    fcmge p3.s, p0/z, z0.s, z28.s
+; CHECK-NEXT:    fcvtzs z31.d, p0/m, z31.s
+; CHECK-NEXT:    movprfx z10, z24
+; CHECK-NEXT:    fcvtzs z10.d, p0/m, z24.s
+; CHECK-NEXT:    fcmge p4.s, p0/z, z24.s, z28.s
 ; CHECK-NEXT:    frintx z26.s, p0/m, z26.s
-; CHECK-NEXT:    movprfx z10, z1
-; CHECK-NEXT:    fcvtzs z10.d, p0/m, z1.s
-; CHECK-NEXT:    movprfx z11, z24
-; CHECK-NEXT:    fcvtzs z11.d, p0/m, z24.s
+; CHECK-NEXT:    str z0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    fcmge p6.s, p0/z, z5.s, z28.s
 ; CHECK-NEXT:    movprfx z12, z5
 ; CHECK-NEXT:    fcvtzs z12.d, p0/m, z5.s
-; CHECK-NEXT:    movprfx z15, z28
-; CHECK-NEXT:    fcvtzs z15.d, p0/m, z28.s
-; CHECK-NEXT:    str z1, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    not p4.b, p0/z, p1.b
-; CHECK-NEXT:    fcmgt p1.s, p0/z, z1.s, z25.s
-; CHECK-NEXT:    fcmgt p9.s, p0/z, z5.s, z25.s
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
-; CHECK-NEXT:    sel z0.d, p4, z27.d, z31.d
-; CHECK-NEXT:    fcmge p4.s, p0/z, z26.s, z29.s
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
-; CHECK-NEXT:    not p5.b, p0/z, p5.b
-; CHECK-NEXT:    movprfx z13, z26
-; CHECK-NEXT:    fcvtzs z13.d, p0/m, z26.s
-; CHECK-NEXT:    sel z31.d, p2, z27.d, z10.d
-; CHECK-NEXT:    uunpklo z10.d, z4.s
-; CHECK-NEXT:    sel z8.d, p3, z27.d, z11.d
-; CHECK-NEXT:    fcmge p3.s, p0/z, z28.s, z29.s
-; CHECK-NEXT:    sel z11.d, p5, z27.d, z12.d
-; CHECK-NEXT:    movprfx z4, z9
-; CHECK-NEXT:    frintx z4.s, p0/m, z9.s
-; CHECK-NEXT:    str z0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT:    not p5.b, p0/z, p4.b
-; CHECK-NEXT:    fcmge p4.s, p0/z, z30.s, z29.s
-; CHECK-NEXT:    fcmgt p2.s, p0/z, z24.s, z25.s
-; CHECK-NEXT:    sel z12.d, p5, z27.d, z13.d
-; CHECK-NEXT:    uunpkhi z13.d, z17.s
-; CHECK-NEXT:    movprfx z9, z10
-; CHECK-NEXT:    frintx z9.s, p0/m, z10.s
-; CHECK-NEXT:    movprfx z10, z14
-; CHECK-NEXT:    frintx z10.s, p0/m, z14.s
-; CHECK-NEXT:    uunpkhi z17.d, z6.s
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
-; CHECK-NEXT:    uunpklo z14.d, z6.s
-; CHECK-NEXT:    movprfx z6, z16
-; CHECK-NEXT:    frintx z6.s, p0/m, z16.s
-; CHECK-NEXT:    uunpklo z16.d, z7.s
-; CHECK-NEXT:    uunpkhi z7.d, z7.s
-; CHECK-NEXT:    sel z3.d, p3, z27.d, z15.d
-; CHECK-NEXT:    fcmge p3.s, p0/z, z4.s, z29.s
-; CHECK-NEXT:    frintx z13.s, p0/m, z13.s
-; CHECK-NEXT:    movprfx z15, z30
-; CHECK-NEXT:    fcvtzs z15.d, p0/m, z30.s
-; CHECK-NEXT:    fcmge p5.s, p0/z, z9.s, z29.s
-; CHECK-NEXT:    fcmge p6.s, p0/z, z10.s, z29.s
+; CHECK-NEXT:    movprfx z29, z9
+; CHECK-NEXT:    frintx z29.s, p0/m, z9.s
+; CHECK-NEXT:    eor p5.b, p0/z, p2.b, p1.b
+; CHECK-NEXT:    fcmgt p2.s, p0/z, z0.s, z25.s
+; CHECK-NEXT:    uunpklo z9.d, z4.s
+; CHECK-NEXT:    eor p3.b, p0/z, p3.b, p1.b
+; CHECK-NEXT:    movprfx z4, z11
+; CHECK-NEXT:    frintx z4.s, p0/m, z11.s
+; CHECK-NEXT:    movprfx z14, z27
+; CHECK-NEXT:    fcvtzs z14.d, p0/m, z27.s
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p1.b
+; CHECK-NEXT:    sel z0.d, p5, z30.d, z31.d
+; CHECK-NEXT:    fcmge p5.s, p0/z, z26.s, z28.s
+; CHECK-NEXT:    sel z31.d, p3, z30.d, z8.d
+; CHECK-NEXT:    eor p6.b, p0/z, p6.b, p1.b
 ; CHECK-NEXT:    frintx z17.s, p0/m, z17.s
+; CHECK-NEXT:    sel z8.d, p4, z30.d, z10.d
+; CHECK-NEXT:    movprfx z10, z26
+; CHECK-NEXT:    fcvtzs z10.d, p0/m, z26.s
+; CHECK-NEXT:    fcmge p4.s, p0/z, z27.s, z28.s
+; CHECK-NEXT:    sel z11.d, p6, z30.d, z12.d
 ; CHECK-NEXT:    movprfx z18, z4
 ; CHECK-NEXT:    fcvtzs z18.d, p0/m, z4.s
+; CHECK-NEXT:    str z0, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    eor p6.b, p0/z, p5.b, p1.b
+; CHECK-NEXT:    frintx z9.s, p0/m, z9.s
+; CHECK-NEXT:    fcmge p5.s, p0/z, z29.s, z28.s
+; CHECK-NEXT:    movprfx z0, z17
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z17.s
+; CHECK-NEXT:    fcmgt p3.s, p0/z, z24.s, z25.s
+; CHECK-NEXT:    sel z12.d, p6, z30.d, z10.d
+; CHECK-NEXT:    movprfx z10, z13
+; CHECK-NEXT:    frintx z10.s, p0/m, z13.s
+; CHECK-NEXT:    uunpkhi z13.d, z16.s
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p1.b
+; CHECK-NEXT:    movprfx z16, z29
+; CHECK-NEXT:    fcvtzs z16.d, p0/m, z29.s
+; CHECK-NEXT:    fcmge p6.s, p0/z, z9.s, z28.s
+; CHECK-NEXT:    eor p5.b, p0/z, p5.b, p1.b
+; CHECK-NEXT:    sel z3.d, p4, z30.d, z14.d
+; CHECK-NEXT:    uunpklo z14.d, z6.s
+; CHECK-NEXT:    movprfx z6, z15
+; CHECK-NEXT:    frintx z6.s, p0/m, z15.s
+; CHECK-NEXT:    uunpklo z15.d, z7.s
+; CHECK-NEXT:    frintx z13.s, p0/m, z13.s
+; CHECK-NEXT:    uunpkhi z7.d, z7.s
+; CHECK-NEXT:    fcmge p4.s, p0/z, z4.s, z28.s
+; CHECK-NEXT:    fcmge p7.s, p0/z, z10.s, z28.s
 ; CHECK-NEXT:    movprfx z20, z10
 ; CHECK-NEXT:    fcvtzs z20.d, p0/m, z10.s
-; CHECK-NEXT:    frintx z16.s, p0/m, z16.s
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
+; CHECK-NEXT:    eor p6.b, p0/z, p6.b, p1.b
+; CHECK-NEXT:    mov z16.d, p5/m, z30.d
+; CHECK-NEXT:    fcmge p5.s, p0/z, z17.s, z28.s
 ; CHECK-NEXT:    movprfx z19, z14
 ; CHECK-NEXT:    frintx z19.s, p0/m, z14.s
 ; CHECK-NEXT:    movprfx z14, z9
 ; CHECK-NEXT:    fcvtzs z14.d, p0/m, z9.s
-; CHECK-NEXT:    fcmge p7.s, p0/z, z6.s, z29.s
-; CHECK-NEXT:    fcmge p8.s, p0/z, z13.s, z29.s
+; CHECK-NEXT:    fcmge p8.s, p0/z, z6.s, z28.s
+; CHECK-NEXT:    frintx z15.s, p0/m, z15.s
+; CHECK-NEXT:    fcmge p9.s, p0/z, z13.s, z28.s
 ; CHECK-NEXT:    movprfx z21, z7
 ; CHECK-NEXT:    frintx z21.s, p0/m, z7.s
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
-; CHECK-NEXT:    not p6.b, p0/z, p6.b
-; CHECK-NEXT:    mov z15.d, p4/m, z27.d
-; CHECK-NEXT:    fcmge p4.s, p0/z, z17.s, z29.s
-; CHECK-NEXT:    not p5.b, p0/z, p5.b
-; CHECK-NEXT:    sel z7.d, p3, z27.d, z18.d
-; CHECK-NEXT:    movprfx z0, z17
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z17.s
-; CHECK-NEXT:    sel z18.d, p6, z27.d, z20.d
-; CHECK-NEXT:    movprfx z20, z6
-; CHECK-NEXT:    fcvtzs z20.d, p0/m, z6.s
-; CHECK-NEXT:    fcmge p6.s, p0/z, z16.s, z29.s
-; CHECK-NEXT:    fcmge p3.s, p0/z, z19.s, z29.s
-; CHECK-NEXT:    mov z14.d, p5/m, z27.d
-; CHECK-NEXT:    not p5.b, p0/z, p7.b
-; CHECK-NEXT:    not p7.b, p0/z, p8.b
-; CHECK-NEXT:    fcmge p8.s, p0/z, z21.s, z29.s
-; CHECK-NEXT:    movprfx z1, z16
-; CHECK-NEXT:    fcvtzs z1.d, p0/m, z16.s
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p1.b
 ; CHECK-NEXT:    movprfx z22, z13
 ; CHECK-NEXT:    fcvtzs z22.d, p0/m, z13.s
-; CHECK-NEXT:    movprfx z23, z19
-; CHECK-NEXT:    fcvtzs z23.d, p0/m, z19.s
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
+; CHECK-NEXT:    eor p7.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    sel z7.d, p4, z30.d, z18.d
+; CHECK-NEXT:    fcmge p4.s, p0/z, z19.s, z28.s
+; CHECK-NEXT:    mov z14.d, p6/m, z30.d
+; CHECK-NEXT:    sel z18.d, p7, z30.d, z20.d
+; CHECK-NEXT:    eor p6.b, p0/z, p8.b, p1.b
+; CHECK-NEXT:    fcmge p7.s, p0/z, z15.s, z28.s
+; CHECK-NEXT:    eor p8.b, p0/z, p9.b, p1.b
+; CHECK-NEXT:    fcmge p9.s, p0/z, z21.s, z28.s
 ; CHECK-NEXT:    movprfx z2, z21
 ; CHECK-NEXT:    fcvtzs z2.d, p0/m, z21.s
-; CHECK-NEXT:    mov z29.d, #0x7fffffffffffffff
-; CHECK-NEXT:    mov z20.d, p5/m, z27.d
-; CHECK-NEXT:    not p5.b, p0/z, p6.b
-; CHECK-NEXT:    mov z0.d, p4/m, z27.d
-; CHECK-NEXT:    fcmgt p4.s, p0/z, z16.s, z25.s
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
-; CHECK-NEXT:    not p6.b, p0/z, p8.b
-; CHECK-NEXT:    mov z1.d, p5/m, z27.d
-; CHECK-NEXT:    mov z22.d, p7/m, z27.d
-; CHECK-NEXT:    mov z23.d, p3/m, z27.d
-; CHECK-NEXT:    fcmgt p3.s, p0/z, z21.s, z25.s
-; CHECK-NEXT:    fcmuo p5.s, p0/z, z16.s, z16.s
-; CHECK-NEXT:    mov z2.d, p6/m, z27.d
-; CHECK-NEXT:    sel z27.d, p1, z29.d, z31.d
-; CHECK-NEXT:    fcmgt p1.s, p0/z, z17.s, z25.s
-; CHECK-NEXT:    mov z1.d, p4/m, z29.d
-; CHECK-NEXT:    fcmgt p6.s, p0/z, z26.s, z25.s
-; CHECK-NEXT:    fcmgt p7.s, p0/z, z30.s, z25.s
-; CHECK-NEXT:    sel z31.d, p2, z29.d, z8.d
-; CHECK-NEXT:    fcmgt p2.s, p0/z, z13.s, z25.s
-; CHECK-NEXT:    fcmuo p8.s, p0/z, z21.s, z21.s
-; CHECK-NEXT:    mov z2.d, p3/m, z29.d
+; CHECK-NEXT:    movprfx z1, z15
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z15.s
+; CHECK-NEXT:    movprfx z20, z6
+; CHECK-NEXT:    fcvtzs z20.d, p0/m, z6.s
+; CHECK-NEXT:    movprfx z23, z19
+; CHECK-NEXT:    fcvtzs z23.d, p0/m, z19.s
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p1.b
+; CHECK-NEXT:    mov z28.d, #0x7fffffffffffffff
+; CHECK-NEXT:    mov z22.d, p8/m, z30.d
+; CHECK-NEXT:    eor p5.b, p0/z, p5.b, p1.b
+; CHECK-NEXT:    fcmgt p8.s, p0/z, z29.s, z25.s
+; CHECK-NEXT:    eor p7.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    eor p1.b, p0/z, p9.b, p1.b
+; CHECK-NEXT:    mov z20.d, p6/m, z30.d
+; CHECK-NEXT:    mov z23.d, p4/m, z30.d
+; CHECK-NEXT:    mov z1.d, p7/m, z30.d
+; CHECK-NEXT:    mov z0.d, p5/m, z30.d
+; CHECK-NEXT:    fcmgt p4.s, p0/z, z21.s, z25.s
+; CHECK-NEXT:    mov z2.d, p1/m, z30.d
+; CHECK-NEXT:    fcmgt p1.s, p0/z, z15.s, z25.s
+; CHECK-NEXT:    sel z30.d, p2, z28.d, z31.d
+; CHECK-NEXT:    fcmgt p2.s, p0/z, z17.s, z25.s
+; CHECK-NEXT:    fcmuo p5.s, p0/z, z15.s, z15.s
+; CHECK-NEXT:    fcmgt p6.s, p0/z, z5.s, z25.s
+; CHECK-NEXT:    fcmgt p7.s, p0/z, z26.s, z25.s
+; CHECK-NEXT:    fcmuo p9.s, p0/z, z21.s, z21.s
+; CHECK-NEXT:    sel z31.d, p3, z28.d, z8.d
+; CHECK-NEXT:    fcmgt p3.s, p0/z, z13.s, z25.s
+; CHECK-NEXT:    mov z2.d, p4/m, z28.d
 ; CHECK-NEXT:    fcmuo p4.s, p0/z, z17.s, z17.s
-; CHECK-NEXT:    fcmgt p3.s, p0/z, z19.s, z25.s
-; CHECK-NEXT:    mov z0.d, p1/m, z29.d
-; CHECK-NEXT:    fcmgt p1.s, p0/z, z6.s, z25.s
+; CHECK-NEXT:    mov z1.d, p1/m, z28.d
+; CHECK-NEXT:    fcmgt p1.s, p0/z, z19.s, z25.s
+; CHECK-NEXT:    mov z0.d, p2/m, z28.d
+; CHECK-NEXT:    fcmgt p2.s, p0/z, z6.s, z25.s
+; CHECK-NEXT:    sel z8.d, p6, z28.d, z11.d
+; CHECK-NEXT:    sel z11.d, p7, z28.d, z12.d
+; CHECK-NEXT:    sel z12.d, p8, z28.d, z16.d
+; CHECK-NEXT:    mov z2.d, p9/m, #0 // =0x0
 ; CHECK-NEXT:    mov z1.d, p5/m, #0 // =0x0
-; CHECK-NEXT:    sel z8.d, p9, z29.d, z11.d
-; CHECK-NEXT:    sel z11.d, p6, z29.d, z12.d
-; CHECK-NEXT:    sel z12.d, p7, z29.d, z15.d
 ; CHECK-NEXT:    fcmgt p5.s, p0/z, z10.s, z25.s
-; CHECK-NEXT:    sel z15.d, p2, z29.d, z22.d
-; CHECK-NEXT:    fcmuo p2.s, p0/z, z13.s, z13.s
-; CHECK-NEXT:    str z1, [x8, #14, mul vl]
-; CHECK-NEXT:    mov z2.d, p8/m, #0 // =0x0
+; CHECK-NEXT:    sel z15.d, p3, z28.d, z22.d
+; CHECK-NEXT:    sel z16.d, p1, z28.d, z23.d
+; CHECK-NEXT:    fcmuo p1.s, p0/z, z13.s, z13.s
 ; CHECK-NEXT:    mov z0.d, p4/m, #0 // =0x0
-; CHECK-NEXT:    sel z1.d, p1, z29.d, z20.d
-; CHECK-NEXT:    fcmgt p1.s, p0/z, z9.s, z25.s
+; CHECK-NEXT:    str z2, [x8, #15, mul vl]
 ; CHECK-NEXT:    fcmuo p6.s, p0/z, z19.s, z19.s
-; CHECK-NEXT:    sel z16.d, p3, z29.d, z23.d
 ; CHECK-NEXT:    fcmuo p3.s, p0/z, z6.s, z6.s
+; CHECK-NEXT:    str z1, [x8, #14, mul vl]
+; CHECK-NEXT:    sel z1.d, p2, z28.d, z20.d
+; CHECK-NEXT:    fcmgt p2.s, p0/z, z9.s, z25.s
+; CHECK-NEXT:    str z0, [x8, #13, mul vl]
+; CHECK-NEXT:    sel z2.d, p5, z28.d, z18.d
 ; CHECK-NEXT:    fcmgt p4.s, p0/z, z4.s, z25.s
-; CHECK-NEXT:    str z2, [x8, #15, mul vl]
-; CHECK-NEXT:    sel z2.d, p5, z29.d, z18.d
 ; CHECK-NEXT:    fcmuo p5.s, p0/z, z10.s, z10.s
-; CHECK-NEXT:    str z0, [x8, #13, mul vl]
-; CHECK-NEXT:    mov z15.d, p2/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p2.s, p0/z, z9.s, z9.s
-; CHECK-NEXT:    sel z0.d, p1, z29.d, z14.d
+; CHECK-NEXT:    mov z15.d, p1/m, #0 // =0x0
+; CHECK-NEXT:    fcmuo p1.s, p0/z, z9.s, z9.s
 ; CHECK-NEXT:    mov z16.d, p6/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p1.s, p0/z, z4.s, z4.s
 ; CHECK-NEXT:    mov z1.d, p3/m, #0 // =0x0
-; CHECK-NEXT:    fcmgt p3.s, p0/z, z28.s, z25.s
-; CHECK-NEXT:    sel z4.d, p4, z29.d, z7.d
+; CHECK-NEXT:    fcmgt p3.s, p0/z, z27.s, z25.s
+; CHECK-NEXT:    sel z0.d, p2, z28.d, z14.d
+; CHECK-NEXT:    fcmuo p2.s, p0/z, z4.s, z4.s
 ; CHECK-NEXT:    str z15, [x8, #11, mul vl]
-; CHECK-NEXT:    mov z2.d, p5/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p5.s, p0/z, z28.s, z28.s
+; CHECK-NEXT:    sel z4.d, p4, z28.d, z7.d
+; CHECK-NEXT:    fcmuo p4.s, p0/z, z29.s, z29.s
 ; CHECK-NEXT:    str z16, [x8, #12, mul vl]
-; CHECK-NEXT:    mov z0.d, p2/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p4.s, p0/z, z30.s, z30.s
+; CHECK-NEXT:    mov z2.d, p5/m, #0 // =0x0
+; CHECK-NEXT:    fcmuo p5.s, p0/z, z27.s, z27.s
 ; CHECK-NEXT:    str z1, [x8, #10, mul vl]
-; CHECK-NEXT:    mov z4.d, p1/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p1.s, p0/z, z5.s, z5.s
-; CHECK-NEXT:    sel z1.d, p3, z29.d, z3.d
+; CHECK-NEXT:    mov z0.d, p1/m, #0 // =0x0
+; CHECK-NEXT:    sel z1.d, p3, z28.d, z3.d
 ; CHECK-NEXT:    ldr z3, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    str z2, [x8, #9, mul vl]
-; CHECK-NEXT:    str z0, [x8, #8, mul vl]
+; CHECK-NEXT:    mov z4.d, p2/m, #0 // =0x0
 ; CHECK-NEXT:    fcmuo p3.s, p0/z, z26.s, z26.s
-; CHECK-NEXT:    ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    str z4, [x8, #7, mul vl]
+; CHECK-NEXT:    str z2, [x8, #9, mul vl]
+; CHECK-NEXT:    fcmuo p2.s, p0/z, z5.s, z5.s
 ; CHECK-NEXT:    mov z12.d, p4/m, #0 // =0x0
-; CHECK-NEXT:    fcmgt p2.s, p0/z, z3.s, z25.s
+; CHECK-NEXT:    str z0, [x8, #8, mul vl]
 ; CHECK-NEXT:    mov z1.d, p5/m, #0 // =0x0
 ; CHECK-NEXT:    fcmuo p4.s, p0/z, z24.s, z24.s
-; CHECK-NEXT:    mov z8.d, p1/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p5.s, p0/z, z0.s, z0.s
 ; CHECK-NEXT:    ldr z0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    str z4, [x8, #7, mul vl]
+; CHECK-NEXT:    fcmgt p1.s, p0/z, z3.s, z25.s
+; CHECK-NEXT:    mov z11.d, p3/m, #0 // =0x0
+; CHECK-NEXT:    mov z8.d, p2/m, #0 // =0x0
+; CHECK-NEXT:    fcmuo p5.s, p0/z, z0.s, z0.s
+; CHECK-NEXT:    ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    str z12, [x8, #6, mul vl]
 ; CHECK-NEXT:    str z1, [x8, #5, mul vl]
 ; CHECK-NEXT:    fcmuo p0.s, p0/z, z3.s, z3.s
-; CHECK-NEXT:    mov z11.d, p3/m, #0 // =0x0
-; CHECK-NEXT:    str z8, [x8, #3, mul vl]
 ; CHECK-NEXT:    mov z31.d, p4/m, #0 // =0x0
-; CHECK-NEXT:    mov z0.d, p2/m, z29.d
 ; CHECK-NEXT:    str z11, [x8, #4, mul vl]
-; CHECK-NEXT:    mov z27.d, p5/m, #0 // =0x0
+; CHECK-NEXT:    str z8, [x8, #3, mul vl]
+; CHECK-NEXT:    mov z0.d, p1/m, z28.d
 ; CHECK-NEXT:    str z31, [x8, #2, mul vl]
+; CHECK-NEXT:    mov z30.d, p5/m, #0 // =0x0
 ; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
-; CHECK-NEXT:    str z27, [x8, #1, mul vl]
+; CHECK-NEXT:    str z30, [x8, #1, mul vl]
 ; CHECK-NEXT:    str z0, [x8]
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
@@ -1206,14 +1219,15 @@ define <vscale x 1 x iXLen> @lrint_v1f64(<vscale x 1 x double> %x) {
 ; CHECK-NEXT:    mov z2.d, #0x8000000000000000
 ; CHECK-NEXT:    mov z1.d, x8
 ; CHECK-NEXT:    mov x8, #4890909195324358655 // =0x43dfffffffffffff
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
 ; CHECK-NEXT:    mov z3.d, x8
 ; CHECK-NEXT:    fcmge p1.d, p0/z, z0.d, z1.d
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z0.d
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p2.d, p0/z, z0.d, z3.d
 ; CHECK-NEXT:    mov z3.d, #0x7fffffffffffffff
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
 ; CHECK-NEXT:    fcmuo p0.d, p0/z, z0.d, z0.d
 ; CHECK-NEXT:    mov z1.d, p1/m, z2.d
 ; CHECK-NEXT:    sel z0.d, p2, z3.d, z1.d
@@ -1232,14 +1246,15 @@ define <vscale x 2 x iXLen> @lrint_v2f64(<vscale x 2 x double> %x) {
 ; CHECK-NEXT:    mov z2.d, #0x8000000000000000
 ; CHECK-NEXT:    mov z1.d, x8
 ; CHECK-NEXT:    mov x8, #4890909195324358655 // =0x43dfffffffffffff
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
 ; CHECK-NEXT:    mov z3.d, x8
 ; CHECK-NEXT:    fcmge p1.d, p0/z, z0.d, z1.d
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z0.d
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fcmgt p2.d, p0/z, z0.d, z3.d
 ; CHECK-NEXT:    mov z3.d, #0x7fffffffffffffff
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
 ; CHECK-NEXT:    fcmuo p0.d, p0/z, z0.d, z0.d
 ; CHECK-NEXT:    mov z1.d, p1/m, z2.d
 ; CHECK-NEXT:    sel z0.d, p2, z3.d, z1.d
@@ -1263,6 +1278,7 @@ define <vscale x 4 x iXLen> @lrint_v4f64(<vscale x 4 x double> %x) {
 ; CHECK-NEXT:    mov z6.d, #0x7fffffffffffffff
 ; CHECK-NEXT:    mov z2.d, x8
 ; CHECK-NEXT:    mov x8, #4890909195324358655 // =0x43dfffffffffffff
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
 ; CHECK-NEXT:    frintx z1.d, p0/m, z1.d
 ; CHECK-NEXT:    mov z3.d, x8
@@ -1273,10 +1289,10 @@ define <vscale x 4 x iXLen> @lrint_v4f64(<vscale x 4 x double> %x) {
 ; CHECK-NEXT:    fcvtzs z4.d, p0/m, z0.d
 ; CHECK-NEXT:    movprfx z5, z1
 ; CHECK-NEXT:    fcvtzs z5.d, p0/m, z1.d
-; CHECK-NEXT:    fcmgt p3.d, p0/z, z0.d, z3.d
 ; CHECK-NEXT:    fcmgt p4.d, p0/z, z1.d, z3.d
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p3.b
+; CHECK-NEXT:    eor p2.b, p0/z, p2.b, p3.b
+; CHECK-NEXT:    fcmgt p3.d, p0/z, z0.d, z3.d
 ; CHECK-NEXT:    sel z3.d, p1, z2.d, z4.d
 ; CHECK-NEXT:    fcmuo p1.d, p0/z, z0.d, z0.d
 ; CHECK-NEXT:    fcmuo p0.d, p0/z, z1.d, z1.d
@@ -1310,51 +1326,52 @@ define <vscale x 8 x iXLen> @lrint_v8f64(<vscale x 8 x double> %x) {
 ; CHECK-NEXT:    mov z5.d, #0x8000000000000000
 ; CHECK-NEXT:    mov z4.d, x8
 ; CHECK-NEXT:    mov x8, #4890909195324358655 // =0x43dfffffffffffff
-; CHECK-NEXT:    mov z26.d, #0x7fffffffffffffff
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
-; CHECK-NEXT:    frintx z1.d, p0/m, z1.d
 ; CHECK-NEXT:    frintx z2.d, p0/m, z2.d
+; CHECK-NEXT:    frintx z1.d, p0/m, z1.d
 ; CHECK-NEXT:    frintx z3.d, p0/m, z3.d
 ; CHECK-NEXT:    mov z6.d, x8
+; CHECK-NEXT:    mov z26.d, #0x7fffffffffffffff
 ; CHECK-NEXT:    fcmge p1.d, p0/z, z0.d, z4.d
-; CHECK-NEXT:    fcmge p2.d, p0/z, z1.d, z4.d
-; CHECK-NEXT:    fcmge p3.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    fcmge p4.d, p0/z, z3.d, z4.d
-; CHECK-NEXT:    movprfx z4, z0
-; CHECK-NEXT:    fcvtzs z4.d, p0/m, z0.d
-; CHECK-NEXT:    movprfx z7, z1
-; CHECK-NEXT:    fcvtzs z7.d, p0/m, z1.d
+; CHECK-NEXT:    fcmge p4.d, p0/z, z2.d, z4.d
+; CHECK-NEXT:    fcmge p3.d, p0/z, z1.d, z4.d
+; CHECK-NEXT:    fcmge p5.d, p0/z, z3.d, z4.d
+; CHECK-NEXT:    movprfx z7, z0
+; CHECK-NEXT:    fcvtzs z7.d, p0/m, z0.d
 ; CHECK-NEXT:    movprfx z24, z2
 ; CHECK-NEXT:    fcvtzs z24.d, p0/m, z2.d
+; CHECK-NEXT:    movprfx z4, z1
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z1.d
 ; CHECK-NEXT:    movprfx z25, z3
 ; CHECK-NEXT:    fcvtzs z25.d, p0/m, z3.d
-; CHECK-NEXT:    fcmgt p7.d, p0/z, z2.d, z6.d
-; CHECK-NEXT:    fcmgt p5.d, p0/z, z0.d, z6.d
-; CHECK-NEXT:    fcmgt p6.d, p0/z, z1.d, z6.d
-; CHECK-NEXT:    not p1.b, p0/z, p1.b
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
-; CHECK-NEXT:    mov z4.d, p1/m, z5.d
-; CHECK-NEXT:    fcmgt p1.d, p0/z, z3.d, z6.d
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
-; CHECK-NEXT:    sel z6.d, p2, z5.d, z7.d
-; CHECK-NEXT:    fcmuo p2.d, p0/z, z0.d, z0.d
-; CHECK-NEXT:    sel z7.d, p3, z5.d, z24.d
-; CHECK-NEXT:    fcmuo p3.d, p0/z, z1.d, z1.d
-; CHECK-NEXT:    sel z5.d, p4, z5.d, z25.d
+; CHECK-NEXT:    fcmgt p6.d, p0/z, z0.d, z6.d
+; CHECK-NEXT:    fcmgt p7.d, p0/z, z3.d, z6.d
+; CHECK-NEXT:    eor p1.b, p0/z, p1.b, p2.b
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p2.b
+; CHECK-NEXT:    eor p3.b, p0/z, p3.b, p2.b
+; CHECK-NEXT:    mov z7.d, p1/m, z5.d
+; CHECK-NEXT:    fcmgt p1.d, p0/z, z1.d, z6.d
+; CHECK-NEXT:    eor p2.b, p0/z, p5.b, p2.b
+; CHECK-NEXT:    fcmgt p5.d, p0/z, z2.d, z6.d
+; CHECK-NEXT:    sel z6.d, p4, z5.d, z24.d
+; CHECK-NEXT:    mov z4.d, p3/m, z5.d
 ; CHECK-NEXT:    fcmuo p4.d, p0/z, z2.d, z2.d
+; CHECK-NEXT:    fcmuo p3.d, p0/z, z1.d, z1.d
+; CHECK-NEXT:    sel z5.d, p2, z5.d, z25.d
+; CHECK-NEXT:    fcmuo p2.d, p0/z, z0.d, z0.d
 ; CHECK-NEXT:    fcmuo p0.d, p0/z, z3.d, z3.d
-; CHECK-NEXT:    sel z0.d, p5, z26.d, z4.d
-; CHECK-NEXT:    sel z1.d, p6, z26.d, z6.d
+; CHECK-NEXT:    sel z0.d, p6, z26.d, z7.d
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z2.d, p7, z26.d, z7.d
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z3.d, p1, z26.d, z5.d
+; CHECK-NEXT:    sel z2.d, p5, z26.d, z6.d
+; CHECK-NEXT:    sel z1.d, p1, z26.d, z4.d
 ; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    sel z3.d, p7, z26.d, z5.d
+; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    mov z0.d, p2/m, #0 // =0x0
-; CHECK-NEXT:    mov z1.d, p3/m, #0 // =0x0
 ; CHECK-NEXT:    mov z2.d, p4/m, #0 // =0x0
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    mov z1.d, p3/m, #0 // =0x0
 ; CHECK-NEXT:    mov z3.d, p0/m, #0 // =0x0
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1383,101 +1400,103 @@ define <vscale x 16 x iXLen> @lrint_v16f64(<vscale x 16 x double> %x) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov x8, #-4332462841530417152 // =0xc3e0000000000000
 ; CHECK-NEXT:    mov z24.d, #0x7fffffffffffffff
-; CHECK-NEXT:    mov z25.d, x8
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    movprfx z28, z0
+; CHECK-NEXT:    frintx z28.d, p0/m, z0.d
+; CHECK-NEXT:    mov z0.d, x8
+; CHECK-NEXT:    movprfx z26, z2
+; CHECK-NEXT:    frintx z26.d, p0/m, z2.d
+; CHECK-NEXT:    movprfx z25, z1
+; CHECK-NEXT:    frintx z25.d, p0/m, z1.d
+; CHECK-NEXT:    mov z2.d, #0x8000000000000000
 ; CHECK-NEXT:    mov x8, #4890909195324358655 // =0x43dfffffffffffff
-; CHECK-NEXT:    movprfx z26, z0
-; CHECK-NEXT:    frintx z26.d, p0/m, z0.d
-; CHECK-NEXT:    movprfx z27, z1
-; CHECK-NEXT:    frintx z27.d, p0/m, z1.d
-; CHECK-NEXT:    frintx z2.d, p0/m, z2.d
-; CHECK-NEXT:    mov z0.d, #0x8000000000000000
-; CHECK-NEXT:    mov z1.d, x8
 ; CHECK-NEXT:    frintx z3.d, p0/m, z3.d
-; CHECK-NEXT:    movprfx z28, z4
-; CHECK-NEXT:    frintx z28.d, p0/m, z4.d
+; CHECK-NEXT:    movprfx z27, z4
+; CHECK-NEXT:    frintx z27.d, p0/m, z4.d
 ; CHECK-NEXT:    frintx z5.d, p0/m, z5.d
+; CHECK-NEXT:    mov z1.d, x8
 ; CHECK-NEXT:    frintx z6.d, p0/m, z6.d
 ; CHECK-NEXT:    frintx z7.d, p0/m, z7.d
-; CHECK-NEXT:    fcmge p1.d, p0/z, z26.d, z25.d
-; CHECK-NEXT:    fcmge p2.d, p0/z, z27.d, z25.d
-; CHECK-NEXT:    movprfx z4, z26
-; CHECK-NEXT:    fcvtzs z4.d, p0/m, z26.d
-; CHECK-NEXT:    fcmge p5.d, p0/z, z2.d, z25.d
-; CHECK-NEXT:    movprfx z29, z27
-; CHECK-NEXT:    fcvtzs z29.d, p0/m, z27.d
-; CHECK-NEXT:    fcmgt p3.d, p0/z, z26.d, z1.d
-; CHECK-NEXT:    fcmge p6.d, p0/z, z3.d, z25.d
-; CHECK-NEXT:    fcmge p8.d, p0/z, z5.d, z25.d
-; CHECK-NEXT:    fcmgt p7.d, p0/z, z27.d, z1.d
-; CHECK-NEXT:    fcmge p9.d, p0/z, z6.d, z25.d
-; CHECK-NEXT:    movprfx z30, z28
-; CHECK-NEXT:    fcvtzs z30.d, p0/m, z28.d
-; CHECK-NEXT:    fcmge p10.d, p0/z, z7.d, z25.d
-; CHECK-NEXT:    not p4.b, p0/z, p1.b
-; CHECK-NEXT:    fcmuo p1.d, p0/z, z26.d, z26.d
-; CHECK-NEXT:    movprfx z26, z2
-; CHECK-NEXT:    fcvtzs z26.d, p0/m, z2.d
-; CHECK-NEXT:    not p2.b, p0/z, p2.b
-; CHECK-NEXT:    movprfx z31, z6
-; CHECK-NEXT:    fcvtzs z31.d, p0/m, z6.d
+; CHECK-NEXT:    fcmge p3.d, p0/z, z28.d, z0.d
+; CHECK-NEXT:    movprfx z4, z28
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z28.d
+; CHECK-NEXT:    fcmge p6.d, p0/z, z26.d, z0.d
+; CHECK-NEXT:    fcmge p5.d, p0/z, z25.d, z0.d
+; CHECK-NEXT:    movprfx z29, z25
+; CHECK-NEXT:    fcvtzs z29.d, p0/m, z25.d
+; CHECK-NEXT:    fcmuo p2.d, p0/z, z28.d, z28.d
+; CHECK-NEXT:    fcmgt p4.d, p0/z, z28.d, z1.d
+; CHECK-NEXT:    movprfx z28, z26
+; CHECK-NEXT:    fcvtzs z28.d, p0/m, z26.d
+; CHECK-NEXT:    fcmge p7.d, p0/z, z3.d, z0.d
+; CHECK-NEXT:    fcmge p8.d, p0/z, z5.d, z0.d
+; CHECK-NEXT:    fcmge p9.d, p0/z, z6.d, z0.d
+; CHECK-NEXT:    fcmge p10.d, p0/z, z7.d, z0.d
+; CHECK-NEXT:    eor p3.b, p0/z, p3.b, p1.b
+; CHECK-NEXT:    movprfx z30, z3
+; CHECK-NEXT:    fcvtzs z30.d, p0/m, z3.d
+; CHECK-NEXT:    movprfx z31, z27
+; CHECK-NEXT:    fcvtzs z31.d, p0/m, z27.d
+; CHECK-NEXT:    eor p5.b, p0/z, p5.b, p1.b
 ; CHECK-NEXT:    movprfx z8, z7
 ; CHECK-NEXT:    fcvtzs z8.d, p0/m, z7.d
-; CHECK-NEXT:    mov z4.d, p4/m, z0.d
-; CHECK-NEXT:    fcmge p4.d, p0/z, z28.d, z25.d
-; CHECK-NEXT:    not p5.b, p0/z, p5.b
-; CHECK-NEXT:    mov z29.d, p2/m, z0.d
-; CHECK-NEXT:    fcmuo p2.d, p0/z, z27.d, z27.d
-; CHECK-NEXT:    movprfx z27, z3
-; CHECK-NEXT:    fcvtzs z27.d, p0/m, z3.d
-; CHECK-NEXT:    sel z25.d, p5, z0.d, z26.d
-; CHECK-NEXT:    movprfx z26, z5
-; CHECK-NEXT:    fcvtzs z26.d, p0/m, z5.d
-; CHECK-NEXT:    not p6.b, p0/z, p6.b
-; CHECK-NEXT:    not p5.b, p0/z, p8.b
-; CHECK-NEXT:    fcmgt p8.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
-; CHECK-NEXT:    mov z27.d, p6/m, z0.d
-; CHECK-NEXT:    not p6.b, p0/z, p9.b
-; CHECK-NEXT:    fcmuo p9.d, p0/z, z2.d, z2.d
-; CHECK-NEXT:    mov z30.d, p4/m, z0.d
-; CHECK-NEXT:    not p4.b, p0/z, p10.b
+; CHECK-NEXT:    mov z4.d, p3/m, z2.d
+; CHECK-NEXT:    eor p3.b, p0/z, p6.b, p1.b
+; CHECK-NEXT:    fcmge p6.d, p0/z, z27.d, z0.d
+; CHECK-NEXT:    movprfx z0, z5
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z5.d
+; CHECK-NEXT:    mov z29.d, p5/m, z2.d
+; CHECK-NEXT:    fcmgt p5.d, p0/z, z25.d, z1.d
+; CHECK-NEXT:    mov z28.d, p3/m, z2.d
+; CHECK-NEXT:    eor p7.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    fcmuo p3.d, p0/z, z25.d, z25.d
+; CHECK-NEXT:    eor p8.b, p0/z, p8.b, p1.b
+; CHECK-NEXT:    movprfx z25, z6
+; CHECK-NEXT:    fcvtzs z25.d, p0/m, z6.d
+; CHECK-NEXT:    eor p6.b, p0/z, p6.b, p1.b
+; CHECK-NEXT:    mov z30.d, p7/m, z2.d
+; CHECK-NEXT:    fcmgt p7.d, p0/z, z26.d, z1.d
+; CHECK-NEXT:    eor p9.b, p0/z, p9.b, p1.b
+; CHECK-NEXT:    eor p1.b, p0/z, p10.b, p1.b
+; CHECK-NEXT:    mov z31.d, p6/m, z2.d
+; CHECK-NEXT:    fcmuo p6.d, p0/z, z26.d, z26.d
 ; CHECK-NEXT:    fcmgt p10.d, p0/z, z3.d, z1.d
-; CHECK-NEXT:    mov z26.d, p5/m, z0.d
-; CHECK-NEXT:    fcmgt p5.d, p0/z, z28.d, z1.d
-; CHECK-NEXT:    mov z31.d, p6/m, z0.d
-; CHECK-NEXT:    mov z8.d, p4/m, z0.d
-; CHECK-NEXT:    sel z0.d, p3, z24.d, z4.d
-; CHECK-NEXT:    fcmgt p3.d, p0/z, z5.d, z1.d
+; CHECK-NEXT:    sel z26.d, p8, z2.d, z0.d
+; CHECK-NEXT:    fcmgt p8.d, p0/z, z27.d, z1.d
+; CHECK-NEXT:    mov z8.d, p1/m, z2.d
+; CHECK-NEXT:    sel z0.d, p4, z24.d, z4.d
+; CHECK-NEXT:    fcmgt p1.d, p0/z, z5.d, z1.d
 ; CHECK-NEXT:    fcmgt p4.d, p0/z, z6.d, z1.d
-; CHECK-NEXT:    fcmgt p6.d, p0/z, z7.d, z1.d
-; CHECK-NEXT:    sel z1.d, p7, z24.d, z29.d
-; CHECK-NEXT:    fcmuo p7.d, p0/z, z3.d, z3.d
-; CHECK-NEXT:    sel z2.d, p8, z24.d, z25.d
-; CHECK-NEXT:    sel z3.d, p10, z24.d, z27.d
-; CHECK-NEXT:    sel z4.d, p5, z24.d, z30.d
-; CHECK-NEXT:    fcmuo p5.d, p0/z, z28.d, z28.d
+; CHECK-NEXT:    mov z25.d, p9/m, z2.d
+; CHECK-NEXT:    fcmgt p9.d, p0/z, z7.d, z1.d
+; CHECK-NEXT:    sel z1.d, p5, z24.d, z29.d
+; CHECK-NEXT:    fcmuo p5.d, p0/z, z3.d, z3.d
+; CHECK-NEXT:    sel z2.d, p7, z24.d, z28.d
+; CHECK-NEXT:    sel z3.d, p10, z24.d, z30.d
+; CHECK-NEXT:    sel z4.d, p8, z24.d, z31.d
+; CHECK-NEXT:    fcmuo p7.d, p0/z, z27.d, z27.d
 ; CHECK-NEXT:    fcmuo p8.d, p0/z, z5.d, z5.d
 ; CHECK-NEXT:    fcmuo p10.d, p0/z, z6.d, z6.d
-; CHECK-NEXT:    sel z5.d, p3, z24.d, z26.d
-; CHECK-NEXT:    fcmuo p0.d, p0/z, z7.d, z7.d
-; CHECK-NEXT:    sel z6.d, p4, z24.d, z31.d
+; CHECK-NEXT:    sel z5.d, p1, z24.d, z26.d
+; CHECK-NEXT:    sel z6.d, p4, z24.d, z25.d
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    sel z7.d, p6, z24.d, z8.d
+; CHECK-NEXT:    fcmuo p0.d, p0/z, z7.d, z7.d
+; CHECK-NEXT:    sel z7.d, p9, z24.d, z8.d
 ; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    mov z2.d, p9/m, #0 // =0x0
 ; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    mov z3.d, p7/m, #0 // =0x0
-; CHECK-NEXT:    mov z4.d, p5/m, #0 // =0x0
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    mov z2.d, p6/m, #0 // =0x0
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    mov z3.d, p5/m, #0 // =0x0
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    mov z4.d, p7/m, #0 // =0x0
 ; CHECK-NEXT:    mov z5.d, p8/m, #0 // =0x0
+; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    mov z6.d, p10/m, #0 // =0x0
+; CHECK-NEXT:    mov z0.d, p2/m, #0 // =0x0
 ; CHECK-NEXT:    ldr p10, [sp, #1, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    mov z0.d, p1/m, #0 // =0x0
-; CHECK-NEXT:    mov z1.d, p2/m, #0 // =0x0
-; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    mov z1.d, p3/m, #0 // =0x0
 ; CHECK-NEXT:    mov z7.d, p0/m, #0 // =0x0
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -1531,213 +1550,213 @@ define <vscale x 32 x iXLen> @lrint_v32f64(<vscale x 32 x double> %x) {
 ; CHECK-NEXT:    ldr z24, [x0, #6, mul vl]
 ; CHECK-NEXT:    ldr z1, [x0, #1, mul vl]
 ; CHECK-NEXT:    mov z7.d, x9
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    mov z26.d, #0x8000000000000000
-; CHECK-NEXT:    ldr z3, [x0, #3, mul vl]
-; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
+; CHECK-NEXT:    movprfx z12, z0
+; CHECK-NEXT:    frintx z12.d, p0/m, z0.d
 ; CHECK-NEXT:    movprfx z30, z2
 ; CHECK-NEXT:    frintx z30.d, p0/m, z2.d
-; CHECK-NEXT:    ldr z6, [x0, #5, mul vl]
+; CHECK-NEXT:    ldr z4, [x0, #3, mul vl]
 ; CHECK-NEXT:    movprfx z25, z24
 ; CHECK-NEXT:    frintx z25.d, p0/m, z24.d
-; CHECK-NEXT:    movprfx z12, z1
-; CHECK-NEXT:    frintx z12.d, p0/m, z1.d
+; CHECK-NEXT:    movprfx z11, z1
+; CHECK-NEXT:    frintx z11.d, p0/m, z1.d
+; CHECK-NEXT:    ldr z6, [x0, #5, mul vl]
 ; CHECK-NEXT:    ldr z5, [x0, #4, mul vl]
-; CHECK-NEXT:    frintx z3.d, p0/m, z3.d
+; CHECK-NEXT:    ldr z9, [x0, #15, mul vl]
+; CHECK-NEXT:    ldr z8, [x0, #7, mul vl]
+; CHECK-NEXT:    frintx z4.d, p0/m, z4.d
 ; CHECK-NEXT:    mov x9, #4890909195324358655 // =0x43dfffffffffffff
-; CHECK-NEXT:    frintx z6.d, p0/m, z6.d
-; CHECK-NEXT:    mov z4.d, x9
-; CHECK-NEXT:    fcmge p3.d, p0/z, z0.d, z7.d
-; CHECK-NEXT:    movprfx z24, z0
-; CHECK-NEXT:    fcvtzs z24.d, p0/m, z0.d
+; CHECK-NEXT:    fcmge p3.d, p0/z, z12.d, z7.d
+; CHECK-NEXT:    movprfx z24, z12
+; CHECK-NEXT:    fcvtzs z24.d, p0/m, z12.d
 ; CHECK-NEXT:    fcmge p5.d, p0/z, z30.d, z7.d
 ; CHECK-NEXT:    movprfx z28, z30
 ; CHECK-NEXT:    fcvtzs z28.d, p0/m, z30.d
-; CHECK-NEXT:    str z0, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    fcmge p4.d, p0/z, z11.d, z7.d
 ; CHECK-NEXT:    frintx z5.d, p0/m, z5.d
-; CHECK-NEXT:    fcmge p4.d, p0/z, z12.d, z7.d
-; CHECK-NEXT:    ldr z8, [x0, #7, mul vl]
-; CHECK-NEXT:    ldr z9, [x0, #15, mul vl]
-; CHECK-NEXT:    movprfx z27, z12
-; CHECK-NEXT:    fcvtzs z27.d, p0/m, z12.d
-; CHECK-NEXT:    fcmge p6.d, p0/z, z3.d, z7.d
-; CHECK-NEXT:    fcmge p9.d, p0/z, z6.d, z7.d
-; CHECK-NEXT:    not p7.b, p0/z, p3.b
-; CHECK-NEXT:    movprfx z31, z3
-; CHECK-NEXT:    fcvtzs z31.d, p0/m, z3.d
-; CHECK-NEXT:    movprfx z15, z6
-; CHECK-NEXT:    fcvtzs z15.d, p0/m, z6.d
-; CHECK-NEXT:    not p5.b, p0/z, p5.b
-; CHECK-NEXT:    fcmge p8.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    movprfx z13, z5
-; CHECK-NEXT:    fcvtzs z13.d, p0/m, z5.d
-; CHECK-NEXT:    sel z0.d, p7, z26.d, z24.d
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
+; CHECK-NEXT:    frintx z6.d, p0/m, z6.d
+; CHECK-NEXT:    movprfx z27, z11
+; CHECK-NEXT:    fcvtzs z27.d, p0/m, z11.d
 ; CHECK-NEXT:    movprfx z17, z25
 ; CHECK-NEXT:    fcvtzs z17.d, p0/m, z25.d
-; CHECK-NEXT:    not p3.b, p0/z, p6.b
-; CHECK-NEXT:    fcmge p6.d, p0/z, z25.d, z7.d
+; CHECK-NEXT:    fcmge p6.d, p0/z, z4.d, z7.d
+; CHECK-NEXT:    movprfx z31, z4
+; CHECK-NEXT:    fcvtzs z31.d, p0/m, z4.d
 ; CHECK-NEXT:    movprfx z22, z9
 ; CHECK-NEXT:    frintx z22.d, p0/m, z9.d
-; CHECK-NEXT:    sel z29.d, p4, z26.d, z27.d
+; CHECK-NEXT:    eor p3.b, p0/z, p3.b, p1.b
+; CHECK-NEXT:    mov z3.d, x9
+; CHECK-NEXT:    eor p5.b, p0/z, p5.b, p1.b
+; CHECK-NEXT:    fcmge p7.d, p0/z, z5.d, z7.d
+; CHECK-NEXT:    movprfx z10, z5
+; CHECK-NEXT:    fcvtzs z10.d, p0/m, z5.d
+; CHECK-NEXT:    sel z0.d, p3, z26.d, z24.d
+; CHECK-NEXT:    eor p8.b, p0/z, p4.b, p1.b
+; CHECK-NEXT:    fcmge p9.d, p0/z, z6.d, z7.d
+; CHECK-NEXT:    movprfx z14, z6
+; CHECK-NEXT:    fcvtzs z14.d, p0/m, z6.d
+; CHECK-NEXT:    eor p4.b, p0/z, p6.b, p1.b
+; CHECK-NEXT:    movprfx z2, z22
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z22.d
+; CHECK-NEXT:    sel z29.d, p8, z26.d, z27.d
 ; CHECK-NEXT:    movprfx z27, z8
 ; CHECK-NEXT:    frintx z27.d, p0/m, z8.d
-; CHECK-NEXT:    fcmgt p1.d, p0/z, z12.d, z4.d
-; CHECK-NEXT:    str z0, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    fcmgt p2.d, p0/z, z11.d, z3.d
+; CHECK-NEXT:    str z0, [sp, #2, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    sel z0.d, p5, z26.d, z28.d
-; CHECK-NEXT:    not p4.b, p0/z, p8.b
-; CHECK-NEXT:    ldr z10, [x0, #8, mul vl]
-; CHECK-NEXT:    not p5.b, p0/z, p9.b
-; CHECK-NEXT:    sel z24.d, p3, z26.d, z31.d
-; CHECK-NEXT:    not p3.b, p0/z, p6.b
-; CHECK-NEXT:    movprfx z2, z22
-; CHECK-NEXT:    fcvtzs z2.d, p0/m, z22.d
-; CHECK-NEXT:    fcmgt p2.d, p0/z, z30.d, z4.d
-; CHECK-NEXT:    str z0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT:    fcmge p7.d, p0/z, z27.d, z7.d
-; CHECK-NEXT:    sel z31.d, p5, z26.d, z15.d
-; CHECK-NEXT:    ldr z11, [x0, #9, mul vl]
-; CHECK-NEXT:    movprfx z28, z10
-; CHECK-NEXT:    frintx z28.d, p0/m, z10.d
-; CHECK-NEXT:    ldr z10, [x0, #10, mul vl]
-; CHECK-NEXT:    ldr z18, [x0, #11, mul vl]
+; CHECK-NEXT:    eor p5.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    ldr z13, [x0, #14, mul vl]
+; CHECK-NEXT:    eor p6.b, p0/z, p9.b, p1.b
+; CHECK-NEXT:    fcmge p7.d, p0/z, z25.d, z7.d
+; CHECK-NEXT:    sel z24.d, p4, z26.d, z31.d
+; CHECK-NEXT:    sel z31.d, p5, z26.d, z10.d
+; CHECK-NEXT:    fcmgt p3.d, p0/z, z30.d, z3.d
+; CHECK-NEXT:    str z0, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    fcmge p8.d, p0/z, z27.d, z7.d
+; CHECK-NEXT:    sel z0.d, p6, z26.d, z14.d
+; CHECK-NEXT:    ldr z28, [x0, #8, mul vl]
+; CHECK-NEXT:    ldr z8, [x0, #9, mul vl]
+; CHECK-NEXT:    ldr z18, [x0, #10, mul vl]
+; CHECK-NEXT:    ldr z19, [x0, #11, mul vl]
 ; CHECK-NEXT:    ldr z16, [x0, #13, mul vl]
-; CHECK-NEXT:    ldr z14, [x0, #14, mul vl]
-; CHECK-NEXT:    ldr z19, [x0, #12, mul vl]
-; CHECK-NEXT:    mov z17.d, p3/m, z26.d
-; CHECK-NEXT:    fcmgt p9.d, p0/z, z3.d, z4.d
-; CHECK-NEXT:    movprfx z8, z11
-; CHECK-NEXT:    frintx z8.d, p0/m, z11.d
-; CHECK-NEXT:    sel z11.d, p4, z26.d, z13.d
-; CHECK-NEXT:    frintx z10.d, p0/m, z10.d
-; CHECK-NEXT:    movprfx z13, z18
-; CHECK-NEXT:    frintx z13.d, p0/m, z18.d
-; CHECK-NEXT:    fcmge p5.d, p0/z, z28.d, z7.d
+; CHECK-NEXT:    ldr z15, [x0, #12, mul vl]
+; CHECK-NEXT:    eor p4.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    movprfx z21, z13
+; CHECK-NEXT:    frintx z21.d, p0/m, z13.d
+; CHECK-NEXT:    str z0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    frintx z28.d, p0/m, z28.d
+; CHECK-NEXT:    frintx z8.d, p0/m, z8.d
+; CHECK-NEXT:    movprfx z10, z18
+; CHECK-NEXT:    frintx z10.d, p0/m, z18.d
+; CHECK-NEXT:    movprfx z14, z19
+; CHECK-NEXT:    frintx z14.d, p0/m, z19.d
 ; CHECK-NEXT:    movprfx z18, z27
 ; CHECK-NEXT:    fcvtzs z18.d, p0/m, z27.d
+; CHECK-NEXT:    eor p5.b, p0/z, p8.b, p1.b
+; CHECK-NEXT:    frintx z15.d, p0/m, z15.d
 ; CHECK-NEXT:    frintx z16.d, p0/m, z16.d
-; CHECK-NEXT:    movprfx z15, z19
-; CHECK-NEXT:    frintx z15.d, p0/m, z19.d
+; CHECK-NEXT:    mov z17.d, p4/m, z26.d
+; CHECK-NEXT:    movprfx z1, z21
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z21.d
+; CHECK-NEXT:    fcmge p6.d, p0/z, z28.d, z7.d
+; CHECK-NEXT:    fcmge p7.d, p0/z, z8.d, z7.d
 ; CHECK-NEXT:    movprfx z19, z28
 ; CHECK-NEXT:    fcvtzs z19.d, p0/m, z28.d
-; CHECK-NEXT:    movprfx z21, z14
-; CHECK-NEXT:    frintx z21.d, p0/m, z14.d
-; CHECK-NEXT:    not p4.b, p0/z, p7.b
-; CHECK-NEXT:    fcmge p6.d, p0/z, z8.d, z7.d
 ; CHECK-NEXT:    movprfx z20, z8
 ; CHECK-NEXT:    fcvtzs z20.d, p0/m, z8.d
-; CHECK-NEXT:    fcmge p7.d, p0/z, z10.d, z7.d
-; CHECK-NEXT:    fcmge p8.d, p0/z, z13.d, z7.d
-; CHECK-NEXT:    not p5.b, p0/z, p5.b
-; CHECK-NEXT:    sel z9.d, p4, z26.d, z18.d
-; CHECK-NEXT:    fcmge p4.d, p0/z, z16.d, z7.d
-; CHECK-NEXT:    fcmge p3.d, p0/z, z15.d, z7.d
+; CHECK-NEXT:    fcmge p8.d, p0/z, z10.d, z7.d
+; CHECK-NEXT:    fcmge p9.d, p0/z, z14.d, z7.d
+; CHECK-NEXT:    sel z9.d, p5, z26.d, z18.d
+; CHECK-NEXT:    fcmge p4.d, p0/z, z15.d, z7.d
+; CHECK-NEXT:    fcmge p5.d, p0/z, z16.d, z7.d
+; CHECK-NEXT:    movprfx z23, z15
+; CHECK-NEXT:    fcvtzs z23.d, p0/m, z15.d
 ; CHECK-NEXT:    movprfx z0, z16
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z16.d
-; CHECK-NEXT:    sel z14.d, p5, z26.d, z19.d
+; CHECK-NEXT:    eor p6.b, p0/z, p6.b, p1.b
+; CHECK-NEXT:    eor p7.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    sel z13.d, p6, z26.d, z19.d
+; CHECK-NEXT:    eor p6.b, p0/z, p8.b, p1.b
 ; CHECK-NEXT:    movprfx z19, z10
 ; CHECK-NEXT:    fcvtzs z19.d, p0/m, z10.d
-; CHECK-NEXT:    movprfx z1, z21
-; CHECK-NEXT:    fcvtzs z1.d, p0/m, z21.d
-; CHECK-NEXT:    not p6.b, p0/z, p6.b
-; CHECK-NEXT:    movprfx z23, z15
-; CHECK-NEXT:    fcvtzs z23.d, p0/m, z15.d
-; CHECK-NEXT:    not p5.b, p0/z, p7.b
-; CHECK-NEXT:    sel z18.d, p6, z26.d, z20.d
-; CHECK-NEXT:    fcmge p6.d, p0/z, z21.d, z7.d
-; CHECK-NEXT:    not p7.b, p0/z, p8.b
-; CHECK-NEXT:    fcmge p8.d, p0/z, z22.d, z7.d
-; CHECK-NEXT:    movprfx z20, z13
-; CHECK-NEXT:    fcvtzs z20.d, p0/m, z13.d
-; CHECK-NEXT:    not p4.b, p0/z, p4.b
+; CHECK-NEXT:    sel z18.d, p7, z26.d, z20.d
+; CHECK-NEXT:    fcmge p7.d, p0/z, z21.d, z7.d
+; CHECK-NEXT:    eor p8.b, p0/z, p9.b, p1.b
+; CHECK-NEXT:    fcmge p9.d, p0/z, z22.d, z7.d
+; CHECK-NEXT:    movprfx z20, z14
+; CHECK-NEXT:    fcvtzs z20.d, p0/m, z14.d
+; CHECK-NEXT:    eor p4.b, p0/z, p4.b, p1.b
+; CHECK-NEXT:    eor p5.b, p0/z, p5.b, p1.b
 ; CHECK-NEXT:    mov z7.d, #0x7fffffffffffffff
-; CHECK-NEXT:    mov z19.d, p5/m, z26.d
-; CHECK-NEXT:    not p3.b, p0/z, p3.b
-; CHECK-NEXT:    mov z0.d, p4/m, z26.d
-; CHECK-NEXT:    fcmgt p4.d, p0/z, z21.d, z4.d
-; CHECK-NEXT:    not p5.b, p0/z, p6.b
-; CHECK-NEXT:    mov z23.d, p3/m, z26.d
-; CHECK-NEXT:    fcmgt p3.d, p0/z, z22.d, z4.d
-; CHECK-NEXT:    not p6.b, p0/z, p8.b
-; CHECK-NEXT:    mov z20.d, p7/m, z26.d
-; CHECK-NEXT:    fcmuo p8.d, p0/z, z22.d, z22.d
-; CHECK-NEXT:    mov z1.d, p5/m, z26.d
+; CHECK-NEXT:    mov z19.d, p6/m, z26.d
+; CHECK-NEXT:    mov z23.d, p4/m, z26.d
+; CHECK-NEXT:    fcmgt p4.d, p0/z, z22.d, z3.d
+; CHECK-NEXT:    eor p7.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    mov z0.d, p5/m, z26.d
 ; CHECK-NEXT:    fcmuo p5.d, p0/z, z21.d, z21.d
-; CHECK-NEXT:    fcmgt p7.d, p0/z, z25.d, z4.d
-; CHECK-NEXT:    mov z2.d, p6/m, z26.d
-; CHECK-NEXT:    sel z26.d, p1, z7.d, z29.d
-; CHECK-NEXT:    fcmgt p1.d, p0/z, z16.d, z4.d
-; CHECK-NEXT:    ldr z29, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    fcmgt p6.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    mov z24.d, p9/m, z7.d
-; CHECK-NEXT:    mov z1.d, p4/m, z7.d
+; CHECK-NEXT:    eor p1.b, p0/z, p9.b, p1.b
+; CHECK-NEXT:    mov z20.d, p8/m, z26.d
+; CHECK-NEXT:    fcmgt p6.d, p0/z, z4.d, z3.d
+; CHECK-NEXT:    mov z1.d, p7/m, z26.d
+; CHECK-NEXT:    fcmuo p9.d, p0/z, z22.d, z22.d
+; CHECK-NEXT:    fcmgt p8.d, p0/z, z25.d, z3.d
+; CHECK-NEXT:    mov z2.d, p1/m, z26.d
+; CHECK-NEXT:    fcmgt p1.d, p0/z, z21.d, z3.d
+; CHECK-NEXT:    sel z26.d, p2, z7.d, z29.d
+; CHECK-NEXT:    fcmgt p2.d, p0/z, z16.d, z3.d
+; CHECK-NEXT:    ldr z29, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    fcmgt p7.d, p0/z, z5.d, z3.d
+; CHECK-NEXT:    mov z24.d, p6/m, z7.d
+; CHECK-NEXT:    fcmuo p6.d, p0/z, z15.d, z15.d
+; CHECK-NEXT:    mov z2.d, p4/m, z7.d
 ; CHECK-NEXT:    fcmuo p4.d, p0/z, z16.d, z16.d
-; CHECK-NEXT:    mov z2.d, p3/m, z7.d
-; CHECK-NEXT:    fcmgt p3.d, p0/z, z15.d, z4.d
-; CHECK-NEXT:    mov z17.d, p7/m, z7.d
-; CHECK-NEXT:    mov z29.d, p2/m, z7.d
-; CHECK-NEXT:    fcmgt p2.d, p0/z, z13.d, z4.d
-; CHECK-NEXT:    mov z0.d, p1/m, z7.d
-; CHECK-NEXT:    fcmgt p1.d, p0/z, z10.d, z4.d
+; CHECK-NEXT:    mov z17.d, p8/m, z7.d
+; CHECK-NEXT:    mov z1.d, p1/m, z7.d
+; CHECK-NEXT:    mov z29.d, p3/m, z7.d
+; CHECK-NEXT:    fcmgt p3.d, p0/z, z14.d, z3.d
+; CHECK-NEXT:    fcmgt p1.d, p0/z, z15.d, z3.d
+; CHECK-NEXT:    mov z0.d, p2/m, z7.d
+; CHECK-NEXT:    fcmgt p2.d, p0/z, z10.d, z3.d
+; CHECK-NEXT:    mov z2.d, p9/m, #0 // =0x0
+; CHECK-NEXT:    mov z31.d, p7/m, z7.d
 ; CHECK-NEXT:    mov z1.d, p5/m, #0 // =0x0
-; CHECK-NEXT:    mov z11.d, p6/m, z7.d
-; CHECK-NEXT:    fcmuo p6.d, p0/z, z15.d, z15.d
-; CHECK-NEXT:    fcmgt p5.d, p0/z, z8.d, z4.d
-; CHECK-NEXT:    mov z2.d, p8/m, #0 // =0x0
-; CHECK-NEXT:    sel z16.d, p3, z7.d, z23.d
+; CHECK-NEXT:    fcmgt p5.d, p0/z, z8.d, z3.d
+; CHECK-NEXT:    sel z15.d, p3, z7.d, z20.d
 ; CHECK-NEXT:    fcmuo p3.d, p0/z, z10.d, z10.d
 ; CHECK-NEXT:    mov z0.d, p4/m, #0 // =0x0
-; CHECK-NEXT:    sel z15.d, p2, z7.d, z20.d
-; CHECK-NEXT:    fcmuo p2.d, p0/z, z13.d, z13.d
+; CHECK-NEXT:    sel z16.d, p1, z7.d, z23.d
+; CHECK-NEXT:    fcmuo p1.d, p0/z, z14.d, z14.d
+; CHECK-NEXT:    fcmgt p4.d, p0/z, z27.d, z3.d
 ; CHECK-NEXT:    str z1, [x8, #14, mul vl]
-; CHECK-NEXT:    sel z1.d, p1, z7.d, z19.d
-; CHECK-NEXT:    fcmgt p1.d, p0/z, z28.d, z4.d
-; CHECK-NEXT:    fcmgt p4.d, p0/z, z27.d, z4.d
+; CHECK-NEXT:    sel z1.d, p2, z7.d, z19.d
+; CHECK-NEXT:    fcmgt p2.d, p0/z, z28.d, z3.d
 ; CHECK-NEXT:    str z2, [x8, #15, mul vl]
 ; CHECK-NEXT:    sel z2.d, p5, z7.d, z18.d
-; CHECK-NEXT:    mov z16.d, p6/m, #0 // =0x0
 ; CHECK-NEXT:    fcmuo p5.d, p0/z, z8.d, z8.d
+; CHECK-NEXT:    mov z16.d, p6/m, #0 // =0x0
 ; CHECK-NEXT:    str z0, [x8, #13, mul vl]
-; CHECK-NEXT:    mov z15.d, p2/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p2.d, p0/z, z28.d, z28.d
+; CHECK-NEXT:    mov z15.d, p1/m, #0 // =0x0
+; CHECK-NEXT:    fcmuo p1.d, p0/z, z28.d, z28.d
 ; CHECK-NEXT:    mov z1.d, p3/m, #0 // =0x0
-; CHECK-NEXT:    fcmgt p3.d, p0/z, z6.d, z4.d
-; CHECK-NEXT:    sel z0.d, p1, z7.d, z14.d
-; CHECK-NEXT:    fcmuo p1.d, p0/z, z27.d, z27.d
+; CHECK-NEXT:    sel z0.d, p2, z7.d, z13.d
+; CHECK-NEXT:    fcmuo p2.d, p0/z, z27.d, z27.d
 ; CHECK-NEXT:    sel z27.d, p4, z7.d, z9.d
+; CHECK-NEXT:    fcmgt p3.d, p0/z, z6.d, z3.d
 ; CHECK-NEXT:    str z16, [x8, #12, mul vl]
-; CHECK-NEXT:    fcmuo p4.d, p0/z, z25.d, z25.d
-; CHECK-NEXT:    str z15, [x8, #11, mul vl]
 ; CHECK-NEXT:    mov z2.d, p5/m, #0 // =0x0
+; CHECK-NEXT:    str z15, [x8, #11, mul vl]
+; CHECK-NEXT:    fcmuo p4.d, p0/z, z25.d, z25.d
 ; CHECK-NEXT:    fcmuo p5.d, p0/z, z6.d, z6.d
 ; CHECK-NEXT:    str z1, [x8, #10, mul vl]
-; CHECK-NEXT:    mov z0.d, p2/m, #0 // =0x0
-; CHECK-NEXT:    sel z1.d, p3, z7.d, z31.d
-; CHECK-NEXT:    fcmuo p3.d, p0/z, z5.d, z5.d
-; CHECK-NEXT:    ldr z5, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    mov z27.d, p1/m, #0 // =0x0
+; CHECK-NEXT:    mov z0.d, p1/m, #0 // =0x0
+; CHECK-NEXT:    fcmgt p1.d, p0/z, z12.d, z3.d
+; CHECK-NEXT:    ldr z1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    mov z27.d, p2/m, #0 // =0x0
 ; CHECK-NEXT:    str z2, [x8, #9, mul vl]
-; CHECK-NEXT:    fcmuo p1.d, p0/z, z3.d, z3.d
+; CHECK-NEXT:    fcmuo p2.d, p0/z, z4.d, z4.d
 ; CHECK-NEXT:    str z0, [x8, #8, mul vl]
 ; CHECK-NEXT:    mov z17.d, p4/m, #0 // =0x0
 ; CHECK-NEXT:    fcmuo p4.d, p0/z, z30.d, z30.d
-; CHECK-NEXT:    fcmgt p2.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    mov z1.d, p5/m, #0 // =0x0
-; CHECK-NEXT:    fcmuo p5.d, p0/z, z12.d, z12.d
+; CHECK-NEXT:    mov z1.d, p3/m, z7.d
 ; CHECK-NEXT:    str z27, [x8, #7, mul vl]
-; CHECK-NEXT:    fcmuo p0.d, p0/z, z5.d, z5.d
-; CHECK-NEXT:    mov z11.d, p3/m, #0 // =0x0
-; CHECK-NEXT:    ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    mov z24.d, p1/m, #0 // =0x0
+; CHECK-NEXT:    fcmuo p3.d, p0/z, z5.d, z5.d
+; CHECK-NEXT:    ldr z0, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    mov z24.d, p2/m, #0 // =0x0
 ; CHECK-NEXT:    str z17, [x8, #6, mul vl]
+; CHECK-NEXT:    mov z1.d, p5/m, #0 // =0x0
+; CHECK-NEXT:    fcmuo p5.d, p0/z, z11.d, z11.d
+; CHECK-NEXT:    fcmuo p0.d, p0/z, z12.d, z12.d
+; CHECK-NEXT:    mov z0.d, p1/m, z7.d
+; CHECK-NEXT:    mov z31.d, p3/m, #0 // =0x0
 ; CHECK-NEXT:    mov z29.d, p4/m, #0 // =0x0
+; CHECK-NEXT:    str z24, [x8, #3, mul vl]
 ; CHECK-NEXT:    str z1, [x8, #5, mul vl]
 ; CHECK-NEXT:    mov z26.d, p5/m, #0 // =0x0
-; CHECK-NEXT:    str z11, [x8, #4, mul vl]
-; CHECK-NEXT:    mov z0.d, p2/m, z7.d
-; CHECK-NEXT:    str z24, [x8, #3, mul vl]
+; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
+; CHECK-NEXT:    str z31, [x8, #4, mul vl]
 ; CHECK-NEXT:    str z29, [x8, #2, mul vl]
 ; CHECK-NEXT:    str z26, [x8, #1, mul vl]
-; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
 ; CHECK-NEXT:    str z0, [x8]
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/sve-lsr-scaled-index-addressing-mode.ll b/llvm/test/CodeGen/AArch64/sve-lsr-scaled-index-addressing-mode.ll
index c680f8942f9a8..41a1236e5d9e2 100644
--- a/llvm/test/CodeGen/AArch64/sve-lsr-scaled-index-addressing-mode.ll
+++ b/llvm/test/CodeGen/AArch64/sve-lsr-scaled-index-addressing-mode.ll
@@ -36,13 +36,13 @@ define void @ld_st_nxv8i16(ptr %in, ptr %out) {
 ;
 ; ASM-LABEL: ld_st_nxv8i16:
 ; ASM:       // %bb.0: // %entry
-; ASM-NEXT:    ptrue p0.h
+; ASM-NEXT:    ptrue p0.b
 ; ASM-NEXT:    mov x8, xzr
 ; ASM-NEXT:    cnth x9
 ; ASM-NEXT:  .LBB0_1: // %loop
 ; ASM-NEXT:    // =>This Inner Loop Header: Depth=1
 ; ASM-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; ASM-NEXT:    add z0.h, z0.h, #3
+; ASM-NEXT:    add z0.h, z0.h, #3 // =0x3
 ; ASM-NEXT:    st1h { z0.h }, p0, [x1, x8, lsl #1]
 ; ASM-NEXT:    add x8, x8, x9
 ; ASM-NEXT:    cmp x8, #1024
@@ -102,13 +102,13 @@ define void @masked_ld_st_nxv8i16(ptr %in, ptr %out, i64 %n) {
 ;
 ; ASM-LABEL: masked_ld_st_nxv8i16:
 ; ASM:       // %bb.0: // %entry
-; ASM-NEXT:    ptrue p0.h
+; ASM-NEXT:    ptrue p0.b
 ; ASM-NEXT:    mov x8, xzr
 ; ASM-NEXT:    cnth x9
 ; ASM-NEXT:  .LBB1_1: // %loop
 ; ASM-NEXT:    // =>This Inner Loop Header: Depth=1
 ; ASM-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; ASM-NEXT:    add z0.h, z0.h, #3
+; ASM-NEXT:    add z0.h, z0.h, #3 // =0x3
 ; ASM-NEXT:    st1h { z0.h }, p0, [x1, x8, lsl #1]
 ; ASM-NEXT:    add x8, x8, x9
 ; ASM-NEXT:    cmp x2, x8
diff --git a/llvm/test/CodeGen/AArch64/sve-lsrchain.ll b/llvm/test/CodeGen/AArch64/sve-lsrchain.ll
index 78f93f1ecbb26..955cd15c53ed7 100644
--- a/llvm/test/CodeGen/AArch64/sve-lsrchain.ll
+++ b/llvm/test/CodeGen/AArch64/sve-lsrchain.ll
@@ -50,9 +50,9 @@ define void @test(ptr nocapture noundef readonly %kernel, i32 noundef %kw, float
 ; CHECK-NEXT:    fmad z4.h, p0/m, z0.h, z5.h
 ; CHECK-NEXT:    ld1b { z5.b }, p1/z, [x16, x15]
 ; CHECK-NEXT:    fmla z4.h, p0/m, z5.h, z1.h
-; CHECK-NEXT:    ld1h { z5.h }, p0/z, [x17, x12, lsl #1]
+; CHECK-NEXT:    ld1h { z5.h }, p1/z, [x17, x12, lsl #1]
 ; CHECK-NEXT:    fmla z4.h, p0/m, z5.h, z2.h
-; CHECK-NEXT:    ld1h { z5.h }, p0/z, [x17, x13, lsl #1]
+; CHECK-NEXT:    ld1h { z5.h }, p1/z, [x17, x13, lsl #1]
 ; CHECK-NEXT:    fmla z4.h, p0/m, z5.h, z3.h
 ; CHECK-NEXT:    ldr z5, [x16, #1, mul vl]
 ; CHECK-NEXT:    str z4, [x16]
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll
index 5d58d8992694a..2c92dfd64a47f 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll
@@ -10,11 +10,12 @@ define <vscale x 2 x i64> @masked_sgather_sext(ptr %base, <vscale x 2 x i64> %of
 ; CHECK-LABEL: masked_sgather_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0, z0.d]
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    movprfx z2, z0
 ; CHECK-NEXT:    sxtb z2.d, p0/m, z0.d
 ; CHECK-NEXT:    add z0.d, z0.d, z1.d
 ; CHECK-NEXT:    sxtb z0.d, p0/m, z0.d
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z2.d
 ; CHECK-NEXT:    ret
   %ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
@@ -93,7 +94,7 @@ define <vscale x 4 x half> @masked_gather_nxv4f16(<vscale x 4 x ptr> %ptrs, <vsc
 define <vscale x 2 x float> @masked_gather_nxv2f32(ptr %base, <vscale x 2 x i16> %indices, <vscale x 2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    sxth z0.d, p1/m, z0.d
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, lsl #2]
 ; CHECK-NEXT:    ret
@@ -142,7 +143,7 @@ define <vscale x 8 x bfloat> @masked_gather_nxv8bf16(ptr %base, <vscale x 8 x i1
 define <vscale x 4 x double> @masked_gather_nxv4f64(ptr %base, <vscale x 4 x i16> %indices, <vscale x 4 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv4f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    sxth z1.s, p1/m, z0.s
 ; CHECK-NEXT:    punpklo p1.h, p0.b
@@ -241,7 +242,7 @@ define <vscale x 4 x i32> @masked_sgather_nxv4i8(<vscale x 4 x ptr> %ptrs, <vsca
 ; CHECK-NEXT:    punpklo p0.h, p0.b
 ; CHECK-NEXT:    ld1b { z1.d }, p1/z, [z1.d]
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [z0.d]
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
 ; CHECK-NEXT:    sxtb z0.s, p0/m, z0.s
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll b/llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll
index 5277c2efab85d..693f0e768d511 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll
@@ -68,7 +68,7 @@ define <vscale x 8 x i16> @masked_sload_nxv8i8(ptr %a, <vscale x 8 x i1> %mask)
 define <vscale x 2 x i64> @masked_sload_passthru(ptr %a, <vscale x 2 x i1> %mask, <vscale x 2 x i32> %passthru) {
 ; CHECK-LABEL: masked_sload_passthru:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    ld1sw { z1.d }, p0/z, [x0]
 ; CHECK-NEXT:    sxtw z0.d, p1/m, z0.d
 ; CHECK-NEXT:    mov z0.d, p0/m, z1.d
diff --git a/llvm/test/CodeGen/AArch64/sve-partial-reduce-dot-product.ll b/llvm/test/CodeGen/AArch64/sve-partial-reduce-dot-product.ll
index 8d3b12e359f3f..f52fbc67b6b79 100644
--- a/llvm/test/CodeGen/AArch64/sve-partial-reduce-dot-product.ll
+++ b/llvm/test/CodeGen/AArch64/sve-partial-reduce-dot-product.ll
@@ -1074,7 +1074,7 @@ entry:
 define <vscale x 2 x i64> @sdot_different_types(<vscale x 2 x i64> %acc, <vscale x 8 x i16> %a, <vscale x 8 x i8> %b){
 ; CHECK-LABEL: sdot_different_types:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sunpklo z3.s, z1.h
 ; CHECK-NEXT:    sunpkhi z1.s, z1.h
 ; CHECK-NEXT:    sxtb z2.h, p0/m, z2.h
@@ -1099,7 +1099,7 @@ define <vscale x 2 x i64> @sdot_different_types(<vscale x 2 x i64> %acc, <vscale
 ;
 ; CHECK-NEWLOWERING-LABEL: sdot_different_types:
 ; CHECK-NEWLOWERING:       // %bb.0: // %entry
-; CHECK-NEWLOWERING-NEXT:    ptrue p0.h
+; CHECK-NEWLOWERING-NEXT:    ptrue p0.b
 ; CHECK-NEWLOWERING-NEXT:    sunpklo z3.s, z1.h
 ; CHECK-NEWLOWERING-NEXT:    sunpkhi z1.s, z1.h
 ; CHECK-NEWLOWERING-NEXT:    sxtb z2.h, p0/m, z2.h
@@ -1132,7 +1132,7 @@ entry:
 define <vscale x 2 x i64> @usdot_different_types(<vscale x 2 x i64> %acc, <vscale x 8 x i16> %a, <vscale x 8 x i8> %b){
 ; CHECK-LABEL: usdot_different_types:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    uunpklo z3.s, z1.h
 ; CHECK-NEXT:    uunpkhi z1.s, z1.h
 ; CHECK-NEXT:    sxtb z2.h, p0/m, z2.h
@@ -1157,7 +1157,7 @@ define <vscale x 2 x i64> @usdot_different_types(<vscale x 2 x i64> %acc, <vscal
 ;
 ; CHECK-NEWLOWERING-LABEL: usdot_different_types:
 ; CHECK-NEWLOWERING:       // %bb.0: // %entry
-; CHECK-NEWLOWERING-NEXT:    ptrue p0.h
+; CHECK-NEWLOWERING-NEXT:    ptrue p0.b
 ; CHECK-NEWLOWERING-NEXT:    uunpklo z3.s, z1.h
 ; CHECK-NEWLOWERING-NEXT:    uunpkhi z1.s, z1.h
 ; CHECK-NEWLOWERING-NEXT:    sxtb z2.h, p0/m, z2.h
@@ -1302,7 +1302,7 @@ entry:
 define <vscale x 2 x i16> @sdot_nxv8i8_promote (<vscale x 2 x i16> %acc, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b){
 ; CHECK-LABEL: sdot_nxv8i8_promote:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtb z1.h, p0/m, z1.h
 ; CHECK-NEXT:    sxtb z2.h, p0/m, z2.h
 ; CHECK-NEXT:    ptrue p0.d
@@ -1327,7 +1327,7 @@ define <vscale x 2 x i16> @sdot_nxv8i8_promote (<vscale x 2 x i16> %acc, <vscale
 ;
 ; CHECK-NEWLOWERING-LABEL: sdot_nxv8i8_promote:
 ; CHECK-NEWLOWERING:       // %bb.0: // %entry
-; CHECK-NEWLOWERING-NEXT:    ptrue p0.h
+; CHECK-NEWLOWERING-NEXT:    ptrue p0.b
 ; CHECK-NEWLOWERING-NEXT:    sxtb z1.h, p0/m, z1.h
 ; CHECK-NEWLOWERING-NEXT:    sxtb z2.h, p0/m, z2.h
 ; CHECK-NEWLOWERING-NEXT:    ptrue p0.d
diff --git a/llvm/test/CodeGen/AArch64/sve-partial-reduce-wide-add.ll b/llvm/test/CodeGen/AArch64/sve-partial-reduce-wide-add.ll
index 11fb60ead4fb2..4b43a7edd56c1 100644
--- a/llvm/test/CodeGen/AArch64/sve-partial-reduce-wide-add.ll
+++ b/llvm/test/CodeGen/AArch64/sve-partial-reduce-wide-add.ll
@@ -174,7 +174,7 @@ entry:
 define <vscale x 2 x i32> @signed_wide_add_nxv4i16(<vscale x 2 x i32> %acc, <vscale x 4 x i16> %input){
 ; CHECK-LABEL: signed_wide_add_nxv4i16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxth z1.s, p0/m, z1.s
 ; CHECK-NEXT:    uunpklo z2.d, z1.s
 ; CHECK-NEXT:    uunpkhi z1.d, z1.s
diff --git a/llvm/test/CodeGen/AArch64/sve-pr92779.ll b/llvm/test/CodeGen/AArch64/sve-pr92779.ll
index 3f34d79b3bb49..be937a31b07ef 100644
--- a/llvm/test/CodeGen/AArch64/sve-pr92779.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pr92779.ll
@@ -6,7 +6,7 @@ define void @main(ptr %0) {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    movi v1.2d, #0000000000000000
-; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    ptrue p0.b, vl1
 ; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
 ; CHECK-NEXT:    uzp1 v0.2s, v1.2s, v0.2s
 ; CHECK-NEXT:    neg v0.2s, v0.2s
diff --git a/llvm/test/CodeGen/AArch64/sve-pred-selectop2.ll b/llvm/test/CodeGen/AArch64/sve-pred-selectop2.ll
index bbc94f568dd0a..98e0a37857606 100644
--- a/llvm/test/CodeGen/AArch64/sve-pred-selectop2.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-selectop2.ll
@@ -989,8 +989,9 @@ define <vscale x 4 x float> @fadd_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4
 ; CHECK-LABEL: fadd_nxv4f32_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
 entry:
@@ -1004,8 +1005,9 @@ define <vscale x 8 x half> @fadd_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x
 ; CHECK-LABEL: fadd_nxv8f16_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
 entry:
@@ -1019,8 +1021,9 @@ define <vscale x 2 x double> @fadd_nxv2f64_x(<vscale x 2 x double> %x, <vscale x
 ; CHECK-LABEL: fadd_nxv2f64_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
@@ -1034,8 +1037,9 @@ define <vscale x 4 x float> @fsub_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4
 ; CHECK-LABEL: fsub_nxv4f32_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
 entry:
@@ -1049,8 +1053,9 @@ define <vscale x 8 x half> @fsub_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x
 ; CHECK-LABEL: fsub_nxv8f16_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
 entry:
@@ -1064,8 +1069,9 @@ define <vscale x 2 x double> @fsub_nxv2f64_x(<vscale x 2 x double> %x, <vscale x
 ; CHECK-LABEL: fsub_nxv2f64_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fsub z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
@@ -1079,8 +1085,9 @@ define <vscale x 4 x float> @fmul_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4
 ; CHECK-LABEL: fmul_nxv4f32_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmul z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
 entry:
@@ -1094,8 +1101,9 @@ define <vscale x 8 x half> @fmul_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x
 ; CHECK-LABEL: fmul_nxv8f16_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
 entry:
@@ -1109,8 +1117,9 @@ define <vscale x 2 x double> @fmul_nxv2f64_x(<vscale x 2 x double> %x, <vscale x
 ; CHECK-LABEL: fmul_nxv2f64_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmul z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
@@ -1124,9 +1133,10 @@ define <vscale x 4 x float> @fdiv_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4
 ; CHECK-LABEL: fdiv_nxv4f32_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fdivr z1.s, p0/m, z1.s, z0.s
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    mov z0.s, p0/m, z1.s
 ; CHECK-NEXT:    ret
 entry:
@@ -1140,9 +1150,10 @@ define <vscale x 8 x half> @fdiv_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x
 ; CHECK-LABEL: fdiv_nxv8f16_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fdivr z1.h, p0/m, z1.h, z0.h
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    mov z0.h, p0/m, z1.h
 ; CHECK-NEXT:    ret
 entry:
@@ -1156,9 +1167,10 @@ define <vscale x 2 x double> @fdiv_nxv2f64_x(<vscale x 2 x double> %x, <vscale x
 ; CHECK-LABEL: fdiv_nxv2f64_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fdivr z1.d, p0/m, z1.d, z0.d
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    mov z0.d, p0/m, z1.d
 ; CHECK-NEXT:    ret
 entry:
@@ -1172,8 +1184,9 @@ define <vscale x 4 x float> @minnum_nxv4f32_x(<vscale x 4 x float> %x, <vscale x
 ; CHECK-LABEL: minnum_nxv4f32_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fminnm z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
 entry:
@@ -1187,8 +1200,9 @@ define <vscale x 8 x half> @minnum_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8
 ; CHECK-LABEL: minnum_nxv8f16_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fminnm z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
 entry:
@@ -1202,8 +1216,9 @@ define <vscale x 2 x double> @minnum_nxv2f64_x(<vscale x 2 x double> %x, <vscale
 ; CHECK-LABEL: minnum_nxv2f64_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fminnm z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
@@ -1217,8 +1232,9 @@ define <vscale x 4 x float> @maxnum_nxv4f32_x(<vscale x 4 x float> %x, <vscale x
 ; CHECK-LABEL: maxnum_nxv4f32_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmaxnm z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
 entry:
@@ -1232,8 +1248,9 @@ define <vscale x 8 x half> @maxnum_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8
 ; CHECK-LABEL: maxnum_nxv8f16_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmaxnm z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
 entry:
@@ -1247,8 +1264,9 @@ define <vscale x 2 x double> @maxnum_nxv2f64_x(<vscale x 2 x double> %x, <vscale
 ; CHECK-LABEL: maxnum_nxv2f64_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmaxnm z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
@@ -1262,8 +1280,9 @@ define <vscale x 4 x float> @minimum_nxv4f32_x(<vscale x 4 x float> %x, <vscale
 ; CHECK-LABEL: minimum_nxv4f32_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmin z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
 entry:
@@ -1277,8 +1296,9 @@ define <vscale x 8 x half> @minimum_nxv8f16_x(<vscale x 8 x half> %x, <vscale x
 ; CHECK-LABEL: minimum_nxv8f16_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmin z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
 entry:
@@ -1292,8 +1312,9 @@ define <vscale x 2 x double> @minimum_nxv2f64_x(<vscale x 2 x double> %x, <vscal
 ; CHECK-LABEL: minimum_nxv2f64_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmin z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
@@ -1307,8 +1328,9 @@ define <vscale x 4 x float> @maximum_nxv4f32_x(<vscale x 4 x float> %x, <vscale
 ; CHECK-LABEL: maximum_nxv4f32_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmax z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
 entry:
@@ -1322,8 +1344,9 @@ define <vscale x 8 x half> @maximum_nxv8f16_x(<vscale x 8 x half> %x, <vscale x
 ; CHECK-LABEL: maximum_nxv8f16_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmax z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
 entry:
@@ -1337,8 +1360,9 @@ define <vscale x 2 x double> @maximum_nxv2f64_x(<vscale x 2 x double> %x, <vscal
 ; CHECK-LABEL: maximum_nxv2f64_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmax z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
@@ -1352,8 +1376,9 @@ define <vscale x 4 x float> @fmai_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4
 ; CHECK-LABEL: fmai_nxv4f32_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z3.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmla z0.s, p0/m, z1.s, z2.s
 ; CHECK-NEXT:    ret
 entry:
@@ -1367,8 +1392,9 @@ define <vscale x 8 x half> @fmai_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x
 ; CHECK-LABEL: fmai_nxv8f16_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z3.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmla z0.h, p0/m, z1.h, z2.h
 ; CHECK-NEXT:    ret
 entry:
@@ -1382,8 +1408,9 @@ define <vscale x 2 x double> @fmai_nxv2f64_x(<vscale x 2 x double> %x, <vscale x
 ; CHECK-LABEL: fmai_nxv2f64_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z3.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmla z0.d, p0/m, z1.d, z2.d
 ; CHECK-NEXT:    ret
 entry:
@@ -1397,8 +1424,9 @@ define <vscale x 4 x float> @fma_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4
 ; CHECK-LABEL: fma_nxv4f32_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z3.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmla z0.s, p0/m, z1.s, z2.s
 ; CHECK-NEXT:    ret
 entry:
@@ -1413,8 +1441,9 @@ define <vscale x 8 x half> @fma_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x
 ; CHECK-LABEL: fma_nxv8f16_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z3.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmla z0.h, p0/m, z1.h, z2.h
 ; CHECK-NEXT:    ret
 entry:
@@ -1429,8 +1458,9 @@ define <vscale x 2 x double> @fma_nxv2f64_x(<vscale x 2 x double> %x, <vscale x
 ; CHECK-LABEL: fma_nxv2f64_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z3.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmla z0.d, p0/m, z1.d, z2.d
 ; CHECK-NEXT:    ret
 entry:
@@ -2470,8 +2500,9 @@ define <vscale x 4 x float> @fadd_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fadd z0.s, z0.s, z1.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    sel z0.s, p0, z0.s, z1.s
 ; CHECK-NEXT:    ret
 entry:
@@ -2486,8 +2517,9 @@ define <vscale x 8 x half> @fadd_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    fadd z0.h, z0.h, z1.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z1.h
 ; CHECK-NEXT:    ret
 entry:
@@ -2502,8 +2534,9 @@ define <vscale x 2 x double> @fadd_nxv2f64_y(<vscale x 2 x double> %x, <vscale x
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fadd z0.d, z0.d, z1.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    sel z0.d, p0, z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
@@ -2517,8 +2550,9 @@ define <vscale x 4 x float> @fsub_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4
 ; CHECK-LABEL: fsub_nxv4f32_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fsubr z1.s, p0/m, z1.s, z0.s
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -2533,8 +2567,9 @@ define <vscale x 8 x half> @fsub_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x
 ; CHECK-LABEL: fsub_nxv8f16_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fsubr z1.h, p0/m, z1.h, z0.h
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -2549,8 +2584,9 @@ define <vscale x 2 x double> @fsub_nxv2f64_y(<vscale x 2 x double> %x, <vscale x
 ; CHECK-LABEL: fsub_nxv2f64_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fsubr z1.d, p0/m, z1.d, z0.d
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -2566,8 +2602,9 @@ define <vscale x 4 x float> @fmul_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fmul z0.s, z0.s, z1.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    sel z0.s, p0, z0.s, z1.s
 ; CHECK-NEXT:    ret
 entry:
@@ -2582,8 +2619,9 @@ define <vscale x 8 x half> @fmul_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    fmul z0.h, z0.h, z1.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z1.h
 ; CHECK-NEXT:    ret
 entry:
@@ -2598,8 +2636,9 @@ define <vscale x 2 x double> @fmul_nxv2f64_y(<vscale x 2 x double> %x, <vscale x
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fmul z0.d, z0.d, z1.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    sel z0.d, p0, z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
@@ -2613,9 +2652,10 @@ define <vscale x 4 x float> @fdiv_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4
 ; CHECK-LABEL: fdiv_nxv4f32_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fdiv z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    sel z0.s, p0, z0.s, z1.s
 ; CHECK-NEXT:    ret
 entry:
@@ -2629,9 +2669,10 @@ define <vscale x 8 x half> @fdiv_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x
 ; CHECK-LABEL: fdiv_nxv8f16_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fdiv z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z1.h
 ; CHECK-NEXT:    ret
 entry:
@@ -2645,9 +2686,10 @@ define <vscale x 2 x double> @fdiv_nxv2f64_y(<vscale x 2 x double> %x, <vscale x
 ; CHECK-LABEL: fdiv_nxv2f64_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fdiv z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    sel z0.d, p0, z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
@@ -2661,8 +2703,9 @@ define <vscale x 4 x float> @minnum_nxv4f32_y(<vscale x 4 x float> %x, <vscale x
 ; CHECK-LABEL: minnum_nxv4f32_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fminnm z1.s, p0/m, z1.s, z0.s
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -2677,8 +2720,9 @@ define <vscale x 8 x half> @minnum_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8
 ; CHECK-LABEL: minnum_nxv8f16_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fminnm z1.h, p0/m, z1.h, z0.h
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -2693,8 +2737,9 @@ define <vscale x 2 x double> @minnum_nxv2f64_y(<vscale x 2 x double> %x, <vscale
 ; CHECK-LABEL: minnum_nxv2f64_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fminnm z1.d, p0/m, z1.d, z0.d
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -2709,8 +2754,9 @@ define <vscale x 4 x float> @maxnum_nxv4f32_y(<vscale x 4 x float> %x, <vscale x
 ; CHECK-LABEL: maxnum_nxv4f32_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmaxnm z1.s, p0/m, z1.s, z0.s
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -2725,8 +2771,9 @@ define <vscale x 8 x half> @maxnum_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8
 ; CHECK-LABEL: maxnum_nxv8f16_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmaxnm z1.h, p0/m, z1.h, z0.h
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -2741,8 +2788,9 @@ define <vscale x 2 x double> @maxnum_nxv2f64_y(<vscale x 2 x double> %x, <vscale
 ; CHECK-LABEL: maxnum_nxv2f64_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmaxnm z1.d, p0/m, z1.d, z0.d
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -2757,8 +2805,9 @@ define <vscale x 4 x float> @minimum_nxv4f32_y(<vscale x 4 x float> %x, <vscale
 ; CHECK-LABEL: minimum_nxv4f32_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmin z1.s, p0/m, z1.s, z0.s
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -2773,8 +2822,9 @@ define <vscale x 8 x half> @minimum_nxv8f16_y(<vscale x 8 x half> %x, <vscale x
 ; CHECK-LABEL: minimum_nxv8f16_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmin z1.h, p0/m, z1.h, z0.h
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -2789,8 +2839,9 @@ define <vscale x 2 x double> @minimum_nxv2f64_y(<vscale x 2 x double> %x, <vscal
 ; CHECK-LABEL: minimum_nxv2f64_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmin z1.d, p0/m, z1.d, z0.d
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -2805,8 +2856,9 @@ define <vscale x 4 x float> @maximum_nxv4f32_y(<vscale x 4 x float> %x, <vscale
 ; CHECK-LABEL: maximum_nxv4f32_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmax z1.s, p0/m, z1.s, z0.s
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -2821,8 +2873,9 @@ define <vscale x 8 x half> @maximum_nxv8f16_y(<vscale x 8 x half> %x, <vscale x
 ; CHECK-LABEL: maximum_nxv8f16_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmax z1.h, p0/m, z1.h, z0.h
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -2837,8 +2890,9 @@ define <vscale x 2 x double> @maximum_nxv2f64_y(<vscale x 2 x double> %x, <vscal
 ; CHECK-LABEL: maximum_nxv2f64_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmax z1.d, p0/m, z1.d, z0.d
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -2853,9 +2907,10 @@ define <vscale x 4 x float> @fmai_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4
 ; CHECK-LABEL: fmai_nxv4f32_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z3.s, #0.0
 ; CHECK-NEXT:    fmla z0.s, p0/m, z1.s, z2.s
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    sel z0.s, p0, z0.s, z1.s
 ; CHECK-NEXT:    ret
 entry:
@@ -2869,9 +2924,10 @@ define <vscale x 8 x half> @fmai_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x
 ; CHECK-LABEL: fmai_nxv8f16_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z3.h, #0.0
 ; CHECK-NEXT:    fmla z0.h, p0/m, z1.h, z2.h
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z1.h
 ; CHECK-NEXT:    ret
 entry:
@@ -2885,9 +2941,10 @@ define <vscale x 2 x double> @fmai_nxv2f64_y(<vscale x 2 x double> %x, <vscale x
 ; CHECK-LABEL: fmai_nxv2f64_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z3.d, #0.0
 ; CHECK-NEXT:    fmla z0.d, p0/m, z1.d, z2.d
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    sel z0.d, p0, z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
@@ -2901,9 +2958,10 @@ define <vscale x 4 x float> @fma_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4
 ; CHECK-LABEL: fma_nxv4f32_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z3.s, #0.0
 ; CHECK-NEXT:    fmla z0.s, p0/m, z1.s, z2.s
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    sel z0.s, p0, z0.s, z1.s
 ; CHECK-NEXT:    ret
 entry:
@@ -2918,9 +2976,10 @@ define <vscale x 8 x half> @fma_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x
 ; CHECK-LABEL: fma_nxv8f16_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z3.h, #0.0
 ; CHECK-NEXT:    fmla z0.h, p0/m, z1.h, z2.h
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z1.h
 ; CHECK-NEXT:    ret
 entry:
@@ -2935,9 +2994,10 @@ define <vscale x 2 x double> @fma_nxv2f64_y(<vscale x 2 x double> %x, <vscale x
 ; CHECK-LABEL: fma_nxv2f64_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z3.d, #0.0
 ; CHECK-NEXT:    fmla z0.d, p0/m, z1.d, z2.d
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    sel z0.d, p0, z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/AArch64/sve-pred-selectop3.ll b/llvm/test/CodeGen/AArch64/sve-pred-selectop3.ll
index 66dece82a0ac5..dd0a08cf9a05e 100644
--- a/llvm/test/CodeGen/AArch64/sve-pred-selectop3.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-selectop3.ll
@@ -641,8 +641,9 @@ define <vscale x 4 x float> @fadd_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4
 ; CHECK-LABEL: fadd_nxv4f32_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
 entry:
@@ -656,8 +657,9 @@ define <vscale x 8 x half> @fadd_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x
 ; CHECK-LABEL: fadd_nxv8f16_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
 entry:
@@ -671,8 +673,9 @@ define <vscale x 2 x double> @fadd_nxv2f64_x(<vscale x 2 x double> %x, <vscale x
 ; CHECK-LABEL: fadd_nxv2f64_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
@@ -686,8 +689,9 @@ define <vscale x 4 x float> @fsub_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4
 ; CHECK-LABEL: fsub_nxv4f32_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
 entry:
@@ -701,8 +705,9 @@ define <vscale x 8 x half> @fsub_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x
 ; CHECK-LABEL: fsub_nxv8f16_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
 entry:
@@ -716,8 +721,9 @@ define <vscale x 2 x double> @fsub_nxv2f64_x(<vscale x 2 x double> %x, <vscale x
 ; CHECK-LABEL: fsub_nxv2f64_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fsub z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
@@ -731,8 +737,9 @@ define <vscale x 4 x float> @fmul_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4
 ; CHECK-LABEL: fmul_nxv4f32_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmul z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
 entry:
@@ -746,8 +753,9 @@ define <vscale x 8 x half> @fmul_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x
 ; CHECK-LABEL: fmul_nxv8f16_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
 entry:
@@ -761,8 +769,9 @@ define <vscale x 2 x double> @fmul_nxv2f64_x(<vscale x 2 x double> %x, <vscale x
 ; CHECK-LABEL: fmul_nxv2f64_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmul z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
@@ -776,9 +785,10 @@ define <vscale x 4 x float> @fdiv_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4
 ; CHECK-LABEL: fdiv_nxv4f32_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fdivr z1.s, p0/m, z1.s, z0.s
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    mov z0.s, p0/m, z1.s
 ; CHECK-NEXT:    ret
 entry:
@@ -792,9 +802,10 @@ define <vscale x 8 x half> @fdiv_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x
 ; CHECK-LABEL: fdiv_nxv8f16_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fdivr z1.h, p0/m, z1.h, z0.h
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    mov z0.h, p0/m, z1.h
 ; CHECK-NEXT:    ret
 entry:
@@ -808,9 +819,10 @@ define <vscale x 2 x double> @fdiv_nxv2f64_x(<vscale x 2 x double> %x, <vscale x
 ; CHECK-LABEL: fdiv_nxv2f64_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fdivr z1.d, p0/m, z1.d, z0.d
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    mov z0.d, p0/m, z1.d
 ; CHECK-NEXT:    ret
 entry:
@@ -824,8 +836,9 @@ define <vscale x 4 x float> @fma_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4
 ; CHECK-LABEL: fma_nxv4f32_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z3.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmla z0.s, p0/m, z1.s, z2.s
 ; CHECK-NEXT:    ret
 entry:
@@ -840,8 +853,9 @@ define <vscale x 8 x half> @fma_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x
 ; CHECK-LABEL: fma_nxv8f16_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z3.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmla z0.h, p0/m, z1.h, z2.h
 ; CHECK-NEXT:    ret
 entry:
@@ -856,8 +870,9 @@ define <vscale x 2 x double> @fma_nxv2f64_x(<vscale x 2 x double> %x, <vscale x
 ; CHECK-LABEL: fma_nxv2f64_x:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z3.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmla z0.d, p0/m, z1.d, z2.d
 ; CHECK-NEXT:    ret
 entry:
@@ -1540,8 +1555,9 @@ define <vscale x 4 x float> @fadd_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4
 ; CHECK-LABEL: fadd_nxv4f32_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fadd z1.s, p0/m, z1.s, z0.s
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -1556,8 +1572,9 @@ define <vscale x 8 x half> @fadd_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x
 ; CHECK-LABEL: fadd_nxv8f16_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fadd z1.h, p0/m, z1.h, z0.h
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -1572,8 +1589,9 @@ define <vscale x 2 x double> @fadd_nxv2f64_y(<vscale x 2 x double> %x, <vscale x
 ; CHECK-LABEL: fadd_nxv2f64_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fadd z1.d, p0/m, z1.d, z0.d
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -1588,8 +1606,9 @@ define <vscale x 4 x float> @fsub_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4
 ; CHECK-LABEL: fsub_nxv4f32_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fsubr z1.s, p0/m, z1.s, z0.s
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -1604,8 +1623,9 @@ define <vscale x 8 x half> @fsub_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x
 ; CHECK-LABEL: fsub_nxv8f16_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fsubr z1.h, p0/m, z1.h, z0.h
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -1620,8 +1640,9 @@ define <vscale x 2 x double> @fsub_nxv2f64_y(<vscale x 2 x double> %x, <vscale x
 ; CHECK-LABEL: fsub_nxv2f64_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fsubr z1.d, p0/m, z1.d, z0.d
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -1636,8 +1657,9 @@ define <vscale x 4 x float> @fmul_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4
 ; CHECK-LABEL: fmul_nxv4f32_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmul z1.s, p0/m, z1.s, z0.s
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -1652,8 +1674,9 @@ define <vscale x 8 x half> @fmul_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x
 ; CHECK-LABEL: fmul_nxv8f16_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmul z1.h, p0/m, z1.h, z0.h
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -1668,8 +1691,9 @@ define <vscale x 2 x double> @fmul_nxv2f64_y(<vscale x 2 x double> %x, <vscale x
 ; CHECK-LABEL: fmul_nxv2f64_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    fmul z1.d, p0/m, z1.d, z0.d
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -1684,9 +1708,10 @@ define <vscale x 4 x float> @fdiv_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4
 ; CHECK-LABEL: fdiv_nxv4f32_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fdiv z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    sel z0.s, p0, z0.s, z1.s
 ; CHECK-NEXT:    ret
 entry:
@@ -1700,9 +1725,10 @@ define <vscale x 8 x half> @fdiv_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x
 ; CHECK-LABEL: fdiv_nxv8f16_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fdiv z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z1.h
 ; CHECK-NEXT:    ret
 entry:
@@ -1716,9 +1742,10 @@ define <vscale x 2 x double> @fdiv_nxv2f64_y(<vscale x 2 x double> %x, <vscale x
 ; CHECK-LABEL: fdiv_nxv2f64_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fdiv z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    sel z0.d, p0, z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
@@ -1732,9 +1759,10 @@ define <vscale x 4 x float> @fmai_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4
 ; CHECK-LABEL: fmai_nxv4f32_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z3.s, #0.0
 ; CHECK-NEXT:    fmla z0.s, p0/m, z1.s, z2.s
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    sel z0.s, p0, z0.s, z1.s
 ; CHECK-NEXT:    ret
 entry:
@@ -1748,9 +1776,10 @@ define <vscale x 8 x half> @fmai_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x
 ; CHECK-LABEL: fmai_nxv8f16_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z3.h, #0.0
 ; CHECK-NEXT:    fmla z0.h, p0/m, z1.h, z2.h
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z1.h
 ; CHECK-NEXT:    ret
 entry:
@@ -1764,9 +1793,10 @@ define <vscale x 2 x double> @fmai_nxv2f64_y(<vscale x 2 x double> %x, <vscale x
 ; CHECK-LABEL: fmai_nxv2f64_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z3.d, #0.0
 ; CHECK-NEXT:    fmla z0.d, p0/m, z1.d, z2.d
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    sel z0.d, p0, z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
@@ -1780,9 +1810,10 @@ define <vscale x 4 x float> @fma_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4
 ; CHECK-LABEL: fma_nxv4f32_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.s, p0/z, z3.s, #0.0
 ; CHECK-NEXT:    fmla z0.s, p0/m, z1.s, z2.s
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    sel z0.s, p0, z0.s, z1.s
 ; CHECK-NEXT:    ret
 entry:
@@ -1797,9 +1828,10 @@ define <vscale x 8 x half> @fma_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x
 ; CHECK-LABEL: fma_nxv8f16_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.h, p0/z, z3.h, #0.0
 ; CHECK-NEXT:    fmla z0.h, p0/m, z1.h, z2.h
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z1.h
 ; CHECK-NEXT:    ret
 entry:
@@ -1814,9 +1846,10 @@ define <vscale x 2 x double> @fma_nxv2f64_y(<vscale x 2 x double> %x, <vscale x
 ; CHECK-LABEL: fma_nxv2f64_y:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    fcmle p1.d, p0/z, z3.d, #0.0
 ; CHECK-NEXT:    fmla z0.d, p0/m, z1.d, z2.d
-; CHECK-NEXT:    not p0.b, p0/z, p1.b
+; CHECK-NEXT:    eor p0.b, p0/z, p1.b, p2.b
 ; CHECK-NEXT:    sel z0.d, p0, z0.d, z1.d
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/AArch64/sve-ptest-removal-cmple.ll b/llvm/test/CodeGen/AArch64/sve-ptest-removal-cmple.ll
index 8bd38d7bc44df..d9681247e662b 100644
--- a/llvm/test/CodeGen/AArch64/sve-ptest-removal-cmple.ll
+++ b/llvm/test/CodeGen/AArch64/sve-ptest-removal-cmple.ll
@@ -116,7 +116,7 @@ define i1 @cmp8_ptest_any_px(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vsc
 define i1 @cmp8_ptest_any_px_bad_ptrue(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
 ; CHECK-LABEL: cmp8_ptest_any_px_bad_ptrue:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cmpge p0.b, p0/z, z0.b, z1.b
 ; CHECK-NEXT:    cset w0, ne
 ; CHECK-NEXT:    ret
@@ -372,7 +372,7 @@ define i1 @cmp32_ptest_first_ax(<vscale x 16 x i1> %pg, <vscale x 4 x i32> %a, <
 ; CHECK-LABEL: cmp32_ptest_first_ax:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpge p0.s, p0/z, z0.s, z1.s
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    ptest p1, p0.b
 ; CHECK-NEXT:    cset w0, mi
 ; CHECK-NEXT:    ret
@@ -391,7 +391,7 @@ define i1 @cmp32_ptest_last_ax(<vscale x 16 x i1> %pg, <vscale x 4 x i32> %a, <v
 ; CHECK-LABEL: cmp32_ptest_last_ax:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpge p0.s, p0/z, z0.s, z1.s
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    ptest p1, p0.b
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
@@ -409,6 +409,8 @@ define i1 @cmp32_ptest_any_ax(<vscale x 16 x i1> %pg, <vscale x 4 x i32> %a, <vs
 ; CHECK-LABEL: cmp32_ptest_any_ax:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpge p0.s, p0/z, z0.s, z1.s
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    ptest p1, p0.b
 ; CHECK-NEXT:    cset w0, ne
 ; CHECK-NEXT:    ret
   %1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
@@ -476,8 +478,9 @@ define i1 @cmp8_ptest_any_aa(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
 define i1 @cmp32_ptest_first_aa(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: cmp32_ptest_first_aa:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    cmpge p0.s, p0/z, z0.s, z1.s
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    cmpge p1.s, p0/z, z0.s, z1.s
+; CHECK-NEXT:    ptest p0, p1.b
 ; CHECK-NEXT:    cset w0, mi
 ; CHECK-NEXT:    ret
   %1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
@@ -492,8 +495,9 @@ define i1 @cmp32_ptest_first_aa(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
 define i1 @cmp32_ptest_last_aa(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: cmp32_ptest_last_aa:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    cmpge p0.s, p0/z, z0.s, z1.s
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    cmpge p1.s, p0/z, z0.s, z1.s
+; CHECK-NEXT:    ptest p0, p1.b
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
@@ -508,7 +512,7 @@ define i1 @cmp32_ptest_last_aa(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
 define i1 @cmp32_ptest_any_aa(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: cmp32_ptest_any_aa:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cmpge p0.s, p0/z, z0.s, z1.s
 ; CHECK-NEXT:    cset w0, ne
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-reassocadd.ll b/llvm/test/CodeGen/AArch64/sve-reassocadd.ll
index 58697e6c2ec71..22b363e3494aa 100644
--- a/llvm/test/CodeGen/AArch64/sve-reassocadd.ll
+++ b/llvm/test/CodeGen/AArch64/sve-reassocadd.ll
@@ -38,7 +38,7 @@ define <vscale x 8 x i16> @i16_1v_8s(ptr %b) {
 ; CHECK-LABEL: i16_1v_8s:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    rdvl x8, #1
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov x9, #4 // =0x4
 ; CHECK-NEXT:    add x8, x0, x8
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x8, x9, lsl #1]
@@ -71,7 +71,7 @@ define <vscale x 8 x i16> @i16_2v_8s(ptr %b) {
 ; CHECK-LABEL: i16_2v_8s:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    rdvl x8, #2
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov x9, #4 // =0x4
 ; CHECK-NEXT:    add x8, x0, x8
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x8, x9, lsl #1]
@@ -104,7 +104,7 @@ define <vscale x 4 x i32> @i32_1v_16s(ptr %b) {
 ; CHECK-LABEL: i32_1v_16s:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    rdvl x8, #1
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov x9, #4 // =0x4
 ; CHECK-NEXT:    add x8, x0, x8
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x8, x9, lsl #2]
@@ -137,7 +137,7 @@ define <vscale x 2 x i64> @i64_1v_32s(ptr %b) {
 ; CHECK-LABEL: i64_1v_32s:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    rdvl x8, #1
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov x9, #4 // =0x4
 ; CHECK-NEXT:    add x8, x0, x8
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8, x9, lsl #3]
@@ -204,7 +204,7 @@ define <vscale x 8 x i16> @i16_m2v_8s(ptr %b) {
 ; CHECK-LABEL: i16_m2v_8s:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cnth x8, all, mul #4
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov x9, #4 // =0x4
 ; CHECK-NEXT:    sub x8, x0, x8
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x8, x9, lsl #1]
@@ -237,7 +237,7 @@ define <vscale x 4 x i32> @i32_m2v_16s(ptr %b) {
 ; CHECK-LABEL: i32_m2v_16s:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cnth x8, all, mul #4
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov x9, #4 // =0x4
 ; CHECK-NEXT:    sub x8, x0, x8
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x8, x9, lsl #2]
@@ -270,7 +270,7 @@ define <vscale x 2 x i64> @i64_m2v_32s(ptr %b) {
 ; CHECK-LABEL: i64_m2v_32s:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cnth x8, all, mul #4
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov x9, #4 // =0x4
 ; CHECK-NEXT:    sub x8, x0, x8
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8, x9, lsl #3]
diff --git a/llvm/test/CodeGen/AArch64/sve-scmp.ll b/llvm/test/CodeGen/AArch64/sve-scmp.ll
index 2083ddd8c3837..71a679af0078d 100644
--- a/llvm/test/CodeGen/AArch64/sve-scmp.ll
+++ b/llvm/test/CodeGen/AArch64/sve-scmp.ll
@@ -4,9 +4,10 @@
 define <vscale x 8 x i8> @s_nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
 ; CHECK-LABEL: s_nxv8i8:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtb z0.h, p0/m, z0.h
 ; CHECK-NEXT:    sxtb z1.h, p0/m, z1.h
+; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    cmpgt p1.h, p0/z, z0.h, z1.h
 ; CHECK-NEXT:    cmpgt p0.h, p0/z, z1.h, z0.h
 ; CHECK-NEXT:    mov z0.h, p1/z, #1 // =0x1
@@ -34,9 +35,10 @@ entry:
 define <vscale x 4 x i16> @s_nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
 ; CHECK-LABEL: s_nxv4i16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxth z0.s, p0/m, z0.s
 ; CHECK-NEXT:    sxth z1.s, p0/m, z1.s
+; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    cmpgt p1.s, p0/z, z0.s, z1.s
 ; CHECK-NEXT:    cmpgt p0.s, p0/z, z1.s, z0.s
 ; CHECK-NEXT:    mov z0.s, p1/z, #1 // =0x1
@@ -82,9 +84,10 @@ entry:
 define <vscale x 2 x i32> @s_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: s_nxv2i32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
 ; CHECK-NEXT:    sxtw z1.d, p0/m, z1.d
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    cmpgt p1.d, p0/z, z0.d, z1.d
 ; CHECK-NEXT:    cmpgt p0.d, p0/z, z1.d, z0.d
 ; CHECK-NEXT:    mov z0.d, p1/z, #1 // =0x1
diff --git a/llvm/test/CodeGen/AArch64/sve-setcc.ll b/llvm/test/CodeGen/AArch64/sve-setcc.ll
index d4c17c1bd838c..66f4d9d12e9c1 100644
--- a/llvm/test/CodeGen/AArch64/sve-setcc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-setcc.ll
@@ -51,7 +51,7 @@ if.end:
 define void @sve_cmplt_setcc_hslo(ptr %out, <vscale x 8 x i16> %in, <vscale x 8 x i1> %pg) {
 ; CHECK-LABEL: sve_cmplt_setcc_hslo:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p1.h
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    cmplt p2.h, p0/z, z0.h, #0
 ; CHECK-NEXT:    and p1.b, p0/z, p0.b, p1.b
 ; CHECK-NEXT:    ptest p1, p2.b
diff --git a/llvm/test/CodeGen/AArch64/sve-sext-zext.ll b/llvm/test/CodeGen/AArch64/sve-sext-zext.ll
index 88e13ea1e0fa4..7552ea86ededb 100644
--- a/llvm/test/CodeGen/AArch64/sve-sext-zext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-sext-zext.ll
@@ -76,7 +76,7 @@ define <vscale x 2 x i64> @zext_i1_i64(<vscale x 2 x i1> %a) {
 define <vscale x 8 x i16> @sext_i8_i16(<vscale x 8 x i8> %a) {
 ; CHECK-LABEL: sext_i8_i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtb z0.h, p0/m, z0.h
 ; CHECK-NEXT:    ret
   %r = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>
@@ -86,7 +86,7 @@ define <vscale x 8 x i16> @sext_i8_i16(<vscale x 8 x i8> %a) {
 define <vscale x 4 x i32> @sext_i8_i32(<vscale x 4 x i8> %a) {
 ; CHECK-LABEL: sext_i8_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtb z0.s, p0/m, z0.s
 ; CHECK-NEXT:    ret
   %r = sext <vscale x 4 x i8> %a to <vscale x 4 x i32>
@@ -96,7 +96,7 @@ define <vscale x 4 x i32> @sext_i8_i32(<vscale x 4 x i8> %a) {
 define <vscale x 2 x i64> @sext_i8_i64(<vscale x 2 x i8> %a) {
 ; CHECK-LABEL: sext_i8_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtb z0.d, p0/m, z0.d
 ; CHECK-NEXT:    ret
   %r = sext <vscale x 2 x i8> %a to <vscale x 2 x i64>
@@ -133,7 +133,7 @@ define <vscale x 2 x i64> @zext_i8_i64(<vscale x 2 x i8> %a) {
 define <vscale x 4 x i32> @sext_i16_i32(<vscale x 4 x i16> %a) {
 ; CHECK-LABEL: sext_i16_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxth z0.s, p0/m, z0.s
 ; CHECK-NEXT:    ret
   %r = sext <vscale x 4 x i16> %a to <vscale x 4 x i32>
@@ -143,7 +143,7 @@ define <vscale x 4 x i32> @sext_i16_i32(<vscale x 4 x i16> %a) {
 define <vscale x 2 x i64> @sext_i16_i64(<vscale x 2 x i16> %a) {
 ; CHECK-LABEL: sext_i16_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxth z0.d, p0/m, z0.d
 ; CHECK-NEXT:    ret
   %r = sext <vscale x 2 x i16> %a to <vscale x 2 x i64>
@@ -171,7 +171,7 @@ define <vscale x 2 x i64> @zext_i16_i64(<vscale x 2 x i16> %a) {
 define <vscale x 2 x i64> @sext_i32_i64(<vscale x 2 x i32> %a) {
 ; CHECK-LABEL: sext_i32_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
 ; CHECK-NEXT:    ret
   %r = sext <vscale x 2 x i32> %a to <vscale x 2 x i64>
@@ -383,7 +383,7 @@ define <vscale x 8 x i64> @zext_8i8_8i64(<vscale x 8 x i8> %aval) {
 define <vscale x 4 x i64> @sext_4i8_4i64(<vscale x 4 x i8> %aval) {
 ; CHECK-LABEL: sext_4i8_4i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    sxtb z1.s, p0/m, z0.s
 ; CHECK-NEXT:    sunpklo z0.d, z1.s
@@ -396,7 +396,7 @@ define <vscale x 4 x i64> @sext_4i8_4i64(<vscale x 4 x i8> %aval) {
 define <vscale x 4 x i64> @sext_4i16_4i64(<vscale x 4 x i16> %aval) {
 ; CHECK-LABEL: sext_4i16_4i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    sxth z1.s, p0/m, z0.s
 ; CHECK-NEXT:    sunpklo z0.d, z1.s
@@ -409,7 +409,7 @@ define <vscale x 4 x i64> @sext_4i16_4i64(<vscale x 4 x i16> %aval) {
 define <vscale x 8 x i32> @sext_8i8_8i32(<vscale x 8 x i8> %aval) {
 ; CHECK-LABEL: sext_8i8_8i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    sxtb z1.h, p0/m, z0.h
 ; CHECK-NEXT:    sunpklo z0.s, z1.h
@@ -422,7 +422,7 @@ define <vscale x 8 x i32> @sext_8i8_8i32(<vscale x 8 x i8> %aval) {
 define <vscale x 8 x i64> @sext_8i8_8i64(<vscale x 8 x i8> %aval) {
 ; CHECK-LABEL: sext_8i8_8i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtb z0.h, p0/m, z0.h
 ; CHECK-NEXT:    sunpklo z1.s, z0.h
 ; CHECK-NEXT:    sunpkhi z3.s, z0.h
diff --git a/llvm/test/CodeGen/AArch64/sve-smulo-sdnode.ll b/llvm/test/CodeGen/AArch64/sve-smulo-sdnode.ll
index a2429d4975b7a..52d6c0fd7c086 100644
--- a/llvm/test/CodeGen/AArch64/sve-smulo-sdnode.ll
+++ b/llvm/test/CodeGen/AArch64/sve-smulo-sdnode.ll
@@ -6,13 +6,14 @@ declare { <vscale x 2 x i8>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i8
 define <vscale x 2 x i8> @smulo_nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y) {
 ; CHECK-LABEL: smulo_nxv2i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    ptrue p1.d
 ; CHECK-NEXT:    sxtb z1.d, p0/m, z1.d
 ; CHECK-NEXT:    sxtb z0.d, p0/m, z0.d
-; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    mul z0.d, p1/m, z0.d, z1.d
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    sxtb z1.d, p0/m, z0.d
-; CHECK-NEXT:    cmpne p0.d, p0/z, z1.d, z0.d
+; CHECK-NEXT:    cmpne p0.d, p1/z, z1.d, z0.d
 ; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
 ; CHECK-NEXT:    ret
   %a = call { <vscale x 2 x i8>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y)
@@ -27,13 +28,14 @@ declare { <vscale x 4 x i8>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i8
 define <vscale x 4 x i8> @smulo_nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y) {
 ; CHECK-LABEL: smulo_nxv4i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    ptrue p1.s
 ; CHECK-NEXT:    sxtb z1.s, p0/m, z1.s
 ; CHECK-NEXT:    sxtb z0.s, p0/m, z0.s
-; CHECK-NEXT:    mul z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    mul z0.s, p1/m, z0.s, z1.s
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    sxtb z1.s, p0/m, z0.s
-; CHECK-NEXT:    cmpne p0.s, p0/z, z1.s, z0.s
+; CHECK-NEXT:    cmpne p0.s, p1/z, z1.s, z0.s
 ; CHECK-NEXT:    mov z0.s, p0/m, #0 // =0x0
 ; CHECK-NEXT:    ret
   %a = call { <vscale x 4 x i8>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y)
@@ -48,13 +50,14 @@ declare { <vscale x 8 x i8>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i8
 define <vscale x 8 x i8> @smulo_nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y) {
 ; CHECK-LABEL: smulo_nxv8i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    ptrue p1.h
 ; CHECK-NEXT:    sxtb z1.h, p0/m, z1.h
 ; CHECK-NEXT:    sxtb z0.h, p0/m, z0.h
-; CHECK-NEXT:    mul z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    mul z0.h, p1/m, z0.h, z1.h
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    sxtb z1.h, p0/m, z0.h
-; CHECK-NEXT:    cmpne p0.h, p0/z, z1.h, z0.h
+; CHECK-NEXT:    cmpne p0.h, p1/z, z1.h, z0.h
 ; CHECK-NEXT:    mov z0.h, p0/m, #0 // =0x0
 ; CHECK-NEXT:    ret
   %a = call { <vscale x 8 x i8>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y)
@@ -160,13 +163,14 @@ declare { <vscale x 2 x i16>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i
 define <vscale x 2 x i16> @smulo_nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y) {
 ; CHECK-LABEL: smulo_nxv2i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    ptrue p1.d
 ; CHECK-NEXT:    sxth z1.d, p0/m, z1.d
 ; CHECK-NEXT:    sxth z0.d, p0/m, z0.d
-; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    mul z0.d, p1/m, z0.d, z1.d
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    sxth z1.d, p0/m, z0.d
-; CHECK-NEXT:    cmpne p0.d, p0/z, z1.d, z0.d
+; CHECK-NEXT:    cmpne p0.d, p1/z, z1.d, z0.d
 ; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
 ; CHECK-NEXT:    ret
   %a = call { <vscale x 2 x i16>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y)
@@ -181,13 +185,14 @@ declare { <vscale x 4 x i16>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i
 define <vscale x 4 x i16> @smulo_nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y) {
 ; CHECK-LABEL: smulo_nxv4i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    ptrue p1.s
 ; CHECK-NEXT:    sxth z1.s, p0/m, z1.s
 ; CHECK-NEXT:    sxth z0.s, p0/m, z0.s
-; CHECK-NEXT:    mul z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    mul z0.s, p1/m, z0.s, z1.s
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    sxth z1.s, p0/m, z0.s
-; CHECK-NEXT:    cmpne p0.s, p0/z, z1.s, z0.s
+; CHECK-NEXT:    cmpne p0.s, p1/z, z1.s, z0.s
 ; CHECK-NEXT:    mov z0.s, p0/m, #0 // =0x0
 ; CHECK-NEXT:    ret
   %a = call { <vscale x 4 x i16>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y)
@@ -293,13 +298,14 @@ declare { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i
 define <vscale x 2 x i32> @smulo_nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y) {
 ; CHECK-LABEL: smulo_nxv2i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    ptrue p1.d
 ; CHECK-NEXT:    sxtw z1.d, p0/m, z1.d
 ; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
-; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    mul z0.d, p1/m, z0.d, z1.d
 ; CHECK-NEXT:    movprfx z1, z0
 ; CHECK-NEXT:    sxtw z1.d, p0/m, z0.d
-; CHECK-NEXT:    cmpne p0.d, p0/z, z1.d, z0.d
+; CHECK-NEXT:    cmpne p0.d, p1/z, z1.d, z0.d
 ; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
 ; CHECK-NEXT:    ret
   %a = call { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y)
diff --git a/llvm/test/CodeGen/AArch64/sve-splat-one-and-ptrue.ll b/llvm/test/CodeGen/AArch64/sve-splat-one-and-ptrue.ll
index a409865138e0d..24d4929bb3350 100644
--- a/llvm/test/CodeGen/AArch64/sve-splat-one-and-ptrue.ll
+++ b/llvm/test/CodeGen/AArch64/sve-splat-one-and-ptrue.ll
@@ -8,7 +8,7 @@ target triple = "aarch64-unknown-linux-gnu"
 define <vscale x 16 x i1> @fold_away_ptrue_and_ptrue() #0 {
 ; CHECK-LABEL: fold_away_ptrue_and_ptrue:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ret
   %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
   %2 = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
@@ -20,7 +20,7 @@ define <vscale x 16 x i1> @fold_away_ptrue_and_ptrue() #0 {
 define <vscale x 16 x i1> @fold_away_ptrue_and_splat_predicate() #0 {
 ; CHECK-LABEL: fold_away_ptrue_and_splat_predicate:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ret
   %1 = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> splat(i1 true))
   %2 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
@@ -32,8 +32,8 @@ define <vscale x 16 x i1> @fold_away_ptrue_and_splat_predicate() #0 {
 define <vscale x 16 x i1> @fold_away_ptrue_and_convert_to() #0 {
 ; CHECK-LABEL: fold_away_ptrue_and_convert_to:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    and p0.b, p1/z, p1.b, p0.b
 ; CHECK-NEXT:    ret
   %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
diff --git a/llvm/test/CodeGen/AArch64/sve-splat-sext.ll b/llvm/test/CodeGen/AArch64/sve-splat-sext.ll
index 467af905a4d84..bb588e045fca7 100644
--- a/llvm/test/CodeGen/AArch64/sve-splat-sext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-splat-sext.ll
@@ -14,8 +14,9 @@ define <vscale x 8 x i16> @sext_splat_v8i16_128() {
 define <vscale x 8 x i1> @sext_icmp_splat_v8i16_128(<vscale x 8 x i8> %d) {
 ; CHECK-LABEL: sext_icmp_splat_v8i16_128:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtb z0.h, p0/m, z0.h
+; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    cmpgt p0.h, p0/z, z0.h, #-1
 ; CHECK-NEXT:    ret
   %c = icmp ugt <vscale x 8 x i8> splat(i8 128), %d
@@ -25,8 +26,9 @@ define <vscale x 8 x i1> @sext_icmp_splat_v8i16_128(<vscale x 8 x i8> %d) {
 define <vscale x 4 x i1> @sext_icmp_splat_v4i16_128(<vscale x 4 x i8> %d) {
 ; CHECK-LABEL: sext_icmp_splat_v4i16_128:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtb z0.s, p0/m, z0.s
+; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    cmpgt p0.s, p0/z, z0.s, #-1
 ; CHECK-NEXT:    ret
   %c = icmp ugt <vscale x 4 x i8> splat(i8 128), %d
diff --git a/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll b/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll
index d7ed42d717937..45ec6bcf86c1b 100644
--- a/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll
+++ b/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll
@@ -8,7 +8,7 @@ define <vscale x 8 x i8> @promote_insert_8i8(<vscale x 8 x i8> %a, i8 %elt, i64
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z1.h, #0, #1
 ; CHECK-NEXT:    mov z2.h, w1
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    cmpeq p0.h, p0/z, z1.h, z2.h
 ; CHECK-NEXT:    mov z0.h, p0/m, w0
 ; CHECK-NEXT:    ret
@@ -99,7 +99,7 @@ define <vscale x 4 x i16> @promote_insert_4i16(<vscale x 4 x i16> %a, i16 %elt)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #5 // =0x5
 ; CHECK-NEXT:    index z1.s, #0, #1
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z2.s, w8
 ; CHECK-NEXT:    cmpeq p0.s, p0/z, z1.s, z2.s
 ; CHECK-NEXT:    mov z0.s, p0/m, w0
diff --git a/llvm/test/CodeGen/AArch64/sve-split-int-reduce.ll b/llvm/test/CodeGen/AArch64/sve-split-int-reduce.ll
index 90383b43d5812..ddb42ff9d7442 100644
--- a/llvm/test/CodeGen/AArch64/sve-split-int-reduce.ll
+++ b/llvm/test/CodeGen/AArch64/sve-split-int-reduce.ll
@@ -148,8 +148,9 @@ define i64 @umin_nxv4i64(<vscale x 4 x i64> %a) {
 define i8 @smin_nxv4i8(<vscale x 4 x i8> %a) {
 ; CHECK-LABEL: smin_nxv4i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtb z0.s, p0/m, z0.s
+; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    sminv s0, p0, z0.s
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-split-load.ll b/llvm/test/CodeGen/AArch64/sve-split-load.ll
index e1dd66c9d249a..77e552c15df3e 100644
--- a/llvm/test/CodeGen/AArch64/sve-split-load.ll
+++ b/llvm/test/CodeGen/AArch64/sve-split-load.ll
@@ -6,7 +6,7 @@
 define <vscale x 4 x i16> @load_promote_4i16(ptr %a) {
 ; CHECK-LABEL: load_promote_4i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %load = load <vscale x 4 x i16>, ptr %a
diff --git a/llvm/test/CodeGen/AArch64/sve-split-store.ll b/llvm/test/CodeGen/AArch64/sve-split-store.ll
index b1419b3f679cf..65bb279810b82 100644
--- a/llvm/test/CodeGen/AArch64/sve-split-store.ll
+++ b/llvm/test/CodeGen/AArch64/sve-split-store.ll
@@ -6,7 +6,7 @@
 define void @store_promote_4i8(<vscale x 4 x i8> %data, ptr %a) {
 ; CHECK-LABEL: store_promote_4i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1b { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
   store <vscale x 4 x i8> %data, ptr %a
diff --git a/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-imm.ll b/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-imm.ll
index 71b883f0ef7ec..bcecda9b4a27c 100644
--- a/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-imm.ll
@@ -96,7 +96,7 @@ define void @store_nxv2f32(ptr %out) {
 ; CHECK-LABEL: store_nxv2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov z0.s, #1.00000000
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
   store <vscale x 2 x float> splat(float 1.0), ptr %out
@@ -107,7 +107,7 @@ define void @store_nxv4f16(ptr %out) {
 ; CHECK-LABEL: store_nxv4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov z0.h, #1.00000000
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
   store <vscale x 4 x half> splat(half 1.0), ptr %out
@@ -120,7 +120,7 @@ define void @store_nxv6f32(ptr %out) {
 ; CHECK-LABEL: store_nxv6f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov z0.s, #1.00000000
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT:    str z0, [x0]
 ; CHECK-NEXT:    ret
@@ -132,7 +132,7 @@ define void @store_nxv12f16(ptr %out) {
 ; CHECK-LABEL: store_nxv12f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov z0.h, #1.00000000
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT:    str z0, [x0]
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-reg.ll b/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-reg.ll
index d859bbb567ebb..e6ca4da420c59 100644
--- a/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-reg.ll
+++ b/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-reg.ll
@@ -50,7 +50,7 @@ define void @st1_nxv16i8_bitcast_from_i64(ptr %addr, i64 %off, <vscale x 2 x i64
 define void @st1_nxv8i16_trunc8(ptr %addr, i64 %off, <vscale x 8 x i16> %val) {
 ; CHECK-LABEL: st1_nxv8i16_trunc8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1b { z0.h }, p0, [x0, x1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
@@ -62,7 +62,7 @@ define void @st1_nxv8i16_trunc8(ptr %addr, i64 %off, <vscale x 8 x i16> %val) {
 define void @st1_nxv4i32_trunc8(ptr %addr, i64 %off, <vscale x 4 x i32> %val) {
 ; CHECK-LABEL: st1_nxv4i32_trunc8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1b { z0.s }, p0, [x0, x1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
@@ -74,7 +74,7 @@ define void @st1_nxv4i32_trunc8(ptr %addr, i64 %off, <vscale x 4 x i32> %val) {
 define void @st1_nxv2i64_trunc8(ptr %addr, i64 %off, <vscale x 2 x i64> %val) {
 ; CHECK-LABEL: st1_nxv2i64_trunc8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1b { z0.d }, p0, [x0, x1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
@@ -88,7 +88,7 @@ define void @st1_nxv2i64_trunc8(ptr %addr, i64 %off, <vscale x 2 x i64> %val) {
 define void @st1_nxv8i16(ptr %addr, i64 %off, <vscale x 8 x i16> %val) {
 ; CHECK-LABEL: st1_nxv8i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i16, ptr %addr, i64 %off
@@ -99,7 +99,7 @@ define void @st1_nxv8i16(ptr %addr, i64 %off, <vscale x 8 x i16> %val) {
 define void @st1_nxv4i32_trunc16(ptr %addr, i64 %off, <vscale x 4 x i32> %val) {
 ; CHECK-LABEL: st1_nxv4i32_trunc16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i16, ptr %addr, i64 %off
@@ -111,7 +111,7 @@ define void @st1_nxv4i32_trunc16(ptr %addr, i64 %off, <vscale x 4 x i32> %val) {
 define void @st1_nxv2i64_trunc16(ptr %addr, i64 %off, <vscale x 2 x i64> %val) {
 ; CHECK-LABEL: st1_nxv2i64_trunc16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i16, ptr %addr, i64 %off
@@ -123,7 +123,7 @@ define void @st1_nxv2i64_trunc16(ptr %addr, i64 %off, <vscale x 2 x i64> %val) {
 define void @st1_nxv8f16(ptr %addr, i64 %off, <vscale x 8 x half> %val) {
 ; CHECK-LABEL: st1_nxv8f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds half, ptr %addr, i64 %off
@@ -134,7 +134,7 @@ define void @st1_nxv8f16(ptr %addr, i64 %off, <vscale x 8 x half> %val) {
 define void @st1_nxv8bf16(ptr %addr, i64 %off, <vscale x 8 x bfloat> %val) {
 ; CHECK-LABEL: st1_nxv8bf16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds bfloat, ptr %addr, i64 %off
@@ -145,7 +145,7 @@ define void @st1_nxv8bf16(ptr %addr, i64 %off, <vscale x 8 x bfloat> %val) {
 define void @st1_nxv4f16(ptr %addr, i64 %off, <vscale x 4 x half> %val) {
 ; CHECK-LABEL: st1_nxv4f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds half, ptr %addr, i64 %off
@@ -156,7 +156,7 @@ define void @st1_nxv4f16(ptr %addr, i64 %off, <vscale x 4 x half> %val) {
 define void @st1_nxv4bf16(ptr %addr, i64 %off, <vscale x 4 x bfloat> %val) {
 ; CHECK-LABEL: st1_nxv4bf16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds bfloat, ptr %addr, i64 %off
@@ -167,7 +167,7 @@ define void @st1_nxv4bf16(ptr %addr, i64 %off, <vscale x 4 x bfloat> %val) {
 define void @st1_nxv2f16(ptr %addr, i64 %off, <vscale x 2 x half> %val) {
 ; CHECK-LABEL: st1_nxv2f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds half, ptr %addr, i64 %off
@@ -178,7 +178,7 @@ define void @st1_nxv2f16(ptr %addr, i64 %off, <vscale x 2 x half> %val) {
 define void @st1_nxv2bf16(ptr %addr, i64 %off, <vscale x 2 x bfloat> %val) {
 ; CHECK-LABEL: st1_nxv2bf16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds bfloat, ptr %addr, i64 %off
@@ -191,7 +191,7 @@ define void @st1_nxv2bf16(ptr %addr, i64 %off, <vscale x 2 x bfloat> %val) {
 define void @st1_nxv4i32(ptr %addr, i64 %off, <vscale x 4 x i32> %val) {
 ; CHECK-LABEL: st1_nxv4i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i32, ptr %addr, i64 %off
@@ -202,7 +202,7 @@ define void @st1_nxv4i32(ptr %addr, i64 %off, <vscale x 4 x i32> %val) {
 define void @st1_nxv2i64_trunc32(ptr %addr, i64 %off, <vscale x 2 x i64> %val) {
 ; CHECK-LABEL: st1_nxv2i64_trunc32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i32, ptr %addr, i64 %off
@@ -214,7 +214,7 @@ define void @st1_nxv2i64_trunc32(ptr %addr, i64 %off, <vscale x 2 x i64> %val) {
 define void @st1_nxv4f32(ptr %addr, i64 %off, <vscale x 4 x float> %val) {
 ; CHECK-LABEL: st1_nxv4f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds float, ptr %addr, i64 %off
@@ -225,7 +225,7 @@ define void @st1_nxv4f32(ptr %addr, i64 %off, <vscale x 4 x float> %val) {
 define void @st1_nxv2f32(ptr %addr, i64 %off, <vscale x 2 x float> %val) {
 ; CHECK-LABEL: st1_nxv2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds float, ptr %addr, i64 %off
@@ -238,7 +238,7 @@ define void @st1_nxv2f32(ptr %addr, i64 %off, <vscale x 2 x float> %val) {
 define void @st1_nxv2i64(ptr %addr, i64 %off, <vscale x 2 x i64> %val) {
 ; CHECK-LABEL: st1_nxv2i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i64, ptr %addr, i64 %off
@@ -249,7 +249,7 @@ define void @st1_nxv2i64(ptr %addr, i64 %off, <vscale x 2 x i64> %val) {
 define void @st1_nxv2f64(ptr %addr, i64 %off, <vscale x 2 x double> %val) {
 ; CHECK-LABEL: st1_nxv2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds double, ptr %addr, i64 %off
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll
index ad00e99b704dd..3d12860a5a6fe 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll
@@ -14,7 +14,7 @@ define <4 x i8> @insertelement_v4i8(<4 x i8> %op1) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #3 // =0x3
 ; CHECK-NEXT:    index z1.h, #0, #1
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z2.h, w8
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    mov w8, #5 // =0x5
@@ -169,7 +169,7 @@ define <2 x i16> @insertelement_v2i16(<2 x i16> %op1) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #1 // =0x1
 ; CHECK-NEXT:    index z1.s, #0, #1
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z2.s, w8
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    mov w8, #5 // =0x5
@@ -202,7 +202,7 @@ define <4 x i16> @insertelement_v4i16(<4 x i16> %op1) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #3 // =0x3
 ; CHECK-NEXT:    index z1.h, #0, #1
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z2.h, w8
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    mov w8, #5 // =0x5
@@ -238,7 +238,7 @@ define <8 x i16> @insertelement_v8i16(<8 x i16> %op1) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #7 // =0x7
 ; CHECK-NEXT:    index z1.h, #0, #1
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z2.h, w8
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
 ; CHECK-NEXT:    mov w8, #5 // =0x5
@@ -276,7 +276,7 @@ define <16 x i16> @insertelement_v16i16(<16 x i16> %op1) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #7 // =0x7
 ; CHECK-NEXT:    index z2.h, #0, #1
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z3.h, w8
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
 ; CHECK-NEXT:    mov w8, #5 // =0x5
@@ -315,7 +315,7 @@ define <2 x i32> @insertelement_v2i32(<2 x i32> %op1) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #1 // =0x1
 ; CHECK-NEXT:    index z1.s, #0, #1
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z2.s, w8
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    mov w8, #5 // =0x5
@@ -348,7 +348,7 @@ define <4 x i32> @insertelement_v4i32(<4 x i32> %op1) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #3 // =0x3
 ; CHECK-NEXT:    index z1.s, #0, #1
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z2.s, w8
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
 ; CHECK-NEXT:    mov w8, #5 // =0x5
@@ -383,7 +383,7 @@ define <8 x i32> @insertelement_v8i32(ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #3 // =0x3
 ; CHECK-NEXT:    index z0.s, #0, #1
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    mov w8, #5 // =0x5
 ; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, z1.s
@@ -439,7 +439,7 @@ define <2 x i64> @insertelement_v2i64(<2 x i64> %op1) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #1 // =0x1
 ; CHECK-NEXT:    index z1.d, #0, #1
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z2.d, x8
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
 ; CHECK-NEXT:    mov w8, #5 // =0x5
@@ -472,7 +472,7 @@ define <4 x i64> @insertelement_v4i64(ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #1 // =0x1
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z1.d, x8
 ; CHECK-NEXT:    mov w8, #5 // =0x5
 ; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, z1.d
@@ -532,7 +532,7 @@ define <4 x half> @insertelement_v4f16(<4 x half> %op1) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #3 // =0x3
 ; CHECK-NEXT:    index z1.h, #0, #1
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z2.h, w8
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    cmpeq p0.h, p0/z, z1.h, z2.h
@@ -569,7 +569,7 @@ define <8 x half> @insertelement_v8f16(<8 x half> %op1) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #7 // =0x7
 ; CHECK-NEXT:    index z1.h, #0, #1
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z2.h, w8
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
 ; CHECK-NEXT:    cmpeq p0.h, p0/z, z1.h, z2.h
@@ -608,7 +608,7 @@ define <16 x half> @insertelement_v16f16(ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #7 // =0x7
 ; CHECK-NEXT:    index z0.h, #0, #1
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z1.h, w8
 ; CHECK-NEXT:    fmov h2, #5.00000000
 ; CHECK-NEXT:    cmpeq p0.h, p0/z, z0.h, z1.h
@@ -649,7 +649,7 @@ define <2 x float> @insertelement_v2f32(<2 x float> %op1) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #1 // =0x1
 ; CHECK-NEXT:    index z1.s, #0, #1
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z2.s, w8
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    cmpeq p0.s, p0/z, z1.s, z2.s
@@ -682,7 +682,7 @@ define <4 x float> @insertelement_v4f32(<4 x float> %op1) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #3 // =0x3
 ; CHECK-NEXT:    index z1.s, #0, #1
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z2.s, w8
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
 ; CHECK-NEXT:    cmpeq p0.s, p0/z, z1.s, z2.s
@@ -717,7 +717,7 @@ define <8 x float> @insertelement_v8f32(ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #3 // =0x3
 ; CHECK-NEXT:    index z0.s, #0, #1
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    fmov s2, #5.00000000
 ; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, z1.s
@@ -774,7 +774,7 @@ define <2 x double> @insertelement_v2f64(<2 x double> %op1) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #1 // =0x1
 ; CHECK-NEXT:    index z1.d, #0, #1
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z2.d, x8
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
 ; CHECK-NEXT:    cmpeq p0.d, p0/z, z1.d, z2.d
@@ -807,7 +807,7 @@ define <4 x double> @insertelement_v4f64(ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #1 // =0x1
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z1.d, x8
 ; CHECK-NEXT:    fmov d2, #5.00000000
 ; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, z1.d
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-gather-scatter.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-gather-scatter.ll
index d9f8482a3c503..9f8f7dc519000 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-gather-scatter.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-gather-scatter.ll
@@ -19,7 +19,7 @@ define <2 x i64> @masked_gather_v2i64(ptr %a, ptr %b) vscale_range(2, 2) {
 ; CHECK-NEXT:    and z0.d, z1.d, z0.d
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    uaddv d0, p0, z0.d
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    fmov x8, d0
 ; CHECK-NEXT:    strb w8, [sp, #12]
 ; CHECK-NEXT:    and w8, w8, #0xff
diff --git a/llvm/test/CodeGen/AArch64/sve-trunc.ll b/llvm/test/CodeGen/AArch64/sve-trunc.ll
index 0ec6538947c73..d572e231e4f0e 100644
--- a/llvm/test/CodeGen/AArch64/sve-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-trunc.ll
@@ -207,7 +207,7 @@ define void @trunc_promoteIntRes(<vscale x 4 x i64> %0, ptr %ptr) {
 ; CHECK-LABEL: trunc_promoteIntRes:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/AArch64/sve-unary-movprfx.ll b/llvm/test/CodeGen/AArch64/sve-unary-movprfx.ll
index a6c5abef19ab8..0aa442f268575 100644
--- a/llvm/test/CodeGen/AArch64/sve-unary-movprfx.ll
+++ b/llvm/test/CodeGen/AArch64/sve-unary-movprfx.ll
@@ -785,7 +785,7 @@ define <vscale x 2 x double> @fsqrt_f64_not_active(<vscale x 2 x double> %a, <vs
 define <vscale x 8 x i16> @sxtb_i16(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) #0 {
 ; CHECK-LABEL: sxtb_i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    movprfx z0, z1
 ; CHECK-NEXT:    sxtb z0.h, p0/m, z1.h
 ; CHECK-NEXT:    ret
@@ -796,7 +796,7 @@ define <vscale x 8 x i16> @sxtb_i16(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b)
 define <vscale x 8 x i16> @sxtb_i16_dupreg(<vscale x 8 x i8> %a) #0 {
 ; CHECK-LABEL: sxtb_i16_dupreg:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtb z0.h, p0/m, z0.h
 ; CHECK-NEXT:    ret
   %ret = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>
@@ -843,7 +843,7 @@ define <vscale x 8 x i16> @sxtb_i16_not_active(<vscale x 8 x i16> %a, <vscale x
 define <vscale x 4 x i32> @sxtb_i32(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) #0 {
 ; CHECK-LABEL: sxtb_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    movprfx z0, z1
 ; CHECK-NEXT:    sxtb z0.s, p0/m, z1.s
 ; CHECK-NEXT:    ret
@@ -854,7 +854,7 @@ define <vscale x 4 x i32> @sxtb_i32(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b)
 define <vscale x 4 x i32> @sxtb_i32_dupreg(<vscale x 4 x i8> %a) #0 {
 ; CHECK-LABEL: sxtb_i32_dupreg:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtb z0.s, p0/m, z0.s
 ; CHECK-NEXT:    ret
   %ret = sext <vscale x 4 x i8> %a to <vscale x 4 x i32>
@@ -901,7 +901,7 @@ define <vscale x 4 x i32> @sxtb_i32_not_active(<vscale x 4 x i32> %a, <vscale x
 define <vscale x 2 x i64> @sxtb_i64(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) #0 {
 ; CHECK-LABEL: sxtb_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    movprfx z0, z1
 ; CHECK-NEXT:    sxtb z0.d, p0/m, z1.d
 ; CHECK-NEXT:    ret
@@ -912,7 +912,7 @@ define <vscale x 2 x i64> @sxtb_i64(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b)
 define <vscale x 2 x i64> @sxtb_i64_dupreg(<vscale x 2 x i8> %a) #0 {
 ; CHECK-LABEL: sxtb_i64_dupreg:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtb z0.d, p0/m, z0.d
 ; CHECK-NEXT:    ret
   %ret = sext <vscale x 2 x i8> %a to <vscale x 2 x i64>
@@ -959,7 +959,7 @@ define <vscale x 2 x i64> @sxtb_i64_not_active(<vscale x 2 x i64> %a, <vscale x
 define <vscale x 4 x i32> @sxth_i32(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) #0 {
 ; CHECK-LABEL: sxth_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    movprfx z0, z1
 ; CHECK-NEXT:    sxth z0.s, p0/m, z1.s
 ; CHECK-NEXT:    ret
@@ -970,7 +970,7 @@ define <vscale x 4 x i32> @sxth_i32(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b
 define <vscale x 4 x i32> @sxth_i32_dupreg(<vscale x 4 x i16> %a) #0 {
 ; CHECK-LABEL: sxth_i32_dupreg:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxth z0.s, p0/m, z0.s
 ; CHECK-NEXT:    ret
   %ret = sext <vscale x 4 x i16> %a to <vscale x 4 x i32>
@@ -1017,7 +1017,7 @@ define <vscale x 4 x i32> @sxth_i32_not_active(<vscale x 4 x i32> %a, <vscale x
 define <vscale x 2 x i64> @sxth_i64(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b) #0 {
 ; CHECK-LABEL: sxth_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    movprfx z0, z1
 ; CHECK-NEXT:    sxth z0.d, p0/m, z1.d
 ; CHECK-NEXT:    ret
@@ -1028,7 +1028,7 @@ define <vscale x 2 x i64> @sxth_i64(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b
 define <vscale x 2 x i64> @sxth_i64_dupreg(<vscale x 2 x i16> %a) #0 {
 ; CHECK-LABEL: sxth_i64_dupreg:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxth z0.d, p0/m, z0.d
 ; CHECK-NEXT:    ret
   %ret = sext <vscale x 2 x i16> %a to <vscale x 2 x i64>
@@ -1075,7 +1075,7 @@ define <vscale x 2 x i64> @sxth_i64_not_active(<vscale x 2 x i64> %a, <vscale x
 define <vscale x 2 x i64> @sxtw_i64(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) #0 {
 ; CHECK-LABEL: sxtw_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    movprfx z0, z1
 ; CHECK-NEXT:    sxtw z0.d, p0/m, z1.d
 ; CHECK-NEXT:    ret
@@ -1086,7 +1086,7 @@ define <vscale x 2 x i64> @sxtw_i64(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b
 define <vscale x 2 x i64> @sxtw_i64_dupreg(<vscale x 2 x i32> %a) #0 {
 ; CHECK-LABEL: sxtw_i64_dupreg:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
 ; CHECK-NEXT:    ret
   %ret = sext <vscale x 2 x i32> %a to <vscale x 2 x i64>
diff --git a/llvm/test/CodeGen/AArch64/sve-vector-compress.ll b/llvm/test/CodeGen/AArch64/sve-vector-compress.ll
index 8a504cd739211..8d09c26706851 100644
--- a/llvm/test/CodeGen/AArch64/sve-vector-compress.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vector-compress.ll
@@ -107,20 +107,21 @@ define <vscale x 8 x i32> @test_compress_large(<vscale x 8 x i32> %vec, <vscale
 ; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    punpklo p2.h, p0.b
+; CHECK-NEXT:    punpklo p1.h, p0.b
 ; CHECK-NEXT:    cnth x9
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p2.s
 ; CHECK-NEXT:    sub x9, x9, #1
 ; CHECK-NEXT:    punpkhi p0.h, p0.b
-; CHECK-NEXT:    compact z0.s, p2, z0.s
-; CHECK-NEXT:    cntp x8, p1, p2.s
+; CHECK-NEXT:    compact z0.s, p1, z0.s
+; CHECK-NEXT:    cntp x8, p2, p1.s
 ; CHECK-NEXT:    compact z1.s, p0, z1.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    str z0, [sp]
 ; CHECK-NEXT:    mov w8, w8
 ; CHECK-NEXT:    cmp x8, x9
 ; CHECK-NEXT:    csel x8, x8, x9, lo
 ; CHECK-NEXT:    mov x9, sp
-; CHECK-NEXT:    st1w { z1.s }, p1, [x9, x8, lsl #2]
+; CHECK-NEXT:    st1w { z1.s }, p0, [x9, x8, lsl #2]
 ; CHECK-NEXT:    ldr z0, [sp]
 ; CHECK-NEXT:    ldr z1, [sp, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #2
diff --git a/llvm/test/CodeGen/AArch64/sve-vector-splat.ll b/llvm/test/CodeGen/AArch64/sve-vector-splat.ll
index 5cca5539048b5..7c3e306c9d4fe 100644
--- a/llvm/test/CodeGen/AArch64/sve-vector-splat.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vector-splat.ll
@@ -580,7 +580,7 @@ define <vscale x 4 x float> @splat_nxv4f32_imm_out_of_range() {
 define <vscale x 2 x double> @splat_nxv2f64_imm_out_of_range() {
 ; CHECK-LABEL: splat_nxv2f64_imm_out_of_range:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    adrp x8, .LCPI60_0
 ; CHECK-NEXT:    add x8, x8, :lo12:.LCPI60_0
 ; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x8]
@@ -594,7 +594,7 @@ define <vscale x 2 x double> @splat_nxv2f64_imm_out_of_range() {
 define <vscale x 2 x i1> @sve_splat_i1_allactive() {
 ; CHECK-LABEL: sve_splat_i1_allactive:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ret
   ret <vscale x 2 x i1> splat(i1 true)
 }
diff --git a/llvm/test/CodeGen/AArch64/sve2-histcnt.ll b/llvm/test/CodeGen/AArch64/sve2-histcnt.ll
index 06cd65620d1c9..21921f4791509 100644
--- a/llvm/test/CodeGen/AArch64/sve2-histcnt.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-histcnt.ll
@@ -368,9 +368,10 @@ define void @histogram_zext_from_i16_to_i64(ptr %base, <vscale x 4 x i16> %indic
 define void @histogram_sext_from_i16_to_i64(ptr %base, <vscale x 4 x i16> %indices, <vscale x 4 x i1> %mask) #0{
 ; CHECK-LABEL: histogram_sext_from_i16_to_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    mov z3.s, #1 // =0x1
 ; CHECK-NEXT:    sxth z0.s, p1/m, z0.s
+; CHECK-NEXT:    ptrue p1.s
 ; CHECK-NEXT:    histcnt z1.s, p0/z, z0.s, z0.s
 ; CHECK-NEXT:    ld1w { z2.s }, p0/z, [x0, z0.s, sxtw #2]
 ; CHECK-NEXT:    mad z1.s, p1/m, z3.s, z2.s
diff --git a/llvm/test/CodeGen/AArch64/sve2-intrinsics-combine-rshrnb.ll b/llvm/test/CodeGen/AArch64/sve2-intrinsics-combine-rshrnb.ll
index 7d184dd66952b..1881721ac03a5 100644
--- a/llvm/test/CodeGen/AArch64/sve2-intrinsics-combine-rshrnb.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-intrinsics-combine-rshrnb.ll
@@ -5,7 +5,7 @@ define void @add_lshr_rshrnb_b_6(ptr %ptr, ptr %dst, i64 %index){
 ; CHECK-LABEL: add_lshr_rshrnb_b_6:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr z0, [x0]
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    rshrnb z0.b, z0.h, #6
 ; CHECK-NEXT:    st1b { z0.h }, p0, [x1, x2]
 ; CHECK-NEXT:    ret
@@ -22,7 +22,7 @@ define void @neg_add_lshr_rshrnb_b_6(ptr %ptr, ptr %dst, i64 %index){
 ; CHECK-LABEL: neg_add_lshr_rshrnb_b_6:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr z0, [x0]
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    add z0.h, z0.h, #1 // =0x1
 ; CHECK-NEXT:    lsr z0.h, z0.h, #6
 ; CHECK-NEXT:    st1b { z0.h }, p0, [x1, x2]
@@ -40,7 +40,7 @@ define void @add_lshr_rshrnb_h_7(ptr %ptr, ptr %dst, i64 %index){
 ; CHECK-LABEL: add_lshr_rshrnb_h_7:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr z0, [x0]
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    rshrnb z0.b, z0.h, #7
 ; CHECK-NEXT:    st1b { z0.h }, p0, [x1, x2]
 ; CHECK-NEXT:    ret
@@ -57,7 +57,7 @@ define void @add_lshr_rshrn_h_6(ptr %ptr, ptr %dst, i64 %index){
 ; CHECK-LABEL: add_lshr_rshrn_h_6:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr z0, [x0]
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    rshrnb z0.h, z0.s, #6
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x1, x2, lsl #1]
 ; CHECK-NEXT:    ret
@@ -74,7 +74,7 @@ define void @add_lshr_rshrnb_h_2(ptr %ptr, ptr %dst, i64 %index){
 ; CHECK-LABEL: add_lshr_rshrnb_h_2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr z0, [x0]
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    rshrnb z0.h, z0.s, #2
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x1, x2, lsl #1]
 ; CHECK-NEXT:    ret
@@ -104,7 +104,7 @@ define void @neg_zero_shift(ptr %ptr, ptr %dst, i64 %index){
 ; CHECK-LABEL: neg_zero_shift:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr z0, [x0]
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    add z0.s, z0.s, #1 // =0x1
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x1, x2, lsl #1]
 ; CHECK-NEXT:    ret
@@ -143,7 +143,7 @@ define void @wide_add_shift_add_rshrnb_h(ptr %dest, i64 %index, <vscale x 8 x i3
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rshrnb z1.h, z1.s, #6
 ; CHECK-NEXT:    rshrnb z0.h, z0.s, #6
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1h { z2.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z1.h
 ; CHECK-NEXT:    add z0.h, z2.h, z0.h
@@ -164,7 +164,7 @@ define void @wide_add_shift_add_rshrnb_d(ptr %dest, i64 %index, <vscale x 4 x i6
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rshrnb z1.s, z1.d, #32
 ; CHECK-NEXT:    rshrnb z0.s, z0.d, #32
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1w { z2.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
 ; CHECK-NEXT:    add z0.s, z2.s, z0.s
@@ -185,7 +185,7 @@ define void @neg_wide_add_shift_add_rshrnb_d(ptr %dest, i64 %index, <vscale x 4
 ; CHECK-LABEL: neg_wide_add_shift_add_rshrnb_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z2.d, #0x800000000000
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    add z0.d, z0.d, z2.d
 ; CHECK-NEXT:    add z1.d, z1.d, z2.d
 ; CHECK-NEXT:    ld1w { z2.s }, p0/z, [x0, x1, lsl #2]
@@ -209,7 +209,7 @@ define void @neg_trunc_lsr_add_op1_not_splat(ptr %ptr, ptr %dst, i64 %index, <vs
 ; CHECK-LABEL: neg_trunc_lsr_add_op1_not_splat:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr z1, [x0]
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    add z0.h, z1.h, z0.h
 ; CHECK-NEXT:    lsr z0.h, z0.h, #6
 ; CHECK-NEXT:    st1b { z0.h }, p0, [x1, x2]
@@ -230,6 +230,7 @@ define void @neg_trunc_lsr_op1_not_splat(ptr %ptr, ptr %dst, i64 %index, <vscale
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    add z1.h, z1.h, #32 // =0x20
 ; CHECK-NEXT:    lsrr z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1b { z0.h }, p0, [x1, x2]
 ; CHECK-NEXT:    ret
   %load = load <vscale x 8 x i16>, ptr %ptr, align 2
@@ -245,7 +246,7 @@ define void @neg_add_has_two_uses(ptr %ptr, ptr %dst, ptr %dst2, i64 %index){
 ; CHECK-LABEL: neg_add_has_two_uses:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr z0, [x0]
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    add z0.h, z0.h, #32 // =0x20
 ; CHECK-NEXT:    add z1.h, z0.h, z0.h
 ; CHECK-NEXT:    lsr z0.h, z0.h, #6
@@ -268,7 +269,7 @@ define void @add_lshr_rshrnb_s(ptr %ptr, ptr %dst, i64 %index){
 ; CHECK-LABEL: add_lshr_rshrnb_s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr z0, [x0]
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    rshrnb z0.s, z0.d, #6
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x1, x2, lsl #2]
 ; CHECK-NEXT:    ret
@@ -285,7 +286,7 @@ define void @neg_add_lshr_rshrnb_s(ptr %ptr, ptr %dst, i64 %index){
 ; CHECK-LABEL: neg_add_lshr_rshrnb_s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr z0, [x0]
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    add z0.d, z0.d, #32 // =0x20
 ; CHECK-NEXT:    lsr z0.d, z0.d, #6
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x1, x2, lsl #1]
diff --git a/llvm/test/CodeGen/AArch64/veclib-llvm.modf.ll b/llvm/test/CodeGen/AArch64/veclib-llvm.modf.ll
index 78dbc09a57afd..1509d500a6a36 100644
--- a/llvm/test/CodeGen/AArch64/veclib-llvm.modf.ll
+++ b/llvm/test/CodeGen/AArch64/veclib-llvm.modf.ll
@@ -67,7 +67,7 @@ define <vscale x 4 x float> @test_modf_nxv4f32(<vscale x 4 x float> %x, ptr %out
 ; ARMPL-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; ARMPL-NEXT:    .cfi_def_cfa_offset 16
 ; ARMPL-NEXT:    .cfi_offset w30, -16
-; ARMPL-NEXT:    ptrue p0.s
+; ARMPL-NEXT:    ptrue p0.b
 ; ARMPL-NEXT:    bl armpl_svmodf_f32_x
 ; ARMPL-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; ARMPL-NEXT:    ret
@@ -93,7 +93,7 @@ define <vscale x 2 x double> @test_modf_nxv2f64(<vscale x 2 x double> %x, ptr %o
 ; ARMPL-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; ARMPL-NEXT:    .cfi_def_cfa_offset 16
 ; ARMPL-NEXT:    .cfi_offset w30, -16
-; ARMPL-NEXT:    ptrue p0.d
+; ARMPL-NEXT:    ptrue p0.b
 ; ARMPL-NEXT:    bl armpl_svmodf_f64_x
 ; ARMPL-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; ARMPL-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/veclib-llvm.sincos.ll b/llvm/test/CodeGen/AArch64/veclib-llvm.sincos.ll
index e18ac46165d2e..24ae60378031f 100644
--- a/llvm/test/CodeGen/AArch64/veclib-llvm.sincos.ll
+++ b/llvm/test/CodeGen/AArch64/veclib-llvm.sincos.ll
@@ -35,7 +35,7 @@ define void @test_sincos_nxv4f32(<vscale x 4 x float> %x, ptr noalias %out_sin,
 ; SLEEF:    bl _ZGVsNxvl4l4_sincosf
 ;
 ; ARMPL-LABEL: test_sincos_nxv4f32:
-; ARMPL:    ptrue p0.s
+; ARMPL:    ptrue p0.b
 ; ARMPL:    bl armpl_svsincos_f32_x
   %result = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.sincos.nxv4f32(<vscale x 4 x float> %x)
   %result.0 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } %result, 0
@@ -50,7 +50,7 @@ define void @test_sincos_nxv2f64(<vscale x 2 x double> %x, ptr noalias %out_sin,
 ; SLEEF:    bl _ZGVsNxvl8l8_sincos
 ;
 ; ARMPL-LABEL: test_sincos_nxv2f64:
-; ARMPL:    ptrue p0.d
+; ARMPL:    ptrue p0.b
 ; ARMPL:    bl armpl_svsincos_f64_x
   %result = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.sincos.nxv2f64(<vscale x 2 x double> %x)
   %result.0 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %result, 0
diff --git a/llvm/test/CodeGen/AArch64/veclib-llvm.sincospi.ll b/llvm/test/CodeGen/AArch64/veclib-llvm.sincospi.ll
index fad865d20f7df..3a6e9e8a7e0f7 100644
--- a/llvm/test/CodeGen/AArch64/veclib-llvm.sincospi.ll
+++ b/llvm/test/CodeGen/AArch64/veclib-llvm.sincospi.ll
@@ -69,7 +69,7 @@ define void @test_sincospi_nxv4f32(<vscale x 4 x float> %x, ptr noalias %out_sin
 ; ARMPL-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; ARMPL-NEXT:    .cfi_def_cfa_offset 16
 ; ARMPL-NEXT:    .cfi_offset w30, -16
-; ARMPL-NEXT:    ptrue p0.s
+; ARMPL-NEXT:    ptrue p0.b
 ; ARMPL-NEXT:    bl armpl_svsincospi_f32_x
 ; ARMPL-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; ARMPL-NEXT:    ret
@@ -96,7 +96,7 @@ define void @test_sincospi_nxv2f64(<vscale x 2 x double> %x, ptr noalias %out_si
 ; ARMPL-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; ARMPL-NEXT:    .cfi_def_cfa_offset 16
 ; ARMPL-NEXT:    .cfi_offset w30, -16
-; ARMPL-NEXT:    ptrue p0.d
+; ARMPL-NEXT:    ptrue p0.b
 ; ARMPL-NEXT:    bl armpl_svsincospi_f64_x
 ; ARMPL-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; ARMPL-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/zext-to-tbl.ll b/llvm/test/CodeGen/AArch64/zext-to-tbl.ll
index 6536f0c355b47..9801fc468db15 100644
--- a/llvm/test/CodeGen/AArch64/zext-to-tbl.ll
+++ b/llvm/test/CodeGen/AArch64/zext-to-tbl.ll
@@ -1963,7 +1963,7 @@ exit:
 define void @zext_v16i8_to_v16i32_in_loop_scalable_vectors(ptr %src, ptr %dst) {
 ; CHECK-LABEL: zext_v16i8_to_v16i32_in_loop_scalable_vectors:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov x8, xzr
 ; CHECK-NEXT:  LBB19_1: ; %loop
 ; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
@@ -1989,7 +1989,7 @@ define void @zext_v16i8_to_v16i32_in_loop_scalable_vectors(ptr %src, ptr %dst) {
 ;
 ; CHECK-BE-LABEL: zext_v16i8_to_v16i32_in_loop_scalable_vectors:
 ; CHECK-BE:       // %bb.0: // %entry
-; CHECK-BE-NEXT:    ptrue p0.s
+; CHECK-BE-NEXT:    ptrue p0.b
 ; CHECK-BE-NEXT:    mov x8, xzr
 ; CHECK-BE-NEXT:  .LBB19_1: // %loop
 ; CHECK-BE-NEXT:    // =>This Inner Loop Header: Depth=1
diff --git a/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll b/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll
index bae69ef590f52..196273cea3654 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll
@@ -63,7 +63,6 @@ define void @many_mulvl1_addressing(ptr %src_rows, ptr %dst_rows, i64 %stride, i
 ; COMMON-LABEL: many_mulvl1_addressing:
 ; COMMON:       // %bb.0: // %entry
 ; COMMON-NEXT:    ptrue p0.b
-; COMMON-NEXT:    ptrue p1.h
 ; COMMON-NEXT:  .LBB1_1: // %for.body
 ; COMMON-NEXT:    // =>This Inner Loop Header: Depth=1
 ; COMMON-NEXT:    add x8, x0, x2
@@ -75,8 +74,8 @@ define void @many_mulvl1_addressing(ptr %src_rows, ptr %dst_rows, i64 %stride, i
 ; COMMON-NEXT:    addvl x0, x0, #2
 ; COMMON-NEXT:    add z0.b, z0.b, z1.b
 ; COMMON-NEXT:    add z1.b, z2.b, z3.b
-; COMMON-NEXT:    st1b { z0.h }, p1, [x1]
-; COMMON-NEXT:    st1b { z1.h }, p1, [x1, #1, mul vl]
+; COMMON-NEXT:    st1b { z0.h }, p0, [x1]
+; COMMON-NEXT:    st1b { z1.h }, p0, [x1, #1, mul vl]
 ; COMMON-NEXT:    addvl x1, x1, #2
 ; COMMON-NEXT:    b.ne .LBB1_1
 ; COMMON-NEXT:  // %bb.2: // %for.exit
@@ -156,7 +155,7 @@ for.exit:
 define void @mixed_offsets_scalable_then_fixed(ptr %src, ptr %dst, i64 %count) #0 {
 ; BASE-LABEL: mixed_offsets_scalable_then_fixed:
 ; BASE:       // %bb.0: // %entry
-; BASE-NEXT:    ptrue p0.s
+; BASE-NEXT:    ptrue p0.b
 ; BASE-NEXT:    addvl x8, x0, #4
 ; BASE-NEXT:    mov x9, #8 // =0x8
 ; BASE-NEXT:  .LBB3_1: // %for.body
@@ -176,7 +175,7 @@ define void @mixed_offsets_scalable_then_fixed(ptr %src, ptr %dst, i64 %count) #
 ;
 ; PREINDEX-LABEL: mixed_offsets_scalable_then_fixed:
 ; PREINDEX:       // %bb.0: // %entry
-; PREINDEX-NEXT:    ptrue p0.s
+; PREINDEX-NEXT:    ptrue p0.b
 ; PREINDEX-NEXT:    addvl x8, x0, #4
 ; PREINDEX-NEXT:    mov x9, #8 // =0x8
 ; PREINDEX-NEXT:  .LBB3_1: // %for.body
@@ -196,7 +195,7 @@ define void @mixed_offsets_scalable_then_fixed(ptr %src, ptr %dst, i64 %count) #
 ;
 ; POSTINDEX-LABEL: mixed_offsets_scalable_then_fixed:
 ; POSTINDEX:       // %bb.0: // %entry
-; POSTINDEX-NEXT:    ptrue p0.s
+; POSTINDEX-NEXT:    ptrue p0.b
 ; POSTINDEX-NEXT:    mov x8, xzr
 ; POSTINDEX-NEXT:    addvl x9, x0, #4
 ; POSTINDEX-NEXT:    mov x10, #8 // =0x8
@@ -244,7 +243,7 @@ define void @mixed_offsets_fixed_then_scalable(ptr %src, ptr %dst, i64 %count) #
 ; COMMON-LABEL: mixed_offsets_fixed_then_scalable:
 ; COMMON:       // %bb.0: // %entry
 ; COMMON-NEXT:    addvl x9, x0, #4
-; COMMON-NEXT:    ptrue p0.s
+; COMMON-NEXT:    ptrue p0.b
 ; COMMON-NEXT:    mov x8, xzr
 ; COMMON-NEXT:    add x9, x9, #32
 ; COMMON-NEXT:    mov x10, #8 // =0x8
@@ -332,7 +331,7 @@ define void @three_access_wide_gap(ptr %src, ptr %dst, i64 %count) #0 {
 ;
 ; POSTINDEX-LABEL: three_access_wide_gap:
 ; POSTINDEX:       // %bb.0: // %entry
-; POSTINDEX-NEXT:    ptrue p0.s
+; POSTINDEX-NEXT:    ptrue p0.b
 ; POSTINDEX-NEXT:    mov x8, xzr
 ; POSTINDEX-NEXT:  .LBB5_1: // %for.body
 ; POSTINDEX-NEXT:    // =>This Inner Loop Header: Depth=1
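
Why a byte-granularity PTRUE can stand in for the element-sized ones: an SVE
predicate register holds one bit per byte of the vector, and for an element
size of E bytes a lane is governed by the lowest bit of its E-byte group, so
a ptrue p0.b (every bit set) is also all-active when read at .h, .s, or .d
granularity. Canonicalizing on PTRUE_B therefore lets a single ptrue serve
every element size, which is the CSE visible in the vscale-fixups.ll hunk
above, where the separate ptrue p1.h disappears and both stores reuse p0.
A minimal C sketch of the same idea using ACLE intrinsics (the function and
variable names are illustrative, not taken from the patch):

  #include <arm_sve.h>

  void store_both(svfloat32_t a, svfloat64_t b, float *pf, double *pd) {
    // One byte-granularity all-true predicate: every predicate bit is set.
    svbool_t pg = svptrue_b8();
    // A .s lane i tests predicate bit 4*i and a .d lane i tests bit 8*i;
    // all of those bits are set, so the same pg governs both element sizes.
    svst1_f32(pg, pf, a); // all 32-bit lanes active
    svst1_f64(pg, pd, b); // all 64-bit lanes active
  }

Compiled with SVE enabled (e.g. -O2 -march=armv8-a+sve), a compiler can emit
one ptrue p0.b shared by both stores instead of separate ptrue p0.s and
ptrue p0.d instructions.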


