[llvm] [AArch64] Split large loop dependence masks (PR #153187)

Sam Tebbs via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 25 09:52:12 PST 2025


https://github.com/SamTebbs33 updated https://github.com/llvm/llvm-project/pull/153187

From 0a8f1f191436e7be393574b61dc85074cd7585b3 Mon Sep 17 00:00:00 2001
From: Sam Tebbs <samuel.tebbs at arm.com>
Date: Mon, 17 Nov 2025 17:30:30 +0000
Subject: [PATCH 1/3] [SelectionDAG] Fix unsafe cases for
 loop.dependence.{war/raw}.mask

There is an unsafe case with the loop dependence mask intrinsics where
the difference between the two pointers is less than half the vector
length, e.g. ptrA = 0 and ptrB = 3 when the vector length is 32.
Currently that produces a correct low mask with 3 active lanes and an
incorrect high mask with all lanes active. This PR adds a select on the
high mask which guards against this case.
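
To make the failure concrete, here is a minimal scalar sketch of the
scenario (illustrative only, not part of the patch). It assumes SVE
WHILEWR-style semantics for the WAR mask, i.e. lane i is active iff the
signed pointer distance is non-positive or i is below it; warMask and VL
are made-up names:

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  // Lane i is active iff the distance is non-positive or i < distance.
  static std::vector<bool> warMask(int64_t PtrA, int64_t PtrB,
                                   unsigned Lanes) {
    int64_t Dist = PtrB - PtrA;
    std::vector<bool> Mask(Lanes);
    for (unsigned I = 0; I < Lanes; ++I)
      Mask[I] = Dist <= 0 || (int64_t)I < Dist;
    return Mask;
  }

  int main() {
    const unsigned VL = 32;
    const int64_t PtrA = 0, PtrB = 3; // the problematic inputs from above
    auto Full = warMask(PtrA, PtrB, VL);      // lanes 0..2 active, rest off
    // Naive high mask: recompute at PtrA + VL/2, so the distance is -13.
    auto NaiveHi = warMask(PtrA + VL / 2, PtrB, VL / 2); // all lanes active!
    bool Guard = PtrA + VL / 2 >= PtrB; // the select condition added here
    for (unsigned I = 0; I < VL / 2; ++I)
      printf("lane %2u: full=%d naive-hi=%d guarded-hi=%d\n", VL / 2 + I,
             (int)Full[VL / 2 + I], (int)NaiveHi[I],
             Guard ? 0 : (int)NaiveHi[I]);
  }

For these inputs the naive high mask is all-active while the upper half
of the full mask is all-inactive; the guarded high mask matches the full
mask.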
---
 .../SelectionDAG/LegalizeVectorTypes.cpp      |  19 +
 llvm/test/CodeGen/AArch64/alias_mask.ll       | 440 ++++++++++--------
 .../CodeGen/AArch64/alias_mask_scalable.ll    | 288 +++++++-----
 3 files changed, 425 insertions(+), 322 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 71eeee78bd868..c8d66cc21244f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1692,6 +1692,18 @@ void DAGTypeLegalizer::SplitVecRes_BITCAST(SDNode *N, SDValue &Lo,
   Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
 }
 
+/// Split a loop dependence mask.
+/// This is done by creating a low and a high mask, each covering half the
+/// vector length. A select between the high mask and an all-zeroes predicate
+/// is needed to guarantee that the high mask is safe. Producing the high
+/// mask without the select is unsafe when the difference between the two
+/// pointers is less than half the vector length, e.g. ptrA = 0 and ptrB = 3
+/// when the vector length is 32.
+///     The full 32xi1 mask should have three active lanes and the rest
+///     inactive; however, once half the vector length is added to ptrA to
+///     produce the high mask, the difference between ptrA and ptrB becomes
+///     -13, which results in a mask with all lanes active. The select guards
+///     against this by choosing an all-inactive mask when ptrA + VL/2 >= ptrB.
 void DAGTypeLegalizer::SplitVecRes_LOOP_DEPENDENCE_MASK(SDNode *N, SDValue &Lo,
                                                         SDValue &Hi) {
   SDLoc DL(N);
@@ -1708,7 +1720,14 @@ void DAGTypeLegalizer::SplitVecRes_LOOP_DEPENDENCE_MASK(SDNode *N, SDValue &Lo,
                        : DAG.getConstant(Offset, DL, MVT::i64);
 
   PtrA = DAG.getNode(ISD::ADD, DL, MVT::i64, PtrA, Addend);
+  EVT CmpVT = MVT::i1;
+  SDValue Cmp = DAG.getSetCC(DL, CmpVT, PtrA, PtrB, ISD::CondCode::SETUGE);
+  Cmp = DAG.getSplat(EVT::getVectorVT(*DAG.getContext(), CmpVT,
+                                      HiVT.getVectorMinNumElements(),
+                                      HiVT.isScalableVT()),
+                     DL, Cmp);
   Hi = DAG.getNode(N->getOpcode(), DL, HiVT, PtrA, PtrB, N->getOperand(2));
+  Hi = DAG.getSelect(DL, HiVT, Cmp, DAG.getConstant(0, DL, HiVT), Hi);
 }
 
 void DAGTypeLegalizer::SplitVecRes_BUILD_VECTOR(SDNode *N, SDValue &Lo,
diff --git a/llvm/test/CodeGen/AArch64/alias_mask.ll b/llvm/test/CodeGen/AArch64/alias_mask.ll
index fdd0a6a4709da..da14e17bf2463 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask.ll
@@ -101,26 +101,30 @@ define <32 x i1> @whilewr_8_split(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_8_split:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    add x9, x0, #16
-; CHECK-NEXT:    whilewr p0.b, x0, x1
-; CHECK-NEXT:    whilewr p1.b, x9, x1
+; CHECK-NEXT:    cmp x9, x1
+; CHECK-NEXT:    cset w10, hs
+; CHECK-NEXT:    whilewr p0.b, x9, x1
 ; CHECK-NEXT:    adrp x9, .LCPI8_0
-; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    dup v0.16b, w10
+; CHECK-NEXT:    mov z1.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    whilewr p0.b, x0, x1
+; CHECK-NEXT:    mov z2.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    bic v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    shl v1.16b, v2.16b, #7
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI8_0]
-; CHECK-NEXT:    mov z1.b, p1/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
-; CHECK-NEXT:    shl v1.16b, v1.16b, #7
-; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
 ; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
-; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
 ; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
-; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    zip1 v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    zip1 v1.16b, v1.16b, v3.16b
-; CHECK-NEXT:    addv h0, v0.8h
+; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    ext v2.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    ext v3.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    zip1 v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    zip1 v0.16b, v0.16b, v3.16b
 ; CHECK-NEXT:    addv h1, v1.8h
-; CHECK-NEXT:    str h0, [x8]
-; CHECK-NEXT:    str h1, [x8, #2]
+; CHECK-NEXT:    addv h0, v0.8h
+; CHECK-NEXT:    str h1, [x8]
+; CHECK-NEXT:    str h0, [x8, #2]
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <32 x i1> @llvm.loop.dependence.war.mask.v32i1(ptr %a, ptr %b, i64 1)
@@ -131,46 +135,61 @@ define <64 x i1> @whilewr_8_split2(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_8_split2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    add x9, x0, #48
-; CHECK-NEXT:    whilewr p0.b, x0, x1
 ; CHECK-NEXT:    add x10, x0, #16
-; CHECK-NEXT:    whilewr p1.b, x9, x1
-; CHECK-NEXT:    add x9, x0, #32
-; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    whilewr p0.b, x9, x1
+; CHECK-NEXT:    cmp x9, x1
+; CHECK-NEXT:    cset w9, hs
+; CHECK-NEXT:    mov z1.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    dup v0.16b, w9
+; CHECK-NEXT:    add x9, x0, #32
+; CHECK-NEXT:    cmp x9, x1
+; CHECK-NEXT:    cset w11, hs
+; CHECK-NEXT:    cmp x10, x1
+; CHECK-NEXT:    shl v0.16b, v0.16b, #7
+; CHECK-NEXT:    cset w12, hs
+; CHECK-NEXT:    whilewr p1.b, x9, x1
+; CHECK-NEXT:    whilewr p0.b, x10, x1
+; CHECK-NEXT:    dup v2.16b, w11
+; CHECK-NEXT:    dup v5.16b, w12
+; CHECK-NEXT:    mov z3.b, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    whilewr p1.b, x0, x1
 ; CHECK-NEXT:    adrp x9, .LCPI9_0
+; CHECK-NEXT:    cmge v0.16b, v0.16b, #0
+; CHECK-NEXT:    mov z4.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    bic v3.16b, v3.16b, v2.16b
+; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    mov z1.b, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    whilewr p1.b, x10, x1
+; CHECK-NEXT:    bic v4.16b, v4.16b, v5.16b
+; CHECK-NEXT:    bic v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    shl v1.16b, v1.16b, #7
+; CHECK-NEXT:    shl v2.16b, v3.16b, #7
+; CHECK-NEXT:    shl v3.16b, v4.16b, #7
 ; CHECK-NEXT:    ldr q4, [x9, :lo12:.LCPI9_0]
-; CHECK-NEXT:    mov z2.b, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    mov z3.b, p1/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
-; CHECK-NEXT:    shl v1.16b, v1.16b, #7
-; CHECK-NEXT:    shl v2.16b, v2.16b, #7
-; CHECK-NEXT:    shl v3.16b, v3.16b, #7
-; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
 ; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
 ; CHECK-NEXT:    cmlt v2.16b, v2.16b, #0
 ; CHECK-NEXT:    cmlt v3.16b, v3.16b, #0
-; CHECK-NEXT:    and v0.16b, v0.16b, v4.16b
+; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
 ; CHECK-NEXT:    and v1.16b, v1.16b, v4.16b
 ; CHECK-NEXT:    and v2.16b, v2.16b, v4.16b
 ; CHECK-NEXT:    and v3.16b, v3.16b, v4.16b
-; CHECK-NEXT:    ext v4.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    and v0.16b, v0.16b, v4.16b
 ; CHECK-NEXT:    ext v5.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ext v6.16b, v2.16b, v2.16b, #8
-; CHECK-NEXT:    ext v7.16b, v3.16b, v3.16b, #8
-; CHECK-NEXT:    zip1 v0.16b, v0.16b, v4.16b
+; CHECK-NEXT:    ext v4.16b, v2.16b, v2.16b, #8
+; CHECK-NEXT:    ext v6.16b, v3.16b, v3.16b, #8
+; CHECK-NEXT:    ext v7.16b, v0.16b, v0.16b, #8
 ; CHECK-NEXT:    zip1 v1.16b, v1.16b, v5.16b
-; CHECK-NEXT:    zip1 v2.16b, v2.16b, v6.16b
-; CHECK-NEXT:    zip1 v3.16b, v3.16b, v7.16b
-; CHECK-NEXT:    addv h0, v0.8h
+; CHECK-NEXT:    zip1 v2.16b, v2.16b, v4.16b
+; CHECK-NEXT:    zip1 v3.16b, v3.16b, v6.16b
+; CHECK-NEXT:    zip1 v0.16b, v0.16b, v7.16b
 ; CHECK-NEXT:    addv h1, v1.8h
 ; CHECK-NEXT:    addv h2, v2.8h
 ; CHECK-NEXT:    addv h3, v3.8h
-; CHECK-NEXT:    str h0, [x8]
-; CHECK-NEXT:    str h1, [x8, #6]
+; CHECK-NEXT:    addv h0, v0.8h
+; CHECK-NEXT:    str h1, [x8]
 ; CHECK-NEXT:    str h2, [x8, #4]
 ; CHECK-NEXT:    str h3, [x8, #2]
+; CHECK-NEXT:    str h0, [x8, #6]
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <64 x i1> @llvm.loop.dependence.war.mask.v64i1(ptr %a, ptr %b, i64 1)
@@ -227,69 +246,74 @@ entry:
 define <32 x i1> @whilewr_16_expand2(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_16_expand2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub x9, x1, x0
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    sub x10, x9, #32
-; CHECK-NEXT:    add x9, x9, x9, lsr #63
+; CHECK-NEXT:    add x9, x0, #32
+; CHECK-NEXT:    sub x10, x1, x0
+; CHECK-NEXT:    subs x9, x1, x9
 ; CHECK-NEXT:    add x10, x10, x10, lsr #63
-; CHECK-NEXT:    asr x9, x9, #1
-; CHECK-NEXT:    asr x10, x10, #1
+; CHECK-NEXT:    add x11, x9, x9, lsr #63
+; CHECK-NEXT:    asr x9, x10, #1
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z0.d
 ; CHECK-NEXT:    mov z4.d, z0.d
 ; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z6.d, z0.d
-; CHECK-NEXT:    dup v7.2d, x9
-; CHECK-NEXT:    dup v16.2d, x10
+; CHECK-NEXT:    mov z7.d, z0.d
+; CHECK-NEXT:    mov z16.d, z0.d
+; CHECK-NEXT:    mov z18.d, z0.d
+; CHECK-NEXT:    asr x10, x11, #1
+; CHECK-NEXT:    dup v3.2d, x9
 ; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
 ; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
+; CHECK-NEXT:    add z4.d, z4.d, #8 // =0x8
+; CHECK-NEXT:    dup v6.2d, x10
+; CHECK-NEXT:    add z5.d, z5.d, #6 // =0x6
+; CHECK-NEXT:    add z7.d, z7.d, #4 // =0x4
+; CHECK-NEXT:    add z18.d, z18.d, #14 // =0xe
+; CHECK-NEXT:    add z16.d, z16.d, #2 // =0x2
+; CHECK-NEXT:    cmhi v17.2d, v3.2d, v0.2d
+; CHECK-NEXT:    cmhi v19.2d, v3.2d, v1.2d
+; CHECK-NEXT:    cmhi v20.2d, v3.2d, v2.2d
+; CHECK-NEXT:    cset w11, ls
+; CHECK-NEXT:    cmhi v0.2d, v6.2d, v0.2d
+; CHECK-NEXT:    cmhi v1.2d, v6.2d, v1.2d
+; CHECK-NEXT:    cmhi v2.2d, v6.2d, v2.2d
+; CHECK-NEXT:    cmhi v21.2d, v6.2d, v4.2d
+; CHECK-NEXT:    cmhi v22.2d, v6.2d, v18.2d
+; CHECK-NEXT:    cmhi v23.2d, v6.2d, v5.2d
+; CHECK-NEXT:    cmhi v24.2d, v6.2d, v7.2d
+; CHECK-NEXT:    cmhi v6.2d, v6.2d, v16.2d
+; CHECK-NEXT:    cmhi v4.2d, v3.2d, v4.2d
+; CHECK-NEXT:    cmhi v5.2d, v3.2d, v5.2d
+; CHECK-NEXT:    cmhi v18.2d, v3.2d, v18.2d
 ; CHECK-NEXT:    cmp x10, #1
-; CHECK-NEXT:    add z3.d, z3.d, #8 // =0x8
-; CHECK-NEXT:    add z4.d, z4.d, #6 // =0x6
-; CHECK-NEXT:    add z5.d, z5.d, #4 // =0x4
-; CHECK-NEXT:    add z6.d, z6.d, #2 // =0x2
-; CHECK-NEXT:    cmhi v17.2d, v7.2d, v0.2d
-; CHECK-NEXT:    cmhi v18.2d, v16.2d, v0.2d
-; CHECK-NEXT:    add z0.d, z0.d, #14 // =0xe
-; CHECK-NEXT:    cmhi v19.2d, v7.2d, v1.2d
-; CHECK-NEXT:    cmhi v20.2d, v7.2d, v2.2d
-; CHECK-NEXT:    cmhi v21.2d, v7.2d, v3.2d
-; CHECK-NEXT:    cmhi v22.2d, v7.2d, v4.2d
-; CHECK-NEXT:    cmhi v23.2d, v7.2d, v5.2d
-; CHECK-NEXT:    cmhi v24.2d, v7.2d, v6.2d
-; CHECK-NEXT:    cmhi v1.2d, v16.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v16.2d, v2.2d
-; CHECK-NEXT:    cmhi v3.2d, v16.2d, v3.2d
-; CHECK-NEXT:    cmhi v4.2d, v16.2d, v4.2d
-; CHECK-NEXT:    cmhi v7.2d, v7.2d, v0.2d
-; CHECK-NEXT:    cmhi v5.2d, v16.2d, v5.2d
-; CHECK-NEXT:    cmhi v6.2d, v16.2d, v6.2d
+; CHECK-NEXT:    cmhi v7.2d, v3.2d, v7.2d
+; CHECK-NEXT:    cmhi v3.2d, v3.2d, v16.2d
 ; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    cmhi v0.2d, v16.2d, v0.2d
-; CHECK-NEXT:    uzp1 v16.4s, v21.4s, v20.4s
+; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v22.4s
+; CHECK-NEXT:    uzp1 v2.4s, v21.4s, v2.4s
 ; CHECK-NEXT:    cmp x9, #1
-; CHECK-NEXT:    uzp1 v20.4s, v23.4s, v22.4s
-; CHECK-NEXT:    uzp1 v17.4s, v17.4s, v24.4s
+; CHECK-NEXT:    uzp1 v16.4s, v24.4s, v23.4s
+; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v6.4s
 ; CHECK-NEXT:    cset w9, lt
-; CHECK-NEXT:    uzp1 v2.4s, v3.4s, v2.4s
-; CHECK-NEXT:    uzp1 v3.4s, v19.4s, v7.4s
-; CHECK-NEXT:    uzp1 v4.4s, v5.4s, v4.4s
-; CHECK-NEXT:    uzp1 v5.4s, v18.4s, v6.4s
-; CHECK-NEXT:    uzp1 v0.4s, v1.4s, v0.4s
-; CHECK-NEXT:    uzp1 v1.8h, v17.8h, v20.8h
-; CHECK-NEXT:    uzp1 v3.8h, v16.8h, v3.8h
-; CHECK-NEXT:    uzp1 v4.8h, v5.8h, v4.8h
-; CHECK-NEXT:    uzp1 v0.8h, v2.8h, v0.8h
-; CHECK-NEXT:    dup v2.16b, w9
+; CHECK-NEXT:    uzp1 v6.4s, v19.4s, v18.4s
+; CHECK-NEXT:    uzp1 v4.4s, v4.4s, v20.4s
+; CHECK-NEXT:    uzp1 v5.4s, v7.4s, v5.4s
+; CHECK-NEXT:    uzp1 v3.4s, v17.4s, v3.4s
+; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
+; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v16.8h
+; CHECK-NEXT:    uzp1 v2.8h, v4.8h, v6.8h
+; CHECK-NEXT:    uzp1 v3.8h, v3.8h, v5.8h
+; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    dup v1.16b, w10
+; CHECK-NEXT:    uzp1 v2.16b, v3.16b, v2.16b
+; CHECK-NEXT:    dup v3.16b, w9
 ; CHECK-NEXT:    adrp x9, .LCPI11_0
-; CHECK-NEXT:    uzp1 v1.16b, v1.16b, v3.16b
-; CHECK-NEXT:    dup v3.16b, w10
-; CHECK-NEXT:    uzp1 v0.16b, v4.16b, v0.16b
-; CHECK-NEXT:    orr v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    dup v1.16b, w11
+; CHECK-NEXT:    orr v2.16b, v2.16b, v3.16b
+; CHECK-NEXT:    bic v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    shl v1.16b, v2.16b, #7
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI11_0]
-; CHECK-NEXT:    orr v0.16b, v0.16b, v3.16b
-; CHECK-NEXT:    shl v1.16b, v1.16b, #7
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
 ; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
 ; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
@@ -393,85 +417,89 @@ entry:
 define <32 x i1> @whilewr_32_expand3(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_32_expand3:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub x10, x1, x0
+; CHECK-NEXT:    add x9, x0, #64
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    sub x9, x10, #61
-; CHECK-NEXT:    subs x11, x10, #64
-; CHECK-NEXT:    add x12, x10, #3
-; CHECK-NEXT:    csel x9, x9, x11, mi
+; CHECK-NEXT:    subs x9, x1, x9
+; CHECK-NEXT:    add x10, x9, #3
+; CHECK-NEXT:    csel x9, x10, x9, mi
 ; CHECK-NEXT:    asr x11, x9, #2
+; CHECK-NEXT:    cset w9, ls
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    mov z2.d, z0.d
 ; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    cmp x11, #1
 ; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    cset w9, lt
-; CHECK-NEXT:    cmp x10, #0
-; CHECK-NEXT:    mov z6.d, z0.d
-; CHECK-NEXT:    csel x10, x12, x10, mi
-; CHECK-NEXT:    dup v7.2d, x11
+; CHECK-NEXT:    cmp x11, #1
+; CHECK-NEXT:    mov z7.d, z0.d
+; CHECK-NEXT:    mov z16.d, z0.d
+; CHECK-NEXT:    mov z17.d, z0.d
+; CHECK-NEXT:    cset w10, lt
+; CHECK-NEXT:    subs x12, x1, x0
+; CHECK-NEXT:    add x13, x12, #3
+; CHECK-NEXT:    dup v5.2d, x11
 ; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
-; CHECK-NEXT:    asr x10, x10, #2
+; CHECK-NEXT:    csel x12, x13, x12, mi
 ; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
 ; CHECK-NEXT:    add z3.d, z3.d, #8 // =0x8
 ; CHECK-NEXT:    add z4.d, z4.d, #6 // =0x6
-; CHECK-NEXT:    add z5.d, z5.d, #4 // =0x4
-; CHECK-NEXT:    add z6.d, z6.d, #2 // =0x2
-; CHECK-NEXT:    dup v16.2d, x10
-; CHECK-NEXT:    cmhi v17.2d, v7.2d, v0.2d
-; CHECK-NEXT:    cmhi v19.2d, v7.2d, v1.2d
-; CHECK-NEXT:    cmhi v20.2d, v7.2d, v2.2d
-; CHECK-NEXT:    cmhi v21.2d, v7.2d, v3.2d
-; CHECK-NEXT:    cmp x10, #1
-; CHECK-NEXT:    cmhi v22.2d, v7.2d, v4.2d
-; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    cmhi v18.2d, v16.2d, v0.2d
-; CHECK-NEXT:    add z0.d, z0.d, #14 // =0xe
-; CHECK-NEXT:    cmhi v1.2d, v16.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v16.2d, v2.2d
-; CHECK-NEXT:    cmhi v3.2d, v16.2d, v3.2d
-; CHECK-NEXT:    cmhi v4.2d, v16.2d, v4.2d
-; CHECK-NEXT:    cmhi v23.2d, v16.2d, v5.2d
-; CHECK-NEXT:    cmhi v24.2d, v16.2d, v6.2d
-; CHECK-NEXT:    cmhi v5.2d, v7.2d, v5.2d
-; CHECK-NEXT:    cmhi v16.2d, v16.2d, v0.2d
-; CHECK-NEXT:    cmhi v6.2d, v7.2d, v6.2d
-; CHECK-NEXT:    cmhi v0.2d, v7.2d, v0.2d
-; CHECK-NEXT:    uzp1 v7.4s, v21.4s, v20.4s
+; CHECK-NEXT:    add z7.d, z7.d, #4 // =0x4
+; CHECK-NEXT:    add z17.d, z17.d, #14 // =0xe
+; CHECK-NEXT:    add z16.d, z16.d, #2 // =0x2
+; CHECK-NEXT:    asr x12, x12, #2
+; CHECK-NEXT:    cmhi v18.2d, v5.2d, v0.2d
+; CHECK-NEXT:    cmhi v19.2d, v5.2d, v1.2d
+; CHECK-NEXT:    cmhi v20.2d, v5.2d, v2.2d
+; CHECK-NEXT:    cmhi v21.2d, v5.2d, v3.2d
+; CHECK-NEXT:    dup v6.2d, x12
+; CHECK-NEXT:    cmhi v22.2d, v5.2d, v4.2d
+; CHECK-NEXT:    cmhi v23.2d, v5.2d, v7.2d
+; CHECK-NEXT:    cmhi v24.2d, v5.2d, v17.2d
+; CHECK-NEXT:    cmhi v5.2d, v5.2d, v16.2d
+; CHECK-NEXT:    cmp x12, #1
+; CHECK-NEXT:    cmhi v0.2d, v6.2d, v0.2d
+; CHECK-NEXT:    cmhi v1.2d, v6.2d, v1.2d
+; CHECK-NEXT:    cmhi v2.2d, v6.2d, v2.2d
+; CHECK-NEXT:    cmhi v3.2d, v6.2d, v3.2d
+; CHECK-NEXT:    cmhi v4.2d, v6.2d, v4.2d
+; CHECK-NEXT:    cmhi v17.2d, v6.2d, v17.2d
+; CHECK-NEXT:    cmhi v7.2d, v6.2d, v7.2d
+; CHECK-NEXT:    cmhi v6.2d, v6.2d, v16.2d
+; CHECK-NEXT:    uzp1 v16.4s, v19.4s, v24.4s
+; CHECK-NEXT:    uzp1 v19.4s, v21.4s, v20.4s
+; CHECK-NEXT:    uzp1 v20.4s, v23.4s, v22.4s
+; CHECK-NEXT:    uzp1 v5.4s, v18.4s, v5.4s
+; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v17.4s
 ; CHECK-NEXT:    uzp1 v2.4s, v3.4s, v2.4s
-; CHECK-NEXT:    uzp1 v3.4s, v23.4s, v4.4s
-; CHECK-NEXT:    uzp1 v4.4s, v18.4s, v24.4s
-; CHECK-NEXT:    uzp1 v5.4s, v5.4s, v22.4s
-; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v16.4s
-; CHECK-NEXT:    uzp1 v6.4s, v17.4s, v6.4s
-; CHECK-NEXT:    uzp1 v0.4s, v19.4s, v0.4s
-; CHECK-NEXT:    uzp1 v3.8h, v4.8h, v3.8h
+; CHECK-NEXT:    uzp1 v3.4s, v7.4s, v4.4s
+; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v6.4s
+; CHECK-NEXT:    uzp1 v4.8h, v19.8h, v16.8h
+; CHECK-NEXT:    uzp1 v5.8h, v5.8h, v20.8h
 ; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT:    uzp1 v2.8h, v6.8h, v5.8h
-; CHECK-NEXT:    uzp1 v0.8h, v7.8h, v0.8h
-; CHECK-NEXT:    uzp1 v1.16b, v3.16b, v1.16b
-; CHECK-NEXT:    uzp1 v0.16b, v2.16b, v0.16b
+; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v3.8h
 ; CHECK-NEXT:    dup v3.16b, w10
-; CHECK-NEXT:    dup v2.16b, w9
+; CHECK-NEXT:    cset w10, lt
+; CHECK-NEXT:    uzp1 v2.16b, v5.16b, v4.16b
+; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    dup v1.16b, w10
+; CHECK-NEXT:    orr v2.16b, v2.16b, v3.16b
+; CHECK-NEXT:    dup v3.16b, w9
 ; CHECK-NEXT:    adrp x9, .LCPI14_0
-; CHECK-NEXT:    orr v1.16b, v1.16b, v3.16b
-; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    bic v1.16b, v2.16b, v3.16b
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI14_0]
-; CHECK-NEXT:    shl v1.16b, v1.16b, #7
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
-; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
+; CHECK-NEXT:    shl v1.16b, v1.16b, #7
 ; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
-; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
 ; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    ext v2.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ext v3.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    zip1 v1.16b, v1.16b, v2.16b
-; CHECK-NEXT:    zip1 v0.16b, v0.16b, v3.16b
-; CHECK-NEXT:    addv h1, v1.8h
+; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    zip1 v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    zip1 v1.16b, v1.16b, v3.16b
 ; CHECK-NEXT:    addv h0, v0.8h
-; CHECK-NEXT:    str h1, [x8]
-; CHECK-NEXT:    str h0, [x8, #2]
+; CHECK-NEXT:    addv h1, v1.8h
+; CHECK-NEXT:    str h0, [x8]
+; CHECK-NEXT:    str h1, [x8, #2]
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <32 x i1> @llvm.loop.dependence.war.mask.v32i1(ptr %a, ptr %b, i64 4)
@@ -587,85 +615,89 @@ entry:
 define <32 x i1> @whilewr_64_expand4(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_64_expand4:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub x10, x1, x0
+; CHECK-NEXT:    add x9, x0, #128
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    sub x9, x10, #121
-; CHECK-NEXT:    subs x11, x10, #128
-; CHECK-NEXT:    add x12, x10, #7
-; CHECK-NEXT:    csel x9, x9, x11, mi
+; CHECK-NEXT:    subs x9, x1, x9
+; CHECK-NEXT:    add x10, x9, #7
+; CHECK-NEXT:    csel x9, x10, x9, mi
 ; CHECK-NEXT:    asr x11, x9, #3
+; CHECK-NEXT:    cset w9, ls
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    mov z2.d, z0.d
 ; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    cmp x11, #1
 ; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    cset w9, lt
-; CHECK-NEXT:    cmp x10, #0
-; CHECK-NEXT:    mov z6.d, z0.d
-; CHECK-NEXT:    csel x10, x12, x10, mi
-; CHECK-NEXT:    dup v7.2d, x11
+; CHECK-NEXT:    cmp x11, #1
+; CHECK-NEXT:    mov z7.d, z0.d
+; CHECK-NEXT:    mov z16.d, z0.d
+; CHECK-NEXT:    mov z17.d, z0.d
+; CHECK-NEXT:    cset w10, lt
+; CHECK-NEXT:    subs x12, x1, x0
+; CHECK-NEXT:    add x13, x12, #7
+; CHECK-NEXT:    dup v5.2d, x11
 ; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
-; CHECK-NEXT:    asr x10, x10, #3
+; CHECK-NEXT:    csel x12, x13, x12, mi
 ; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
 ; CHECK-NEXT:    add z3.d, z3.d, #8 // =0x8
 ; CHECK-NEXT:    add z4.d, z4.d, #6 // =0x6
-; CHECK-NEXT:    add z5.d, z5.d, #4 // =0x4
-; CHECK-NEXT:    add z6.d, z6.d, #2 // =0x2
-; CHECK-NEXT:    dup v16.2d, x10
-; CHECK-NEXT:    cmhi v17.2d, v7.2d, v0.2d
-; CHECK-NEXT:    cmhi v19.2d, v7.2d, v1.2d
-; CHECK-NEXT:    cmhi v20.2d, v7.2d, v2.2d
-; CHECK-NEXT:    cmhi v21.2d, v7.2d, v3.2d
-; CHECK-NEXT:    cmp x10, #1
-; CHECK-NEXT:    cmhi v22.2d, v7.2d, v4.2d
-; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    cmhi v18.2d, v16.2d, v0.2d
-; CHECK-NEXT:    add z0.d, z0.d, #14 // =0xe
-; CHECK-NEXT:    cmhi v1.2d, v16.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v16.2d, v2.2d
-; CHECK-NEXT:    cmhi v3.2d, v16.2d, v3.2d
-; CHECK-NEXT:    cmhi v4.2d, v16.2d, v4.2d
-; CHECK-NEXT:    cmhi v23.2d, v16.2d, v5.2d
-; CHECK-NEXT:    cmhi v24.2d, v16.2d, v6.2d
-; CHECK-NEXT:    cmhi v5.2d, v7.2d, v5.2d
-; CHECK-NEXT:    cmhi v16.2d, v16.2d, v0.2d
-; CHECK-NEXT:    cmhi v6.2d, v7.2d, v6.2d
-; CHECK-NEXT:    cmhi v0.2d, v7.2d, v0.2d
-; CHECK-NEXT:    uzp1 v7.4s, v21.4s, v20.4s
+; CHECK-NEXT:    add z7.d, z7.d, #4 // =0x4
+; CHECK-NEXT:    add z17.d, z17.d, #14 // =0xe
+; CHECK-NEXT:    add z16.d, z16.d, #2 // =0x2
+; CHECK-NEXT:    asr x12, x12, #3
+; CHECK-NEXT:    cmhi v18.2d, v5.2d, v0.2d
+; CHECK-NEXT:    cmhi v19.2d, v5.2d, v1.2d
+; CHECK-NEXT:    cmhi v20.2d, v5.2d, v2.2d
+; CHECK-NEXT:    cmhi v21.2d, v5.2d, v3.2d
+; CHECK-NEXT:    dup v6.2d, x12
+; CHECK-NEXT:    cmhi v22.2d, v5.2d, v4.2d
+; CHECK-NEXT:    cmhi v23.2d, v5.2d, v7.2d
+; CHECK-NEXT:    cmhi v24.2d, v5.2d, v17.2d
+; CHECK-NEXT:    cmhi v5.2d, v5.2d, v16.2d
+; CHECK-NEXT:    cmp x12, #1
+; CHECK-NEXT:    cmhi v0.2d, v6.2d, v0.2d
+; CHECK-NEXT:    cmhi v1.2d, v6.2d, v1.2d
+; CHECK-NEXT:    cmhi v2.2d, v6.2d, v2.2d
+; CHECK-NEXT:    cmhi v3.2d, v6.2d, v3.2d
+; CHECK-NEXT:    cmhi v4.2d, v6.2d, v4.2d
+; CHECK-NEXT:    cmhi v17.2d, v6.2d, v17.2d
+; CHECK-NEXT:    cmhi v7.2d, v6.2d, v7.2d
+; CHECK-NEXT:    cmhi v6.2d, v6.2d, v16.2d
+; CHECK-NEXT:    uzp1 v16.4s, v19.4s, v24.4s
+; CHECK-NEXT:    uzp1 v19.4s, v21.4s, v20.4s
+; CHECK-NEXT:    uzp1 v20.4s, v23.4s, v22.4s
+; CHECK-NEXT:    uzp1 v5.4s, v18.4s, v5.4s
+; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v17.4s
 ; CHECK-NEXT:    uzp1 v2.4s, v3.4s, v2.4s
-; CHECK-NEXT:    uzp1 v3.4s, v23.4s, v4.4s
-; CHECK-NEXT:    uzp1 v4.4s, v18.4s, v24.4s
-; CHECK-NEXT:    uzp1 v5.4s, v5.4s, v22.4s
-; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v16.4s
-; CHECK-NEXT:    uzp1 v6.4s, v17.4s, v6.4s
-; CHECK-NEXT:    uzp1 v0.4s, v19.4s, v0.4s
-; CHECK-NEXT:    uzp1 v3.8h, v4.8h, v3.8h
+; CHECK-NEXT:    uzp1 v3.4s, v7.4s, v4.4s
+; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v6.4s
+; CHECK-NEXT:    uzp1 v4.8h, v19.8h, v16.8h
+; CHECK-NEXT:    uzp1 v5.8h, v5.8h, v20.8h
 ; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT:    uzp1 v2.8h, v6.8h, v5.8h
-; CHECK-NEXT:    uzp1 v0.8h, v7.8h, v0.8h
-; CHECK-NEXT:    uzp1 v1.16b, v3.16b, v1.16b
-; CHECK-NEXT:    uzp1 v0.16b, v2.16b, v0.16b
+; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v3.8h
 ; CHECK-NEXT:    dup v3.16b, w10
-; CHECK-NEXT:    dup v2.16b, w9
+; CHECK-NEXT:    cset w10, lt
+; CHECK-NEXT:    uzp1 v2.16b, v5.16b, v4.16b
+; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    dup v1.16b, w10
+; CHECK-NEXT:    orr v2.16b, v2.16b, v3.16b
+; CHECK-NEXT:    dup v3.16b, w9
 ; CHECK-NEXT:    adrp x9, .LCPI18_0
-; CHECK-NEXT:    orr v1.16b, v1.16b, v3.16b
-; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    bic v1.16b, v2.16b, v3.16b
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI18_0]
-; CHECK-NEXT:    shl v1.16b, v1.16b, #7
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
-; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
+; CHECK-NEXT:    shl v1.16b, v1.16b, #7
 ; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
-; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
 ; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    ext v2.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ext v3.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    zip1 v1.16b, v1.16b, v2.16b
-; CHECK-NEXT:    zip1 v0.16b, v0.16b, v3.16b
-; CHECK-NEXT:    addv h1, v1.8h
+; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    zip1 v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    zip1 v1.16b, v1.16b, v3.16b
 ; CHECK-NEXT:    addv h0, v0.8h
-; CHECK-NEXT:    str h1, [x8]
-; CHECK-NEXT:    str h0, [x8, #2]
+; CHECK-NEXT:    addv h1, v1.8h
+; CHECK-NEXT:    str h0, [x8]
+; CHECK-NEXT:    str h1, [x8, #2]
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <32 x i1> @llvm.loop.dependence.war.mask.v32i1(ptr %a, ptr %b, i64 8)
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
index 3435ceca28e17..b9a9484a33e7b 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
@@ -84,9 +84,15 @@ entry:
 define <vscale x 32 x i1> @whilewr_8_split(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_8_split:
 ; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov x8, x0
+; CHECK-NEXT:    incb x8
+; CHECK-NEXT:    cmp x8, x1
+; CHECK-NEXT:    cset w9, hs
+; CHECK-NEXT:    whilewr p0.b, x8, x1
+; CHECK-NEXT:    sbfx x8, x9, #0, #1
+; CHECK-NEXT:    whilelo p1.b, xzr, x8
+; CHECK-NEXT:    bic p1.b, p0/z, p0.b, p1.b
 ; CHECK-NEXT:    whilewr p0.b, x0, x1
-; CHECK-NEXT:    incb x0
-; CHECK-NEXT:    whilewr p1.b, x0, x1
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 32 x i1> @llvm.loop.dependence.war.mask.nxv32i1(ptr %a, ptr %b, i64 1)
@@ -96,14 +102,40 @@ entry:
 define <vscale x 64 x i1> @whilewr_8_split2(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_8_split2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov x8, x0
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    mov x9, x0
+; CHECK-NEXT:    mov x10, x0
+; CHECK-NEXT:    rdvl x8, #3
+; CHECK-NEXT:    incb x9
+; CHECK-NEXT:    incb x10, all, mul #2
+; CHECK-NEXT:    add x8, x0, x8
+; CHECK-NEXT:    cmp x9, x1
+; CHECK-NEXT:    cset w11, hs
+; CHECK-NEXT:    whilewr p0.b, x9, x1
+; CHECK-NEXT:    sbfx x11, x11, #0, #1
+; CHECK-NEXT:    whilelo p1.b, xzr, x11
+; CHECK-NEXT:    cmp x10, x1
+; CHECK-NEXT:    cset w9, hs
+; CHECK-NEXT:    whilewr p2.b, x10, x1
+; CHECK-NEXT:    sbfx x9, x9, #0, #1
+; CHECK-NEXT:    bic p1.b, p0/z, p0.b, p1.b
+; CHECK-NEXT:    whilelo p3.b, xzr, x9
+; CHECK-NEXT:    cmp x8, x1
+; CHECK-NEXT:    cset w9, hs
+; CHECK-NEXT:    whilewr p4.b, x8, x1
+; CHECK-NEXT:    sbfx x9, x9, #0, #1
+; CHECK-NEXT:    bic p2.b, p2/z, p2.b, p3.b
+; CHECK-NEXT:    whilelo p0.b, xzr, x9
+; CHECK-NEXT:    bic p4.b, p4/z, p4.b, p0.b
 ; CHECK-NEXT:    whilewr p0.b, x0, x1
-; CHECK-NEXT:    addvl x9, x0, #3
-; CHECK-NEXT:    incb x0, all, mul #2
-; CHECK-NEXT:    incb x8
-; CHECK-NEXT:    whilewr p3.b, x9, x1
-; CHECK-NEXT:    whilewr p2.b, x0, x1
-; CHECK-NEXT:    whilewr p1.b, x8, x1
+; CHECK-NEXT:    bic p3.b, p4/z, p4.b, p3.b
+; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 64 x i1> @llvm.loop.dependence.war.mask.nxv64i1(ptr %a, ptr %b, i64 1)
@@ -176,6 +208,8 @@ define <vscale x 32 x i1> @whilewr_16_expand2(ptr %a, ptr %b) {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p11, [sp] // 2-byte Spill
+; CHECK-NEXT:    str p10, [sp, #1, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
@@ -186,75 +220,81 @@ define <vscale x 32 x i1> @whilewr_16_expand2(ptr %a, ptr %b) {
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    incb x0, all, mul #2
-; CHECK-NEXT:    add x8, x8, x8, lsr #63
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    add x8, x8, x8, lsr #63
+; CHECK-NEXT:    incb x0, all, mul #2
 ; CHECK-NEXT:    asr x8, x8, #1
-; CHECK-NEXT:    sub x9, x1, x0
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z0.d
+; CHECK-NEXT:    mov z4.d, z0.d
 ; CHECK-NEXT:    mov z5.d, x8
-; CHECK-NEXT:    add x9, x9, x9, lsr #63
 ; CHECK-NEXT:    incd z1.d
 ; CHECK-NEXT:    incd z2.d, all, mul #2
-; CHECK-NEXT:    incd z3.d, all, mul #4
-; CHECK-NEXT:    cmphi p2.d, p0/z, z5.d, z0.d
-; CHECK-NEXT:    asr x9, x9, #1
-; CHECK-NEXT:    mov z4.d, z1.d
-; CHECK-NEXT:    mov z6.d, z1.d
-; CHECK-NEXT:    mov z7.d, z2.d
-; CHECK-NEXT:    cmphi p1.d, p0/z, z5.d, z1.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z3.d
+; CHECK-NEXT:    incd z4.d, all, mul #4
+; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z0.d
+; CHECK-NEXT:    mov z3.d, z1.d
+; CHECK-NEXT:    mov z6.d, z2.d
+; CHECK-NEXT:    mov z7.d, z1.d
+; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z4.d
 ; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z2.d
-; CHECK-NEXT:    incd z4.d, all, mul #2
+; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z1.d
+; CHECK-NEXT:    incd z3.d, all, mul #2
 ; CHECK-NEXT:    incd z6.d, all, mul #4
 ; CHECK-NEXT:    incd z7.d, all, mul #4
-; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
-; CHECK-NEXT:    mov z24.d, z4.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z7.d
+; CHECK-NEXT:    uzp1 p6.s, p7.s, p6.s
+; CHECK-NEXT:    mov z24.d, z3.d
+; CHECK-NEXT:    cmphi p2.d, p0/z, z5.d, z6.d
+; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z7.d
+; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z3.d
 ; CHECK-NEXT:    incd z24.d, all, mul #4
-; CHECK-NEXT:    uzp1 p2.s, p3.s, p4.s
-; CHECK-NEXT:    uzp1 p3.s, p5.s, p6.s
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z24.d
-; CHECK-NEXT:    mov z5.d, x9
+; CHECK-NEXT:    uzp1 p3.s, p3.s, p8.s
+; CHECK-NEXT:    uzp1 p5.s, p5.s, p9.s
+; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z24.d
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
+; CHECK-NEXT:    uzp1 p5.h, p6.h, p5.h
 ; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z24.d
-; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    uzp1 p7.s, p7.s, p8.s
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z3.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z2.d
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p2.h, p2.h, p7.h
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z1.d
+; CHECK-NEXT:    uzp1 p2.s, p2.s, p4.s
+; CHECK-NEXT:    whilelo p1.b, xzr, x8
+; CHECK-NEXT:    subs x8, x1, x0
+; CHECK-NEXT:    uzp1 p2.h, p3.h, p2.h
+; CHECK-NEXT:    add x8, x8, x8, lsr #63
+; CHECK-NEXT:    cset w9, ls
+; CHECK-NEXT:    uzp1 p2.b, p5.b, p2.b
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
+; CHECK-NEXT:    asr x8, x8, #1
+; CHECK-NEXT:    mov z5.d, x8
+; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z24.d
+; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z6.d
+; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z7.d
+; CHECK-NEXT:    cmphi p10.d, p0/z, z5.d, z4.d
+; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z3.d
+; CHECK-NEXT:    cmphi p11.d, p0/z, z5.d, z2.d
+; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z1.d
 ; CHECK-NEXT:    cmphi p0.d, p0/z, z5.d, z0.d
-; CHECK-NEXT:    uzp1 p4.s, p5.s, p4.s
-; CHECK-NEXT:    uzp1 p5.s, p9.s, p6.s
+; CHECK-NEXT:    cmp x8, #1
+; CHECK-NEXT:    uzp1 p7.s, p9.s, p7.s
+; CHECK-NEXT:    cset w8, lt
 ; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Reload
-; CHECK-NEXT:    whilelo p6.b, xzr, x8
-; CHECK-NEXT:    uzp1 p3.s, p8.s, p3.s
-; CHECK-NEXT:    cmp x9, #1
+; CHECK-NEXT:    uzp1 p8.s, p10.s, p8.s
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    ldr p10, [sp, #1, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p6.s, p11.s, p6.s
+; CHECK-NEXT:    ldr p11, [sp] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p0.s, p0.s, p4.s
+; CHECK-NEXT:    uzp1 p4.h, p8.h, p7.h
 ; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.s, p0.s, p7.s
-; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    uzp1 p0.h, p0.h, p6.h
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p4.h, p5.h, p4.h
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.h, p0.h, p3.h
-; CHECK-NEXT:    uzp1 p1.b, p1.b, p2.b
-; CHECK-NEXT:    uzp1 p2.b, p0.b, p4.b
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    whilelo p3.b, xzr, x8
-; CHECK-NEXT:    sel p0.b, p1, p1.b, p6.b
+; CHECK-NEXT:    sbfx x8, x9, #0, #1
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT:    sel p1.b, p2, p2.b, p3.b
+; CHECK-NEXT:    uzp1 p0.b, p0.b, p4.b
+; CHECK-NEXT:    whilelo p4.b, xzr, x8
+; CHECK-NEXT:    mov p3.b, p0/m, p0.b
+; CHECK-NEXT:    sel p0.b, p2, p2.b, p1.b
+; CHECK-NEXT:    bic p1.b, p3/z, p3.b, p4.b
+; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -364,6 +404,7 @@ define <vscale x 32 x i1> @whilewr_32_expand3(ptr %a, ptr %b) {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p11, [sp] // 2-byte Spill
 ; CHECK-NEXT:    str p10, [sp, #1, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Spill
@@ -387,65 +428,70 @@ define <vscale x 32 x i1> @whilewr_32_expand3(ptr %a, ptr %b) {
 ; CHECK-NEXT:    incd z1.d
 ; CHECK-NEXT:    incd z2.d, all, mul #2
 ; CHECK-NEXT:    incd z4.d, all, mul #4
-; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z0.d
+; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z0.d
 ; CHECK-NEXT:    mov z3.d, z1.d
 ; CHECK-NEXT:    mov z6.d, z2.d
 ; CHECK-NEXT:    mov z7.d, z1.d
-; CHECK-NEXT:    cmphi p2.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z2.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z1.d
+; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z4.d
+; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z2.d
+; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z1.d
 ; CHECK-NEXT:    incd z3.d, all, mul #2
 ; CHECK-NEXT:    incd z6.d, all, mul #4
 ; CHECK-NEXT:    incd z7.d, all, mul #4
-; CHECK-NEXT:    uzp1 p4.s, p5.s, p4.s
+; CHECK-NEXT:    uzp1 p6.s, p7.s, p6.s
 ; CHECK-NEXT:    mov z24.d, z3.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z3.d
+; CHECK-NEXT:    cmphi p2.d, p0/z, z5.d, z6.d
+; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z7.d
+; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z3.d
 ; CHECK-NEXT:    incd z24.d, all, mul #4
-; CHECK-NEXT:    uzp1 p2.s, p2.s, p7.s
 ; CHECK-NEXT:    uzp1 p3.s, p3.s, p8.s
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z24.d
+; CHECK-NEXT:    uzp1 p5.s, p5.s, p9.s
+; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z24.d
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p3.h, p4.h, p3.h
+; CHECK-NEXT:    uzp1 p5.h, p6.h, p5.h
 ; CHECK-NEXT:    cset w8, lt
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p6.s, p6.s, p9.s
+; CHECK-NEXT:    uzp1 p2.s, p2.s, p4.s
 ; CHECK-NEXT:    whilelo p1.b, xzr, x8
 ; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    uzp1 p2.h, p2.h, p6.h
+; CHECK-NEXT:    uzp1 p2.h, p3.h, p2.h
 ; CHECK-NEXT:    add x9, x8, #3
 ; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    uzp1 p2.b, p3.b, p2.b
+; CHECK-NEXT:    cset w9, ls
+; CHECK-NEXT:    uzp1 p2.b, p5.b, p2.b
 ; CHECK-NEXT:    asr x8, x8, #2
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    mov z5.d, x8
-; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z24.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z6.d
+; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z24.d
+; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z6.d
 ; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z3.d
-; CHECK-NEXT:    cmphi p10.d, p0/z, z5.d, z2.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z1.d
+; CHECK-NEXT:    cmphi p10.d, p0/z, z5.d, z4.d
+; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z3.d
+; CHECK-NEXT:    cmphi p11.d, p0/z, z5.d, z2.d
+; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z1.d
 ; CHECK-NEXT:    cmphi p0.d, p0/z, z5.d, z0.d
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p5.s, p7.s, p5.s
+; CHECK-NEXT:    uzp1 p7.s, p9.s, p7.s
 ; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    uzp1 p7.s, p9.s, p8.s
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
 ; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p4.s, p10.s, p4.s
+; CHECK-NEXT:    uzp1 p8.s, p10.s, p8.s
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
 ; CHECK-NEXT:    ldr p10, [sp, #1, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.s, p0.s, p6.s
+; CHECK-NEXT:    uzp1 p6.s, p11.s, p6.s
+; CHECK-NEXT:    ldr p11, [sp] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p0.s, p0.s, p4.s
+; CHECK-NEXT:    uzp1 p4.h, p8.h, p7.h
 ; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p5.h, p7.h, p5.h
+; CHECK-NEXT:    uzp1 p0.h, p0.h, p6.h
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.h, p0.h, p4.h
+; CHECK-NEXT:    whilelo p3.b, xzr, x8
+; CHECK-NEXT:    sbfx x8, x9, #0, #1
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p0.b, p0.b, p4.b
 ; CHECK-NEXT:    whilelo p4.b, xzr, x8
-; CHECK-NEXT:    uzp1 p3.b, p0.b, p5.b
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
+; CHECK-NEXT:    mov p3.b, p0/m, p0.b
 ; CHECK-NEXT:    sel p0.b, p2, p2.b, p1.b
-; CHECK-NEXT:    sel p1.b, p3, p3.b, p4.b
+; CHECK-NEXT:    bic p1.b, p3/z, p3.b, p4.b
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -582,6 +628,7 @@ define <vscale x 32 x i1> @whilewr_64_expand4(ptr %a, ptr %b) {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p11, [sp] // 2-byte Spill
 ; CHECK-NEXT:    str p10, [sp, #1, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Spill
@@ -605,65 +652,70 @@ define <vscale x 32 x i1> @whilewr_64_expand4(ptr %a, ptr %b) {
 ; CHECK-NEXT:    incd z1.d
 ; CHECK-NEXT:    incd z2.d, all, mul #2
 ; CHECK-NEXT:    incd z4.d, all, mul #4
-; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z0.d
+; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z0.d
 ; CHECK-NEXT:    mov z3.d, z1.d
 ; CHECK-NEXT:    mov z6.d, z2.d
 ; CHECK-NEXT:    mov z7.d, z1.d
-; CHECK-NEXT:    cmphi p2.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z2.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z1.d
+; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z4.d
+; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z2.d
+; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z1.d
 ; CHECK-NEXT:    incd z3.d, all, mul #2
 ; CHECK-NEXT:    incd z6.d, all, mul #4
 ; CHECK-NEXT:    incd z7.d, all, mul #4
-; CHECK-NEXT:    uzp1 p4.s, p5.s, p4.s
+; CHECK-NEXT:    uzp1 p6.s, p7.s, p6.s
 ; CHECK-NEXT:    mov z24.d, z3.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z3.d
+; CHECK-NEXT:    cmphi p2.d, p0/z, z5.d, z6.d
+; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z7.d
+; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z3.d
 ; CHECK-NEXT:    incd z24.d, all, mul #4
-; CHECK-NEXT:    uzp1 p2.s, p2.s, p7.s
 ; CHECK-NEXT:    uzp1 p3.s, p3.s, p8.s
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z24.d
+; CHECK-NEXT:    uzp1 p5.s, p5.s, p9.s
+; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z24.d
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p3.h, p4.h, p3.h
+; CHECK-NEXT:    uzp1 p5.h, p6.h, p5.h
 ; CHECK-NEXT:    cset w8, lt
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p6.s, p6.s, p9.s
+; CHECK-NEXT:    uzp1 p2.s, p2.s, p4.s
 ; CHECK-NEXT:    whilelo p1.b, xzr, x8
 ; CHECK-NEXT:    subs x8, x1, x9
-; CHECK-NEXT:    uzp1 p2.h, p2.h, p6.h
+; CHECK-NEXT:    uzp1 p2.h, p3.h, p2.h
 ; CHECK-NEXT:    add x9, x8, #7
 ; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    uzp1 p2.b, p3.b, p2.b
+; CHECK-NEXT:    cset w9, ls
+; CHECK-NEXT:    uzp1 p2.b, p5.b, p2.b
 ; CHECK-NEXT:    asr x8, x8, #3
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    mov z5.d, x8
-; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z24.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z6.d
+; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z24.d
+; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z6.d
 ; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z3.d
-; CHECK-NEXT:    cmphi p10.d, p0/z, z5.d, z2.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z1.d
+; CHECK-NEXT:    cmphi p10.d, p0/z, z5.d, z4.d
+; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z3.d
+; CHECK-NEXT:    cmphi p11.d, p0/z, z5.d, z2.d
+; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z1.d
 ; CHECK-NEXT:    cmphi p0.d, p0/z, z5.d, z0.d
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p5.s, p7.s, p5.s
+; CHECK-NEXT:    uzp1 p7.s, p9.s, p7.s
 ; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    uzp1 p7.s, p9.s, p8.s
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
 ; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p4.s, p10.s, p4.s
+; CHECK-NEXT:    uzp1 p8.s, p10.s, p8.s
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
 ; CHECK-NEXT:    ldr p10, [sp, #1, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.s, p0.s, p6.s
+; CHECK-NEXT:    uzp1 p6.s, p11.s, p6.s
+; CHECK-NEXT:    ldr p11, [sp] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p0.s, p0.s, p4.s
+; CHECK-NEXT:    uzp1 p4.h, p8.h, p7.h
 ; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p5.h, p7.h, p5.h
+; CHECK-NEXT:    uzp1 p0.h, p0.h, p6.h
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.h, p0.h, p4.h
+; CHECK-NEXT:    whilelo p3.b, xzr, x8
+; CHECK-NEXT:    sbfx x8, x9, #0, #1
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p0.b, p0.b, p4.b
 ; CHECK-NEXT:    whilelo p4.b, xzr, x8
-; CHECK-NEXT:    uzp1 p3.b, p0.b, p5.b
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
+; CHECK-NEXT:    mov p3.b, p0/m, p0.b
 ; CHECK-NEXT:    sel p0.b, p2, p2.b, p1.b
-; CHECK-NEXT:    sel p1.b, p3, p3.b, p4.b
+; CHECK-NEXT:    bic p1.b, p3/z, p3.b, p4.b
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload

From f6f9548458f9cb17265195fc639b5a5b2a40ea2d Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Tue, 25 Nov 2025 11:39:10 +0000
Subject: [PATCH 2/3] Add lane offset operand instead of splitting in legaliser

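The previous approach added a select in the type legaliser to guard the
high mask. This patch instead gives the LOOP_DEPENDENCE_{WAR,RAW}_MASK
nodes a fourth operand holding a constant lane offset (zero when the
nodes are created from the intrinsics). Splitting hands the low half the
incoming offset and the high half that offset plus the low type's
minimum lane count, and the generic expansion advances the source
pointer by offset * element size (scaled by vscale for scalable types)
before computing the pointer difference. The AArch64 lowering returns
SDValue() for non-zero lane offsets for now (a TODO in the patch), so
split masks take the expansion path.

As a rough sketch of how the recursion distributes offsets (illustrative
names, assuming repeated halving down to a legal lane count):

  #include <cstdio>
  #include <vector>

  // Mirror SplitVecRes_LOOP_DEPENDENCE_MASK: Lo keeps the current
  // offset, Hi gets offset + the low half's lane count.
  static void splitOffsets(unsigned Offset, unsigned Lanes,
                           unsigned LegalLanes,
                           std::vector<unsigned> &Out) {
    if (Lanes <= LegalLanes) {
      Out.push_back(Offset);
      return;
    }
    splitOffsets(Offset, Lanes / 2, LegalLanes, Out);
    splitOffsets(Offset + Lanes / 2, Lanes / 2, LegalLanes, Out);
  }

  int main() {
    std::vector<unsigned> Out;
    splitOffsets(0, 64, 16, Out); // e.g. a <64 x i1> mask, legal at 16
    for (unsigned O : Out)
      printf("%u ", O); // prints: 0 16 32 48
    printf("\n");
  }

With an element size of one byte those lane offsets correspond to the
0/16/32/48 byte adjustments visible in the whilewr_8_split2 test.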
---
 .../SelectionDAG/LegalizeVectorOps.cpp        |   6 +
 .../SelectionDAG/LegalizeVectorTypes.cpp      |  31 +-
 .../SelectionDAG/SelectionDAGBuilder.cpp      |   6 +-
 .../Target/AArch64/AArch64ISelLowering.cpp    |  36 +-
 llvm/test/CodeGen/AArch64/alias_mask.ll       | 544 ++++++++++--------
 .../CodeGen/AArch64/alias_mask_scalable.ll    | 412 ++++++++-----
 6 files changed, 611 insertions(+), 424 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index e8d9bce43f6ea..8b9799002675a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -1820,6 +1820,12 @@ SDValue VectorLegalizer::ExpandLOOP_DEPENDENCE_MASK(SDNode *N) {
   EVT VT = N->getValueType(0);
   EVT PtrVT = SourceValue->getValueType(0);
 
+  SDValue Offset = N->getOperand(3);
+  if (VT.isScalableVT())
+    Offset = DAG.getVScale(DL, PtrVT, N->getConstantOperandAPInt(3));
+
+  SourceValue = DAG.getNode(ISD::ADD, DL, PtrVT, SourceValue,
+                            DAG.getNode(ISD::MUL, DL, PtrVT, EltSize, Offset));
   SDValue Diff = DAG.getNode(ISD::SUB, DL, PtrVT, SinkValue, SourceValue);
   if (IsReadAfterWrite)
     Diff = DAG.getNode(ISD::ABS, DL, PtrVT, Diff);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index c8d66cc21244f..bcfb32b6d09f7 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -407,9 +407,16 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_LOOP_DEPENDENCE_MASK(SDNode *N) {
   SDValue SourceValue = N->getOperand(0);
   SDValue SinkValue = N->getOperand(1);
   SDValue EltSize = N->getOperand(2);
+  SDValue Offset = N->getOperand(3);
   EVT PtrVT = SourceValue->getValueType(0);
   SDLoc DL(N);
 
+  // Increment the source pointer by the lane offset multiplied by the element
+  // size. A non-zero offset is normally used when a larger-than-legal mask has
+  // been split.
+  Offset = DAG.getNode(ISD::MUL, DL, PtrVT, Offset, EltSize);
+  SourceValue = DAG.getNode(ISD::ADD, DL, PtrVT, SourceValue, Offset);
+
   SDValue Diff = DAG.getNode(ISD::SUB, DL, PtrVT, SinkValue, SourceValue);
   EVT CmpVT = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                                      Diff.getValueType());
@@ -1711,23 +1718,13 @@ void DAGTypeLegalizer::SplitVecRes_LOOP_DEPENDENCE_MASK(SDNode *N, SDValue &Lo,
   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
   SDValue PtrA = N->getOperand(0);
   SDValue PtrB = N->getOperand(1);
-  Lo = DAG.getNode(N->getOpcode(), DL, LoVT, PtrA, PtrB, N->getOperand(2));
-
-  unsigned EltSize = N->getConstantOperandVal(2);
-  unsigned Offset = EltSize * HiVT.getVectorMinNumElements();
-  SDValue Addend = HiVT.isScalableVT()
-                       ? DAG.getVScale(DL, MVT::i64, APInt(64, Offset))
-                       : DAG.getConstant(Offset, DL, MVT::i64);
 
-  PtrA = DAG.getNode(ISD::ADD, DL, MVT::i64, PtrA, Addend);
-  EVT CmpVT = MVT::i1;
-  SDValue Cmp = DAG.getSetCC(DL, CmpVT, PtrA, PtrB, ISD::CondCode::SETUGE);
-  Cmp = DAG.getSplat(EVT::getVectorVT(*DAG.getContext(), CmpVT,
-                                      HiVT.getVectorMinNumElements(),
-                                      HiVT.isScalableVT()),
-                     DL, Cmp);
-  Hi = DAG.getNode(N->getOpcode(), DL, HiVT, PtrA, PtrB, N->getOperand(2));
-  Hi = DAG.getSelect(DL, HiVT, Cmp, DAG.getConstant(0, DL, HiVT), Hi);
+  Lo = DAG.getNode(N->getOpcode(), DL, LoVT, PtrA, PtrB, N->getOperand(2),
+                   N->getOperand(3));
+  unsigned Offset =
+      N->getConstantOperandVal(3) + LoVT.getVectorMinNumElements();
+  Hi = DAG.getNode(N->getOpcode(), DL, HiVT, PtrA, PtrB, N->getOperand(2),
+                   DAG.getConstant(Offset, DL, MVT::i64));
 }
 
 void DAGTypeLegalizer::SplitVecRes_BUILD_VECTOR(SDNode *N, SDValue &Lo,
@@ -6071,7 +6068,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_LOOP_DEPENDENCE_MASK(SDNode *N) {
   return DAG.getNode(
       N->getOpcode(), SDLoc(N),
       TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)),
-      N->getOperand(0), N->getOperand(1), N->getOperand(2));
+      N->getOperand(0), N->getOperand(1), N->getOperand(2), N->getOperand(3));
 }
 
 SDValue DAGTypeLegalizer::WidenVecRes_BUILD_VECTOR(SDNode *N) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 4f13f3b128ea4..f9fab349200d1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -8391,13 +8391,15 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
     setValue(&I,
              DAG.getNode(ISD::LOOP_DEPENDENCE_WAR_MASK, sdl,
                          EVT::getEVT(I.getType()), getValue(I.getOperand(0)),
-                         getValue(I.getOperand(1)), getValue(I.getOperand(2))));
+                         getValue(I.getOperand(1)), getValue(I.getOperand(2)),
+                         DAG.getConstant(0, sdl, MVT::i64)));
     return;
   case Intrinsic::loop_dependence_raw_mask:
     setValue(&I,
              DAG.getNode(ISD::LOOP_DEPENDENCE_RAW_MASK, sdl,
                          EVT::getEVT(I.getType()), getValue(I.getOperand(0)),
-                         getValue(I.getOperand(1)), getValue(I.getOperand(2))));
+                         getValue(I.getOperand(1)), getValue(I.getOperand(2)),
+                         DAG.getConstant(0, sdl, MVT::i64)));
     return;
   }
 }
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 35836af3c874b..0abf6560ad1e1 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5387,11 +5387,17 @@ AArch64TargetLowering::LowerLOOP_DEPENDENCE_MASK(SDValue Op,
     return SDValue();
   }
 
+  // TODO: Support split masks
+  unsigned LaneOffset = Op.getConstantOperandVal(3);
+  if (LaneOffset != 0)
+    return SDValue();
+
   SDValue PtrA = Op.getOperand(0);
   SDValue PtrB = Op.getOperand(1);
 
   if (VT.isScalableVT())
-    return DAG.getNode(Op.getOpcode(), DL, VT, PtrA, PtrB, Op.getOperand(2));
+    return DAG.getNode(Op.getOpcode(), DL, VT, PtrA, PtrB, Op.getOperand(2),
+                       Op.getOperand(3));
 
   // We can use the SVE whilewr/whilerw instruction to lower this
   // intrinsic by creating the appropriate sequence of scalable vector
@@ -5402,8 +5408,8 @@ AArch64TargetLowering::LowerLOOP_DEPENDENCE_MASK(SDValue Op,
                        VT.getVectorNumElements(), true);
   EVT WhileVT = ContainerVT.changeElementType(MVT::i1);
 
-  SDValue Mask =
-      DAG.getNode(Op.getOpcode(), DL, WhileVT, PtrA, PtrB, Op.getOperand(2));
+  SDValue Mask = DAG.getNode(Op.getOpcode(), DL, WhileVT, PtrA, PtrB,
+                             Op.getOperand(2), Op.getOperand(3));
   SDValue MaskAsInt = DAG.getNode(ISD::SIGN_EXTEND, DL, ContainerVT, Mask);
   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, MaskAsInt,
                      DAG.getVectorIdxConstant(0, DL));
@@ -6172,35 +6178,43 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
   case Intrinsic::aarch64_sve_whilewr_b:
     return DAG.getNode(ISD::LOOP_DEPENDENCE_WAR_MASK, DL, Op.getValueType(),
                        Op.getOperand(1), Op.getOperand(2),
-                       DAG.getConstant(1, DL, MVT::i64));
+                       DAG.getConstant(1, DL, MVT::i64),
+                       DAG.getConstant(0, DL, MVT::i64));
   case Intrinsic::aarch64_sve_whilewr_h:
     return DAG.getNode(ISD::LOOP_DEPENDENCE_WAR_MASK, DL, Op.getValueType(),
                        Op.getOperand(1), Op.getOperand(2),
-                       DAG.getConstant(2, DL, MVT::i64));
+                       DAG.getConstant(2, DL, MVT::i64),
+                       DAG.getConstant(0, DL, MVT::i64));
   case Intrinsic::aarch64_sve_whilewr_s:
     return DAG.getNode(ISD::LOOP_DEPENDENCE_WAR_MASK, DL, Op.getValueType(),
                        Op.getOperand(1), Op.getOperand(2),
-                       DAG.getConstant(4, DL, MVT::i64));
+                       DAG.getConstant(4, DL, MVT::i64),
+                       DAG.getConstant(0, DL, MVT::i64));
   case Intrinsic::aarch64_sve_whilewr_d:
     return DAG.getNode(ISD::LOOP_DEPENDENCE_WAR_MASK, DL, Op.getValueType(),
                        Op.getOperand(1), Op.getOperand(2),
-                       DAG.getConstant(8, DL, MVT::i64));
+                       DAG.getConstant(8, DL, MVT::i64),
+                       DAG.getConstant(0, DL, MVT::i64));
   case Intrinsic::aarch64_sve_whilerw_b:
     return DAG.getNode(ISD::LOOP_DEPENDENCE_RAW_MASK, DL, Op.getValueType(),
                        Op.getOperand(1), Op.getOperand(2),
-                       DAG.getConstant(1, DL, MVT::i64));
+                       DAG.getConstant(1, DL, MVT::i64),
+                       DAG.getConstant(0, DL, MVT::i64));
   case Intrinsic::aarch64_sve_whilerw_h:
     return DAG.getNode(ISD::LOOP_DEPENDENCE_RAW_MASK, DL, Op.getValueType(),
                        Op.getOperand(1), Op.getOperand(2),
-                       DAG.getConstant(2, DL, MVT::i64));
+                       DAG.getConstant(2, DL, MVT::i64),
+                       DAG.getConstant(0, DL, MVT::i64));
   case Intrinsic::aarch64_sve_whilerw_s:
     return DAG.getNode(ISD::LOOP_DEPENDENCE_RAW_MASK, DL, Op.getValueType(),
                        Op.getOperand(1), Op.getOperand(2),
-                       DAG.getConstant(4, DL, MVT::i64));
+                       DAG.getConstant(4, DL, MVT::i64),
+                       DAG.getConstant(0, DL, MVT::i64));
   case Intrinsic::aarch64_sve_whilerw_d:
     return DAG.getNode(ISD::LOOP_DEPENDENCE_RAW_MASK, DL, Op.getValueType(),
                        Op.getOperand(1), Op.getOperand(2),
-                       DAG.getConstant(8, DL, MVT::i64));
+                       DAG.getConstant(8, DL, MVT::i64),
+                       DAG.getConstant(0, DL, MVT::i64));
   case Intrinsic::aarch64_neon_abs: {
     EVT Ty = Op.getValueType();
     if (Ty == MVT::i64) {
diff --git a/llvm/test/CodeGen/AArch64/alias_mask.ll b/llvm/test/CodeGen/AArch64/alias_mask.ll
index da14e17bf2463..1ec6eeded90cd 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask.ll
@@ -100,16 +100,46 @@ entry:
 define <32 x i1> @whilewr_8_split(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_8_split:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    add x9, x0, #16
-; CHECK-NEXT:    cmp x9, x1
-; CHECK-NEXT:    cset w10, hs
-; CHECK-NEXT:    whilewr p0.b, x9, x1
-; CHECK-NEXT:    adrp x9, .LCPI8_0
-; CHECK-NEXT:    dup v0.16b, w10
-; CHECK-NEXT:    mov z1.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    index z0.d, #0, #1
+; CHECK-NEXT:    sub x9, x1, x0
+; CHECK-NEXT:    sub x9, x9, #16
+; CHECK-NEXT:    dup v1.2d, x9
+; CHECK-NEXT:    cmp x9, #1
+; CHECK-NEXT:    cset w9, lt
 ; CHECK-NEXT:    whilewr p0.b, x0, x1
+; CHECK-NEXT:    mov z2.d, z0.d
+; CHECK-NEXT:    mov z3.d, z0.d
+; CHECK-NEXT:    mov z4.d, z0.d
+; CHECK-NEXT:    mov z5.d, z0.d
+; CHECK-NEXT:    mov z6.d, z0.d
+; CHECK-NEXT:    mov z7.d, z0.d
+; CHECK-NEXT:    mov z16.d, z0.d
+; CHECK-NEXT:    cmhi v0.2d, v1.2d, v0.2d
+; CHECK-NEXT:    add z2.d, z2.d, #12 // =0xc
+; CHECK-NEXT:    add z3.d, z3.d, #10 // =0xa
+; CHECK-NEXT:    add z4.d, z4.d, #8 // =0x8
+; CHECK-NEXT:    add z5.d, z5.d, #6 // =0x6
+; CHECK-NEXT:    add z6.d, z6.d, #4 // =0x4
+; CHECK-NEXT:    add z7.d, z7.d, #2 // =0x2
+; CHECK-NEXT:    add z16.d, z16.d, #14 // =0xe
+; CHECK-NEXT:    cmhi v2.2d, v1.2d, v2.2d
+; CHECK-NEXT:    cmhi v3.2d, v1.2d, v3.2d
+; CHECK-NEXT:    cmhi v4.2d, v1.2d, v4.2d
+; CHECK-NEXT:    cmhi v5.2d, v1.2d, v5.2d
+; CHECK-NEXT:    cmhi v6.2d, v1.2d, v6.2d
+; CHECK-NEXT:    cmhi v16.2d, v1.2d, v16.2d
+; CHECK-NEXT:    cmhi v1.2d, v1.2d, v7.2d
+; CHECK-NEXT:    uzp1 v3.4s, v4.4s, v3.4s
+; CHECK-NEXT:    uzp1 v4.4s, v6.4s, v5.4s
+; CHECK-NEXT:    uzp1 v2.4s, v2.4s, v16.4s
+; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    uzp1 v1.8h, v3.8h, v2.8h
+; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v4.8h
 ; CHECK-NEXT:    mov z2.b, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    bic v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    dup v1.16b, w9
+; CHECK-NEXT:    adrp x9, .LCPI8_0
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    shl v1.16b, v2.16b, #7
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI8_0]
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
@@ -134,62 +164,117 @@ entry:
 define <64 x i1> @whilewr_8_split2(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_8_split2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    add x9, x0, #48
-; CHECK-NEXT:    add x10, x0, #16
-; CHECK-NEXT:    whilewr p0.b, x9, x1
-; CHECK-NEXT:    cmp x9, x1
-; CHECK-NEXT:    cset w9, hs
-; CHECK-NEXT:    mov z1.b, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    dup v0.16b, w9
-; CHECK-NEXT:    add x9, x0, #32
-; CHECK-NEXT:    cmp x9, x1
-; CHECK-NEXT:    cset w11, hs
-; CHECK-NEXT:    cmp x10, x1
-; CHECK-NEXT:    shl v0.16b, v0.16b, #7
-; CHECK-NEXT:    cset w12, hs
-; CHECK-NEXT:    whilewr p1.b, x9, x1
-; CHECK-NEXT:    whilewr p0.b, x10, x1
-; CHECK-NEXT:    dup v2.16b, w11
-; CHECK-NEXT:    dup v5.16b, w12
-; CHECK-NEXT:    mov z3.b, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    whilewr p1.b, x0, x1
+; CHECK-NEXT:    index z0.d, #0, #1
+; CHECK-NEXT:    sub x11, x1, x0
+; CHECK-NEXT:    sub x9, x11, #16
+; CHECK-NEXT:    sub x10, x11, #32
+; CHECK-NEXT:    sub x11, x11, #48
+; CHECK-NEXT:    dup v1.2d, x9
+; CHECK-NEXT:    dup v2.2d, x10
+; CHECK-NEXT:    dup v17.2d, x11
+; CHECK-NEXT:    cmp x9, #1
+; CHECK-NEXT:    mov z3.d, z0.d
+; CHECK-NEXT:    mov z4.d, z0.d
+; CHECK-NEXT:    mov z5.d, z0.d
+; CHECK-NEXT:    mov z18.d, z0.d
+; CHECK-NEXT:    mov z6.d, z0.d
+; CHECK-NEXT:    mov z7.d, z0.d
+; CHECK-NEXT:    mov z16.d, z0.d
+; CHECK-NEXT:    cmhi v19.2d, v1.2d, v0.2d
+; CHECK-NEXT:    cmhi v20.2d, v2.2d, v0.2d
+; CHECK-NEXT:    add z3.d, z3.d, #12 // =0xc
+; CHECK-NEXT:    add z4.d, z4.d, #10 // =0xa
+; CHECK-NEXT:    add z5.d, z5.d, #8 // =0x8
+; CHECK-NEXT:    add z18.d, z18.d, #14 // =0xe
+; CHECK-NEXT:    add z6.d, z6.d, #6 // =0x6
+; CHECK-NEXT:    add z7.d, z7.d, #4 // =0x4
+; CHECK-NEXT:    add z16.d, z16.d, #2 // =0x2
+; CHECK-NEXT:    cmhi v0.2d, v17.2d, v0.2d
+; CHECK-NEXT:    cset w9, lt
+; CHECK-NEXT:    cmhi v21.2d, v1.2d, v3.2d
+; CHECK-NEXT:    cmhi v22.2d, v1.2d, v4.2d
+; CHECK-NEXT:    cmp x10, #1
+; CHECK-NEXT:    cmhi v23.2d, v1.2d, v5.2d
+; CHECK-NEXT:    cmhi v26.2d, v1.2d, v18.2d
+; CHECK-NEXT:    cset w10, lt
+; CHECK-NEXT:    cmhi v24.2d, v1.2d, v6.2d
+; CHECK-NEXT:    cmhi v25.2d, v1.2d, v7.2d
+; CHECK-NEXT:    cmp x11, #1
+; CHECK-NEXT:    cmhi v27.2d, v2.2d, v3.2d
+; CHECK-NEXT:    cmhi v28.2d, v2.2d, v18.2d
+; CHECK-NEXT:    cset w11, lt
+; CHECK-NEXT:    cmhi v29.2d, v2.2d, v4.2d
+; CHECK-NEXT:    cmhi v30.2d, v2.2d, v5.2d
+; CHECK-NEXT:    uzp1 v22.4s, v23.4s, v22.4s
+; CHECK-NEXT:    cmhi v18.2d, v17.2d, v18.2d
+; CHECK-NEXT:    cmhi v3.2d, v17.2d, v3.2d
+; CHECK-NEXT:    uzp1 v21.4s, v21.4s, v26.4s
+; CHECK-NEXT:    cmhi v4.2d, v17.2d, v4.2d
+; CHECK-NEXT:    cmhi v5.2d, v17.2d, v5.2d
+; CHECK-NEXT:    cmhi v23.2d, v17.2d, v6.2d
+; CHECK-NEXT:    cmhi v26.2d, v17.2d, v7.2d
+; CHECK-NEXT:    cmhi v17.2d, v17.2d, v16.2d
+; CHECK-NEXT:    cmhi v6.2d, v2.2d, v6.2d
+; CHECK-NEXT:    cmhi v7.2d, v2.2d, v7.2d
+; CHECK-NEXT:    cmhi v2.2d, v2.2d, v16.2d
+; CHECK-NEXT:    cmhi v1.2d, v1.2d, v16.2d
+; CHECK-NEXT:    uzp1 v16.4s, v27.4s, v28.4s
+; CHECK-NEXT:    uzp1 v3.4s, v3.4s, v18.4s
+; CHECK-NEXT:    uzp1 v4.4s, v5.4s, v4.4s
+; CHECK-NEXT:    uzp1 v5.4s, v26.4s, v23.4s
+; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v17.4s
+; CHECK-NEXT:    uzp1 v17.4s, v30.4s, v29.4s
+; CHECK-NEXT:    uzp1 v6.4s, v7.4s, v6.4s
+; CHECK-NEXT:    uzp1 v2.4s, v20.4s, v2.4s
+; CHECK-NEXT:    uzp1 v7.4s, v25.4s, v24.4s
+; CHECK-NEXT:    uzp1 v1.4s, v19.4s, v1.4s
+; CHECK-NEXT:    uzp1 v18.8h, v22.8h, v21.8h
+; CHECK-NEXT:    whilewr p0.b, x0, x1
+; CHECK-NEXT:    uzp1 v3.8h, v4.8h, v3.8h
+; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v5.8h
+; CHECK-NEXT:    uzp1 v4.8h, v17.8h, v16.8h
+; CHECK-NEXT:    dup v5.16b, w9
 ; CHECK-NEXT:    adrp x9, .LCPI9_0
-; CHECK-NEXT:    cmge v0.16b, v0.16b, #0
-; CHECK-NEXT:    mov z4.b, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    bic v3.16b, v3.16b, v2.16b
-; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
-; CHECK-NEXT:    mov z1.b, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    bic v4.16b, v4.16b, v5.16b
-; CHECK-NEXT:    bic v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    shl v1.16b, v1.16b, #7
-; CHECK-NEXT:    shl v2.16b, v3.16b, #7
-; CHECK-NEXT:    shl v3.16b, v4.16b, #7
+; CHECK-NEXT:    uzp1 v2.8h, v2.8h, v6.8h
+; CHECK-NEXT:    mov z6.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    uzp1 v1.8h, v1.8h, v7.8h
+; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v3.16b
+; CHECK-NEXT:    dup v3.16b, w11
+; CHECK-NEXT:    uzp1 v2.16b, v2.16b, v4.16b
+; CHECK-NEXT:    dup v4.16b, w10
+; CHECK-NEXT:    uzp1 v1.16b, v1.16b, v18.16b
+; CHECK-NEXT:    orr v0.16b, v0.16b, v3.16b
+; CHECK-NEXT:    shl v3.16b, v6.16b, #7
+; CHECK-NEXT:    orr v2.16b, v2.16b, v4.16b
 ; CHECK-NEXT:    ldr q4, [x9, :lo12:.LCPI9_0]
+; CHECK-NEXT:    orr v1.16b, v1.16b, v5.16b
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
-; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
-; CHECK-NEXT:    cmlt v2.16b, v2.16b, #0
 ; CHECK-NEXT:    cmlt v3.16b, v3.16b, #0
+; CHECK-NEXT:    shl v2.16b, v2.16b, #7
+; CHECK-NEXT:    shl v1.16b, v1.16b, #7
 ; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
-; CHECK-NEXT:    and v1.16b, v1.16b, v4.16b
-; CHECK-NEXT:    and v2.16b, v2.16b, v4.16b
 ; CHECK-NEXT:    and v3.16b, v3.16b, v4.16b
+; CHECK-NEXT:    cmlt v2.16b, v2.16b, #0
+; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
 ; CHECK-NEXT:    and v0.16b, v0.16b, v4.16b
-; CHECK-NEXT:    ext v5.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ext v4.16b, v2.16b, v2.16b, #8
-; CHECK-NEXT:    ext v6.16b, v3.16b, v3.16b, #8
-; CHECK-NEXT:    ext v7.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    zip1 v1.16b, v1.16b, v5.16b
-; CHECK-NEXT:    zip1 v2.16b, v2.16b, v4.16b
-; CHECK-NEXT:    zip1 v3.16b, v3.16b, v6.16b
-; CHECK-NEXT:    zip1 v0.16b, v0.16b, v7.16b
-; CHECK-NEXT:    addv h1, v1.8h
-; CHECK-NEXT:    addv h2, v2.8h
+; CHECK-NEXT:    ext v5.16b, v3.16b, v3.16b, #8
+; CHECK-NEXT:    and v2.16b, v2.16b, v4.16b
+; CHECK-NEXT:    and v1.16b, v1.16b, v4.16b
+; CHECK-NEXT:    ext v4.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v6.16b, v2.16b, v2.16b, #8
+; CHECK-NEXT:    zip1 v3.16b, v3.16b, v5.16b
+; CHECK-NEXT:    ext v7.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    zip1 v0.16b, v0.16b, v4.16b
+; CHECK-NEXT:    zip1 v2.16b, v2.16b, v6.16b
 ; CHECK-NEXT:    addv h3, v3.8h
+; CHECK-NEXT:    zip1 v1.16b, v1.16b, v7.16b
 ; CHECK-NEXT:    addv h0, v0.8h
-; CHECK-NEXT:    str h1, [x8]
-; CHECK-NEXT:    str h2, [x8, #4]
-; CHECK-NEXT:    str h3, [x8, #2]
+; CHECK-NEXT:    str h3, [x8]
+; CHECK-NEXT:    addv h2, v2.8h
+; CHECK-NEXT:    addv h1, v1.8h
 ; CHECK-NEXT:    str h0, [x8, #6]
+; CHECK-NEXT:    str h2, [x8, #4]
+; CHECK-NEXT:    str h1, [x8, #2]
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <64 x i1> @llvm.loop.dependence.war.mask.v64i1(ptr %a, ptr %b, i64 1)
@@ -246,74 +331,69 @@ entry:
 define <32 x i1> @whilewr_16_expand2(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_16_expand2:
 ; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sub x9, x1, x0
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    add x9, x0, #32
-; CHECK-NEXT:    sub x10, x1, x0
-; CHECK-NEXT:    subs x9, x1, x9
+; CHECK-NEXT:    sub x10, x9, #32
+; CHECK-NEXT:    add x9, x9, x9, lsr #63
 ; CHECK-NEXT:    add x10, x10, x10, lsr #63
-; CHECK-NEXT:    add x11, x9, x9, lsr #63
-; CHECK-NEXT:    asr x9, x10, #1
+; CHECK-NEXT:    asr x9, x9, #1
+; CHECK-NEXT:    asr x10, x10, #1
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    mov z2.d, z0.d
+; CHECK-NEXT:    mov z3.d, z0.d
 ; CHECK-NEXT:    mov z4.d, z0.d
 ; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z7.d, z0.d
-; CHECK-NEXT:    mov z16.d, z0.d
-; CHECK-NEXT:    mov z18.d, z0.d
-; CHECK-NEXT:    asr x10, x11, #1
-; CHECK-NEXT:    dup v3.2d, x9
+; CHECK-NEXT:    mov z6.d, z0.d
+; CHECK-NEXT:    dup v7.2d, x9
+; CHECK-NEXT:    dup v16.2d, x10
 ; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
 ; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
-; CHECK-NEXT:    add z4.d, z4.d, #8 // =0x8
-; CHECK-NEXT:    dup v6.2d, x10
-; CHECK-NEXT:    add z5.d, z5.d, #6 // =0x6
-; CHECK-NEXT:    add z7.d, z7.d, #4 // =0x4
-; CHECK-NEXT:    add z18.d, z18.d, #14 // =0xe
-; CHECK-NEXT:    add z16.d, z16.d, #2 // =0x2
-; CHECK-NEXT:    cmhi v17.2d, v3.2d, v0.2d
-; CHECK-NEXT:    cmhi v19.2d, v3.2d, v1.2d
-; CHECK-NEXT:    cmhi v20.2d, v3.2d, v2.2d
-; CHECK-NEXT:    cset w11, ls
-; CHECK-NEXT:    cmhi v0.2d, v6.2d, v0.2d
-; CHECK-NEXT:    cmhi v1.2d, v6.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v6.2d, v2.2d
-; CHECK-NEXT:    cmhi v21.2d, v6.2d, v4.2d
-; CHECK-NEXT:    cmhi v22.2d, v6.2d, v18.2d
-; CHECK-NEXT:    cmhi v23.2d, v6.2d, v5.2d
-; CHECK-NEXT:    cmhi v24.2d, v6.2d, v7.2d
-; CHECK-NEXT:    cmhi v6.2d, v6.2d, v16.2d
-; CHECK-NEXT:    cmhi v4.2d, v3.2d, v4.2d
-; CHECK-NEXT:    cmhi v5.2d, v3.2d, v5.2d
-; CHECK-NEXT:    cmhi v18.2d, v3.2d, v18.2d
 ; CHECK-NEXT:    cmp x10, #1
-; CHECK-NEXT:    cmhi v7.2d, v3.2d, v7.2d
-; CHECK-NEXT:    cmhi v3.2d, v3.2d, v16.2d
+; CHECK-NEXT:    add z3.d, z3.d, #8 // =0x8
+; CHECK-NEXT:    add z4.d, z4.d, #6 // =0x6
+; CHECK-NEXT:    add z5.d, z5.d, #4 // =0x4
+; CHECK-NEXT:    add z6.d, z6.d, #2 // =0x2
+; CHECK-NEXT:    cmhi v17.2d, v7.2d, v0.2d
+; CHECK-NEXT:    cmhi v18.2d, v16.2d, v0.2d
+; CHECK-NEXT:    add z0.d, z0.d, #14 // =0xe
+; CHECK-NEXT:    cmhi v19.2d, v7.2d, v1.2d
+; CHECK-NEXT:    cmhi v20.2d, v7.2d, v2.2d
+; CHECK-NEXT:    cmhi v21.2d, v7.2d, v3.2d
+; CHECK-NEXT:    cmhi v22.2d, v7.2d, v4.2d
+; CHECK-NEXT:    cmhi v23.2d, v7.2d, v5.2d
+; CHECK-NEXT:    cmhi v24.2d, v7.2d, v6.2d
+; CHECK-NEXT:    cmhi v1.2d, v16.2d, v1.2d
+; CHECK-NEXT:    cmhi v2.2d, v16.2d, v2.2d
+; CHECK-NEXT:    cmhi v3.2d, v16.2d, v3.2d
+; CHECK-NEXT:    cmhi v4.2d, v16.2d, v4.2d
+; CHECK-NEXT:    cmhi v7.2d, v7.2d, v0.2d
+; CHECK-NEXT:    cmhi v5.2d, v16.2d, v5.2d
+; CHECK-NEXT:    cmhi v6.2d, v16.2d, v6.2d
 ; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v22.4s
-; CHECK-NEXT:    uzp1 v2.4s, v21.4s, v2.4s
+; CHECK-NEXT:    cmhi v0.2d, v16.2d, v0.2d
+; CHECK-NEXT:    uzp1 v16.4s, v21.4s, v20.4s
 ; CHECK-NEXT:    cmp x9, #1
-; CHECK-NEXT:    uzp1 v16.4s, v24.4s, v23.4s
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v6.4s
+; CHECK-NEXT:    uzp1 v20.4s, v23.4s, v22.4s
+; CHECK-NEXT:    uzp1 v17.4s, v17.4s, v24.4s
 ; CHECK-NEXT:    cset w9, lt
-; CHECK-NEXT:    uzp1 v6.4s, v19.4s, v18.4s
-; CHECK-NEXT:    uzp1 v4.4s, v4.4s, v20.4s
-; CHECK-NEXT:    uzp1 v5.4s, v7.4s, v5.4s
-; CHECK-NEXT:    uzp1 v3.4s, v17.4s, v3.4s
-; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v16.8h
-; CHECK-NEXT:    uzp1 v2.8h, v4.8h, v6.8h
-; CHECK-NEXT:    uzp1 v3.8h, v3.8h, v5.8h
-; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
-; CHECK-NEXT:    dup v1.16b, w10
-; CHECK-NEXT:    uzp1 v2.16b, v3.16b, v2.16b
-; CHECK-NEXT:    dup v3.16b, w9
+; CHECK-NEXT:    uzp1 v2.4s, v3.4s, v2.4s
+; CHECK-NEXT:    uzp1 v3.4s, v19.4s, v7.4s
+; CHECK-NEXT:    uzp1 v4.4s, v5.4s, v4.4s
+; CHECK-NEXT:    uzp1 v5.4s, v18.4s, v6.4s
+; CHECK-NEXT:    uzp1 v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    uzp1 v1.8h, v17.8h, v20.8h
+; CHECK-NEXT:    uzp1 v3.8h, v16.8h, v3.8h
+; CHECK-NEXT:    uzp1 v4.8h, v5.8h, v4.8h
+; CHECK-NEXT:    uzp1 v0.8h, v2.8h, v0.8h
+; CHECK-NEXT:    dup v2.16b, w9
 ; CHECK-NEXT:    adrp x9, .LCPI11_0
-; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
-; CHECK-NEXT:    dup v1.16b, w11
-; CHECK-NEXT:    orr v2.16b, v2.16b, v3.16b
-; CHECK-NEXT:    bic v0.16b, v0.16b, v1.16b
-; CHECK-NEXT:    shl v1.16b, v2.16b, #7
+; CHECK-NEXT:    uzp1 v1.16b, v1.16b, v3.16b
+; CHECK-NEXT:    dup v3.16b, w10
+; CHECK-NEXT:    uzp1 v0.16b, v4.16b, v0.16b
+; CHECK-NEXT:    orr v1.16b, v1.16b, v2.16b
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI11_0]
+; CHECK-NEXT:    orr v0.16b, v0.16b, v3.16b
+; CHECK-NEXT:    shl v1.16b, v1.16b, #7
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
 ; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
 ; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
@@ -417,89 +497,85 @@ entry:
 define <32 x i1> @whilewr_32_expand3(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_32_expand3:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    add x9, x0, #64
+; CHECK-NEXT:    sub x10, x1, x0
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x9, x1, x9
-; CHECK-NEXT:    add x10, x9, #3
-; CHECK-NEXT:    csel x9, x10, x9, mi
+; CHECK-NEXT:    sub x9, x10, #61
+; CHECK-NEXT:    subs x11, x10, #64
+; CHECK-NEXT:    add x12, x10, #3
+; CHECK-NEXT:    csel x9, x9, x11, mi
 ; CHECK-NEXT:    asr x11, x9, #2
-; CHECK-NEXT:    cset w9, ls
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    mov z2.d, z0.d
 ; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
 ; CHECK-NEXT:    cmp x11, #1
-; CHECK-NEXT:    mov z7.d, z0.d
-; CHECK-NEXT:    mov z16.d, z0.d
-; CHECK-NEXT:    mov z17.d, z0.d
-; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    subs x12, x1, x0
-; CHECK-NEXT:    add x13, x12, #3
-; CHECK-NEXT:    dup v5.2d, x11
+; CHECK-NEXT:    mov z4.d, z0.d
+; CHECK-NEXT:    mov z5.d, z0.d
+; CHECK-NEXT:    cset w9, lt
+; CHECK-NEXT:    cmp x10, #0
+; CHECK-NEXT:    mov z6.d, z0.d
+; CHECK-NEXT:    csel x10, x12, x10, mi
+; CHECK-NEXT:    dup v7.2d, x11
 ; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
-; CHECK-NEXT:    csel x12, x13, x12, mi
+; CHECK-NEXT:    asr x10, x10, #2
 ; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
 ; CHECK-NEXT:    add z3.d, z3.d, #8 // =0x8
 ; CHECK-NEXT:    add z4.d, z4.d, #6 // =0x6
-; CHECK-NEXT:    add z7.d, z7.d, #4 // =0x4
-; CHECK-NEXT:    add z17.d, z17.d, #14 // =0xe
-; CHECK-NEXT:    add z16.d, z16.d, #2 // =0x2
-; CHECK-NEXT:    asr x12, x12, #2
-; CHECK-NEXT:    cmhi v18.2d, v5.2d, v0.2d
-; CHECK-NEXT:    cmhi v19.2d, v5.2d, v1.2d
-; CHECK-NEXT:    cmhi v20.2d, v5.2d, v2.2d
-; CHECK-NEXT:    cmhi v21.2d, v5.2d, v3.2d
-; CHECK-NEXT:    dup v6.2d, x12
-; CHECK-NEXT:    cmhi v22.2d, v5.2d, v4.2d
-; CHECK-NEXT:    cmhi v23.2d, v5.2d, v7.2d
-; CHECK-NEXT:    cmhi v24.2d, v5.2d, v17.2d
-; CHECK-NEXT:    cmhi v5.2d, v5.2d, v16.2d
-; CHECK-NEXT:    cmp x12, #1
-; CHECK-NEXT:    cmhi v0.2d, v6.2d, v0.2d
-; CHECK-NEXT:    cmhi v1.2d, v6.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v6.2d, v2.2d
-; CHECK-NEXT:    cmhi v3.2d, v6.2d, v3.2d
-; CHECK-NEXT:    cmhi v4.2d, v6.2d, v4.2d
-; CHECK-NEXT:    cmhi v17.2d, v6.2d, v17.2d
-; CHECK-NEXT:    cmhi v7.2d, v6.2d, v7.2d
-; CHECK-NEXT:    cmhi v6.2d, v6.2d, v16.2d
-; CHECK-NEXT:    uzp1 v16.4s, v19.4s, v24.4s
-; CHECK-NEXT:    uzp1 v19.4s, v21.4s, v20.4s
-; CHECK-NEXT:    uzp1 v20.4s, v23.4s, v22.4s
-; CHECK-NEXT:    uzp1 v5.4s, v18.4s, v5.4s
-; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v17.4s
+; CHECK-NEXT:    add z5.d, z5.d, #4 // =0x4
+; CHECK-NEXT:    add z6.d, z6.d, #2 // =0x2
+; CHECK-NEXT:    dup v16.2d, x10
+; CHECK-NEXT:    cmhi v17.2d, v7.2d, v0.2d
+; CHECK-NEXT:    cmhi v19.2d, v7.2d, v1.2d
+; CHECK-NEXT:    cmhi v20.2d, v7.2d, v2.2d
+; CHECK-NEXT:    cmhi v21.2d, v7.2d, v3.2d
+; CHECK-NEXT:    cmp x10, #1
+; CHECK-NEXT:    cmhi v22.2d, v7.2d, v4.2d
+; CHECK-NEXT:    cset w10, lt
+; CHECK-NEXT:    cmhi v18.2d, v16.2d, v0.2d
+; CHECK-NEXT:    add z0.d, z0.d, #14 // =0xe
+; CHECK-NEXT:    cmhi v1.2d, v16.2d, v1.2d
+; CHECK-NEXT:    cmhi v2.2d, v16.2d, v2.2d
+; CHECK-NEXT:    cmhi v3.2d, v16.2d, v3.2d
+; CHECK-NEXT:    cmhi v4.2d, v16.2d, v4.2d
+; CHECK-NEXT:    cmhi v23.2d, v16.2d, v5.2d
+; CHECK-NEXT:    cmhi v24.2d, v16.2d, v6.2d
+; CHECK-NEXT:    cmhi v5.2d, v7.2d, v5.2d
+; CHECK-NEXT:    cmhi v16.2d, v16.2d, v0.2d
+; CHECK-NEXT:    cmhi v6.2d, v7.2d, v6.2d
+; CHECK-NEXT:    cmhi v0.2d, v7.2d, v0.2d
+; CHECK-NEXT:    uzp1 v7.4s, v21.4s, v20.4s
 ; CHECK-NEXT:    uzp1 v2.4s, v3.4s, v2.4s
-; CHECK-NEXT:    uzp1 v3.4s, v7.4s, v4.4s
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v6.4s
-; CHECK-NEXT:    uzp1 v4.8h, v19.8h, v16.8h
-; CHECK-NEXT:    uzp1 v5.8h, v5.8h, v20.8h
+; CHECK-NEXT:    uzp1 v3.4s, v23.4s, v4.4s
+; CHECK-NEXT:    uzp1 v4.4s, v18.4s, v24.4s
+; CHECK-NEXT:    uzp1 v5.4s, v5.4s, v22.4s
+; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v16.4s
+; CHECK-NEXT:    uzp1 v6.4s, v17.4s, v6.4s
+; CHECK-NEXT:    uzp1 v0.4s, v19.4s, v0.4s
+; CHECK-NEXT:    uzp1 v3.8h, v4.8h, v3.8h
 ; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v3.8h
+; CHECK-NEXT:    uzp1 v2.8h, v6.8h, v5.8h
+; CHECK-NEXT:    uzp1 v0.8h, v7.8h, v0.8h
+; CHECK-NEXT:    uzp1 v1.16b, v3.16b, v1.16b
+; CHECK-NEXT:    uzp1 v0.16b, v2.16b, v0.16b
 ; CHECK-NEXT:    dup v3.16b, w10
-; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    uzp1 v2.16b, v5.16b, v4.16b
-; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
-; CHECK-NEXT:    dup v1.16b, w10
-; CHECK-NEXT:    orr v2.16b, v2.16b, v3.16b
-; CHECK-NEXT:    dup v3.16b, w9
+; CHECK-NEXT:    dup v2.16b, w9
 ; CHECK-NEXT:    adrp x9, .LCPI14_0
-; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
-; CHECK-NEXT:    bic v1.16b, v2.16b, v3.16b
+; CHECK-NEXT:    orr v1.16b, v1.16b, v3.16b
+; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI14_0]
-; CHECK-NEXT:    shl v0.16b, v0.16b, #7
 ; CHECK-NEXT:    shl v1.16b, v1.16b, #7
-; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
+; CHECK-NEXT:    shl v0.16b, v0.16b, #7
 ; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
-; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
 ; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
-; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    zip1 v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    zip1 v1.16b, v1.16b, v3.16b
-; CHECK-NEXT:    addv h0, v0.8h
+; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    ext v2.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    ext v3.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    zip1 v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    zip1 v0.16b, v0.16b, v3.16b
 ; CHECK-NEXT:    addv h1, v1.8h
-; CHECK-NEXT:    str h0, [x8]
-; CHECK-NEXT:    str h1, [x8, #2]
+; CHECK-NEXT:    addv h0, v0.8h
+; CHECK-NEXT:    str h1, [x8]
+; CHECK-NEXT:    str h0, [x8, #2]
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <32 x i1> @llvm.loop.dependence.war.mask.v32i1(ptr %a, ptr %b, i64 4)
@@ -615,89 +691,85 @@ entry:
 define <32 x i1> @whilewr_64_expand4(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_64_expand4:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    add x9, x0, #128
+; CHECK-NEXT:    sub x10, x1, x0
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x9, x1, x9
-; CHECK-NEXT:    add x10, x9, #7
-; CHECK-NEXT:    csel x9, x10, x9, mi
+; CHECK-NEXT:    sub x9, x10, #121
+; CHECK-NEXT:    subs x11, x10, #128
+; CHECK-NEXT:    add x12, x10, #7
+; CHECK-NEXT:    csel x9, x9, x11, mi
 ; CHECK-NEXT:    asr x11, x9, #3
-; CHECK-NEXT:    cset w9, ls
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    mov z2.d, z0.d
 ; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
 ; CHECK-NEXT:    cmp x11, #1
-; CHECK-NEXT:    mov z7.d, z0.d
-; CHECK-NEXT:    mov z16.d, z0.d
-; CHECK-NEXT:    mov z17.d, z0.d
-; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    subs x12, x1, x0
-; CHECK-NEXT:    add x13, x12, #7
-; CHECK-NEXT:    dup v5.2d, x11
+; CHECK-NEXT:    mov z4.d, z0.d
+; CHECK-NEXT:    mov z5.d, z0.d
+; CHECK-NEXT:    cset w9, lt
+; CHECK-NEXT:    cmp x10, #0
+; CHECK-NEXT:    mov z6.d, z0.d
+; CHECK-NEXT:    csel x10, x12, x10, mi
+; CHECK-NEXT:    dup v7.2d, x11
 ; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
-; CHECK-NEXT:    csel x12, x13, x12, mi
+; CHECK-NEXT:    asr x10, x10, #3
 ; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
 ; CHECK-NEXT:    add z3.d, z3.d, #8 // =0x8
 ; CHECK-NEXT:    add z4.d, z4.d, #6 // =0x6
-; CHECK-NEXT:    add z7.d, z7.d, #4 // =0x4
-; CHECK-NEXT:    add z17.d, z17.d, #14 // =0xe
-; CHECK-NEXT:    add z16.d, z16.d, #2 // =0x2
-; CHECK-NEXT:    asr x12, x12, #3
-; CHECK-NEXT:    cmhi v18.2d, v5.2d, v0.2d
-; CHECK-NEXT:    cmhi v19.2d, v5.2d, v1.2d
-; CHECK-NEXT:    cmhi v20.2d, v5.2d, v2.2d
-; CHECK-NEXT:    cmhi v21.2d, v5.2d, v3.2d
-; CHECK-NEXT:    dup v6.2d, x12
-; CHECK-NEXT:    cmhi v22.2d, v5.2d, v4.2d
-; CHECK-NEXT:    cmhi v23.2d, v5.2d, v7.2d
-; CHECK-NEXT:    cmhi v24.2d, v5.2d, v17.2d
-; CHECK-NEXT:    cmhi v5.2d, v5.2d, v16.2d
-; CHECK-NEXT:    cmp x12, #1
-; CHECK-NEXT:    cmhi v0.2d, v6.2d, v0.2d
-; CHECK-NEXT:    cmhi v1.2d, v6.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v6.2d, v2.2d
-; CHECK-NEXT:    cmhi v3.2d, v6.2d, v3.2d
-; CHECK-NEXT:    cmhi v4.2d, v6.2d, v4.2d
-; CHECK-NEXT:    cmhi v17.2d, v6.2d, v17.2d
-; CHECK-NEXT:    cmhi v7.2d, v6.2d, v7.2d
-; CHECK-NEXT:    cmhi v6.2d, v6.2d, v16.2d
-; CHECK-NEXT:    uzp1 v16.4s, v19.4s, v24.4s
-; CHECK-NEXT:    uzp1 v19.4s, v21.4s, v20.4s
-; CHECK-NEXT:    uzp1 v20.4s, v23.4s, v22.4s
-; CHECK-NEXT:    uzp1 v5.4s, v18.4s, v5.4s
-; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v17.4s
+; CHECK-NEXT:    add z5.d, z5.d, #4 // =0x4
+; CHECK-NEXT:    add z6.d, z6.d, #2 // =0x2
+; CHECK-NEXT:    dup v16.2d, x10
+; CHECK-NEXT:    cmhi v17.2d, v7.2d, v0.2d
+; CHECK-NEXT:    cmhi v19.2d, v7.2d, v1.2d
+; CHECK-NEXT:    cmhi v20.2d, v7.2d, v2.2d
+; CHECK-NEXT:    cmhi v21.2d, v7.2d, v3.2d
+; CHECK-NEXT:    cmp x10, #1
+; CHECK-NEXT:    cmhi v22.2d, v7.2d, v4.2d
+; CHECK-NEXT:    cset w10, lt
+; CHECK-NEXT:    cmhi v18.2d, v16.2d, v0.2d
+; CHECK-NEXT:    add z0.d, z0.d, #14 // =0xe
+; CHECK-NEXT:    cmhi v1.2d, v16.2d, v1.2d
+; CHECK-NEXT:    cmhi v2.2d, v16.2d, v2.2d
+; CHECK-NEXT:    cmhi v3.2d, v16.2d, v3.2d
+; CHECK-NEXT:    cmhi v4.2d, v16.2d, v4.2d
+; CHECK-NEXT:    cmhi v23.2d, v16.2d, v5.2d
+; CHECK-NEXT:    cmhi v24.2d, v16.2d, v6.2d
+; CHECK-NEXT:    cmhi v5.2d, v7.2d, v5.2d
+; CHECK-NEXT:    cmhi v16.2d, v16.2d, v0.2d
+; CHECK-NEXT:    cmhi v6.2d, v7.2d, v6.2d
+; CHECK-NEXT:    cmhi v0.2d, v7.2d, v0.2d
+; CHECK-NEXT:    uzp1 v7.4s, v21.4s, v20.4s
 ; CHECK-NEXT:    uzp1 v2.4s, v3.4s, v2.4s
-; CHECK-NEXT:    uzp1 v3.4s, v7.4s, v4.4s
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v6.4s
-; CHECK-NEXT:    uzp1 v4.8h, v19.8h, v16.8h
-; CHECK-NEXT:    uzp1 v5.8h, v5.8h, v20.8h
+; CHECK-NEXT:    uzp1 v3.4s, v23.4s, v4.4s
+; CHECK-NEXT:    uzp1 v4.4s, v18.4s, v24.4s
+; CHECK-NEXT:    uzp1 v5.4s, v5.4s, v22.4s
+; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v16.4s
+; CHECK-NEXT:    uzp1 v6.4s, v17.4s, v6.4s
+; CHECK-NEXT:    uzp1 v0.4s, v19.4s, v0.4s
+; CHECK-NEXT:    uzp1 v3.8h, v4.8h, v3.8h
 ; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v3.8h
+; CHECK-NEXT:    uzp1 v2.8h, v6.8h, v5.8h
+; CHECK-NEXT:    uzp1 v0.8h, v7.8h, v0.8h
+; CHECK-NEXT:    uzp1 v1.16b, v3.16b, v1.16b
+; CHECK-NEXT:    uzp1 v0.16b, v2.16b, v0.16b
 ; CHECK-NEXT:    dup v3.16b, w10
-; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    uzp1 v2.16b, v5.16b, v4.16b
-; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
-; CHECK-NEXT:    dup v1.16b, w10
-; CHECK-NEXT:    orr v2.16b, v2.16b, v3.16b
-; CHECK-NEXT:    dup v3.16b, w9
+; CHECK-NEXT:    dup v2.16b, w9
 ; CHECK-NEXT:    adrp x9, .LCPI18_0
-; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
-; CHECK-NEXT:    bic v1.16b, v2.16b, v3.16b
+; CHECK-NEXT:    orr v1.16b, v1.16b, v3.16b
+; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI18_0]
-; CHECK-NEXT:    shl v0.16b, v0.16b, #7
 ; CHECK-NEXT:    shl v1.16b, v1.16b, #7
-; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
+; CHECK-NEXT:    shl v0.16b, v0.16b, #7
 ; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
-; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
 ; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
-; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    zip1 v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    zip1 v1.16b, v1.16b, v3.16b
-; CHECK-NEXT:    addv h0, v0.8h
+; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    ext v2.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    ext v3.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    zip1 v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    zip1 v0.16b, v0.16b, v3.16b
 ; CHECK-NEXT:    addv h1, v1.8h
-; CHECK-NEXT:    str h0, [x8]
-; CHECK-NEXT:    str h1, [x8, #2]
+; CHECK-NEXT:    addv h0, v0.8h
+; CHECK-NEXT:    str h1, [x8]
+; CHECK-NEXT:    str h0, [x8, #2]
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <32 x i1> @llvm.loop.dependence.war.mask.v32i1(ptr %a, ptr %b, i64 8)
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
index b9a9484a33e7b..92da69e2e6f41 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
@@ -84,15 +84,59 @@ entry:
 define <vscale x 32 x i1> @whilewr_8_split(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_8_split:
 ; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
+; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Spill
+; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Spill
+; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    mov x8, x0
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    incb x8
-; CHECK-NEXT:    cmp x8, x1
-; CHECK-NEXT:    cset w9, hs
-; CHECK-NEXT:    whilewr p0.b, x8, x1
-; CHECK-NEXT:    sbfx x8, x9, #0, #1
-; CHECK-NEXT:    whilelo p1.b, xzr, x8
-; CHECK-NEXT:    bic p1.b, p0/z, p0.b, p1.b
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    sub x8, x1, x8
+; CHECK-NEXT:    mov z4.d, z0.d
+; CHECK-NEXT:    mov z2.d, x8
+; CHECK-NEXT:    mov z5.d, z0.d
+; CHECK-NEXT:    incd z1.d
+; CHECK-NEXT:    incd z4.d, all, mul #2
+; CHECK-NEXT:    cmphi p2.d, p0/z, z2.d, z0.d
+; CHECK-NEXT:    incd z5.d, all, mul #4
+; CHECK-NEXT:    mov z3.d, z1.d
+; CHECK-NEXT:    cmphi p1.d, p0/z, z2.d, z1.d
+; CHECK-NEXT:    incd z1.d, all, mul #4
+; CHECK-NEXT:    cmphi p3.d, p0/z, z2.d, z4.d
+; CHECK-NEXT:    incd z4.d, all, mul #4
+; CHECK-NEXT:    cmphi p4.d, p0/z, z2.d, z5.d
+; CHECK-NEXT:    incd z3.d, all, mul #2
+; CHECK-NEXT:    cmphi p5.d, p0/z, z2.d, z1.d
+; CHECK-NEXT:    cmphi p7.d, p0/z, z2.d, z4.d
+; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
+; CHECK-NEXT:    mov z0.d, z3.d
+; CHECK-NEXT:    cmphi p6.d, p0/z, z2.d, z3.d
+; CHECK-NEXT:    uzp1 p2.s, p4.s, p5.s
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
+; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
+; CHECK-NEXT:    incd z0.d, all, mul #4
+; CHECK-NEXT:    uzp1 p3.s, p3.s, p6.s
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT:    cmphi p0.d, p0/z, z2.d, z0.d
+; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
+; CHECK-NEXT:    cmp x8, #1
+; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    uzp1 p0.s, p7.s, p0.s
+; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p0.h, p2.h, p0.h
+; CHECK-NEXT:    whilelo p2.b, xzr, x8
+; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
+; CHECK-NEXT:    sel p1.b, p0, p0.b, p2.b
 ; CHECK-NEXT:    whilewr p0.b, x0, x1
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 32 x i1> @llvm.loop.dependence.war.mask.nxv32i1(ptr %a, ptr %b, i64 1)
@@ -104,35 +148,107 @@ define <vscale x 64 x i1> @whilewr_8_split2(ptr %a, ptr %b) {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Spill
+; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Spill
+; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
+; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Spill
+; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    index z0.d, #0, #1
+; CHECK-NEXT:    mov x8, x0
 ; CHECK-NEXT:    mov x9, x0
-; CHECK-NEXT:    mov x10, x0
-; CHECK-NEXT:    rdvl x8, #3
-; CHECK-NEXT:    incb x9
-; CHECK-NEXT:    incb x10, all, mul #2
-; CHECK-NEXT:    add x8, x0, x8
-; CHECK-NEXT:    cmp x9, x1
-; CHECK-NEXT:    cset w11, hs
-; CHECK-NEXT:    whilewr p0.b, x9, x1
-; CHECK-NEXT:    sbfx x11, x11, #0, #1
-; CHECK-NEXT:    whilelo p1.b, xzr, x11
-; CHECK-NEXT:    cmp x10, x1
-; CHECK-NEXT:    cset w9, hs
-; CHECK-NEXT:    whilewr p2.b, x10, x1
-; CHECK-NEXT:    sbfx x9, x9, #0, #1
-; CHECK-NEXT:    bic p1.b, p0/z, p0.b, p1.b
-; CHECK-NEXT:    whilelo p3.b, xzr, x9
-; CHECK-NEXT:    cmp x8, x1
-; CHECK-NEXT:    cset w9, hs
-; CHECK-NEXT:    whilewr p4.b, x8, x1
+; CHECK-NEXT:    incb x8
+; CHECK-NEXT:    incb x9, all, mul #2
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    addvl x10, x0, #3
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    mov z2.d, z0.d
+; CHECK-NEXT:    mov z4.d, z0.d
+; CHECK-NEXT:    sub x8, x1, x8
+; CHECK-NEXT:    sub x9, x1, x9
+; CHECK-NEXT:    mov z5.d, x8
+; CHECK-NEXT:    incd z1.d
+; CHECK-NEXT:    incd z2.d, all, mul #2
+; CHECK-NEXT:    incd z4.d, all, mul #4
+; CHECK-NEXT:    cmphi p2.d, p0/z, z5.d, z0.d
+; CHECK-NEXT:    mov z3.d, z1.d
+; CHECK-NEXT:    mov z6.d, z1.d
+; CHECK-NEXT:    mov z7.d, z2.d
+; CHECK-NEXT:    cmphi p1.d, p0/z, z5.d, z1.d
+; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z4.d
+; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z2.d
+; CHECK-NEXT:    incd z3.d, all, mul #2
+; CHECK-NEXT:    incd z6.d, all, mul #4
+; CHECK-NEXT:    incd z7.d, all, mul #4
+; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
+; CHECK-NEXT:    mov z24.d, z3.d
+; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z6.d
+; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z3.d
+; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z7.d
+; CHECK-NEXT:    incd z24.d, all, mul #4
+; CHECK-NEXT:    uzp1 p2.s, p3.s, p4.s
+; CHECK-NEXT:    uzp1 p3.s, p5.s, p6.s
+; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z24.d
+; CHECK-NEXT:    mov z5.d, x9
+; CHECK-NEXT:    cmp x8, #1
+; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
+; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z24.d
+; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z7.d
+; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z6.d
+; CHECK-NEXT:    uzp1 p7.s, p7.s, p8.s
+; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z4.d
+; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z3.d
+; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z2.d
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    uzp1 p2.h, p2.h, p7.h
+; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z1.d
+; CHECK-NEXT:    uzp1 p4.s, p5.s, p4.s
+; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z0.d
+; CHECK-NEXT:    uzp1 p6.s, p9.s, p6.s
+; CHECK-NEXT:    whilelo p9.b, xzr, x8
+; CHECK-NEXT:    sub x8, x1, x10
+; CHECK-NEXT:    uzp1 p3.s, p8.s, p3.s
+; CHECK-NEXT:    mov z5.d, x8
+; CHECK-NEXT:    cmp x9, #1
+; CHECK-NEXT:    uzp1 p5.s, p5.s, p7.s
+; CHECK-NEXT:    cset w9, lt
+; CHECK-NEXT:    uzp1 p1.b, p1.b, p2.b
 ; CHECK-NEXT:    sbfx x9, x9, #0, #1
-; CHECK-NEXT:    bic p2.b, p2/z, p2.b, p3.b
-; CHECK-NEXT:    whilelo p0.b, xzr, x9
-; CHECK-NEXT:    bic p4.b, p4/z, p4.b, p0.b
+; CHECK-NEXT:    uzp1 p4.h, p6.h, p4.h
+; CHECK-NEXT:    cmphi p2.d, p0/z, z5.d, z24.d
+; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z7.d
+; CHECK-NEXT:    uzp1 p3.h, p5.h, p3.h
+; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z6.d
+; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z4.d
+; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z3.d
+; CHECK-NEXT:    sel p1.b, p1, p1.b, p9.b
+; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z2.d
+; CHECK-NEXT:    uzp1 p3.b, p3.b, p4.b
+; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z1.d
+; CHECK-NEXT:    cmphi p0.d, p0/z, z5.d, z0.d
+; CHECK-NEXT:    uzp1 p2.s, p7.s, p2.s
+; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p6.s, p8.s, p6.s
+; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p5.s, p9.s, p5.s
+; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p0.s, p0.s, p4.s
+; CHECK-NEXT:    whilelo p4.b, xzr, x9
+; CHECK-NEXT:    cmp x8, #1
+; CHECK-NEXT:    uzp1 p6.h, p6.h, p2.h
+; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    uzp1 p0.h, p0.h, p5.h
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    sel p2.b, p3, p3.b, p4.b
+; CHECK-NEXT:    uzp1 p3.b, p0.b, p6.b
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT:    whilelo p4.b, xzr, x8
 ; CHECK-NEXT:    whilewr p0.b, x0, x1
-; CHECK-NEXT:    bic p3.b, p4/z, p4.b, p3.b
+; CHECK-NEXT:    sel p3.b, p3, p3.b, p4.b
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -208,8 +324,6 @@ define <vscale x 32 x i1> @whilewr_16_expand2(ptr %a, ptr %b) {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    str p11, [sp] // 2-byte Spill
-; CHECK-NEXT:    str p10, [sp, #1, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
@@ -220,81 +334,75 @@ define <vscale x 32 x i1> @whilewr_16_expand2(ptr %a, ptr %b) {
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    add x8, x8, x8, lsr #63
 ; CHECK-NEXT:    incb x0, all, mul #2
+; CHECK-NEXT:    add x8, x8, x8, lsr #63
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    asr x8, x8, #1
+; CHECK-NEXT:    sub x9, x1, x0
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
+; CHECK-NEXT:    mov z3.d, z0.d
 ; CHECK-NEXT:    mov z5.d, x8
+; CHECK-NEXT:    add x9, x9, x9, lsr #63
 ; CHECK-NEXT:    incd z1.d
 ; CHECK-NEXT:    incd z2.d, all, mul #2
-; CHECK-NEXT:    incd z4.d, all, mul #4
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z0.d
-; CHECK-NEXT:    mov z3.d, z1.d
-; CHECK-NEXT:    mov z6.d, z2.d
-; CHECK-NEXT:    mov z7.d, z1.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z4.d
+; CHECK-NEXT:    incd z3.d, all, mul #4
+; CHECK-NEXT:    cmphi p2.d, p0/z, z5.d, z0.d
+; CHECK-NEXT:    asr x9, x9, #1
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    mov z6.d, z1.d
+; CHECK-NEXT:    mov z7.d, z2.d
+; CHECK-NEXT:    cmphi p1.d, p0/z, z5.d, z1.d
+; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z3.d
 ; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z2.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z1.d
-; CHECK-NEXT:    incd z3.d, all, mul #2
+; CHECK-NEXT:    incd z4.d, all, mul #2
 ; CHECK-NEXT:    incd z6.d, all, mul #4
 ; CHECK-NEXT:    incd z7.d, all, mul #4
-; CHECK-NEXT:    uzp1 p6.s, p7.s, p6.s
-; CHECK-NEXT:    mov z24.d, z3.d
-; CHECK-NEXT:    cmphi p2.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z3.d
+; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
+; CHECK-NEXT:    mov z24.d, z4.d
+; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z6.d
+; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z4.d
+; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z7.d
 ; CHECK-NEXT:    incd z24.d, all, mul #4
-; CHECK-NEXT:    uzp1 p3.s, p3.s, p8.s
-; CHECK-NEXT:    uzp1 p5.s, p5.s, p9.s
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z24.d
+; CHECK-NEXT:    uzp1 p2.s, p3.s, p4.s
+; CHECK-NEXT:    uzp1 p3.s, p5.s, p6.s
+; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z24.d
+; CHECK-NEXT:    mov z5.d, x9
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p5.h, p6.h, p5.h
+; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
 ; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z24.d
+; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z7.d
+; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z6.d
+; CHECK-NEXT:    uzp1 p7.s, p7.s, p8.s
+; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z3.d
+; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z4.d
+; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z2.d
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p2.s, p2.s, p4.s
-; CHECK-NEXT:    whilelo p1.b, xzr, x8
-; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    uzp1 p2.h, p3.h, p2.h
-; CHECK-NEXT:    add x8, x8, x8, lsr #63
-; CHECK-NEXT:    cset w9, ls
-; CHECK-NEXT:    uzp1 p2.b, p5.b, p2.b
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    asr x8, x8, #1
-; CHECK-NEXT:    mov z5.d, x8
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z24.d
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p10.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z3.d
-; CHECK-NEXT:    cmphi p11.d, p0/z, z5.d, z2.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z1.d
+; CHECK-NEXT:    uzp1 p2.h, p2.h, p7.h
+; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z1.d
 ; CHECK-NEXT:    cmphi p0.d, p0/z, z5.d, z0.d
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p7.s, p9.s, p7.s
-; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    uzp1 p4.s, p5.s, p4.s
+; CHECK-NEXT:    uzp1 p5.s, p9.s, p6.s
 ; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p8.s, p10.s, p8.s
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    ldr p10, [sp, #1, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p6.s, p11.s, p6.s
-; CHECK-NEXT:    ldr p11, [sp] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.s, p0.s, p4.s
-; CHECK-NEXT:    uzp1 p4.h, p8.h, p7.h
+; CHECK-NEXT:    whilelo p6.b, xzr, x8
+; CHECK-NEXT:    uzp1 p3.s, p8.s, p3.s
+; CHECK-NEXT:    cmp x9, #1
 ; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.h, p0.h, p6.h
+; CHECK-NEXT:    uzp1 p0.s, p0.s, p7.s
+; CHECK-NEXT:    cset w8, lt
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p4.h, p5.h, p4.h
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p0.h, p0.h, p3.h
+; CHECK-NEXT:    uzp1 p1.b, p1.b, p2.b
+; CHECK-NEXT:    uzp1 p2.b, p0.b, p4.b
+; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    whilelo p3.b, xzr, x8
-; CHECK-NEXT:    sbfx x8, x9, #0, #1
+; CHECK-NEXT:    sel p0.b, p1, p1.b, p6.b
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.b, p0.b, p4.b
-; CHECK-NEXT:    whilelo p4.b, xzr, x8
-; CHECK-NEXT:    mov p3.b, p0/m, p0.b
-; CHECK-NEXT:    sel p0.b, p2, p2.b, p1.b
-; CHECK-NEXT:    bic p1.b, p3/z, p3.b, p4.b
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
+; CHECK-NEXT:    sel p1.b, p2, p2.b, p3.b
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -404,7 +512,6 @@ define <vscale x 32 x i1> @whilewr_32_expand3(ptr %a, ptr %b) {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    str p11, [sp] // 2-byte Spill
 ; CHECK-NEXT:    str p10, [sp, #1, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Spill
@@ -428,70 +535,65 @@ define <vscale x 32 x i1> @whilewr_32_expand3(ptr %a, ptr %b) {
 ; CHECK-NEXT:    incd z1.d
 ; CHECK-NEXT:    incd z2.d, all, mul #2
 ; CHECK-NEXT:    incd z4.d, all, mul #4
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z0.d
+; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z0.d
 ; CHECK-NEXT:    mov z3.d, z1.d
 ; CHECK-NEXT:    mov z6.d, z2.d
 ; CHECK-NEXT:    mov z7.d, z1.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z2.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z1.d
+; CHECK-NEXT:    cmphi p2.d, p0/z, z5.d, z4.d
+; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z2.d
+; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z1.d
 ; CHECK-NEXT:    incd z3.d, all, mul #2
 ; CHECK-NEXT:    incd z6.d, all, mul #4
 ; CHECK-NEXT:    incd z7.d, all, mul #4
-; CHECK-NEXT:    uzp1 p6.s, p7.s, p6.s
+; CHECK-NEXT:    uzp1 p4.s, p5.s, p4.s
 ; CHECK-NEXT:    mov z24.d, z3.d
-; CHECK-NEXT:    cmphi p2.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z3.d
+; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z6.d
+; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z7.d
+; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z3.d
 ; CHECK-NEXT:    incd z24.d, all, mul #4
+; CHECK-NEXT:    uzp1 p2.s, p2.s, p7.s
 ; CHECK-NEXT:    uzp1 p3.s, p3.s, p8.s
-; CHECK-NEXT:    uzp1 p5.s, p5.s, p9.s
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z24.d
+; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z24.d
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p5.h, p6.h, p5.h
+; CHECK-NEXT:    uzp1 p3.h, p4.h, p3.h
 ; CHECK-NEXT:    cset w8, lt
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p2.s, p2.s, p4.s
+; CHECK-NEXT:    uzp1 p6.s, p6.s, p9.s
 ; CHECK-NEXT:    whilelo p1.b, xzr, x8
 ; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    uzp1 p2.h, p3.h, p2.h
+; CHECK-NEXT:    uzp1 p2.h, p2.h, p6.h
 ; CHECK-NEXT:    add x9, x8, #3
 ; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    cset w9, ls
-; CHECK-NEXT:    uzp1 p2.b, p5.b, p2.b
+; CHECK-NEXT:    uzp1 p2.b, p3.b, p2.b
 ; CHECK-NEXT:    asr x8, x8, #2
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    mov z5.d, x8
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z24.d
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z6.d
+; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z24.d
+; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z6.d
 ; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p10.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z3.d
-; CHECK-NEXT:    cmphi p11.d, p0/z, z5.d, z2.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z1.d
+; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z4.d
+; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z3.d
+; CHECK-NEXT:    cmphi p10.d, p0/z, z5.d, z2.d
+; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z1.d
 ; CHECK-NEXT:    cmphi p0.d, p0/z, z5.d, z0.d
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p7.s, p9.s, p7.s
+; CHECK-NEXT:    uzp1 p5.s, p7.s, p5.s
 ; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p8.s, p10.s, p8.s
+; CHECK-NEXT:    uzp1 p7.s, p9.s, p8.s
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p4.s, p10.s, p4.s
 ; CHECK-NEXT:    ldr p10, [sp, #1, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p6.s, p11.s, p6.s
-; CHECK-NEXT:    ldr p11, [sp] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.s, p0.s, p4.s
-; CHECK-NEXT:    uzp1 p4.h, p8.h, p7.h
+; CHECK-NEXT:    uzp1 p0.s, p0.s, p6.s
 ; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.h, p0.h, p6.h
+; CHECK-NEXT:    uzp1 p5.h, p7.h, p5.h
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT:    whilelo p3.b, xzr, x8
-; CHECK-NEXT:    sbfx x8, x9, #0, #1
+; CHECK-NEXT:    uzp1 p0.h, p0.h, p4.h
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.b, p0.b, p4.b
 ; CHECK-NEXT:    whilelo p4.b, xzr, x8
-; CHECK-NEXT:    mov p3.b, p0/m, p0.b
+; CHECK-NEXT:    uzp1 p3.b, p0.b, p5.b
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    sel p0.b, p2, p2.b, p1.b
-; CHECK-NEXT:    bic p1.b, p3/z, p3.b, p4.b
+; CHECK-NEXT:    sel p1.b, p3, p3.b, p4.b
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -628,7 +730,6 @@ define <vscale x 32 x i1> @whilewr_64_expand4(ptr %a, ptr %b) {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    str p11, [sp] // 2-byte Spill
 ; CHECK-NEXT:    str p10, [sp, #1, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Spill
@@ -652,70 +753,65 @@ define <vscale x 32 x i1> @whilewr_64_expand4(ptr %a, ptr %b) {
 ; CHECK-NEXT:    incd z1.d
 ; CHECK-NEXT:    incd z2.d, all, mul #2
 ; CHECK-NEXT:    incd z4.d, all, mul #4
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z0.d
+; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z0.d
 ; CHECK-NEXT:    mov z3.d, z1.d
 ; CHECK-NEXT:    mov z6.d, z2.d
 ; CHECK-NEXT:    mov z7.d, z1.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z2.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z1.d
+; CHECK-NEXT:    cmphi p2.d, p0/z, z5.d, z4.d
+; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z2.d
+; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z1.d
 ; CHECK-NEXT:    incd z3.d, all, mul #2
 ; CHECK-NEXT:    incd z6.d, all, mul #4
 ; CHECK-NEXT:    incd z7.d, all, mul #4
-; CHECK-NEXT:    uzp1 p6.s, p7.s, p6.s
+; CHECK-NEXT:    uzp1 p4.s, p5.s, p4.s
 ; CHECK-NEXT:    mov z24.d, z3.d
-; CHECK-NEXT:    cmphi p2.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z3.d
+; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z6.d
+; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z7.d
+; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z3.d
 ; CHECK-NEXT:    incd z24.d, all, mul #4
+; CHECK-NEXT:    uzp1 p2.s, p2.s, p7.s
 ; CHECK-NEXT:    uzp1 p3.s, p3.s, p8.s
-; CHECK-NEXT:    uzp1 p5.s, p5.s, p9.s
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z24.d
+; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z24.d
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p5.h, p6.h, p5.h
+; CHECK-NEXT:    uzp1 p3.h, p4.h, p3.h
 ; CHECK-NEXT:    cset w8, lt
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p2.s, p2.s, p4.s
+; CHECK-NEXT:    uzp1 p6.s, p6.s, p9.s
 ; CHECK-NEXT:    whilelo p1.b, xzr, x8
 ; CHECK-NEXT:    subs x8, x1, x9
-; CHECK-NEXT:    uzp1 p2.h, p3.h, p2.h
+; CHECK-NEXT:    uzp1 p2.h, p2.h, p6.h
 ; CHECK-NEXT:    add x9, x8, #7
 ; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    cset w9, ls
-; CHECK-NEXT:    uzp1 p2.b, p5.b, p2.b
+; CHECK-NEXT:    uzp1 p2.b, p3.b, p2.b
 ; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    mov z5.d, x8
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z24.d
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z6.d
+; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z24.d
+; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z6.d
 ; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p10.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z3.d
-; CHECK-NEXT:    cmphi p11.d, p0/z, z5.d, z2.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z1.d
+; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z4.d
+; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z3.d
+; CHECK-NEXT:    cmphi p10.d, p0/z, z5.d, z2.d
+; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z1.d
 ; CHECK-NEXT:    cmphi p0.d, p0/z, z5.d, z0.d
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p7.s, p9.s, p7.s
+; CHECK-NEXT:    uzp1 p5.s, p7.s, p5.s
 ; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p8.s, p10.s, p8.s
+; CHECK-NEXT:    uzp1 p7.s, p9.s, p8.s
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p4.s, p10.s, p4.s
 ; CHECK-NEXT:    ldr p10, [sp, #1, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p6.s, p11.s, p6.s
-; CHECK-NEXT:    ldr p11, [sp] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.s, p0.s, p4.s
-; CHECK-NEXT:    uzp1 p4.h, p8.h, p7.h
+; CHECK-NEXT:    uzp1 p0.s, p0.s, p6.s
 ; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.h, p0.h, p6.h
+; CHECK-NEXT:    uzp1 p5.h, p7.h, p5.h
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT:    whilelo p3.b, xzr, x8
-; CHECK-NEXT:    sbfx x8, x9, #0, #1
+; CHECK-NEXT:    uzp1 p0.h, p0.h, p4.h
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.b, p0.b, p4.b
 ; CHECK-NEXT:    whilelo p4.b, xzr, x8
-; CHECK-NEXT:    mov p3.b, p0/m, p0.b
+; CHECK-NEXT:    uzp1 p3.b, p0.b, p5.b
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    sel p0.b, p2, p2.b, p1.b
-; CHECK-NEXT:    bic p1.b, p3/z, p3.b, p4.b
+; CHECK-NEXT:    sel p1.b, p3, p3.b, p4.b
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload

>From 353e4ba1eae7b7118c9c8ae445e453d57ef9ab00 Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Tue, 12 Aug 2025 14:25:36 +0100
Subject: [PATCH 3/3] [AArch64] Split large loop dependence masks

This PR adds splitting of the LOOP_DEPENDENCE_MASK nodes in the AArch64
backend so that even large vector types can be lowered to whilewr/whilerw.
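
For example (a sketch of the effect, using the war mask from the updated
alias_mask_scalable.ll tests), a mask twice the width of a single SVE
predicate, such as

  %0 = call <vscale x 32 x i1> @llvm.loop.dependence.war.mask.nxv32i1(ptr %a, ptr %b, i64 1)

is now lowered as two whilewr operations, with a brkpb guard that zeroes the
high half whenever the final lane of the low half is inactive.
---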
---
 .../SelectionDAG/LegalizeVectorTypes.cpp      |   4 +
 .../Target/AArch64/AArch64ISelLowering.cpp    | 135 ++-
 llvm/test/CodeGen/AArch64/alias_mask.ll       | 839 ++++++++++--------
 .../CodeGen/AArch64/alias_mask_scalable.ll    | 825 ++++++++---------
 4 files changed, 985 insertions(+), 818 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index bcfb32b6d09f7..0dbdd19fe314e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -4012,6 +4012,10 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N) {
     report_fatal_error("Don't know how to extract fixed-width predicate "
                        "subvector from a scalable predicate vector");
 
+  // Don't create a stack temporary if the result is unused.
+  if (!N->hasAnyUseOfValue(0))
+    return DAG.getPOISON(SubVT);
+
   // Spill the vector to the stack. We should use the alignment for
   // the smallest part.
   SDValue Vec = N->getOperand(0);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 0abf6560ad1e1..4c388ed5d9d29 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1933,9 +1933,12 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
   if (Subtarget->hasSVE2() ||
       (Subtarget->hasSME() && Subtarget->isStreaming())) {
     // FIXME: Support wider fixed-length types when msve-vector-bits is used.
-    for (auto VT : {MVT::v2i32, MVT::v4i16, MVT::v8i8, MVT::v16i8}) {
-      setOperationAction(ISD::LOOP_DEPENDENCE_RAW_MASK, VT, Custom);
-      setOperationAction(ISD::LOOP_DEPENDENCE_WAR_MASK, VT, Custom);
+    for (auto Elts : {2, 4, 8, 16}) {
+      for (auto EltVT : {MVT::i8, MVT::i16, MVT::i32}) {
+        MVT VT = MVT::getVectorVT(EltVT, Elts);
+        setOperationAction(ISD::LOOP_DEPENDENCE_RAW_MASK, VT, Custom);
+        setOperationAction(ISD::LOOP_DEPENDENCE_WAR_MASK, VT, Custom);
+      }
     }
     for (auto VT : {MVT::nxv2i1, MVT::nxv4i1, MVT::nxv8i1, MVT::nxv16i1}) {
       setOperationAction(ISD::LOOP_DEPENDENCE_RAW_MASK, VT, Custom);
@@ -5359,33 +5362,25 @@ SDValue AArch64TargetLowering::LowerINT_TO_FP(SDValue Op,
 
 static MVT getSVEContainerType(EVT ContentTy);
 
+static inline SDValue getPTrue(SelectionDAG &DAG, SDLoc DL, EVT VT,
+                               int Pattern);
+
+static SDValue getSVEPredicateBitCast(EVT VT, SDValue Op, SelectionDAG &DAG);
+
 SDValue
 AArch64TargetLowering::LowerLOOP_DEPENDENCE_MASK(SDValue Op,
                                                  SelectionDAG &DAG) const {
   SDLoc DL(Op);
-  uint64_t EltSize = Op.getConstantOperandVal(2);
+  assert((Subtarget->hasSVE2() ||
+          (Subtarget->hasSME() && Subtarget->isStreaming())) &&
+         "Lowering loop_dependence_raw_mask or loop_dependence_war_mask "
+         "requires SVE or SME");
+
+  LLVMContext &Ctx = *DAG.getContext();
   EVT VT = Op.getValueType();
-  switch (EltSize) {
-  case 1:
-    if (VT != MVT::v16i8 && VT != MVT::nxv16i1)
-      return SDValue();
-    break;
-  case 2:
-    if (VT != MVT::v8i8 && VT != MVT::nxv8i1)
-      return SDValue();
-    break;
-  case 4:
-    if (VT != MVT::v4i16 && VT != MVT::nxv4i1)
-      return SDValue();
-    break;
-  case 8:
-    if (VT != MVT::v2i32 && VT != MVT::nxv2i1)
-      return SDValue();
-    break;
-  default:
-    // Other element sizes are incompatible with whilewr/rw, so expand instead
-    return SDValue();
-  }
+  unsigned MaskOpcode = Op.getOpcode();
+  unsigned NumElements = VT.getVectorMinNumElements();
+  uint64_t EltSizeInBytes = Op.getConstantOperandVal(2);
 
   // TODO: Support split masks
   unsigned LaneOffset = Op.getConstantOperandVal(3);
@@ -5394,25 +5389,83 @@ AArch64TargetLowering::LowerLOOP_DEPENDENCE_MASK(SDValue Op,
 
   SDValue PtrA = Op.getOperand(0);
   SDValue PtrB = Op.getOperand(1);
+  SDValue EltSizeInBytesValue = Op.getOperand(2);
 
-  if (VT.isScalableVT())
-    return DAG.getNode(Op.getOpcode(), DL, VT, PtrA, PtrB, Op.getOperand(2),
-                       Op.getOperand(3));
+  // Other element sizes are incompatible with whilewr/rw, so expand instead
+  if (!is_contained({1u, 2u, 4u, 8u}, EltSizeInBytes))
+    return SDValue();
 
-  // We can use the SVE whilewr/whilerw instruction to lower this
-  // intrinsic by creating the appropriate sequence of scalable vector
-  // operations and then extracting a fixed-width subvector from the scalable
-  // vector. Scalable vector variants are already legal.
-  EVT ContainerVT =
-      EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
-                       VT.getVectorNumElements(), true);
-  EVT WhileVT = ContainerVT.changeElementType(MVT::i1);
+  if (EltSizeInBytes * NumElements < 16) {
+    // The element size and vector length must combine to form at least a
+    // 128-bit vector. Shorter vectors are widened and the result extracted.
+    EVT WideVT = VT.getDoubleNumVectorElementsVT(Ctx);
+    // Re-create the node, but widened.
+    SDValue Widened = DAG.getNode(MaskOpcode, DL, WideVT, Op->ops());
+    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Widened,
+                       DAG.getVectorIdxConstant(0, DL));
+  }
 
-  SDValue Mask = DAG.getNode(Op.getOpcode(), DL, WhileVT, PtrA, PtrB,
-                             Op.getOperand(2), Op.getOperand(3));
-  SDValue MaskAsInt = DAG.getNode(ISD::SIGN_EXTEND, DL, ContainerVT, Mask);
-  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, MaskAsInt,
-                     DAG.getVectorIdxConstant(0, DL));
+  if (!VT.isScalableVT()) {
+    // We can use the SVE whilewr/whilerw instruction to lower this
+    // intrinsic by creating the appropriate sequence of scalable vector
+    // operations and then extracting a fixed-width subvector from the
+    // scalable vector. Scalable vector variants are already legal.
+    EVT ContainerVT = MVT::getScalableVectorVT(
+        VT.getVectorElementType().getSimpleVT(), NumElements);
+    EVT WhileVT = ContainerVT.changeElementType(MVT::i1);
+
+    SDValue Mask = DAG.getNode(MaskOpcode, DL, WhileVT, PtrA, PtrB,
+                               EltSizeInBytesValue, Op.getOperand(3));
+    SDValue MaskAsInt = DAG.getNode(ISD::SIGN_EXTEND, DL, ContainerVT, Mask);
+    return convertFromScalableVector(DAG, VT, MaskAsInt);
+  }
+
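+  // A single whilewr/whilerw covers at most one packed predicate's worth of
+  // lanes for this element size, so wider masks have to be split in two.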
+  EVT EltVT = MVT::getIntegerVT(EltSizeInBytes * 8);
+  unsigned PredElements = getPackedSVEVectorVT(EltVT).getVectorMinNumElements();
+  bool NeedsSplit = NumElements > PredElements;
+  if (!NeedsSplit)
+    return Op;
+
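+  // The high half starts where the low half ends, i.e. the low half's lane
+  // count scaled by the element size in bytes.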
+  EVT PartVT = VT.getHalfNumVectorElementsVT(Ctx);
+  TypeSize PartOffset =
+      TypeSize::get(PartVT.getVectorMinNumElements(), PartVT.isScalableVT()) *
+      EltSizeInBytes;
+
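+  // Both halves keep a lane offset of zero; the high half is instead formed
+  // by advancing PtrA by the low half's size in bytes.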
+  SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
+  SDValue Low = DAG.getNode(MaskOpcode, DL, PartVT, PtrA, PtrB,
+                            EltSizeInBytesValue, Zero);
+  SDValue High = DAG.getNode(MaskOpcode, DL, PartVT,
+                             DAG.getMemBasePlusOffset(PtrA, PartOffset, DL),
+                             PtrB, EltSizeInBytesValue, Zero);
+
+  // Split the loop dependence mask.
+  // This is done by creating a high and a low mask, each covering half the
+  // vector length. A BRKPB of the inverted high mask, carrying the break
+  // state in from the low mask, is needed to guarantee that the high mask is
+  // safe. Simply producing the high mask without this guard is unsafe when
+  // the difference between the two pointers is less than half the vector
+  // length, e.g. ptrA = 0 and ptrB = 3 when the vector length is 32:
+  //     The full 32xi1 mask should have three active lanes and the rest
+  //     inactive. However, once half the vector length is added to ptrA to
+  //     produce the high mask, the difference between ptrA and ptrB becomes
+  //     -13, which would result in a mask with all lanes active. The BRKPB
+  //     guards against this by producing a mask of all inactive lanes when
+  //     the final element of the low mask is inactive, and the correct high
+  //     mask otherwise.
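+  // (BRKPB produces an all-active prefix ending just before the first active
+  // lane of its second predicate operand, and an all-inactive result when the
+  // last lane of its first operand is inactive; inverting the high mask first
+  // makes its first *inactive* lane the break point, so the prefix rebuilds
+  // the original high mask.)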
+  High = DAG.getNOT(DL, High, PartVT);
+  High = DAG.getNode(
+      ISD::INTRINSIC_WO_CHAIN, DL, MVT::nxv16i1,
+      {DAG.getConstant(Intrinsic::aarch64_sve_brkpb_z, DL, MVT::i64),
+       getPTrue(DAG, DL, MVT::nxv16i1, AArch64SVEPredPattern::all),
+       getSVEPredicateBitCast(MVT::nxv16i1, Low, DAG),
+       getSVEPredicateBitCast(MVT::nxv16i1, High, DAG)});
+  High = getSVEPredicateBitCast(PartVT, High, DAG);
+  SDValue Inserted =
+      DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getPOISON(VT), Low,
+                  DAG.getVectorIdxConstant(0, DL));
+  return DAG.getNode(
+      ISD::INSERT_SUBVECTOR, DL, VT, Inserted, High,
+      DAG.getVectorIdxConstant(PartVT.getVectorMinNumElements(), DL));
 }
 
 SDValue AArch64TargetLowering::LowerBITCAST(SDValue Op,
diff --git a/llvm/test/CodeGen/AArch64/alias_mask.ll b/llvm/test/CodeGen/AArch64/alias_mask.ll
index 1ec6eeded90cd..985ba136e8d3f 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask.ll
@@ -97,8 +97,8 @@ entry:
   ret <2 x i1> %0
 }
 
-define <32 x i1> @whilewr_8_split(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_8_split:
+define <32 x i1> @whilewr_8_expand_high(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_8_expand_high:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    sub x9, x1, x0
@@ -161,8 +161,8 @@ entry:
   ret <32 x i1> %0
 }
 
-define <64 x i1> @whilewr_8_split2(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_8_split2:
+define <64 x i1> @whilewr_8_expand_high2(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_8_expand_high2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    sub x11, x1, x0
@@ -281,495 +281,494 @@ entry:
   ret <64 x i1> %0
 }
 
-define <16 x i1> @whilewr_16_expand(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_16_expand:
+define <16 x i1> @whilewr_16_split(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_16_split:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    add x8, x8, x8, lsr #63
-; CHECK-NEXT:    asr x8, x8, #1
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z6.d, z0.d
-; CHECK-NEXT:    mov z7.d, z0.d
-; CHECK-NEXT:    mov z16.d, z0.d
-; CHECK-NEXT:    dup v3.2d, x8
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
-; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
-; CHECK-NEXT:    add z4.d, z4.d, #8 // =0x8
-; CHECK-NEXT:    add z5.d, z5.d, #6 // =0x6
-; CHECK-NEXT:    add z6.d, z6.d, #4 // =0x4
-; CHECK-NEXT:    add z7.d, z7.d, #2 // =0x2
-; CHECK-NEXT:    add z16.d, z16.d, #14 // =0xe
-; CHECK-NEXT:    cmhi v0.2d, v3.2d, v0.2d
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    cmhi v1.2d, v3.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v3.2d, v2.2d
-; CHECK-NEXT:    cmhi v4.2d, v3.2d, v4.2d
-; CHECK-NEXT:    cmhi v5.2d, v3.2d, v5.2d
-; CHECK-NEXT:    cmhi v6.2d, v3.2d, v6.2d
-; CHECK-NEXT:    cmhi v16.2d, v3.2d, v16.2d
-; CHECK-NEXT:    cmhi v3.2d, v3.2d, v7.2d
-; CHECK-NEXT:    uzp1 v2.4s, v4.4s, v2.4s
-; CHECK-NEXT:    uzp1 v4.4s, v6.4s, v5.4s
-; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v16.4s
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v3.4s
-; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v4.8h
-; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
-; CHECK-NEXT:    dup v1.16b, w8
-; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    whilewr p1.h, x0, x1
+; CHECK-NEXT:    incb x0
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    whilewr p3.h, x0, x1
+; CHECK-NEXT:    and p2.b, p1/z, p1.b, p0.b
+; CHECK-NEXT:    not p3.b, p0/z, p3.b
+; CHECK-NEXT:    and p0.b, p3/z, p3.b, p0.b
+; CHECK-NEXT:    ptrue p3.b
+; CHECK-NEXT:    brkpb p0.b, p3/z, p2.b, p0.b
+; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
+; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 2)
   ret <16 x i1> %0
 }
 
-define <32 x i1> @whilewr_16_expand2(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_16_expand2:
+define <32 x i1> @whilewr_16_expand_high(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_16_expand_high:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub x9, x1, x0
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    sub x10, x9, #32
+; CHECK-NEXT:    sub x9, x1, x0
+; CHECK-NEXT:    ptrue p1.h
+; CHECK-NEXT:    sub x9, x9, #32
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    add x9, x9, x9, lsr #63
-; CHECK-NEXT:    add x10, x10, x10, lsr #63
-; CHECK-NEXT:    asr x9, x9, #1
-; CHECK-NEXT:    asr x10, x10, #1
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    mov z2.d, z0.d
 ; CHECK-NEXT:    mov z3.d, z0.d
 ; CHECK-NEXT:    mov z4.d, z0.d
 ; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z6.d, z0.d
-; CHECK-NEXT:    dup v7.2d, x9
-; CHECK-NEXT:    dup v16.2d, x10
-; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
+; CHECK-NEXT:    mov z7.d, z0.d
+; CHECK-NEXT:    mov z16.d, z0.d
+; CHECK-NEXT:    asr x9, x9, #1
 ; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
-; CHECK-NEXT:    cmp x10, #1
 ; CHECK-NEXT:    add z3.d, z3.d, #8 // =0x8
+; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
+; CHECK-NEXT:    dup v6.2d, x9
 ; CHECK-NEXT:    add z4.d, z4.d, #6 // =0x6
 ; CHECK-NEXT:    add z5.d, z5.d, #4 // =0x4
-; CHECK-NEXT:    add z6.d, z6.d, #2 // =0x2
-; CHECK-NEXT:    cmhi v17.2d, v7.2d, v0.2d
-; CHECK-NEXT:    cmhi v18.2d, v16.2d, v0.2d
-; CHECK-NEXT:    add z0.d, z0.d, #14 // =0xe
-; CHECK-NEXT:    cmhi v19.2d, v7.2d, v1.2d
-; CHECK-NEXT:    cmhi v20.2d, v7.2d, v2.2d
-; CHECK-NEXT:    cmhi v21.2d, v7.2d, v3.2d
-; CHECK-NEXT:    cmhi v22.2d, v7.2d, v4.2d
-; CHECK-NEXT:    cmhi v23.2d, v7.2d, v5.2d
-; CHECK-NEXT:    cmhi v24.2d, v7.2d, v6.2d
-; CHECK-NEXT:    cmhi v1.2d, v16.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v16.2d, v2.2d
-; CHECK-NEXT:    cmhi v3.2d, v16.2d, v3.2d
-; CHECK-NEXT:    cmhi v4.2d, v16.2d, v4.2d
-; CHECK-NEXT:    cmhi v7.2d, v7.2d, v0.2d
-; CHECK-NEXT:    cmhi v5.2d, v16.2d, v5.2d
-; CHECK-NEXT:    cmhi v6.2d, v16.2d, v6.2d
-; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    cmhi v0.2d, v16.2d, v0.2d
-; CHECK-NEXT:    uzp1 v16.4s, v21.4s, v20.4s
+; CHECK-NEXT:    add z7.d, z7.d, #2 // =0x2
+; CHECK-NEXT:    add z16.d, z16.d, #14 // =0xe
 ; CHECK-NEXT:    cmp x9, #1
-; CHECK-NEXT:    uzp1 v20.4s, v23.4s, v22.4s
-; CHECK-NEXT:    uzp1 v17.4s, v17.4s, v24.4s
 ; CHECK-NEXT:    cset w9, lt
+; CHECK-NEXT:    whilewr p0.h, x0, x1
+; CHECK-NEXT:    incb x0
+; CHECK-NEXT:    cmhi v2.2d, v6.2d, v2.2d
+; CHECK-NEXT:    cmhi v3.2d, v6.2d, v3.2d
+; CHECK-NEXT:    cmhi v0.2d, v6.2d, v0.2d
+; CHECK-NEXT:    cmhi v1.2d, v6.2d, v1.2d
+; CHECK-NEXT:    cmhi v4.2d, v6.2d, v4.2d
+; CHECK-NEXT:    cmhi v5.2d, v6.2d, v5.2d
+; CHECK-NEXT:    cmhi v7.2d, v6.2d, v7.2d
+; CHECK-NEXT:    cmhi v6.2d, v6.2d, v16.2d
+; CHECK-NEXT:    whilewr p2.h, x0, x1
 ; CHECK-NEXT:    uzp1 v2.4s, v3.4s, v2.4s
-; CHECK-NEXT:    uzp1 v3.4s, v19.4s, v7.4s
-; CHECK-NEXT:    uzp1 v4.4s, v5.4s, v4.4s
-; CHECK-NEXT:    uzp1 v5.4s, v18.4s, v6.4s
-; CHECK-NEXT:    uzp1 v0.4s, v1.4s, v0.4s
-; CHECK-NEXT:    uzp1 v1.8h, v17.8h, v20.8h
-; CHECK-NEXT:    uzp1 v3.8h, v16.8h, v3.8h
-; CHECK-NEXT:    uzp1 v4.8h, v5.8h, v4.8h
-; CHECK-NEXT:    uzp1 v0.8h, v2.8h, v0.8h
-; CHECK-NEXT:    dup v2.16b, w9
+; CHECK-NEXT:    uzp1 v3.4s, v5.4s, v4.4s
+; CHECK-NEXT:    not p2.b, p1/z, p2.b
+; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v7.4s
+; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v6.4s
+; CHECK-NEXT:    and p2.b, p2/z, p2.b, p1.b
+; CHECK-NEXT:    and p1.b, p0/z, p0.b, p1.b
+; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v3.8h
+; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
+; CHECK-NEXT:    brkpb p1.b, p3/z, p1.b, p2.b
+; CHECK-NEXT:    uzp1 p0.b, p0.b, p1.b
+; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    dup v1.16b, w9
 ; CHECK-NEXT:    adrp x9, .LCPI11_0
-; CHECK-NEXT:    uzp1 v1.16b, v1.16b, v3.16b
-; CHECK-NEXT:    dup v3.16b, w10
-; CHECK-NEXT:    uzp1 v0.16b, v4.16b, v0.16b
-; CHECK-NEXT:    orr v1.16b, v1.16b, v2.16b
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI11_0]
-; CHECK-NEXT:    orr v0.16b, v0.16b, v3.16b
-; CHECK-NEXT:    shl v1.16b, v1.16b, #7
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    mov z1.b, p0/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
-; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
+; CHECK-NEXT:    shl v1.16b, v1.16b, #7
 ; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
-; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
 ; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    ext v2.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ext v3.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    zip1 v1.16b, v1.16b, v2.16b
-; CHECK-NEXT:    zip1 v0.16b, v0.16b, v3.16b
-; CHECK-NEXT:    addv h1, v1.8h
+; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    zip1 v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    zip1 v1.16b, v1.16b, v3.16b
 ; CHECK-NEXT:    addv h0, v0.8h
-; CHECK-NEXT:    str h1, [x8]
+; CHECK-NEXT:    addv h1, v1.8h
 ; CHECK-NEXT:    str h0, [x8, #2]
+; CHECK-NEXT:    str h1, [x8]
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <32 x i1> @llvm.loop.dependence.war.mask.v32i1(ptr %a, ptr %b, i64 2)
   ret <32 x i1> %0
 }
 
-define <8 x i1> @whilewr_32_expand(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_32_expand:
+define <8 x i1> @whilewr_32_split(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_32_split:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    add x9, x8, #3
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    asr x8, x8, #2
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    dup v1.2d, x8
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    add z4.d, z4.d, #6 // =0x6
-; CHECK-NEXT:    add z2.d, z2.d, #4 // =0x4
-; CHECK-NEXT:    add z3.d, z3.d, #2 // =0x2
-; CHECK-NEXT:    cmhi v0.2d, v1.2d, v0.2d
-; CHECK-NEXT:    cmhi v4.2d, v1.2d, v4.2d
-; CHECK-NEXT:    cmhi v2.2d, v1.2d, v2.2d
-; CHECK-NEXT:    cmhi v1.2d, v1.2d, v3.2d
-; CHECK-NEXT:    uzp1 v2.4s, v2.4s, v4.4s
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    dup v1.8b, w8
-; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    whilewr p1.s, x0, x1
+; CHECK-NEXT:    incb x0
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    whilewr p3.s, x0, x1
+; CHECK-NEXT:    and p2.b, p1/z, p1.b, p0.b
+; CHECK-NEXT:    not p3.b, p0/z, p3.b
+; CHECK-NEXT:    and p0.b, p3/z, p3.b, p0.b
+; CHECK-NEXT:    ptrue p3.b
+; CHECK-NEXT:    brkpb p0.b, p3/z, p2.b, p0.b
+; CHECK-NEXT:    uzp1 p0.h, p1.h, p0.h
+; CHECK-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    xtn v0.8b, v0.8h
-; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <8 x i1> @llvm.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 4)
   ret <8 x i1> %0
 }
 
-define <16 x i1> @whilewr_32_expand2(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_32_expand2:
+define <16 x i1> @whilewr_32_split2(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_32_split2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    add x9, x8, #3
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    asr x8, x8, #2
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z6.d, z0.d
-; CHECK-NEXT:    mov z7.d, z0.d
-; CHECK-NEXT:    mov z16.d, z0.d
-; CHECK-NEXT:    dup v3.2d, x8
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
-; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
-; CHECK-NEXT:    add z4.d, z4.d, #8 // =0x8
-; CHECK-NEXT:    add z5.d, z5.d, #6 // =0x6
-; CHECK-NEXT:    add z6.d, z6.d, #4 // =0x4
-; CHECK-NEXT:    add z7.d, z7.d, #2 // =0x2
-; CHECK-NEXT:    add z16.d, z16.d, #14 // =0xe
-; CHECK-NEXT:    cmhi v0.2d, v3.2d, v0.2d
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    cmhi v1.2d, v3.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v3.2d, v2.2d
-; CHECK-NEXT:    cmhi v4.2d, v3.2d, v4.2d
-; CHECK-NEXT:    cmhi v5.2d, v3.2d, v5.2d
-; CHECK-NEXT:    cmhi v6.2d, v3.2d, v6.2d
-; CHECK-NEXT:    cmhi v16.2d, v3.2d, v16.2d
-; CHECK-NEXT:    cmhi v3.2d, v3.2d, v7.2d
-; CHECK-NEXT:    uzp1 v2.4s, v4.4s, v2.4s
-; CHECK-NEXT:    uzp1 v4.4s, v6.4s, v5.4s
-; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v16.4s
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v3.4s
-; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v4.8h
-; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
-; CHECK-NEXT:    dup v1.16b, w8
-; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    addvl x8, x0, #3
+; CHECK-NEXT:    whilewr p2.s, x0, x1
+; CHECK-NEXT:    whilewr p1.s, x8, x1
+; CHECK-NEXT:    mov x8, x0
+; CHECK-NEXT:    incb x0, all, mul #2
+; CHECK-NEXT:    incb x8
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p3.b
+; CHECK-NEXT:    whilewr p6.s, x0, x1
+; CHECK-NEXT:    whilewr p5.s, x8, x1
+; CHECK-NEXT:    not p1.b, p0/z, p1.b
+; CHECK-NEXT:    not p5.b, p0/z, p5.b
+; CHECK-NEXT:    and p4.b, p2/z, p2.b, p0.b
+; CHECK-NEXT:    and p5.b, p5/z, p5.b, p0.b
+; CHECK-NEXT:    and p7.b, p6/z, p6.b, p0.b
+; CHECK-NEXT:    and p0.b, p1/z, p1.b, p0.b
+; CHECK-NEXT:    brkpb p1.b, p3/z, p4.b, p5.b
+; CHECK-NEXT:    brkpb p0.b, p3/z, p7.b, p0.b
+; CHECK-NEXT:    ptrue p4.h
+; CHECK-NEXT:    uzp1 p0.h, p6.h, p0.h
+; CHECK-NEXT:    uzp1 p1.h, p2.h, p1.h
+; CHECK-NEXT:    not p0.b, p4/z, p0.b
+; CHECK-NEXT:    and p2.b, p1/z, p1.b, p4.b
+; CHECK-NEXT:    and p0.b, p0/z, p0.b, p4.b
+; CHECK-NEXT:    brkpb p0.b, p3/z, p2.b, p0.b
+; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
+; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 4)
   ret <16 x i1> %0
 }
 
-define <32 x i1> @whilewr_32_expand3(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_32_expand3:
+define <32 x i1> @whilewr_32_expand_high(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_32_expand_high:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub x10, x1, x0
+; CHECK-NEXT:    sub x9, x1, x0
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    sub x9, x10, #61
-; CHECK-NEXT:    subs x11, x10, #64
-; CHECK-NEXT:    add x12, x10, #3
-; CHECK-NEXT:    csel x9, x9, x11, mi
-; CHECK-NEXT:    asr x11, x9, #2
-; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    sub x10, x9, #61
+; CHECK-NEXT:    subs x9, x9, #64
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    csel x9, x10, x9, mi
+; CHECK-NEXT:    mov x10, x0
+; CHECK-NEXT:    asr x9, x9, #2
+; CHECK-NEXT:    incb x10
 ; CHECK-NEXT:    mov z2.d, z0.d
 ; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    cmp x11, #1
 ; CHECK-NEXT:    mov z4.d, z0.d
+; CHECK-NEXT:    dup v1.2d, x9
 ; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    cset w9, lt
-; CHECK-NEXT:    cmp x10, #0
 ; CHECK-NEXT:    mov z6.d, z0.d
-; CHECK-NEXT:    csel x10, x12, x10, mi
-; CHECK-NEXT:    dup v7.2d, x11
-; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
-; CHECK-NEXT:    asr x10, x10, #2
-; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
-; CHECK-NEXT:    add z3.d, z3.d, #8 // =0x8
-; CHECK-NEXT:    add z4.d, z4.d, #6 // =0x6
-; CHECK-NEXT:    add z5.d, z5.d, #4 // =0x4
-; CHECK-NEXT:    add z6.d, z6.d, #2 // =0x2
-; CHECK-NEXT:    dup v16.2d, x10
-; CHECK-NEXT:    cmhi v17.2d, v7.2d, v0.2d
-; CHECK-NEXT:    cmhi v19.2d, v7.2d, v1.2d
-; CHECK-NEXT:    cmhi v20.2d, v7.2d, v2.2d
-; CHECK-NEXT:    cmhi v21.2d, v7.2d, v3.2d
-; CHECK-NEXT:    cmp x10, #1
-; CHECK-NEXT:    cmhi v22.2d, v7.2d, v4.2d
-; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    cmhi v18.2d, v16.2d, v0.2d
+; CHECK-NEXT:    mov z7.d, z0.d
+; CHECK-NEXT:    cmp x9, #1
+; CHECK-NEXT:    cset w9, lt
+; CHECK-NEXT:    whilewr p3.s, x10, x1
+; CHECK-NEXT:    addvl x10, x0, #3
+; CHECK-NEXT:    whilewr p2.s, x0, x1
+; CHECK-NEXT:    incb x0, all, mul #2
+; CHECK-NEXT:    add z2.d, z2.d, #12 // =0xc
+; CHECK-NEXT:    add z3.d, z3.d, #10 // =0xa
+; CHECK-NEXT:    add z4.d, z4.d, #8 // =0x8
+; CHECK-NEXT:    cmhi v16.2d, v1.2d, v0.2d
+; CHECK-NEXT:    add z5.d, z5.d, #6 // =0x6
+; CHECK-NEXT:    whilewr p4.s, x10, x1
 ; CHECK-NEXT:    add z0.d, z0.d, #14 // =0xe
-; CHECK-NEXT:    cmhi v1.2d, v16.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v16.2d, v2.2d
-; CHECK-NEXT:    cmhi v3.2d, v16.2d, v3.2d
-; CHECK-NEXT:    cmhi v4.2d, v16.2d, v4.2d
-; CHECK-NEXT:    cmhi v23.2d, v16.2d, v5.2d
-; CHECK-NEXT:    cmhi v24.2d, v16.2d, v6.2d
-; CHECK-NEXT:    cmhi v5.2d, v7.2d, v5.2d
-; CHECK-NEXT:    cmhi v16.2d, v16.2d, v0.2d
-; CHECK-NEXT:    cmhi v6.2d, v7.2d, v6.2d
-; CHECK-NEXT:    cmhi v0.2d, v7.2d, v0.2d
-; CHECK-NEXT:    uzp1 v7.4s, v21.4s, v20.4s
-; CHECK-NEXT:    uzp1 v2.4s, v3.4s, v2.4s
-; CHECK-NEXT:    uzp1 v3.4s, v23.4s, v4.4s
-; CHECK-NEXT:    uzp1 v4.4s, v18.4s, v24.4s
-; CHECK-NEXT:    uzp1 v5.4s, v5.4s, v22.4s
-; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v16.4s
-; CHECK-NEXT:    uzp1 v6.4s, v17.4s, v6.4s
-; CHECK-NEXT:    uzp1 v0.4s, v19.4s, v0.4s
-; CHECK-NEXT:    uzp1 v3.8h, v4.8h, v3.8h
-; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT:    uzp1 v2.8h, v6.8h, v5.8h
-; CHECK-NEXT:    uzp1 v0.8h, v7.8h, v0.8h
-; CHECK-NEXT:    uzp1 v1.16b, v3.16b, v1.16b
-; CHECK-NEXT:    uzp1 v0.16b, v2.16b, v0.16b
-; CHECK-NEXT:    dup v3.16b, w10
-; CHECK-NEXT:    dup v2.16b, w9
+; CHECK-NEXT:    add z6.d, z6.d, #4 // =0x4
+; CHECK-NEXT:    add z7.d, z7.d, #2 // =0x2
+; CHECK-NEXT:    not p3.b, p1/z, p3.b
+; CHECK-NEXT:    whilewr p6.s, x0, x1
+; CHECK-NEXT:    cmhi v2.2d, v1.2d, v2.2d
+; CHECK-NEXT:    cmhi v3.2d, v1.2d, v3.2d
+; CHECK-NEXT:    not p4.b, p1/z, p4.b
+; CHECK-NEXT:    cmhi v0.2d, v1.2d, v0.2d
+; CHECK-NEXT:    cmhi v4.2d, v1.2d, v4.2d
+; CHECK-NEXT:    cmhi v5.2d, v1.2d, v5.2d
+; CHECK-NEXT:    cmhi v6.2d, v1.2d, v6.2d
+; CHECK-NEXT:    cmhi v1.2d, v1.2d, v7.2d
+; CHECK-NEXT:    and p5.b, p2/z, p2.b, p1.b
+; CHECK-NEXT:    and p3.b, p3/z, p3.b, p1.b
+; CHECK-NEXT:    uzp1 v3.4s, v4.4s, v3.4s
+; CHECK-NEXT:    uzp1 v0.4s, v2.4s, v0.4s
+; CHECK-NEXT:    and p7.b, p6/z, p6.b, p1.b
+; CHECK-NEXT:    uzp1 v2.4s, v6.4s, v5.4s
+; CHECK-NEXT:    uzp1 v1.4s, v16.4s, v1.4s
+; CHECK-NEXT:    and p1.b, p4/z, p4.b, p1.b
+; CHECK-NEXT:    ptrue p4.h
+; CHECK-NEXT:    brkpb p1.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    uzp1 v0.8h, v3.8h, v0.8h
+; CHECK-NEXT:    brkpb p3.b, p0/z, p5.b, p3.b
+; CHECK-NEXT:    uzp1 v1.8h, v1.8h, v2.8h
+; CHECK-NEXT:    uzp1 p1.h, p6.h, p1.h
+; CHECK-NEXT:    uzp1 p2.h, p2.h, p3.h
+; CHECK-NEXT:    not p1.b, p4/z, p1.b
+; CHECK-NEXT:    and p3.b, p2/z, p2.b, p4.b
+; CHECK-NEXT:    uzp1 v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    dup v1.16b, w9
+; CHECK-NEXT:    and p1.b, p1/z, p1.b, p4.b
 ; CHECK-NEXT:    adrp x9, .LCPI14_0
-; CHECK-NEXT:    orr v1.16b, v1.16b, v3.16b
-; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI14_0]
-; CHECK-NEXT:    shl v1.16b, v1.16b, #7
+; CHECK-NEXT:    brkpb p0.b, p0/z, p3.b, p1.b
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    uzp1 p0.b, p2.b, p0.b
+; CHECK-NEXT:    mov z1.b, p0/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
-; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
+; CHECK-NEXT:    shl v1.16b, v1.16b, #7
 ; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
-; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
 ; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    ext v2.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ext v3.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    zip1 v1.16b, v1.16b, v2.16b
-; CHECK-NEXT:    zip1 v0.16b, v0.16b, v3.16b
-; CHECK-NEXT:    addv h1, v1.8h
+; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    zip1 v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    zip1 v1.16b, v1.16b, v3.16b
 ; CHECK-NEXT:    addv h0, v0.8h
-; CHECK-NEXT:    str h1, [x8]
+; CHECK-NEXT:    addv h1, v1.8h
 ; CHECK-NEXT:    str h0, [x8, #2]
+; CHECK-NEXT:    str h1, [x8]
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <32 x i1> @llvm.loop.dependence.war.mask.v32i1(ptr %a, ptr %b, i64 4)
   ret <32 x i1> %0
 }
 
-define <4 x i1> @whilewr_64_expand(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_64_expand:
+define <4 x i1> @whilewr_64_split(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_64_split:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    add x9, x8, #7
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    dup v2.2d, x8
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    add z1.d, z1.d, #2 // =0x2
-; CHECK-NEXT:    cmhi v0.2d, v2.2d, v0.2d
-; CHECK-NEXT:    cmhi v1.2d, v2.2d, v1.2d
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    dup v1.4h, w8
+; CHECK-NEXT:    whilewr p1.d, x0, x1
+; CHECK-NEXT:    incb x0
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    whilewr p3.d, x0, x1
+; CHECK-NEXT:    and p2.b, p1/z, p1.b, p0.b
+; CHECK-NEXT:    not p3.b, p0/z, p3.b
+; CHECK-NEXT:    and p0.b, p3/z, p3.b, p0.b
+; CHECK-NEXT:    ptrue p3.b
+; CHECK-NEXT:    brkpb p0.b, p3/z, p2.b, p0.b
+; CHECK-NEXT:    uzp1 p0.s, p1.s, p0.s
+; CHECK-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    xtn v0.4h, v0.4s
-; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <4 x i1> @llvm.loop.dependence.war.mask.v4i1(ptr %a, ptr %b, i64 8)
   ret <4 x i1> %0
 }
 
-define <8 x i1> @whilewr_64_expand2(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_64_expand2:
+define <8 x i1> @whilewr_64_split2(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_64_split2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    add x9, x8, #7
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    dup v1.2d, x8
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    add z4.d, z4.d, #6 // =0x6
-; CHECK-NEXT:    add z2.d, z2.d, #4 // =0x4
-; CHECK-NEXT:    add z3.d, z3.d, #2 // =0x2
-; CHECK-NEXT:    cmhi v0.2d, v1.2d, v0.2d
-; CHECK-NEXT:    cmhi v4.2d, v1.2d, v4.2d
-; CHECK-NEXT:    cmhi v2.2d, v1.2d, v2.2d
-; CHECK-NEXT:    cmhi v1.2d, v1.2d, v3.2d
-; CHECK-NEXT:    uzp1 v2.4s, v2.4s, v4.4s
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    dup v1.8b, w8
-; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    addvl x8, x0, #3
+; CHECK-NEXT:    whilewr p2.d, x0, x1
+; CHECK-NEXT:    whilewr p1.d, x8, x1
+; CHECK-NEXT:    mov x8, x0
+; CHECK-NEXT:    incb x0, all, mul #2
+; CHECK-NEXT:    incb x8
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p3.b
+; CHECK-NEXT:    whilewr p6.d, x0, x1
+; CHECK-NEXT:    whilewr p5.d, x8, x1
+; CHECK-NEXT:    not p1.b, p0/z, p1.b
+; CHECK-NEXT:    not p5.b, p0/z, p5.b
+; CHECK-NEXT:    and p4.b, p2/z, p2.b, p0.b
+; CHECK-NEXT:    and p5.b, p5/z, p5.b, p0.b
+; CHECK-NEXT:    and p7.b, p6/z, p6.b, p0.b
+; CHECK-NEXT:    and p0.b, p1/z, p1.b, p0.b
+; CHECK-NEXT:    brkpb p1.b, p3/z, p4.b, p5.b
+; CHECK-NEXT:    brkpb p0.b, p3/z, p7.b, p0.b
+; CHECK-NEXT:    ptrue p4.s
+; CHECK-NEXT:    uzp1 p0.s, p6.s, p0.s
+; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
+; CHECK-NEXT:    not p0.b, p4/z, p0.b
+; CHECK-NEXT:    and p2.b, p1/z, p1.b, p4.b
+; CHECK-NEXT:    and p0.b, p0/z, p0.b, p4.b
+; CHECK-NEXT:    brkpb p0.b, p3/z, p2.b, p0.b
+; CHECK-NEXT:    uzp1 p0.h, p1.h, p0.h
+; CHECK-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    xtn v0.8b, v0.8h
-; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <8 x i1> @llvm.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 8)
   ret <8 x i1> %0
 }
 
-define <16 x i1> @whilewr_64_expand3(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_64_expand3:
+define <16 x i1> @whilewr_64_split3(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_64_split3:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    add x9, x8, #7
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z6.d, z0.d
-; CHECK-NEXT:    mov z7.d, z0.d
-; CHECK-NEXT:    mov z16.d, z0.d
-; CHECK-NEXT:    dup v3.2d, x8
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
-; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
-; CHECK-NEXT:    add z4.d, z4.d, #8 // =0x8
-; CHECK-NEXT:    add z5.d, z5.d, #6 // =0x6
-; CHECK-NEXT:    add z6.d, z6.d, #4 // =0x4
-; CHECK-NEXT:    add z7.d, z7.d, #2 // =0x2
-; CHECK-NEXT:    add z16.d, z16.d, #14 // =0xe
-; CHECK-NEXT:    cmhi v0.2d, v3.2d, v0.2d
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    cmhi v1.2d, v3.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v3.2d, v2.2d
-; CHECK-NEXT:    cmhi v4.2d, v3.2d, v4.2d
-; CHECK-NEXT:    cmhi v5.2d, v3.2d, v5.2d
-; CHECK-NEXT:    cmhi v6.2d, v3.2d, v6.2d
-; CHECK-NEXT:    cmhi v16.2d, v3.2d, v16.2d
-; CHECK-NEXT:    cmhi v3.2d, v3.2d, v7.2d
-; CHECK-NEXT:    uzp1 v2.4s, v4.4s, v2.4s
-; CHECK-NEXT:    uzp1 v4.4s, v6.4s, v5.4s
-; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v16.4s
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v3.4s
-; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v4.8h
-; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
-; CHECK-NEXT:    dup v1.16b, w8
-; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    mov x8, x0
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    addvl x9, x0, #3
+; CHECK-NEXT:    incb x8
+; CHECK-NEXT:    whilewr p2.d, x0, x1
+; CHECK-NEXT:    whilewr p3.d, x9, x1
+; CHECK-NEXT:    addvl x9, x0, #6
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    whilewr p4.d, x8, x1
+; CHECK-NEXT:    mov x8, x0
+; CHECK-NEXT:    incb x8, all, mul #2
+; CHECK-NEXT:    and p5.b, p2/z, p2.b, p0.b
+; CHECK-NEXT:    not p4.b, p0/z, p4.b
+; CHECK-NEXT:    not p3.b, p0/z, p3.b
+; CHECK-NEXT:    whilewr p6.d, x8, x1
+; CHECK-NEXT:    addvl x8, x0, #5
+; CHECK-NEXT:    and p4.b, p4/z, p4.b, p0.b
+; CHECK-NEXT:    and p3.b, p3/z, p3.b, p0.b
+; CHECK-NEXT:    brkpb p4.b, p1/z, p5.b, p4.b
+; CHECK-NEXT:    and p5.b, p6/z, p6.b, p0.b
+; CHECK-NEXT:    uzp1 p2.s, p2.s, p4.s
+; CHECK-NEXT:    brkpb p4.b, p1/z, p5.b, p3.b
+; CHECK-NEXT:    whilewr p5.d, x8, x1
+; CHECK-NEXT:    mov x8, x0
+; CHECK-NEXT:    incb x8, all, mul #4
+; CHECK-NEXT:    whilewr p9.d, x9, x1
+; CHECK-NEXT:    not p5.b, p0/z, p5.b
+; CHECK-NEXT:    and p10.b, p9/z, p9.b, p0.b
+; CHECK-NEXT:    whilewr p7.d, x8, x1
+; CHECK-NEXT:    addvl x8, x0, #7
+; CHECK-NEXT:    whilewr p8.d, x8, x1
+; CHECK-NEXT:    and p5.b, p5/z, p5.b, p0.b
+; CHECK-NEXT:    not p8.b, p0/z, p8.b
+; CHECK-NEXT:    ptrue p3.s
+; CHECK-NEXT:    and p8.b, p8/z, p8.b, p0.b
+; CHECK-NEXT:    and p0.b, p7/z, p7.b, p0.b
+; CHECK-NEXT:    brkpb p8.b, p1/z, p10.b, p8.b
+; CHECK-NEXT:    uzp1 p4.s, p6.s, p4.s
+; CHECK-NEXT:    brkpb p0.b, p1/z, p0.b, p5.b
+; CHECK-NEXT:    uzp1 p5.s, p9.s, p8.s
+; CHECK-NEXT:    not p4.b, p3/z, p4.b
+; CHECK-NEXT:    uzp1 p0.s, p7.s, p0.s
+; CHECK-NEXT:    not p5.b, p3/z, p5.b
+; CHECK-NEXT:    and p6.b, p2/z, p2.b, p3.b
+; CHECK-NEXT:    and p4.b, p4/z, p4.b, p3.b
+; CHECK-NEXT:    and p7.b, p0/z, p0.b, p3.b
+; CHECK-NEXT:    and p3.b, p5/z, p5.b, p3.b
+; CHECK-NEXT:    brkpb p4.b, p1/z, p6.b, p4.b
+; CHECK-NEXT:    brkpb p3.b, p1/z, p7.b, p3.b
+; CHECK-NEXT:    ptrue p5.h
+; CHECK-NEXT:    uzp1 p0.h, p0.h, p3.h
+; CHECK-NEXT:    uzp1 p2.h, p2.h, p4.h
+; CHECK-NEXT:    not p0.b, p5/z, p0.b
+; CHECK-NEXT:    and p3.b, p2/z, p2.b, p5.b
+; CHECK-NEXT:    and p0.b, p0/z, p0.b, p5.b
+; CHECK-NEXT:    brkpb p0.b, p1/z, p3.b, p0.b
+; CHECK-NEXT:    uzp1 p0.b, p2.b, p0.b
+; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 8)
   ret <16 x i1> %0
 }
 
-define <32 x i1> @whilewr_64_expand4(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_64_expand4:
+define <32 x i1> @whilewr_64_expand_high(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_64_expand_high:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub x10, x1, x0
+; CHECK-NEXT:    sub x9, x1, x0
+; CHECK-NEXT:    mov x11, x0
+; CHECK-NEXT:    addvl x13, x0, #3
+; CHECK-NEXT:    sub x10, x9, #121
+; CHECK-NEXT:    subs x9, x9, #128
+; CHECK-NEXT:    incb x11, all, mul #2
+; CHECK-NEXT:    csel x9, x10, x9, mi
+; CHECK-NEXT:    ptrue p6.d
+; CHECK-NEXT:    mov x12, x0
+; CHECK-NEXT:    asr x10, x9, #3
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    incb x12, all, mul #4
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    sub x9, x10, #121
-; CHECK-NEXT:    subs x11, x10, #128
-; CHECK-NEXT:    add x12, x10, #7
-; CHECK-NEXT:    csel x9, x9, x11, mi
-; CHECK-NEXT:    asr x11, x9, #3
+; CHECK-NEXT:    cmp x10, #1
+; CHECK-NEXT:    dup v4.2d, x10
+; CHECK-NEXT:    cset w9, lt
+; CHECK-NEXT:    whilewr p4.d, x11, x1
+; CHECK-NEXT:    mov x11, x0
+; CHECK-NEXT:    incb x11
+; CHECK-NEXT:    whilewr p3.d, x13, x1
+; CHECK-NEXT:    addvl x13, x0, #5
+; CHECK-NEXT:    whilewr p2.d, x0, x1
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    mov z2.d, z0.d
+; CHECK-NEXT:    not p3.b, p6/z, p3.b
 ; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    cmp x11, #1
-; CHECK-NEXT:    mov z4.d, z0.d
 ; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    cset w9, lt
-; CHECK-NEXT:    cmp x10, #0
+; CHECK-NEXT:    whilewr p8.d, x11, x1
+; CHECK-NEXT:    addvl x11, x0, #6
 ; CHECK-NEXT:    mov z6.d, z0.d
-; CHECK-NEXT:    csel x10, x12, x10, mi
-; CHECK-NEXT:    dup v7.2d, x11
+; CHECK-NEXT:    and p10.b, p4/z, p4.b, p6.b
+; CHECK-NEXT:    mov z7.d, z0.d
+; CHECK-NEXT:    mov z16.d, z0.d
+; CHECK-NEXT:    and p3.b, p3/z, p3.b, p6.b
+; CHECK-NEXT:    add z3.d, z3.d, #14 // =0xe
 ; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
-; CHECK-NEXT:    asr x10, x10, #3
+; CHECK-NEXT:    not p8.b, p6/z, p8.b
 ; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
-; CHECK-NEXT:    add z3.d, z3.d, #8 // =0x8
-; CHECK-NEXT:    add z4.d, z4.d, #6 // =0x6
-; CHECK-NEXT:    add z5.d, z5.d, #4 // =0x4
-; CHECK-NEXT:    add z6.d, z6.d, #2 // =0x2
-; CHECK-NEXT:    dup v16.2d, x10
-; CHECK-NEXT:    cmhi v17.2d, v7.2d, v0.2d
-; CHECK-NEXT:    cmhi v19.2d, v7.2d, v1.2d
-; CHECK-NEXT:    cmhi v20.2d, v7.2d, v2.2d
-; CHECK-NEXT:    cmhi v21.2d, v7.2d, v3.2d
-; CHECK-NEXT:    cmp x10, #1
-; CHECK-NEXT:    cmhi v22.2d, v7.2d, v4.2d
-; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    cmhi v18.2d, v16.2d, v0.2d
-; CHECK-NEXT:    add z0.d, z0.d, #14 // =0xe
-; CHECK-NEXT:    cmhi v1.2d, v16.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v16.2d, v2.2d
-; CHECK-NEXT:    cmhi v3.2d, v16.2d, v3.2d
-; CHECK-NEXT:    cmhi v4.2d, v16.2d, v4.2d
-; CHECK-NEXT:    cmhi v23.2d, v16.2d, v5.2d
-; CHECK-NEXT:    cmhi v24.2d, v16.2d, v6.2d
-; CHECK-NEXT:    cmhi v5.2d, v7.2d, v5.2d
-; CHECK-NEXT:    cmhi v16.2d, v16.2d, v0.2d
-; CHECK-NEXT:    cmhi v6.2d, v7.2d, v6.2d
-; CHECK-NEXT:    cmhi v0.2d, v7.2d, v0.2d
-; CHECK-NEXT:    uzp1 v7.4s, v21.4s, v20.4s
-; CHECK-NEXT:    uzp1 v2.4s, v3.4s, v2.4s
-; CHECK-NEXT:    uzp1 v3.4s, v23.4s, v4.4s
-; CHECK-NEXT:    uzp1 v4.4s, v18.4s, v24.4s
-; CHECK-NEXT:    uzp1 v5.4s, v5.4s, v22.4s
-; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v16.4s
-; CHECK-NEXT:    uzp1 v6.4s, v17.4s, v6.4s
-; CHECK-NEXT:    uzp1 v0.4s, v19.4s, v0.4s
-; CHECK-NEXT:    uzp1 v3.8h, v4.8h, v3.8h
+; CHECK-NEXT:    add z5.d, z5.d, #8 // =0x8
+; CHECK-NEXT:    and p9.b, p2/z, p2.b, p6.b
+; CHECK-NEXT:    add z6.d, z6.d, #6 // =0x6
+; CHECK-NEXT:    add z7.d, z7.d, #4 // =0x4
+; CHECK-NEXT:    brkpb p10.b, p0/z, p10.b, p3.b
+; CHECK-NEXT:    add z16.d, z16.d, #2 // =0x2
+; CHECK-NEXT:    cmhi v0.2d, v4.2d, v0.2d
+; CHECK-NEXT:    and p8.b, p8/z, p8.b, p6.b
+; CHECK-NEXT:    cmhi v3.2d, v4.2d, v3.2d
+; CHECK-NEXT:    cmhi v1.2d, v4.2d, v1.2d
+; CHECK-NEXT:    whilewr p5.d, x13, x1
+; CHECK-NEXT:    cmhi v2.2d, v4.2d, v2.2d
+; CHECK-NEXT:    cmhi v5.2d, v4.2d, v5.2d
+; CHECK-NEXT:    ptrue p3.s
+; CHECK-NEXT:    cmhi v6.2d, v4.2d, v6.2d
+; CHECK-NEXT:    cmhi v7.2d, v4.2d, v7.2d
+; CHECK-NEXT:    brkpb p8.b, p0/z, p9.b, p8.b
+; CHECK-NEXT:    cmhi v4.2d, v4.2d, v16.2d
+; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v3.4s
+; CHECK-NEXT:    uzp1 p4.s, p4.s, p10.s
+; CHECK-NEXT:    uzp1 v2.4s, v5.4s, v2.4s
+; CHECK-NEXT:    whilewr p1.d, x12, x1
+; CHECK-NEXT:    addvl x12, x0, #7
+; CHECK-NEXT:    uzp1 v3.4s, v7.4s, v6.4s
+; CHECK-NEXT:    whilewr p7.d, x12, x1
+; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v4.4s
+; CHECK-NEXT:    uzp1 p2.s, p2.s, p8.s
+; CHECK-NEXT:    not p8.b, p3/z, p4.b
 ; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT:    uzp1 v2.8h, v6.8h, v5.8h
-; CHECK-NEXT:    uzp1 v0.8h, v7.8h, v0.8h
-; CHECK-NEXT:    uzp1 v1.16b, v3.16b, v1.16b
-; CHECK-NEXT:    uzp1 v0.16b, v2.16b, v0.16b
-; CHECK-NEXT:    dup v3.16b, w10
-; CHECK-NEXT:    dup v2.16b, w9
+; CHECK-NEXT:    not p9.b, p6/z, p5.b
+; CHECK-NEXT:    and p5.b, p8/z, p8.b, p3.b
+; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v3.8h
+; CHECK-NEXT:    and p8.b, p9/z, p9.b, p6.b
+; CHECK-NEXT:    whilewr p9.d, x11, x1
+; CHECK-NEXT:    not p7.b, p6/z, p7.b
+; CHECK-NEXT:    and p10.b, p9/z, p9.b, p6.b
+; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    dup v1.16b, w9
+; CHECK-NEXT:    and p7.b, p7/z, p7.b, p6.b
 ; CHECK-NEXT:    adrp x9, .LCPI18_0
-; CHECK-NEXT:    orr v1.16b, v1.16b, v3.16b
-; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    and p6.b, p1/z, p1.b, p6.b
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI18_0]
-; CHECK-NEXT:    shl v1.16b, v1.16b, #7
+; CHECK-NEXT:    brkpb p7.b, p0/z, p10.b, p7.b
+; CHECK-NEXT:    brkpb p6.b, p0/z, p6.b, p8.b
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    uzp1 p7.s, p9.s, p7.s
+; CHECK-NEXT:    uzp1 p1.s, p1.s, p6.s
+; CHECK-NEXT:    not p6.b, p3/z, p7.b
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
-; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
+; CHECK-NEXT:    and p4.b, p2/z, p2.b, p3.b
+; CHECK-NEXT:    and p7.b, p1/z, p1.b, p3.b
+; CHECK-NEXT:    and p3.b, p6/z, p6.b, p3.b
 ; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
-; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    brkpb p4.b, p0/z, p4.b, p5.b
+; CHECK-NEXT:    brkpb p3.b, p0/z, p7.b, p3.b
+; CHECK-NEXT:    ptrue p5.h
 ; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    ext v2.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ext v3.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    zip1 v1.16b, v1.16b, v2.16b
-; CHECK-NEXT:    zip1 v0.16b, v0.16b, v3.16b
-; CHECK-NEXT:    addv h1, v1.8h
+; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
+; CHECK-NEXT:    uzp1 p2.h, p2.h, p4.h
+; CHECK-NEXT:    not p1.b, p5/z, p1.b
+; CHECK-NEXT:    and p3.b, p2/z, p2.b, p5.b
+; CHECK-NEXT:    and p1.b, p1/z, p1.b, p5.b
+; CHECK-NEXT:    brkpb p0.b, p0/z, p3.b, p1.b
+; CHECK-NEXT:    uzp1 p0.b, p2.b, p0.b
+; CHECK-NEXT:    mov z1.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    shl v1.16b, v1.16b, #7
+; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
+; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    zip1 v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    zip1 v1.16b, v1.16b, v3.16b
 ; CHECK-NEXT:    addv h0, v0.8h
-; CHECK-NEXT:    str h1, [x8]
+; CHECK-NEXT:    addv h1, v1.8h
 ; CHECK-NEXT:    str h0, [x8, #2]
+; CHECK-NEXT:    str h1, [x8]
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <32 x i1> @llvm.loop.dependence.war.mask.v32i1(ptr %a, ptr %b, i64 8)
@@ -994,3 +993,79 @@ entry:
   %0 = call <1 x i1> @llvm.loop.dependence.raw.mask.v1i1(ptr %a, ptr %b, i64 8)
   ret <1 x i1> %0
 }
+
+define <2 x i1> @whilewr_8_widen_extract(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_8_widen_extract:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    whilewr p0.b, x0, x1
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+entry:
+  %0 = call <2 x i1> @llvm.loop.dependence.war.mask.v2i1(ptr %a, ptr %b, i64 1)
+  ret <2 x i1> %0
+}
+
+define <4 x i1> @whilewr_8_widen_extract2(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_8_widen_extract2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    whilewr p0.b, x0, x1
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+entry:
+  %0 = call <4 x i1> @llvm.loop.dependence.war.mask.v4i1(ptr %a, ptr %b, i64 1)
+  ret <4 x i1> %0
+}
+
+define <8 x i1> @whilewr_8_widen_extract3(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_8_widen_extract3:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    whilewr p0.b, x0, x1
+; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+entry:
+  %0 = call <8 x i1> @llvm.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 1)
+  ret <8 x i1> %0
+}
+
+define <2 x i1> @whilewr_16_widen_extract(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_16_widen_extract:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    whilewr p0.h, x0, x1
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+entry:
+  %0 = call <2 x i1> @llvm.loop.dependence.war.mask.v2i1(ptr %a, ptr %b, i64 2)
+  ret <2 x i1> %0
+}
+
+define <4 x i1> @whilewr_16_widen_extract2(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_16_widen_extract2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    whilewr p0.h, x0, x1
+; CHECK-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+entry:
+  %0 = call <4 x i1> @llvm.loop.dependence.war.mask.v4i1(ptr %a, ptr %b, i64 2)
+  ret <4 x i1> %0
+}
+
+define <2 x i1> @whilewr_32_widen_extract(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_32_widen_extract:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    whilewr p0.s, x0, x1
+; CHECK-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+entry:
+  %0 = call <2 x i1> @llvm.loop.dependence.war.mask.v2i1(ptr %a, ptr %b, i64 4)
+  ret <2 x i1> %0
+}
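
(Aside for reviewers, not part of the patch: a minimal scalar model of the
hazard the guarded split avoids. This is a sketch under the assumption that a
whilewr lane with 1-byte elements is active iff diff <= 0 or lane < diff,
where diff = ptrB - ptrA; names such as laneActive are illustrative only.)

#include <cstdint>
#include <cstdio>

// One lane of llvm.loop.dependence.war.mask with 1-byte elements, following
// the whilewr rule assumed above: every lane is active when diff <= 0,
// otherwise only the lanes below the pointer distance are.
static bool laneActive(int64_t ptrA, int64_t ptrB, int64_t lane) {
  int64_t diff = ptrB - ptrA;
  return diff <= 0 || lane < diff;
}

int main() {
  const int64_t VL = 32, ptrA = 0, ptrB = 3;
  // Low half: lanes 0..2 active, 3..15 inactive, as the full mask requires.
  // Naive high half: ptrA + VL/2 = 16 gives diff = -13 <= 0, so every lane
  // reports active, which is wrong for the combined 32-lane mask.
  for (int64_t lane = 0; lane < VL / 2; ++lane)
    std::printf("lane %2lld: low=%d naive_high=%d\n", (long long)lane,
                (int)laneActive(ptrA, ptrB, lane),
                (int)laneActive(ptrA + VL / 2, ptrB, lane));
  // The patched lowering instead zeroes the high half whenever the final
  // low lane (lane 15 here) is inactive, via the BRKPB described above.
  return 0;
}
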
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
index 92da69e2e6f41..1b3dfe6f86418 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
@@ -81,8 +81,8 @@ entry:
   ret <vscale x 2 x i1> %0
 }
 
-define <vscale x 32 x i1> @whilewr_8_split(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_8_split:
+define <vscale x 32 x i1> @whilewr_8_expand_high(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_8_expand_high:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
@@ -143,8 +143,8 @@ entry:
   ret <vscale x 32 x i1> %0
 }
 
-define <vscale x 64 x i1> @whilewr_8_split2(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_8_split2:
+define <vscale x 64 x i1> @whilewr_8_expand_high2(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_8_expand_high2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
@@ -258,151 +258,87 @@ entry:
   ret <vscale x 64 x i1> %0
 }
 
-define <vscale x 16 x i1> @whilewr_16_expand(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_16_expand:
+define <vscale x 16 x i1> @whilewr_16_split(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_16_split:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
-; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    add x8, x8, x8, lsr #63
-; CHECK-NEXT:    asr x8, x8, #1
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z2.d, x8
-; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    incd z4.d, all, mul #2
-; CHECK-NEXT:    incd z5.d, all, mul #4
-; CHECK-NEXT:    cmphi p2.d, p0/z, z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z1.d
-; CHECK-NEXT:    cmphi p1.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    incd z1.d, all, mul #4
-; CHECK-NEXT:    cmphi p3.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    incd z4.d, all, mul #4
-; CHECK-NEXT:    cmphi p4.d, p0/z, z2.d, z5.d
-; CHECK-NEXT:    incd z3.d, all, mul #2
-; CHECK-NEXT:    cmphi p5.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
-; CHECK-NEXT:    mov z0.d, z3.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z2.d, z3.d
-; CHECK-NEXT:    uzp1 p2.s, p4.s, p5.s
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT:    incd z0.d, all, mul #4
-; CHECK-NEXT:    uzp1 p3.s, p3.s, p6.s
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT:    cmphi p0.d, p0/z, z2.d, z0.d
-; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p0.s, p7.s, p0.s
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.h, p2.h, p0.h
+; CHECK-NEXT:    whilewr p1.h, x0, x1
+; CHECK-NEXT:    incb x0
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    whilewr p3.h, x0, x1
+; CHECK-NEXT:    and p2.b, p1/z, p1.b, p0.b
+; CHECK-NEXT:    not p3.b, p0/z, p3.b
+; CHECK-NEXT:    and p0.b, p3/z, p3.b, p0.b
+; CHECK-NEXT:    ptrue p3.b
+; CHECK-NEXT:    brkpb p0.b, p3/z, p2.b, p0.b
 ; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
-; CHECK-NEXT:    whilelo p1.b, xzr, x8
-; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
-; CHECK-NEXT:    addvl sp, sp, #1
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 16 x i1> @llvm.loop.dependence.war.mask.nxv16i1(ptr %a, ptr %b, i64 2)
   ret <vscale x 16 x i1> %0
 }
 
-define <vscale x 32 x i1> @whilewr_16_expand2(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_16_expand2:
+define <vscale x 32 x i1> @whilewr_16_expand_high(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_16_expand_high:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    incb x0, all, mul #2
-; CHECK-NEXT:    add x8, x8, x8, lsr #63
+; CHECK-NEXT:    mov x8, x0
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    asr x8, x8, #1
-; CHECK-NEXT:    sub x9, x1, x0
+; CHECK-NEXT:    incb x8, all, mul #2
 ; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
+; CHECK-NEXT:    sub x8, x1, x8
 ; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    mov z5.d, x8
-; CHECK-NEXT:    add x9, x9, x9, lsr #63
+; CHECK-NEXT:    add x8, x8, x8, lsr #63
+; CHECK-NEXT:    mov z5.d, z0.d
 ; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    incd z2.d, all, mul #2
-; CHECK-NEXT:    incd z3.d, all, mul #4
-; CHECK-NEXT:    cmphi p2.d, p0/z, z5.d, z0.d
-; CHECK-NEXT:    asr x9, x9, #1
+; CHECK-NEXT:    asr x8, x8, #1
+; CHECK-NEXT:    incd z3.d, all, mul #2
+; CHECK-NEXT:    incd z5.d, all, mul #4
+; CHECK-NEXT:    mov z2.d, x8
 ; CHECK-NEXT:    mov z4.d, z1.d
-; CHECK-NEXT:    mov z6.d, z1.d
-; CHECK-NEXT:    mov z7.d, z2.d
-; CHECK-NEXT:    cmphi p1.d, p0/z, z5.d, z1.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z3.d
-; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z2.d
+; CHECK-NEXT:    cmphi p1.d, p0/z, z2.d, z1.d
+; CHECK-NEXT:    cmphi p2.d, p0/z, z2.d, z0.d
+; CHECK-NEXT:    cmphi p3.d, p0/z, z2.d, z3.d
 ; CHECK-NEXT:    incd z4.d, all, mul #2
-; CHECK-NEXT:    incd z6.d, all, mul #4
-; CHECK-NEXT:    incd z7.d, all, mul #4
+; CHECK-NEXT:    incd z1.d, all, mul #4
+; CHECK-NEXT:    incd z3.d, all, mul #4
+; CHECK-NEXT:    cmphi p5.d, p0/z, z2.d, z5.d
+; CHECK-NEXT:    cmphi p4.d, p0/z, z2.d, z4.d
+; CHECK-NEXT:    incd z4.d, all, mul #4
+; CHECK-NEXT:    cmphi p6.d, p0/z, z2.d, z1.d
 ; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
-; CHECK-NEXT:    mov z24.d, z4.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    incd z24.d, all, mul #4
 ; CHECK-NEXT:    uzp1 p2.s, p3.s, p4.s
-; CHECK-NEXT:    uzp1 p3.s, p5.s, p6.s
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z24.d
-; CHECK-NEXT:    mov z5.d, x9
+; CHECK-NEXT:    cmphi p3.d, p0/z, z2.d, z3.d
+; CHECK-NEXT:    cmphi p0.d, p0/z, z2.d, z4.d
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z24.d
-; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    uzp1 p7.s, p7.s, p8.s
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z3.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z2.d
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p2.h, p2.h, p7.h
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z1.d
-; CHECK-NEXT:    cmphi p0.d, p0/z, z5.d, z0.d
-; CHECK-NEXT:    uzp1 p4.s, p5.s, p4.s
-; CHECK-NEXT:    uzp1 p5.s, p9.s, p6.s
-; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Reload
-; CHECK-NEXT:    whilelo p6.b, xzr, x8
-; CHECK-NEXT:    uzp1 p3.s, p8.s, p3.s
-; CHECK-NEXT:    cmp x9, #1
-; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.s, p0.s, p7.s
+; CHECK-NEXT:    uzp1 p4.s, p5.s, p6.s
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p1.h, p1.h, p2.h
 ; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p4.h, p5.h, p4.h
+; CHECK-NEXT:    whilewr p2.h, x0, x1
+; CHECK-NEXT:    incb x0
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.h, p0.h, p3.h
-; CHECK-NEXT:    uzp1 p1.b, p1.b, p2.b
-; CHECK-NEXT:    uzp1 p2.b, p0.b, p4.b
+; CHECK-NEXT:    ptrue p5.h
+; CHECK-NEXT:    uzp1 p0.s, p3.s, p0.s
+; CHECK-NEXT:    whilewr p3.h, x0, x1
+; CHECK-NEXT:    uzp1 p0.h, p4.h, p0.h
+; CHECK-NEXT:    not p3.b, p5/z, p3.b
+; CHECK-NEXT:    and p4.b, p2/z, p2.b, p5.b
+; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
+; CHECK-NEXT:    and p1.b, p3/z, p3.b, p5.b
+; CHECK-NEXT:    ptrue p3.b
+; CHECK-NEXT:    whilelo p5.b, xzr, x8
+; CHECK-NEXT:    brkpb p3.b, p3/z, p4.b, p1.b
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT:    whilelo p3.b, xzr, x8
-; CHECK-NEXT:    sel p0.b, p1, p1.b, p6.b
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT:    sel p1.b, p2, p2.b, p3.b
+; CHECK-NEXT:    sel p1.b, p0, p0.b, p5.b
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p0.b, p2.b, p3.b
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -411,42 +347,27 @@ entry:
   ret <vscale x 32 x i1> %0
 }
 
-define <vscale x 8 x i1> @whilewr_32_expand(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_32_expand:
+define <vscale x 8 x i1> @whilewr_32_split(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_32_split:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    add x9, x8, #3
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    asr x8, x8, #2
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, x8
-; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    incd z2.d, all, mul #2
-; CHECK-NEXT:    cmphi p1.d, p0/z, z3.d, z0.d
-; CHECK-NEXT:    mov z4.d, z1.d
-; CHECK-NEXT:    cmphi p2.d, p0/z, z3.d, z1.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z3.d, z2.d
-; CHECK-NEXT:    incd z4.d, all, mul #2
-; CHECK-NEXT:    uzp1 p1.s, p1.s, p2.s
-; CHECK-NEXT:    cmphi p0.d, p0/z, z3.d, z4.d
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p0.s, p3.s, p0.s
+; CHECK-NEXT:    whilewr p1.s, x0, x1
+; CHECK-NEXT:    incb x0
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    whilewr p3.s, x0, x1
+; CHECK-NEXT:    and p2.b, p1/z, p1.b, p0.b
+; CHECK-NEXT:    not p3.b, p0/z, p3.b
+; CHECK-NEXT:    and p0.b, p3/z, p3.b, p0.b
+; CHECK-NEXT:    ptrue p3.b
+; CHECK-NEXT:    brkpb p0.b, p3/z, p2.b, p0.b
 ; CHECK-NEXT:    uzp1 p0.h, p1.h, p0.h
-; CHECK-NEXT:    whilelo p1.h, xzr, x8
-; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 8 x i1> @llvm.loop.dependence.war.mask.nxv8i1(ptr %a, ptr %b, i64 4)
   ret <vscale x 8 x i1> %0
 }
 
-define <vscale x 16 x i1> @whilewr_32_expand2(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_32_expand2:
+define <vscale x 16 x i1> @whilewr_32_split2(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_32_split2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
@@ -456,49 +377,36 @@ define <vscale x 16 x i1> @whilewr_32_expand2(ptr %a, ptr %b) {
 ; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    add x9, x8, #3
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    asr x8, x8, #2
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z2.d, x8
-; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    incd z4.d, all, mul #2
-; CHECK-NEXT:    incd z5.d, all, mul #4
-; CHECK-NEXT:    cmphi p2.d, p0/z, z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z1.d
-; CHECK-NEXT:    cmphi p1.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    incd z1.d, all, mul #4
-; CHECK-NEXT:    cmphi p3.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    incd z4.d, all, mul #4
-; CHECK-NEXT:    cmphi p4.d, p0/z, z2.d, z5.d
-; CHECK-NEXT:    incd z3.d, all, mul #2
-; CHECK-NEXT:    cmphi p5.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
-; CHECK-NEXT:    mov z0.d, z3.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z2.d, z3.d
-; CHECK-NEXT:    uzp1 p2.s, p4.s, p5.s
+; CHECK-NEXT:    addvl x8, x0, #3
+; CHECK-NEXT:    whilewr p2.s, x0, x1
+; CHECK-NEXT:    whilewr p1.s, x8, x1
+; CHECK-NEXT:    mov x8, x0
+; CHECK-NEXT:    incb x0, all, mul #2
+; CHECK-NEXT:    incb x8
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p3.b
+; CHECK-NEXT:    whilewr p6.s, x0, x1
+; CHECK-NEXT:    whilewr p5.s, x8, x1
+; CHECK-NEXT:    not p1.b, p0/z, p1.b
+; CHECK-NEXT:    not p5.b, p0/z, p5.b
+; CHECK-NEXT:    and p4.b, p2/z, p2.b, p0.b
+; CHECK-NEXT:    and p5.b, p5/z, p5.b, p0.b
+; CHECK-NEXT:    and p7.b, p6/z, p6.b, p0.b
+; CHECK-NEXT:    and p0.b, p1/z, p1.b, p0.b
+; CHECK-NEXT:    brkpb p1.b, p3/z, p4.b, p5.b
 ; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT:    incd z0.d, all, mul #4
-; CHECK-NEXT:    uzp1 p3.s, p3.s, p6.s
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT:    cmphi p0.d, p0/z, z2.d, z0.d
-; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p0.s, p7.s, p0.s
+; CHECK-NEXT:    brkpb p0.b, p3/z, p7.b, p0.b
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.h, p2.h, p0.h
+; CHECK-NEXT:    ptrue p4.h
+; CHECK-NEXT:    uzp1 p0.h, p6.h, p0.h
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p1.h, p2.h, p1.h
+; CHECK-NEXT:    not p0.b, p4/z, p0.b
+; CHECK-NEXT:    and p2.b, p1/z, p1.b, p4.b
+; CHECK-NEXT:    and p0.b, p0/z, p0.b, p4.b
+; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
+; CHECK-NEXT:    brkpb p0.b, p3/z, p2.b, p0.b
 ; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
-; CHECK-NEXT:    whilelo p1.b, xzr, x8
-; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -507,13 +415,11 @@ entry:
   ret <vscale x 16 x i1> %0
 }
 
-define <vscale x 32 x i1> @whilewr_32_expand3(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_32_expand3:
+define <vscale x 32 x i1> @whilewr_32_expand_high(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_32_expand_high:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    str p10, [sp, #1, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Spill
@@ -522,79 +428,76 @@ define <vscale x 32 x i1> @whilewr_32_expand3(ptr %a, ptr %b) {
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
+; CHECK-NEXT:    mov x8, x0
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    add x9, x8, #3
-; CHECK-NEXT:    incb x0, all, mul #4
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    asr x8, x8, #2
+; CHECK-NEXT:    incb x8, all, mul #4
+; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, x8
-; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    incd z2.d, all, mul #2
-; CHECK-NEXT:    incd z4.d, all, mul #4
-; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z0.d
-; CHECK-NEXT:    mov z3.d, z1.d
-; CHECK-NEXT:    mov z6.d, z2.d
-; CHECK-NEXT:    mov z7.d, z1.d
-; CHECK-NEXT:    cmphi p2.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z2.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z1.d
-; CHECK-NEXT:    incd z3.d, all, mul #2
-; CHECK-NEXT:    incd z6.d, all, mul #4
-; CHECK-NEXT:    incd z7.d, all, mul #4
-; CHECK-NEXT:    uzp1 p4.s, p5.s, p4.s
-; CHECK-NEXT:    mov z24.d, z3.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z3.d
-; CHECK-NEXT:    incd z24.d, all, mul #4
-; CHECK-NEXT:    uzp1 p2.s, p2.s, p7.s
-; CHECK-NEXT:    uzp1 p3.s, p3.s, p8.s
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z24.d
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p3.h, p4.h, p3.h
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p6.s, p6.s, p9.s
-; CHECK-NEXT:    whilelo p1.b, xzr, x8
-; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    uzp1 p2.h, p2.h, p6.h
+; CHECK-NEXT:    subs x8, x1, x8
+; CHECK-NEXT:    mov z3.d, z0.d
 ; CHECK-NEXT:    add x9, x8, #3
+; CHECK-NEXT:    mov z5.d, z0.d
 ; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    uzp1 p2.b, p3.b, p2.b
+; CHECK-NEXT:    mov x9, x0
+; CHECK-NEXT:    incd z1.d
 ; CHECK-NEXT:    asr x8, x8, #2
-; CHECK-NEXT:    mov z5.d, x8
-; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z24.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z3.d
-; CHECK-NEXT:    cmphi p10.d, p0/z, z5.d, z2.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z1.d
-; CHECK-NEXT:    cmphi p0.d, p0/z, z5.d, z0.d
+; CHECK-NEXT:    incd z3.d, all, mul #2
+; CHECK-NEXT:    incd z5.d, all, mul #4
+; CHECK-NEXT:    incb x9
+; CHECK-NEXT:    mov z2.d, x8
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    cmphi p3.d, p0/z, z2.d, z1.d
+; CHECK-NEXT:    incd z1.d, all, mul #4
+; CHECK-NEXT:    cmphi p4.d, p0/z, z2.d, z0.d
+; CHECK-NEXT:    incd z4.d, all, mul #2
+; CHECK-NEXT:    cmphi p5.d, p0/z, z2.d, z3.d
+; CHECK-NEXT:    incd z3.d, all, mul #4
+; CHECK-NEXT:    cmphi p7.d, p0/z, z2.d, z5.d
+; CHECK-NEXT:    cmphi p8.d, p0/z, z2.d, z1.d
+; CHECK-NEXT:    cmphi p6.d, p0/z, z2.d, z4.d
+; CHECK-NEXT:    incd z4.d, all, mul #4
+; CHECK-NEXT:    uzp1 p3.s, p4.s, p3.s
+; CHECK-NEXT:    uzp1 p4.s, p5.s, p6.s
+; CHECK-NEXT:    cmphi p5.d, p0/z, z2.d, z3.d
+; CHECK-NEXT:    cmphi p0.d, p0/z, z2.d, z4.d
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p5.s, p7.s, p5.s
+; CHECK-NEXT:    uzp1 p6.s, p7.s, p8.s
+; CHECK-NEXT:    uzp1 p3.h, p3.h, p4.h
 ; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    uzp1 p7.s, p9.s, p8.s
+; CHECK-NEXT:    whilewr p4.s, x9, x1
+; CHECK-NEXT:    addvl x9, x0, #3
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p4.s, p10.s, p4.s
-; CHECK-NEXT:    ldr p10, [sp, #1, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.s, p0.s, p6.s
+; CHECK-NEXT:    whilewr p7.s, x0, x1
+; CHECK-NEXT:    incb x0, all, mul #2
+; CHECK-NEXT:    whilewr p8.s, x9, x1
+; CHECK-NEXT:    uzp1 p0.s, p5.s, p0.s
+; CHECK-NEXT:    not p4.b, p1/z, p4.b
+; CHECK-NEXT:    not p8.b, p1/z, p8.b
+; CHECK-NEXT:    uzp1 p0.h, p6.h, p0.h
+; CHECK-NEXT:    whilewr p6.s, x0, x1
+; CHECK-NEXT:    and p5.b, p7/z, p7.b, p1.b
+; CHECK-NEXT:    and p4.b, p4/z, p4.b, p1.b
+; CHECK-NEXT:    and p8.b, p8/z, p8.b, p1.b
+; CHECK-NEXT:    and p1.b, p6/z, p6.b, p1.b
+; CHECK-NEXT:    uzp1 p0.b, p3.b, p0.b
+; CHECK-NEXT:    brkpb p1.b, p2/z, p1.b, p8.b
 ; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p5.h, p7.h, p5.h
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.h, p0.h, p4.h
+; CHECK-NEXT:    brkpb p3.b, p2/z, p5.b, p4.b
+; CHECK-NEXT:    ptrue p4.h
+; CHECK-NEXT:    uzp1 p1.h, p6.h, p1.h
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p3.h, p7.h, p3.h
+; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
+; CHECK-NEXT:    not p1.b, p4/z, p1.b
+; CHECK-NEXT:    and p5.b, p3/z, p3.b, p4.b
+; CHECK-NEXT:    and p1.b, p1/z, p1.b, p4.b
 ; CHECK-NEXT:    whilelo p4.b, xzr, x8
-; CHECK-NEXT:    uzp1 p3.b, p0.b, p5.b
+; CHECK-NEXT:    brkpb p2.b, p2/z, p5.b, p1.b
 ; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    sel p0.b, p2, p2.b, p1.b
-; CHECK-NEXT:    sel p1.b, p3, p3.b, p4.b
+; CHECK-NEXT:    sel p1.b, p0, p0.b, p4.b
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p0.b, p3.b, p2.b
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -603,120 +506,152 @@ entry:
   ret <vscale x 32 x i1> %0
 }
 
-define <vscale x 4 x i1> @whilewr_64_expand(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_64_expand:
+define <vscale x 4 x i1> @whilewr_64_split(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_64_split:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
+; CHECK-NEXT:    whilewr p1.d, x0, x1
+; CHECK-NEXT:    incb x0
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    add x9, x8, #7
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, x8
-; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    cmphi p1.d, p0/z, z2.d, z0.d
-; CHECK-NEXT:    cmphi p0.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    whilewr p3.d, x0, x1
+; CHECK-NEXT:    and p2.b, p1/z, p1.b, p0.b
+; CHECK-NEXT:    not p3.b, p0/z, p3.b
+; CHECK-NEXT:    and p0.b, p3/z, p3.b, p0.b
+; CHECK-NEXT:    ptrue p3.b
+; CHECK-NEXT:    brkpb p0.b, p3/z, p2.b, p0.b
 ; CHECK-NEXT:    uzp1 p0.s, p1.s, p0.s
-; CHECK-NEXT:    whilelo p1.s, xzr, x8
-; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 4 x i1> @llvm.loop.dependence.war.mask.nxv4i1(ptr %a, ptr %b, i64 8)
   ret <vscale x 4 x i1> %0
 }
 
-define <vscale x 8 x i1> @whilewr_64_expand2(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_64_expand2:
+define <vscale x 8 x i1> @whilewr_64_split2(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_64_split2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
+; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Spill
+; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Spill
+; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    addvl x8, x0, #3
+; CHECK-NEXT:    whilewr p2.d, x0, x1
+; CHECK-NEXT:    whilewr p1.d, x8, x1
+; CHECK-NEXT:    mov x8, x0
+; CHECK-NEXT:    incb x0, all, mul #2
+; CHECK-NEXT:    incb x8
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    add x9, x8, #7
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, x8
-; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    incd z2.d, all, mul #2
-; CHECK-NEXT:    cmphi p1.d, p0/z, z3.d, z0.d
-; CHECK-NEXT:    mov z4.d, z1.d
-; CHECK-NEXT:    cmphi p2.d, p0/z, z3.d, z1.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z3.d, z2.d
-; CHECK-NEXT:    incd z4.d, all, mul #2
-; CHECK-NEXT:    uzp1 p1.s, p1.s, p2.s
-; CHECK-NEXT:    cmphi p0.d, p0/z, z3.d, z4.d
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p0.s, p3.s, p0.s
+; CHECK-NEXT:    ptrue p3.b
+; CHECK-NEXT:    whilewr p6.d, x0, x1
+; CHECK-NEXT:    whilewr p5.d, x8, x1
+; CHECK-NEXT:    not p1.b, p0/z, p1.b
+; CHECK-NEXT:    not p5.b, p0/z, p5.b
+; CHECK-NEXT:    and p4.b, p2/z, p2.b, p0.b
+; CHECK-NEXT:    and p5.b, p5/z, p5.b, p0.b
+; CHECK-NEXT:    and p7.b, p6/z, p6.b, p0.b
+; CHECK-NEXT:    and p0.b, p1/z, p1.b, p0.b
+; CHECK-NEXT:    brkpb p1.b, p3/z, p4.b, p5.b
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
+; CHECK-NEXT:    brkpb p0.b, p3/z, p7.b, p0.b
+; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
+; CHECK-NEXT:    ptrue p4.s
+; CHECK-NEXT:    uzp1 p0.s, p6.s, p0.s
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
+; CHECK-NEXT:    not p0.b, p4/z, p0.b
+; CHECK-NEXT:    and p2.b, p1/z, p1.b, p4.b
+; CHECK-NEXT:    and p0.b, p0/z, p0.b, p4.b
+; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
+; CHECK-NEXT:    brkpb p0.b, p3/z, p2.b, p0.b
 ; CHECK-NEXT:    uzp1 p0.h, p1.h, p0.h
-; CHECK-NEXT:    whilelo p1.h, xzr, x8
-; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 8 x i1> @llvm.loop.dependence.war.mask.nxv8i1(ptr %a, ptr %b, i64 8)
   ret <vscale x 8 x i1> %0
 }
 
-define <vscale x 16 x i1> @whilewr_64_expand3(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_64_expand3:
+define <vscale x 16 x i1> @whilewr_64_split3(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_64_split3:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p10, [sp, #1, mul vl] // 2-byte Spill
+; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Spill
+; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
+; CHECK-NEXT:    mov x8, x0
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    add x9, x8, #7
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z2.d, x8
-; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    incd z4.d, all, mul #2
-; CHECK-NEXT:    incd z5.d, all, mul #4
-; CHECK-NEXT:    cmphi p2.d, p0/z, z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z1.d
-; CHECK-NEXT:    cmphi p1.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    incd z1.d, all, mul #4
-; CHECK-NEXT:    cmphi p3.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    incd z4.d, all, mul #4
-; CHECK-NEXT:    cmphi p4.d, p0/z, z2.d, z5.d
-; CHECK-NEXT:    incd z3.d, all, mul #2
-; CHECK-NEXT:    cmphi p5.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
-; CHECK-NEXT:    mov z0.d, z3.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z2.d, z3.d
-; CHECK-NEXT:    uzp1 p2.s, p4.s, p5.s
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT:    incd z0.d, all, mul #4
-; CHECK-NEXT:    uzp1 p3.s, p3.s, p6.s
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT:    cmphi p0.d, p0/z, z2.d, z0.d
-; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    addvl x9, x0, #3
+; CHECK-NEXT:    incb x8
+; CHECK-NEXT:    whilewr p2.d, x0, x1
+; CHECK-NEXT:    whilewr p3.d, x9, x1
+; CHECK-NEXT:    addvl x9, x0, #6
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    whilewr p4.d, x8, x1
+; CHECK-NEXT:    mov x8, x0
+; CHECK-NEXT:    incb x8, all, mul #2
+; CHECK-NEXT:    and p5.b, p2/z, p2.b, p0.b
+; CHECK-NEXT:    not p4.b, p0/z, p4.b
+; CHECK-NEXT:    not p3.b, p0/z, p3.b
+; CHECK-NEXT:    whilewr p6.d, x8, x1
+; CHECK-NEXT:    addvl x8, x0, #5
+; CHECK-NEXT:    and p4.b, p4/z, p4.b, p0.b
+; CHECK-NEXT:    and p3.b, p3/z, p3.b, p0.b
+; CHECK-NEXT:    brkpb p4.b, p1/z, p5.b, p4.b
+; CHECK-NEXT:    and p5.b, p6/z, p6.b, p0.b
+; CHECK-NEXT:    uzp1 p2.s, p2.s, p4.s
+; CHECK-NEXT:    brkpb p4.b, p1/z, p5.b, p3.b
+; CHECK-NEXT:    whilewr p5.d, x8, x1
+; CHECK-NEXT:    mov x8, x0
+; CHECK-NEXT:    incb x8, all, mul #4
+; CHECK-NEXT:    whilewr p9.d, x9, x1
+; CHECK-NEXT:    not p5.b, p0/z, p5.b
+; CHECK-NEXT:    and p10.b, p9/z, p9.b, p0.b
+; CHECK-NEXT:    whilewr p7.d, x8, x1
+; CHECK-NEXT:    addvl x8, x0, #7
+; CHECK-NEXT:    whilewr p8.d, x8, x1
+; CHECK-NEXT:    and p5.b, p5/z, p5.b, p0.b
+; CHECK-NEXT:    not p8.b, p0/z, p8.b
+; CHECK-NEXT:    ptrue p3.s
+; CHECK-NEXT:    and p8.b, p8/z, p8.b, p0.b
+; CHECK-NEXT:    and p0.b, p7/z, p7.b, p0.b
+; CHECK-NEXT:    brkpb p8.b, p1/z, p10.b, p8.b
+; CHECK-NEXT:    ldr p10, [sp, #1, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p4.s, p6.s, p4.s
+; CHECK-NEXT:    brkpb p0.b, p1/z, p0.b, p5.b
+; CHECK-NEXT:    uzp1 p5.s, p9.s, p8.s
+; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Reload
+; CHECK-NEXT:    not p4.b, p3/z, p4.b
+; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    uzp1 p0.s, p7.s, p0.s
+; CHECK-NEXT:    not p5.b, p3/z, p5.b
+; CHECK-NEXT:    and p6.b, p2/z, p2.b, p3.b
+; CHECK-NEXT:    and p4.b, p4/z, p4.b, p3.b
+; CHECK-NEXT:    and p7.b, p0/z, p0.b, p3.b
+; CHECK-NEXT:    and p3.b, p5/z, p5.b, p3.b
+; CHECK-NEXT:    brkpb p4.b, p1/z, p6.b, p4.b
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT:    brkpb p3.b, p1/z, p7.b, p3.b
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.h, p2.h, p0.h
-; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
-; CHECK-NEXT:    whilelo p1.b, xzr, x8
-; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
+; CHECK-NEXT:    ptrue p5.h
+; CHECK-NEXT:    uzp1 p0.h, p0.h, p3.h
+; CHECK-NEXT:    uzp1 p2.h, p2.h, p4.h
+; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
+; CHECK-NEXT:    not p0.b, p5/z, p0.b
+; CHECK-NEXT:    and p3.b, p2/z, p2.b, p5.b
+; CHECK-NEXT:    and p0.b, p0/z, p0.b, p5.b
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
+; CHECK-NEXT:    brkpb p0.b, p1/z, p3.b, p0.b
+; CHECK-NEXT:    uzp1 p0.b, p2.b, p0.b
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -725,11 +660,12 @@ entry:
   ret <vscale x 16 x i1> %0
 }
 
-define <vscale x 32 x i1> @whilewr_64_expand4(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_64_expand4:
+define <vscale x 32 x i1> @whilewr_64_expand_high(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_64_expand_high:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p11, [sp] // 2-byte Spill
 ; CHECK-NEXT:    str p10, [sp, #1, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Spill
@@ -739,80 +675,109 @@ define <vscale x 32 x i1> @whilewr_64_expand4(ptr %a, ptr %b) {
 ; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
+; CHECK-NEXT:    index z1.d, #0, #1
+; CHECK-NEXT:    addvl x8, x0, #8
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    subs x8, x1, x8
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    add x9, x8, #7
 ; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    addvl x9, x0, #8
+; CHECK-NEXT:    mov x9, x0
+; CHECK-NEXT:    mov z0.d, z1.d
+; CHECK-NEXT:    mov z2.d, z1.d
 ; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, x8
-; CHECK-NEXT:    incd z1.d
+; CHECK-NEXT:    mov z5.d, z1.d
+; CHECK-NEXT:    incb x9, all, mul #2
+; CHECK-NEXT:    mov z4.d, x8
+; CHECK-NEXT:    incd z0.d
 ; CHECK-NEXT:    incd z2.d, all, mul #2
-; CHECK-NEXT:    incd z4.d, all, mul #4
-; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z0.d
-; CHECK-NEXT:    mov z3.d, z1.d
-; CHECK-NEXT:    mov z6.d, z2.d
-; CHECK-NEXT:    mov z7.d, z1.d
-; CHECK-NEXT:    cmphi p2.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z2.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z1.d
+; CHECK-NEXT:    incd z5.d, all, mul #4
+; CHECK-NEXT:    cmphi p5.d, p0/z, z4.d, z1.d
+; CHECK-NEXT:    mov z3.d, z0.d
+; CHECK-NEXT:    cmphi p2.d, p0/z, z4.d, z2.d
+; CHECK-NEXT:    incd z2.d, all, mul #4
+; CHECK-NEXT:    cmphi p3.d, p0/z, z4.d, z0.d
+; CHECK-NEXT:    incd z0.d, all, mul #4
+; CHECK-NEXT:    cmphi p10.d, p0/z, z4.d, z5.d
 ; CHECK-NEXT:    incd z3.d, all, mul #2
+; CHECK-NEXT:    cmphi p6.d, p0/z, z4.d, z2.d
+; CHECK-NEXT:    cmphi p11.d, p0/z, z4.d, z0.d
+; CHECK-NEXT:    uzp1 p3.s, p5.s, p3.s
+; CHECK-NEXT:    mov z6.d, z3.d
+; CHECK-NEXT:    cmphi p9.d, p0/z, z4.d, z3.d
+; CHECK-NEXT:    uzp1 p5.s, p10.s, p11.s
 ; CHECK-NEXT:    incd z6.d, all, mul #4
-; CHECK-NEXT:    incd z7.d, all, mul #4
-; CHECK-NEXT:    uzp1 p4.s, p5.s, p4.s
-; CHECK-NEXT:    mov z24.d, z3.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z3.d
-; CHECK-NEXT:    incd z24.d, all, mul #4
-; CHECK-NEXT:    uzp1 p2.s, p2.s, p7.s
-; CHECK-NEXT:    uzp1 p3.s, p3.s, p8.s
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z24.d
+; CHECK-NEXT:    uzp1 p9.s, p2.s, p9.s
+; CHECK-NEXT:    cmphi p7.d, p0/z, z4.d, z6.d
+; CHECK-NEXT:    uzp1 p3.h, p3.h, p9.h
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p3.h, p4.h, p3.h
 ; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    whilewr p4.d, x9, x1
+; CHECK-NEXT:    addvl x9, x0, #3
+; CHECK-NEXT:    whilewr p8.d, x9, x1
+; CHECK-NEXT:    mov x9, x0
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p6.s, p6.s, p9.s
-; CHECK-NEXT:    whilelo p1.b, xzr, x8
-; CHECK-NEXT:    subs x8, x1, x9
-; CHECK-NEXT:    uzp1 p2.h, p2.h, p6.h
-; CHECK-NEXT:    add x9, x8, #7
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    uzp1 p2.b, p3.b, p2.b
-; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    mov z5.d, x8
-; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z24.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z3.d
-; CHECK-NEXT:    cmphi p10.d, p0/z, z5.d, z2.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z1.d
-; CHECK-NEXT:    cmphi p0.d, p0/z, z5.d, z0.d
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p5.s, p7.s, p5.s
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    uzp1 p7.s, p9.s, p8.s
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    incb x9, all, mul #4
+; CHECK-NEXT:    uzp1 p6.s, p6.s, p7.s
+; CHECK-NEXT:    not p8.b, p0/z, p8.b
+; CHECK-NEXT:    uzp1 p5.h, p5.h, p6.h
+; CHECK-NEXT:    whilewr p2.d, x9, x1
+; CHECK-NEXT:    addvl x9, x0, #5
+; CHECK-NEXT:    whilewr p9.d, x9, x1
+; CHECK-NEXT:    addvl x9, x0, #6
+; CHECK-NEXT:    whilewr p11.d, x9, x1
+; CHECK-NEXT:    addvl x9, x0, #7
+; CHECK-NEXT:    whilewr p7.d, x9, x1
+; CHECK-NEXT:    not p9.b, p0/z, p9.b
+; CHECK-NEXT:    whilewr p10.d, x0, x1
+; CHECK-NEXT:    incb x0
+; CHECK-NEXT:    and p6.b, p4/z, p4.b, p0.b
+; CHECK-NEXT:    uzp1 p3.b, p3.b, p5.b
+; CHECK-NEXT:    and p5.b, p2/z, p2.b, p0.b
+; CHECK-NEXT:    and p8.b, p8/z, p8.b, p0.b
+; CHECK-NEXT:    and p9.b, p9/z, p9.b, p0.b
+; CHECK-NEXT:    not p7.b, p0/z, p7.b
+; CHECK-NEXT:    brkpb p6.b, p1/z, p6.b, p8.b
+; CHECK-NEXT:    and p8.b, p11/z, p11.b, p0.b
+; CHECK-NEXT:    brkpb p5.b, p1/z, p5.b, p9.b
+; CHECK-NEXT:    whilewr p9.d, x0, x1
+; CHECK-NEXT:    and p7.b, p7/z, p7.b, p0.b
+; CHECK-NEXT:    not p9.b, p0/z, p9.b
+; CHECK-NEXT:    brkpb p7.b, p1/z, p8.b, p7.b
+; CHECK-NEXT:    ptrue p8.s
+; CHECK-NEXT:    and p9.b, p9/z, p9.b, p0.b
+; CHECK-NEXT:    and p0.b, p10/z, p10.b, p0.b
+; CHECK-NEXT:    uzp1 p7.s, p11.s, p7.s
+; CHECK-NEXT:    ldr p11, [sp] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p4.s, p4.s, p6.s
+; CHECK-NEXT:    brkpb p0.b, p1/z, p0.b, p9.b
 ; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p4.s, p10.s, p4.s
+; CHECK-NEXT:    uzp1 p2.s, p2.s, p5.s
+; CHECK-NEXT:    not p5.b, p8/z, p7.b
+; CHECK-NEXT:    not p4.b, p8/z, p4.b
+; CHECK-NEXT:    uzp1 p0.s, p10.s, p0.s
 ; CHECK-NEXT:    ldr p10, [sp, #1, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.s, p0.s, p6.s
+; CHECK-NEXT:    and p7.b, p2/z, p2.b, p8.b
+; CHECK-NEXT:    and p5.b, p5/z, p5.b, p8.b
+; CHECK-NEXT:    and p4.b, p4/z, p4.b, p8.b
+; CHECK-NEXT:    and p6.b, p0/z, p0.b, p8.b
 ; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p5.h, p7.h, p5.h
+; CHECK-NEXT:    brkpb p5.b, p1/z, p7.b, p5.b
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
+; CHECK-NEXT:    brkpb p4.b, p1/z, p6.b, p4.b
+; CHECK-NEXT:    ptrue p6.h
+; CHECK-NEXT:    uzp1 p2.h, p2.h, p5.h
 ; CHECK-NEXT:    uzp1 p0.h, p0.h, p4.h
+; CHECK-NEXT:    not p2.b, p6/z, p2.b
+; CHECK-NEXT:    and p4.b, p0/z, p0.b, p6.b
+; CHECK-NEXT:    and p2.b, p2/z, p2.b, p6.b
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT:    whilelo p4.b, xzr, x8
-; CHECK-NEXT:    uzp1 p3.b, p0.b, p5.b
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    sel p0.b, p2, p2.b, p1.b
-; CHECK-NEXT:    sel p1.b, p3, p3.b, p4.b
+; CHECK-NEXT:    whilelo p5.b, xzr, x8
+; CHECK-NEXT:    brkpb p2.b, p1/z, p4.b, p2.b
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
+; CHECK-NEXT:    sel p1.b, p3, p3.b, p5.b
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p0.b, p0.b, p2.b
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -913,3 +878,73 @@ entry:
   %0 = call <vscale x 16 x i1> @llvm.loop.dependence.war.mask.nxv16i1(ptr %a, ptr %b, i64 3)
   ret <vscale x 16 x i1> %0
 }
+
+define <vscale x 2 x i1> @whilewr_8_widen_extract(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_8_widen_extract:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    whilewr p0.b, x0, x1
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    ret
+entry:
+  %0 = call <vscale x 2 x i1> @llvm.loop.dependence.war.mask.nxv2i1(ptr %a, ptr %b, i64 1)
+  ret <vscale x 2 x i1> %0
+}
+
+define <vscale x 4 x i1> @whilewr_8_widen_extract2(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_8_widen_extract2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    whilewr p0.b, x0, x1
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    ret
+entry:
+  %0 = call <vscale x 4 x i1> @llvm.loop.dependence.war.mask.nxv4i1(ptr %a, ptr %b, i64 1)
+  ret <vscale x 4 x i1> %0
+}
+
+define <vscale x 8 x i1> @whilewr_8_widen_extract3(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_8_widen_extract3:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    whilewr p0.b, x0, x1
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    ret
+entry:
+  %0 = call <vscale x 8 x i1> @llvm.loop.dependence.war.mask.nxv8i1(ptr %a, ptr %b, i64 1)
+  ret <vscale x 8 x i1> %0
+}
+
+define <vscale x 2 x i1> @whilewr_16_widen_extract(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_16_widen_extract:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    whilewr p0.h, x0, x1
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    ret
+entry:
+  %0 = call <vscale x 2 x i1> @llvm.loop.dependence.war.mask.nxv2i1(ptr %a, ptr %b, i64 2)
+  ret <vscale x 2 x i1> %0
+}
+
+define <vscale x 4 x i1> @whilewr_16_widen_extract2(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_16_widen_extract2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    whilewr p0.h, x0, x1
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    ret
+entry:
+  %0 = call <vscale x 4 x i1> @llvm.loop.dependence.war.mask.nxv4i1(ptr %a, ptr %b, i64 2)
+  ret <vscale x 4 x i1> %0
+}
+
+define <vscale x 2 x i1> @whilewr_32_widen_extract(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_32_widen_extract:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    whilewr p0.s, x0, x1
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    ret
+entry:
+  %0 = call <vscale x 2 x i1> @llvm.loop.dependence.war.mask.nxv2i1(ptr %a, ptr %b, i64 4)
+  ret <vscale x 2 x i1> %0
+}