[llvm] [SelectionDAG] Fix unsafe cases for loop.dependence.{war/raw}.mask (PR #168565)

Benjamin Maxwell via llvm-commits llvm-commits at lists.llvm.org
Wed Dec 10 02:50:00 PST 2025


https://github.com/MacDue updated https://github.com/llvm/llvm-project/pull/168565

>From 613428429ffaab070df0e7c45c6bb5f8f83d5779 Mon Sep 17 00:00:00 2001
From: Sam Tebbs <samuel.tebbs at arm.com>
Date: Mon, 17 Nov 2025 17:30:30 +0000
Subject: [PATCH 1/3] [SelectionDAG] Fix unsafe cases for
 loop.dependence.{war/raw}.mask

There is an unsafe case with the loop dependence mask intrinsics when the
mask is split and the difference between the two pointers is less than
half the vector length, e.g. ptrA = 0 and ptrB = 3 with a vector length
of 32. Currently that produces a correct low mask with 3 active lanes but
an incorrect high mask with all lanes active. This PR adds a select on
the high mask which guards against this case.
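
For illustration (numbers chosen to match the example above, not taken
from a test): when a <32 x i1> war.mask with ptrA = 0 and ptrB = 3 is
split in two, the high half was previously recomputed from ptrA + 16,
giving a pointer difference of 3 - 16 = -13. A non-positive difference is
treated as "no dependence", so the high half came out all-active even
though lanes 16-31 of the full mask must be inactive. With the new
lane-offset operand the high half instead compares (lane + 16) against
the original difference of 3, leaving all of its lanes inactive.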
---
 llvm/include/llvm/CodeGen/ISDOpcodes.h        |   2 +-
 .../SelectionDAG/LegalizeVectorOps.cpp        |  49 +-
 .../SelectionDAG/LegalizeVectorTypes.cpp      |  28 +-
 .../SelectionDAG/SelectionDAGBuilder.cpp      |   6 +-
 .../Target/AArch64/AArch64ISelLowering.cpp    |  36 +-
 llvm/test/CodeGen/AArch64/alias_mask.ll       | 597 +++++++++++-------
 llvm/test/CodeGen/AArch64/alias_mask_nosve.ll |  54 +-
 .../CodeGen/AArch64/alias_mask_scalable.ll    | 507 +++++++++------
 .../AArch64/alias_mask_scalable_nosve2.ll     |  61 +-
 9 files changed, 800 insertions(+), 540 deletions(-)
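
As a rough scalar model of what the expanded WAR mask computes per lane
(illustrative only: the function and variable names below are not taken
from the patch, and the real code builds DAG nodes rather than running
scalar C++), the expansion in LegalizeVectorOps amounts to:

    #include <cstdint>
    #include <cstdlib>

    // One lane of the expanded WAR mask, assuming a fixed-width mask with
    // NumLanes elements and a constant element size in bytes.
    bool warMaskLane(int64_t PtrA, int64_t PtrB, int64_t EltSize,
                     uint64_t LaneOffset, uint64_t Lane, uint64_t NumLanes) {
      int64_t Diff = (PtrB - PtrA) / EltSize; // lanes separating the pointers
      // A non-positive difference means no dependence hazard: all lanes active.
      bool AllActive = Diff <= 0;
      // Guard added by this patch: if |Diff| >= the number of lanes, the
      // truncated splat of Diff may be unsafe, but such a difference has no
      // in-vector dependence anyway, so use an all-active mask.
      AllActive |= (uint64_t)std::llabs(Diff) >= NumLanes;
      // Per-lane test: step vector plus lane offset compared with the diff.
      return AllActive || (Lane + LaneOffset) < (uint64_t)Diff;
    }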

diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index b32f3dacbb3a4..8d5f3f6585cbf 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -1569,7 +1569,7 @@ enum NodeType {
   GET_ACTIVE_LANE_MASK,
 
   // The `llvm.loop.dependence.{war, raw}.mask` intrinsics
-  // Operands: Load pointer, Store pointer, Element size
+  // Operands: Load pointer, Store pointer, Element size, Lane offset
   // Output: Mask
   LOOP_DEPENDENCE_WAR_MASK,
   LOOP_DEPENDENCE_RAW_MASK,
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index e8d9bce43f6ea..8c6a4209e1c33 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -42,6 +42,7 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include <cassert>
+#include <cmath>
 #include <cstdint>
 #include <iterator>
 #include <utility>
@@ -1810,11 +1811,22 @@ SDValue VectorLegalizer::ExpandVP_FCOPYSIGN(SDNode *Node) {
   return DAG.getNode(ISD::BITCAST, DL, VT, CopiedSign);
 }
 
+// Expand a loop dependence mask.
+// First the difference between the pointers is taken and divided by the
+// element size, to see how many lanes separate them. That difference is then
+// splatted and compared with a step vector to produce a mask in which lanes
+// less than the difference are active and the rest inactive. To capture the
+// case where the pointers are the same (or the source pointer is greater than
+// the sink pointer for write-after-read), the difference is compared to zero
+// and that result is splatted into another mask. Those two masks are then
+// ORed to produce the final loop dependence mask.
 SDValue VectorLegalizer::ExpandLOOP_DEPENDENCE_MASK(SDNode *N) {
   SDLoc DL(N);
   SDValue SourceValue = N->getOperand(0);
   SDValue SinkValue = N->getOperand(1);
   SDValue EltSize = N->getOperand(2);
+  unsigned EltSizeInBytes = N->getConstantOperandVal(2);
+  unsigned LaneOffset = N->getConstantOperandVal(3);
 
   bool IsReadAfterWrite = N->getOpcode() == ISD::LOOP_DEPENDENCE_RAW_MASK;
   EVT VT = N->getValueType(0);
@@ -1833,10 +1845,29 @@ SDValue VectorLegalizer::ExpandLOOP_DEPENDENCE_MASK(SDNode *N) {
   SDValue Cmp = DAG.getSetCC(DL, CmpVT, Diff, Zero,
                              IsReadAfterWrite ? ISD::SETEQ : ISD::SETLE);
 
-  // Create the lane mask
-  EVT SplatVT = VT.changeElementType(PtrVT);
-  SDValue DiffSplat = DAG.getSplat(SplatVT, DL, Diff);
+  // Create the mask with lanes less than the difference active and the rest
+  // inactive. For optimisation reasons we want to minimise the size of the
+  // integer used to splat the difference and add the lane offset. If we keep it
+  // as a 64-bit value then the splat will use lots of vectors unnecessarily.
+  int SplatBitWidth =
+      std::pow(2, Log2_32(VT.getVectorMinNumElements() * EltSizeInBytes) + 1);
+  SplatBitWidth = std::min(SplatBitWidth, 64);
+  EVT SplatVT = VT.changeElementType(MVT::getIntegerVT(SplatBitWidth));
+
+  // Truncate and splat the diff. If this ends up being an unsafe truncate (i.e.
+  // diff > vector length), then it's ignored later on when it's ORed with
+  // abs(diff) >= vector_length.
+  SDValue DiffTrunc = DAG.getExtOrTrunc(!IsReadAfterWrite, Diff, DL,
+                                        SplatVT.getVectorElementType());
+  SDValue DiffSplat = DAG.getSplat(SplatVT, DL, DiffTrunc);
   SDValue VectorStep = DAG.getStepVector(DL, SplatVT);
+  // Add the lane offset. A non-zero lane offset often comes from a
+  // larger-than-legal vector length being split in two.
+  VectorStep = DAG.getNode(
+      ISD::ADD, DL, SplatVT, VectorStep,
+      DAG.getSplat(
+          SplatVT, DL,
+          DAG.getConstant(LaneOffset, DL, SplatVT.getVectorElementType())));
   EVT MaskVT = VT.changeElementType(MVT::i1);
   SDValue DiffMask =
       DAG.getSetCC(DL, MaskVT, VectorStep, DiffSplat, ISD::CondCode::SETULT);
@@ -1847,9 +1878,19 @@ SDValue VectorLegalizer::ExpandLOOP_DEPENDENCE_MASK(SDNode *N) {
   if (EltVT.getScalarSizeInBits() > MaskVT.getScalarSizeInBits())
     DiffMask = DAG.getNode(ISD::ANY_EXTEND, DL, VT, DiffMask);
 
-  // Splat the compare result then OR it with the lane mask
   if (CmpVT.getScalarSizeInBits() < EltVT.getScalarSizeInBits())
     Cmp = DAG.getNode(ISD::ZERO_EXTEND, DL, EltVT, Cmp);
+
+  // If the pointer difference is greater than or equal to the max number of
+  // lanes in the mask, then the truncated pointer difference should be ignored
+  // since the truncate could have been unsafe. Use a mask of all active lanes
+  // instead, since a pointer difference >= the number of lanes has no loop
+  // dependencies anyway.
+  SDValue AbsDiff = DAG.getNode(ISD::ABS, DL, PtrVT, Diff);
+  SDValue NumElts = DAG.getConstant(VT.getVectorMinNumElements(), DL, PtrVT);
+  Cmp = DAG.getNode(ISD::OR, DL, CmpVT, Cmp,
+                    DAG.getSetCC(DL, CmpVT, AbsDiff, NumElts, ISD::SETUGE));
+
   SDValue Splat = DAG.getSplat(VT, DL, Cmp);
   return DAG.getNode(ISD::OR, DL, VT, DiffMask, Splat);
 }
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 6e1e02f38113e..f7aec7f25b746 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -407,9 +407,16 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_LOOP_DEPENDENCE_MASK(SDNode *N) {
   SDValue SourceValue = N->getOperand(0);
   SDValue SinkValue = N->getOperand(1);
   SDValue EltSize = N->getOperand(2);
+  SDValue Offset = N->getOperand(3);
   EVT PtrVT = SourceValue->getValueType(0);
   SDLoc DL(N);
 
+  // Increment the source pointer by the lane offset multiplied by the element
+  // size. A non-zero offset is normally used when a larger-than-legal mask has
+  // been split.
+  Offset = DAG.getNode(ISD::MUL, DL, PtrVT, Offset, EltSize);
+  SourceValue = DAG.getNode(ISD::ADD, DL, PtrVT, SourceValue, Offset);
+
   SDValue Diff = DAG.getNode(ISD::SUB, DL, PtrVT, SinkValue, SourceValue);
   EVT CmpVT = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                                      Diff.getValueType());
@@ -1692,6 +1699,10 @@ void DAGTypeLegalizer::SplitVecRes_BITCAST(SDNode *N, SDValue &Lo,
   Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
 }
 
+/// Split a loop dependence mask.
+/// This is done by creating a high and low mask, each of half the vector
+/// length. The low mask inherits the lane offset from the original mask, and
+/// the high mask adds half the vector length.
 void DAGTypeLegalizer::SplitVecRes_LOOP_DEPENDENCE_MASK(SDNode *N, SDValue &Lo,
                                                         SDValue &Hi) {
   SDLoc DL(N);
@@ -1699,14 +1710,15 @@ void DAGTypeLegalizer::SplitVecRes_LOOP_DEPENDENCE_MASK(SDNode *N, SDValue &Lo,
   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
   SDValue PtrA = N->getOperand(0);
   SDValue PtrB = N->getOperand(1);
-  Lo = DAG.getNode(N->getOpcode(), DL, LoVT, PtrA, PtrB, N->getOperand(2));
-
-  unsigned EltSize = N->getConstantOperandVal(2);
-  ElementCount Offset = HiVT.getVectorElementCount() * EltSize;
-  SDValue Addend = DAG.getElementCount(DL, MVT::i64, Offset);
 
-  PtrA = DAG.getNode(ISD::ADD, DL, MVT::i64, PtrA, Addend);
-  Hi = DAG.getNode(N->getOpcode(), DL, HiVT, PtrA, PtrB, N->getOperand(2));
+  Lo = DAG.getNode(N->getOpcode(), DL, LoVT, PtrA, PtrB,
+                   /*ElementSizeInBytes=*/N->getOperand(2),
+                   /*LaneOffset=*/N->getOperand(3));
+  unsigned LaneOffset =
+      N->getConstantOperandVal(3) + LoVT.getVectorMinNumElements();
+  Hi = DAG.getNode(N->getOpcode(), DL, HiVT, PtrA, PtrB,
+                   /*ElementSizeInBytes=*/N->getOperand(2),
+                   /*LaneOffset=*/DAG.getConstant(LaneOffset, DL, MVT::i64));
 }
 
 void DAGTypeLegalizer::SplitVecRes_BUILD_VECTOR(SDNode *N, SDValue &Lo,
@@ -6062,7 +6074,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_LOOP_DEPENDENCE_MASK(SDNode *N) {
   return DAG.getNode(
       N->getOpcode(), SDLoc(N),
       TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)),
-      N->getOperand(0), N->getOperand(1), N->getOperand(2));
+      N->getOperand(0), N->getOperand(1), N->getOperand(2), N->getOperand(3));
 }
 
 SDValue DAGTypeLegalizer::WidenVecRes_BUILD_VECTOR(SDNode *N) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 2caf847370383..69b6e625fd219 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -8390,13 +8390,15 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
     setValue(&I,
              DAG.getNode(ISD::LOOP_DEPENDENCE_WAR_MASK, sdl,
                          EVT::getEVT(I.getType()), getValue(I.getOperand(0)),
-                         getValue(I.getOperand(1)), getValue(I.getOperand(2))));
+                         getValue(I.getOperand(1)), getValue(I.getOperand(2)),
+                         DAG.getConstant(0, sdl, MVT::i64)));
     return;
   case Intrinsic::loop_dependence_raw_mask:
     setValue(&I,
              DAG.getNode(ISD::LOOP_DEPENDENCE_RAW_MASK, sdl,
                          EVT::getEVT(I.getType()), getValue(I.getOperand(0)),
-                         getValue(I.getOperand(1)), getValue(I.getOperand(2))));
+                         getValue(I.getOperand(1)), getValue(I.getOperand(2)),
+                         DAG.getConstant(0, sdl, MVT::i64)));
     return;
   }
 }
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 4be371bd4e67a..7fb98c3d7ed5c 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5461,11 +5461,17 @@ AArch64TargetLowering::LowerLOOP_DEPENDENCE_MASK(SDValue Op,
     return SDValue();
   }
 
+  // TODO: Support split masks
+  unsigned LaneOffset = Op.getConstantOperandVal(3);
+  if (LaneOffset != 0)
+    return SDValue();
+
   SDValue PtrA = Op.getOperand(0);
   SDValue PtrB = Op.getOperand(1);
 
   if (VT.isScalableVT())
-    return DAG.getNode(Op.getOpcode(), DL, VT, PtrA, PtrB, Op.getOperand(2));
+    return DAG.getNode(Op.getOpcode(), DL, VT, PtrA, PtrB, Op.getOperand(2),
+                       Op.getOperand(3));
 
   // We can use the SVE whilewr/whilerw instruction to lower this
   // intrinsic by creating the appropriate sequence of scalable vector
@@ -5476,8 +5482,8 @@ AArch64TargetLowering::LowerLOOP_DEPENDENCE_MASK(SDValue Op,
                        VT.getVectorNumElements(), true);
   EVT WhileVT = ContainerVT.changeElementType(MVT::i1);
 
-  SDValue Mask =
-      DAG.getNode(Op.getOpcode(), DL, WhileVT, PtrA, PtrB, Op.getOperand(2));
+  SDValue Mask = DAG.getNode(Op.getOpcode(), DL, WhileVT, PtrA, PtrB,
+                             Op.getOperand(2), Op.getOperand(3));
   SDValue MaskAsInt = DAG.getNode(ISD::SIGN_EXTEND, DL, ContainerVT, Mask);
   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, MaskAsInt,
                      DAG.getVectorIdxConstant(0, DL));
@@ -6248,35 +6254,43 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
   case Intrinsic::aarch64_sve_whilewr_b:
     return DAG.getNode(ISD::LOOP_DEPENDENCE_WAR_MASK, DL, Op.getValueType(),
                        Op.getOperand(1), Op.getOperand(2),
-                       DAG.getConstant(1, DL, MVT::i64));
+                       DAG.getConstant(1, DL, MVT::i64),
+                       DAG.getConstant(0, DL, MVT::i64));
   case Intrinsic::aarch64_sve_whilewr_h:
     return DAG.getNode(ISD::LOOP_DEPENDENCE_WAR_MASK, DL, Op.getValueType(),
                        Op.getOperand(1), Op.getOperand(2),
-                       DAG.getConstant(2, DL, MVT::i64));
+                       DAG.getConstant(2, DL, MVT::i64),
+                       DAG.getConstant(0, DL, MVT::i64));
   case Intrinsic::aarch64_sve_whilewr_s:
     return DAG.getNode(ISD::LOOP_DEPENDENCE_WAR_MASK, DL, Op.getValueType(),
                        Op.getOperand(1), Op.getOperand(2),
-                       DAG.getConstant(4, DL, MVT::i64));
+                       DAG.getConstant(4, DL, MVT::i64),
+                       DAG.getConstant(0, DL, MVT::i64));
   case Intrinsic::aarch64_sve_whilewr_d:
     return DAG.getNode(ISD::LOOP_DEPENDENCE_WAR_MASK, DL, Op.getValueType(),
                        Op.getOperand(1), Op.getOperand(2),
-                       DAG.getConstant(8, DL, MVT::i64));
+                       DAG.getConstant(8, DL, MVT::i64),
+                       DAG.getConstant(0, DL, MVT::i64));
   case Intrinsic::aarch64_sve_whilerw_b:
     return DAG.getNode(ISD::LOOP_DEPENDENCE_RAW_MASK, DL, Op.getValueType(),
                        Op.getOperand(1), Op.getOperand(2),
-                       DAG.getConstant(1, DL, MVT::i64));
+                       DAG.getConstant(1, DL, MVT::i64),
+                       DAG.getConstant(0, DL, MVT::i64));
   case Intrinsic::aarch64_sve_whilerw_h:
     return DAG.getNode(ISD::LOOP_DEPENDENCE_RAW_MASK, DL, Op.getValueType(),
                        Op.getOperand(1), Op.getOperand(2),
-                       DAG.getConstant(2, DL, MVT::i64));
+                       DAG.getConstant(2, DL, MVT::i64),
+                       DAG.getConstant(0, DL, MVT::i64));
   case Intrinsic::aarch64_sve_whilerw_s:
     return DAG.getNode(ISD::LOOP_DEPENDENCE_RAW_MASK, DL, Op.getValueType(),
                        Op.getOperand(1), Op.getOperand(2),
-                       DAG.getConstant(4, DL, MVT::i64));
+                       DAG.getConstant(4, DL, MVT::i64),
+                       DAG.getConstant(0, DL, MVT::i64));
   case Intrinsic::aarch64_sve_whilerw_d:
     return DAG.getNode(ISD::LOOP_DEPENDENCE_RAW_MASK, DL, Op.getValueType(),
                        Op.getOperand(1), Op.getOperand(2),
-                       DAG.getConstant(8, DL, MVT::i64));
+                       DAG.getConstant(8, DL, MVT::i64),
+                       DAG.getConstant(0, DL, MVT::i64));
   case Intrinsic::aarch64_neon_abs: {
     EVT Ty = Op.getValueType();
     if (Ty == MVT::i64) {
diff --git a/llvm/test/CodeGen/AArch64/alias_mask.ll b/llvm/test/CodeGen/AArch64/alias_mask.ll
index fdd0a6a4709da..3e6ade6278e9d 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask.ll
@@ -100,27 +100,47 @@ entry:
 define <32 x i1> @whilewr_8_split(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_8_split:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    add x9, x0, #16
+; CHECK-NEXT:    index z0.s, #0, #1
+; CHECK-NEXT:    subs x9, x1, x0
+; CHECK-NEXT:    dup v1.4s, w9
+; CHECK-NEXT:    cneg x10, x9, mi
+; CHECK-NEXT:    cmp x9, #1
+; CHECK-NEXT:    ccmp x10, #15, #2, ge
+; CHECK-NEXT:    cset w9, hi
 ; CHECK-NEXT:    whilewr p0.b, x0, x1
-; CHECK-NEXT:    whilewr p1.b, x9, x1
+; CHECK-NEXT:    mov z2.d, z0.d
+; CHECK-NEXT:    mov z3.d, z0.d
+; CHECK-NEXT:    mov z4.d, z0.d
+; CHECK-NEXT:    add z0.s, z0.s, #16 // =0x10
+; CHECK-NEXT:    add z2.s, z2.s, #28 // =0x1c
+; CHECK-NEXT:    add z3.s, z3.s, #24 // =0x18
+; CHECK-NEXT:    add z4.s, z4.s, #20 // =0x14
+; CHECK-NEXT:    cmhi v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    cmhi v2.4s, v1.4s, v2.4s
+; CHECK-NEXT:    cmhi v3.4s, v1.4s, v3.4s
+; CHECK-NEXT:    cmhi v4.4s, v1.4s, v4.4s
+; CHECK-NEXT:    uzp1 v1.8h, v3.8h, v2.8h
+; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v4.8h
+; CHECK-NEXT:    mov z2.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    dup v1.16b, w9
 ; CHECK-NEXT:    adrp x9, .LCPI8_0
-; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    shl v1.16b, v2.16b, #7
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI8_0]
-; CHECK-NEXT:    mov z1.b, p1/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
-; CHECK-NEXT:    shl v1.16b, v1.16b, #7
-; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
 ; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
-; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
 ; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
-; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    zip1 v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    zip1 v1.16b, v1.16b, v3.16b
-; CHECK-NEXT:    addv h0, v0.8h
+; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    ext v2.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    ext v3.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    zip1 v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    zip1 v0.16b, v0.16b, v3.16b
 ; CHECK-NEXT:    addv h1, v1.8h
-; CHECK-NEXT:    str h0, [x8]
-; CHECK-NEXT:    str h1, [x8, #2]
+; CHECK-NEXT:    addv h0, v0.8h
+; CHECK-NEXT:    str h1, [x8]
+; CHECK-NEXT:    str h0, [x8, #2]
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <32 x i1> @llvm.loop.dependence.war.mask.v32i1(ptr %a, ptr %b, i64 1)
@@ -130,47 +150,93 @@ entry:
 define <64 x i1> @whilewr_8_split2(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_8_split2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    add x9, x0, #48
+; CHECK-NEXT:    index z1.s, #0, #1
+; CHECK-NEXT:    subs x9, x1, x0
+; CHECK-NEXT:    dup v0.4s, w9
+; CHECK-NEXT:    cneg x10, x9, mi
+; CHECK-NEXT:    cmp x9, #1
+; CHECK-NEXT:    ccmp x10, #15, #2, ge
+; CHECK-NEXT:    cset w9, hi
 ; CHECK-NEXT:    whilewr p0.b, x0, x1
-; CHECK-NEXT:    add x10, x0, #16
-; CHECK-NEXT:    whilewr p1.b, x9, x1
-; CHECK-NEXT:    add x9, x0, #32
-; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    whilewr p0.b, x9, x1
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    mov z5.d, z1.d
+; CHECK-NEXT:    mov z2.d, z1.d
+; CHECK-NEXT:    mov z3.d, z1.d
+; CHECK-NEXT:    mov z6.d, z1.d
+; CHECK-NEXT:    mov z7.d, z1.d
+; CHECK-NEXT:    mov z16.d, z1.d
+; CHECK-NEXT:    mov z17.d, z1.d
+; CHECK-NEXT:    mov z18.d, z1.d
+; CHECK-NEXT:    mov z19.d, z1.d
+; CHECK-NEXT:    mov z20.d, z1.d
+; CHECK-NEXT:    add z4.s, z4.s, #28 // =0x1c
+; CHECK-NEXT:    add z5.s, z5.s, #24 // =0x18
+; CHECK-NEXT:    add z1.s, z1.s, #48 // =0x30
+; CHECK-NEXT:    add z2.s, z2.s, #20 // =0x14
+; CHECK-NEXT:    add z6.s, z6.s, #44 // =0x2c
+; CHECK-NEXT:    add z18.s, z18.s, #60 // =0x3c
+; CHECK-NEXT:    add z7.s, z7.s, #40 // =0x28
+; CHECK-NEXT:    add z19.s, z19.s, #56 // =0x38
+; CHECK-NEXT:    add z20.s, z20.s, #52 // =0x34
+; CHECK-NEXT:    add z16.s, z16.s, #36 // =0x24
+; CHECK-NEXT:    add z17.s, z17.s, #32 // =0x20
+; CHECK-NEXT:    add z3.s, z3.s, #16 // =0x10
+; CHECK-NEXT:    cmhi v4.4s, v0.4s, v4.4s
+; CHECK-NEXT:    cmhi v5.4s, v0.4s, v5.4s
+; CHECK-NEXT:    cmhi v6.4s, v0.4s, v6.4s
+; CHECK-NEXT:    cmhi v18.4s, v0.4s, v18.4s
+; CHECK-NEXT:    cmhi v19.4s, v0.4s, v19.4s
+; CHECK-NEXT:    cmhi v20.4s, v0.4s, v20.4s
+; CHECK-NEXT:    cmhi v1.4s, v0.4s, v1.4s
+; CHECK-NEXT:    cmhi v7.4s, v0.4s, v7.4s
+; CHECK-NEXT:    cmhi v16.4s, v0.4s, v16.4s
+; CHECK-NEXT:    cmhi v17.4s, v0.4s, v17.4s
+; CHECK-NEXT:    cmhi v2.4s, v0.4s, v2.4s
+; CHECK-NEXT:    cmhi v0.4s, v0.4s, v3.4s
+; CHECK-NEXT:    uzp1 v4.8h, v5.8h, v4.8h
+; CHECK-NEXT:    uzp1 v3.8h, v19.8h, v18.8h
+; CHECK-NEXT:    uzp1 v1.8h, v1.8h, v20.8h
+; CHECK-NEXT:    uzp1 v5.8h, v7.8h, v6.8h
+; CHECK-NEXT:    uzp1 v6.8h, v17.8h, v16.8h
+; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    dup v2.16b, w9
 ; CHECK-NEXT:    adrp x9, .LCPI9_0
-; CHECK-NEXT:    mov z1.b, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    whilewr p1.b, x10, x1
+; CHECK-NEXT:    uzp1 v1.16b, v1.16b, v3.16b
+; CHECK-NEXT:    uzp1 v3.16b, v6.16b, v5.16b
+; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v4.16b
+; CHECK-NEXT:    mov z4.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    orr v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    orr v3.16b, v3.16b, v2.16b
+; CHECK-NEXT:    shl v4.16b, v4.16b, #7
+; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    shl v1.16b, v1.16b, #7
+; CHECK-NEXT:    shl v2.16b, v3.16b, #7
+; CHECK-NEXT:    cmlt v3.16b, v4.16b, #0
 ; CHECK-NEXT:    ldr q4, [x9, :lo12:.LCPI9_0]
-; CHECK-NEXT:    mov z2.b, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    mov z3.b, p1/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
-; CHECK-NEXT:    shl v1.16b, v1.16b, #7
-; CHECK-NEXT:    shl v2.16b, v2.16b, #7
-; CHECK-NEXT:    shl v3.16b, v3.16b, #7
-; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
 ; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
 ; CHECK-NEXT:    cmlt v2.16b, v2.16b, #0
-; CHECK-NEXT:    cmlt v3.16b, v3.16b, #0
-; CHECK-NEXT:    and v0.16b, v0.16b, v4.16b
+; CHECK-NEXT:    and v3.16b, v3.16b, v4.16b
+; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
 ; CHECK-NEXT:    and v1.16b, v1.16b, v4.16b
 ; CHECK-NEXT:    and v2.16b, v2.16b, v4.16b
-; CHECK-NEXT:    and v3.16b, v3.16b, v4.16b
-; CHECK-NEXT:    ext v4.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    ext v5.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    ext v5.16b, v3.16b, v3.16b, #8
+; CHECK-NEXT:    and v0.16b, v0.16b, v4.16b
+; CHECK-NEXT:    ext v4.16b, v1.16b, v1.16b, #8
 ; CHECK-NEXT:    ext v6.16b, v2.16b, v2.16b, #8
-; CHECK-NEXT:    ext v7.16b, v3.16b, v3.16b, #8
-; CHECK-NEXT:    zip1 v0.16b, v0.16b, v4.16b
-; CHECK-NEXT:    zip1 v1.16b, v1.16b, v5.16b
+; CHECK-NEXT:    ext v7.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    zip1 v3.16b, v3.16b, v5.16b
+; CHECK-NEXT:    zip1 v1.16b, v1.16b, v4.16b
 ; CHECK-NEXT:    zip1 v2.16b, v2.16b, v6.16b
-; CHECK-NEXT:    zip1 v3.16b, v3.16b, v7.16b
-; CHECK-NEXT:    addv h0, v0.8h
+; CHECK-NEXT:    zip1 v0.16b, v0.16b, v7.16b
+; CHECK-NEXT:    addv h3, v3.8h
 ; CHECK-NEXT:    addv h1, v1.8h
 ; CHECK-NEXT:    addv h2, v2.8h
-; CHECK-NEXT:    addv h3, v3.8h
-; CHECK-NEXT:    str h0, [x8]
+; CHECK-NEXT:    str h3, [x8]
+; CHECK-NEXT:    addv h0, v0.8h
 ; CHECK-NEXT:    str h1, [x8, #6]
 ; CHECK-NEXT:    str h2, [x8, #4]
-; CHECK-NEXT:    str h3, [x8, #2]
+; CHECK-NEXT:    str h0, [x8, #2]
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <64 x i1> @llvm.loop.dependence.war.mask.v64i1(ptr %a, ptr %b, i64 1)
@@ -192,7 +258,7 @@ define <16 x i1> @whilewr_16_expand(ptr %a, ptr %b) {
 ; CHECK-NEXT:    mov z7.d, z0.d
 ; CHECK-NEXT:    mov z16.d, z0.d
 ; CHECK-NEXT:    dup v3.2d, x8
-; CHECK-NEXT:    cmp x8, #1
+; CHECK-NEXT:    cmp x8, #0
 ; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
 ; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
 ; CHECK-NEXT:    add z4.d, z4.d, #8 // =0x8
@@ -201,15 +267,18 @@ define <16 x i1> @whilewr_16_expand(ptr %a, ptr %b) {
 ; CHECK-NEXT:    add z7.d, z7.d, #2 // =0x2
 ; CHECK-NEXT:    add z16.d, z16.d, #14 // =0xe
 ; CHECK-NEXT:    cmhi v0.2d, v3.2d, v0.2d
-; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    cneg x9, x8, mi
 ; CHECK-NEXT:    cmhi v1.2d, v3.2d, v1.2d
 ; CHECK-NEXT:    cmhi v2.2d, v3.2d, v2.2d
 ; CHECK-NEXT:    cmhi v4.2d, v3.2d, v4.2d
 ; CHECK-NEXT:    cmhi v5.2d, v3.2d, v5.2d
 ; CHECK-NEXT:    cmhi v6.2d, v3.2d, v6.2d
+; CHECK-NEXT:    cmp x8, #1
 ; CHECK-NEXT:    cmhi v16.2d, v3.2d, v16.2d
 ; CHECK-NEXT:    cmhi v3.2d, v3.2d, v7.2d
+; CHECK-NEXT:    ccmp x9, #15, #2, ge
 ; CHECK-NEXT:    uzp1 v2.4s, v4.4s, v2.4s
+; CHECK-NEXT:    cset w8, hi
 ; CHECK-NEXT:    uzp1 v4.4s, v6.4s, v5.4s
 ; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v16.4s
 ; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v3.4s
@@ -229,80 +298,92 @@ define <32 x i1> @whilewr_16_expand2(ptr %a, ptr %b) {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub x9, x1, x0
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    sub x10, x9, #32
 ; CHECK-NEXT:    add x9, x9, x9, lsr #63
-; CHECK-NEXT:    add x10, x10, x10, lsr #63
 ; CHECK-NEXT:    asr x9, x9, #1
-; CHECK-NEXT:    asr x10, x10, #1
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z0.d
 ; CHECK-NEXT:    mov z4.d, z0.d
 ; CHECK-NEXT:    mov z5.d, z0.d
 ; CHECK-NEXT:    mov z6.d, z0.d
-; CHECK-NEXT:    dup v7.2d, x9
-; CHECK-NEXT:    dup v16.2d, x10
-; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
-; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
-; CHECK-NEXT:    cmp x10, #1
-; CHECK-NEXT:    add z3.d, z3.d, #8 // =0x8
-; CHECK-NEXT:    add z4.d, z4.d, #6 // =0x6
-; CHECK-NEXT:    add z5.d, z5.d, #4 // =0x4
-; CHECK-NEXT:    add z6.d, z6.d, #2 // =0x2
-; CHECK-NEXT:    cmhi v17.2d, v7.2d, v0.2d
-; CHECK-NEXT:    cmhi v18.2d, v16.2d, v0.2d
-; CHECK-NEXT:    add z0.d, z0.d, #14 // =0xe
-; CHECK-NEXT:    cmhi v19.2d, v7.2d, v1.2d
-; CHECK-NEXT:    cmhi v20.2d, v7.2d, v2.2d
-; CHECK-NEXT:    cmhi v21.2d, v7.2d, v3.2d
-; CHECK-NEXT:    cmhi v22.2d, v7.2d, v4.2d
-; CHECK-NEXT:    cmhi v23.2d, v7.2d, v5.2d
-; CHECK-NEXT:    cmhi v24.2d, v7.2d, v6.2d
-; CHECK-NEXT:    cmhi v1.2d, v16.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v16.2d, v2.2d
-; CHECK-NEXT:    cmhi v3.2d, v16.2d, v3.2d
-; CHECK-NEXT:    cmhi v4.2d, v16.2d, v4.2d
-; CHECK-NEXT:    cmhi v7.2d, v7.2d, v0.2d
-; CHECK-NEXT:    cmhi v5.2d, v16.2d, v5.2d
-; CHECK-NEXT:    cmhi v6.2d, v16.2d, v6.2d
-; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    cmhi v0.2d, v16.2d, v0.2d
-; CHECK-NEXT:    uzp1 v16.4s, v21.4s, v20.4s
+; CHECK-NEXT:    mov z7.d, z0.d
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    mov z2.d, z0.d
+; CHECK-NEXT:    mov z3.d, z0.d
+; CHECK-NEXT:    mov z16.d, z0.d
+; CHECK-NEXT:    dup v17.2d, x9
+; CHECK-NEXT:    mov z18.d, z0.d
+; CHECK-NEXT:    mov z20.d, z0.d
+; CHECK-NEXT:    mov z21.d, z0.d
+; CHECK-NEXT:    mov z22.d, z0.d
+; CHECK-NEXT:    mov z23.d, z0.d
+; CHECK-NEXT:    mov z24.d, z0.d
+; CHECK-NEXT:    add z4.d, z4.d, #14 // =0xe
+; CHECK-NEXT:    add z5.d, z5.d, #12 // =0xc
+; CHECK-NEXT:    add z6.d, z6.d, #10 // =0xa
+; CHECK-NEXT:    add z7.d, z7.d, #8 // =0x8
+; CHECK-NEXT:    add z1.d, z1.d, #6 // =0x6
+; CHECK-NEXT:    add z2.d, z2.d, #4 // =0x4
+; CHECK-NEXT:    add z16.d, z16.d, #30 // =0x1e
+; CHECK-NEXT:    add z18.d, z18.d, #28 // =0x1c
+; CHECK-NEXT:    cmhi v19.2d, v17.2d, v0.2d
+; CHECK-NEXT:    add z20.d, z20.d, #26 // =0x1a
+; CHECK-NEXT:    add z21.d, z21.d, #24 // =0x18
+; CHECK-NEXT:    cmp x9, #0
+; CHECK-NEXT:    add z22.d, z22.d, #22 // =0x16
+; CHECK-NEXT:    add z23.d, z23.d, #20 // =0x14
+; CHECK-NEXT:    cneg x10, x9, mi
+; CHECK-NEXT:    add z24.d, z24.d, #18 // =0x12
+; CHECK-NEXT:    add z0.d, z0.d, #16 // =0x10
 ; CHECK-NEXT:    cmp x9, #1
-; CHECK-NEXT:    uzp1 v20.4s, v23.4s, v22.4s
-; CHECK-NEXT:    uzp1 v17.4s, v17.4s, v24.4s
-; CHECK-NEXT:    cset w9, lt
-; CHECK-NEXT:    uzp1 v2.4s, v3.4s, v2.4s
-; CHECK-NEXT:    uzp1 v3.4s, v19.4s, v7.4s
+; CHECK-NEXT:    add z3.d, z3.d, #2 // =0x2
+; CHECK-NEXT:    cmhi v4.2d, v17.2d, v4.2d
+; CHECK-NEXT:    ccmp x10, #15, #2, ge
+; CHECK-NEXT:    cmhi v5.2d, v17.2d, v5.2d
+; CHECK-NEXT:    cmhi v6.2d, v17.2d, v6.2d
+; CHECK-NEXT:    cset w9, hi
+; CHECK-NEXT:    cmhi v7.2d, v17.2d, v7.2d
+; CHECK-NEXT:    cmhi v16.2d, v17.2d, v16.2d
+; CHECK-NEXT:    cmhi v18.2d, v17.2d, v18.2d
+; CHECK-NEXT:    cmhi v20.2d, v17.2d, v20.2d
+; CHECK-NEXT:    cmhi v21.2d, v17.2d, v21.2d
+; CHECK-NEXT:    cmhi v22.2d, v17.2d, v22.2d
+; CHECK-NEXT:    cmhi v23.2d, v17.2d, v23.2d
+; CHECK-NEXT:    cmhi v24.2d, v17.2d, v24.2d
+; CHECK-NEXT:    cmhi v0.2d, v17.2d, v0.2d
+; CHECK-NEXT:    cmhi v1.2d, v17.2d, v1.2d
+; CHECK-NEXT:    cmhi v2.2d, v17.2d, v2.2d
+; CHECK-NEXT:    cmhi v3.2d, v17.2d, v3.2d
 ; CHECK-NEXT:    uzp1 v4.4s, v5.4s, v4.4s
-; CHECK-NEXT:    uzp1 v5.4s, v18.4s, v6.4s
-; CHECK-NEXT:    uzp1 v0.4s, v1.4s, v0.4s
-; CHECK-NEXT:    uzp1 v1.8h, v17.8h, v20.8h
-; CHECK-NEXT:    uzp1 v3.8h, v16.8h, v3.8h
-; CHECK-NEXT:    uzp1 v4.8h, v5.8h, v4.8h
-; CHECK-NEXT:    uzp1 v0.8h, v2.8h, v0.8h
+; CHECK-NEXT:    uzp1 v5.4s, v18.4s, v16.4s
+; CHECK-NEXT:    uzp1 v16.4s, v21.4s, v20.4s
+; CHECK-NEXT:    uzp1 v17.4s, v23.4s, v22.4s
+; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v24.4s
+; CHECK-NEXT:    uzp1 v6.4s, v7.4s, v6.4s
+; CHECK-NEXT:    uzp1 v1.4s, v2.4s, v1.4s
+; CHECK-NEXT:    uzp1 v2.4s, v19.4s, v3.4s
+; CHECK-NEXT:    uzp1 v3.8h, v16.8h, v5.8h
+; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v17.8h
+; CHECK-NEXT:    uzp1 v4.8h, v6.8h, v4.8h
+; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
 ; CHECK-NEXT:    dup v2.16b, w9
 ; CHECK-NEXT:    adrp x9, .LCPI11_0
-; CHECK-NEXT:    uzp1 v1.16b, v1.16b, v3.16b
-; CHECK-NEXT:    dup v3.16b, w10
-; CHECK-NEXT:    uzp1 v0.16b, v4.16b, v0.16b
+; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v3.16b
+; CHECK-NEXT:    uzp1 v1.16b, v1.16b, v4.16b
+; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
 ; CHECK-NEXT:    orr v1.16b, v1.16b, v2.16b
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI11_0]
-; CHECK-NEXT:    orr v0.16b, v0.16b, v3.16b
-; CHECK-NEXT:    shl v1.16b, v1.16b, #7
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
-; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
+; CHECK-NEXT:    shl v1.16b, v1.16b, #7
 ; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
-; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
 ; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    ext v2.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ext v3.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    zip1 v1.16b, v1.16b, v2.16b
-; CHECK-NEXT:    zip1 v0.16b, v0.16b, v3.16b
-; CHECK-NEXT:    addv h1, v1.8h
+; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    zip1 v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    zip1 v1.16b, v1.16b, v3.16b
 ; CHECK-NEXT:    addv h0, v0.8h
-; CHECK-NEXT:    str h1, [x8]
+; CHECK-NEXT:    addv h1, v1.8h
 ; CHECK-NEXT:    str h0, [x8, #2]
+; CHECK-NEXT:    str h1, [x8]
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <32 x i1> @llvm.loop.dependence.war.mask.v32i1(ptr %a, ptr %b, i64 2)
@@ -321,12 +402,15 @@ define <8 x i1> @whilewr_32_expand(ptr %a, ptr %b) {
 ; CHECK-NEXT:    mov z3.d, z0.d
 ; CHECK-NEXT:    mov z4.d, z0.d
 ; CHECK-NEXT:    dup v1.2d, x8
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x9, x8, mi
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
 ; CHECK-NEXT:    add z4.d, z4.d, #6 // =0x6
 ; CHECK-NEXT:    add z2.d, z2.d, #4 // =0x4
 ; CHECK-NEXT:    add z3.d, z3.d, #2 // =0x2
 ; CHECK-NEXT:    cmhi v0.2d, v1.2d, v0.2d
+; CHECK-NEXT:    ccmp x9, #7, #2, ge
+; CHECK-NEXT:    cset w8, hi
 ; CHECK-NEXT:    cmhi v4.2d, v1.2d, v4.2d
 ; CHECK-NEXT:    cmhi v2.2d, v1.2d, v2.2d
 ; CHECK-NEXT:    cmhi v1.2d, v1.2d, v3.2d
@@ -358,7 +442,7 @@ define <16 x i1> @whilewr_32_expand2(ptr %a, ptr %b) {
 ; CHECK-NEXT:    mov z7.d, z0.d
 ; CHECK-NEXT:    mov z16.d, z0.d
 ; CHECK-NEXT:    dup v3.2d, x8
-; CHECK-NEXT:    cmp x8, #1
+; CHECK-NEXT:    cmp x8, #0
 ; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
 ; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
 ; CHECK-NEXT:    add z4.d, z4.d, #8 // =0x8
@@ -367,15 +451,18 @@ define <16 x i1> @whilewr_32_expand2(ptr %a, ptr %b) {
 ; CHECK-NEXT:    add z7.d, z7.d, #2 // =0x2
 ; CHECK-NEXT:    add z16.d, z16.d, #14 // =0xe
 ; CHECK-NEXT:    cmhi v0.2d, v3.2d, v0.2d
-; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    cneg x9, x8, mi
 ; CHECK-NEXT:    cmhi v1.2d, v3.2d, v1.2d
 ; CHECK-NEXT:    cmhi v2.2d, v3.2d, v2.2d
 ; CHECK-NEXT:    cmhi v4.2d, v3.2d, v4.2d
 ; CHECK-NEXT:    cmhi v5.2d, v3.2d, v5.2d
 ; CHECK-NEXT:    cmhi v6.2d, v3.2d, v6.2d
+; CHECK-NEXT:    cmp x8, #1
 ; CHECK-NEXT:    cmhi v16.2d, v3.2d, v16.2d
 ; CHECK-NEXT:    cmhi v3.2d, v3.2d, v7.2d
+; CHECK-NEXT:    ccmp x9, #15, #2, ge
 ; CHECK-NEXT:    uzp1 v2.4s, v4.4s, v2.4s
+; CHECK-NEXT:    cset w8, hi
 ; CHECK-NEXT:    uzp1 v4.4s, v6.4s, v5.4s
 ; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v16.4s
 ; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v3.4s
@@ -393,85 +480,95 @@ entry:
 define <32 x i1> @whilewr_32_expand3(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_32_expand3:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub x10, x1, x0
+; CHECK-NEXT:    subs x9, x1, x0
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    sub x9, x10, #61
-; CHECK-NEXT:    subs x11, x10, #64
-; CHECK-NEXT:    add x12, x10, #3
-; CHECK-NEXT:    csel x9, x9, x11, mi
-; CHECK-NEXT:    asr x11, x9, #2
+; CHECK-NEXT:    add x10, x9, #3
+; CHECK-NEXT:    csel x9, x10, x9, mi
+; CHECK-NEXT:    asr x9, x9, #2
+; CHECK-NEXT:    mov z5.d, z0.d
+; CHECK-NEXT:    mov z6.d, z0.d
+; CHECK-NEXT:    mov z7.d, z0.d
+; CHECK-NEXT:    mov z16.d, z0.d
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    mov z2.d, z0.d
 ; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    cmp x11, #1
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    cset w9, lt
-; CHECK-NEXT:    cmp x10, #0
-; CHECK-NEXT:    mov z6.d, z0.d
-; CHECK-NEXT:    csel x10, x12, x10, mi
-; CHECK-NEXT:    dup v7.2d, x11
-; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
-; CHECK-NEXT:    asr x10, x10, #2
-; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
-; CHECK-NEXT:    add z3.d, z3.d, #8 // =0x8
-; CHECK-NEXT:    add z4.d, z4.d, #6 // =0x6
-; CHECK-NEXT:    add z5.d, z5.d, #4 // =0x4
-; CHECK-NEXT:    add z6.d, z6.d, #2 // =0x2
-; CHECK-NEXT:    dup v16.2d, x10
-; CHECK-NEXT:    cmhi v17.2d, v7.2d, v0.2d
-; CHECK-NEXT:    cmhi v19.2d, v7.2d, v1.2d
-; CHECK-NEXT:    cmhi v20.2d, v7.2d, v2.2d
-; CHECK-NEXT:    cmhi v21.2d, v7.2d, v3.2d
-; CHECK-NEXT:    cmp x10, #1
-; CHECK-NEXT:    cmhi v22.2d, v7.2d, v4.2d
-; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    cmhi v18.2d, v16.2d, v0.2d
-; CHECK-NEXT:    add z0.d, z0.d, #14 // =0xe
-; CHECK-NEXT:    cmhi v1.2d, v16.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v16.2d, v2.2d
-; CHECK-NEXT:    cmhi v3.2d, v16.2d, v3.2d
-; CHECK-NEXT:    cmhi v4.2d, v16.2d, v4.2d
-; CHECK-NEXT:    cmhi v23.2d, v16.2d, v5.2d
-; CHECK-NEXT:    cmhi v24.2d, v16.2d, v6.2d
-; CHECK-NEXT:    cmhi v5.2d, v7.2d, v5.2d
-; CHECK-NEXT:    cmhi v16.2d, v16.2d, v0.2d
-; CHECK-NEXT:    cmhi v6.2d, v7.2d, v6.2d
-; CHECK-NEXT:    cmhi v0.2d, v7.2d, v0.2d
-; CHECK-NEXT:    uzp1 v7.4s, v21.4s, v20.4s
-; CHECK-NEXT:    uzp1 v2.4s, v3.4s, v2.4s
-; CHECK-NEXT:    uzp1 v3.4s, v23.4s, v4.4s
-; CHECK-NEXT:    uzp1 v4.4s, v18.4s, v24.4s
-; CHECK-NEXT:    uzp1 v5.4s, v5.4s, v22.4s
-; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v16.4s
-; CHECK-NEXT:    uzp1 v6.4s, v17.4s, v6.4s
-; CHECK-NEXT:    uzp1 v0.4s, v19.4s, v0.4s
-; CHECK-NEXT:    uzp1 v3.8h, v4.8h, v3.8h
+; CHECK-NEXT:    dup v4.2d, x9
+; CHECK-NEXT:    mov z17.d, z0.d
+; CHECK-NEXT:    mov z18.d, z0.d
+; CHECK-NEXT:    mov z20.d, z0.d
+; CHECK-NEXT:    mov z21.d, z0.d
+; CHECK-NEXT:    mov z22.d, z0.d
+; CHECK-NEXT:    mov z23.d, z0.d
+; CHECK-NEXT:    mov z24.d, z0.d
+; CHECK-NEXT:    add z5.d, z5.d, #14 // =0xe
+; CHECK-NEXT:    add z6.d, z6.d, #12 // =0xc
+; CHECK-NEXT:    add z7.d, z7.d, #10 // =0xa
+; CHECK-NEXT:    add z16.d, z16.d, #8 // =0x8
+; CHECK-NEXT:    add z1.d, z1.d, #6 // =0x6
+; CHECK-NEXT:    add z2.d, z2.d, #4 // =0x4
+; CHECK-NEXT:    add z17.d, z17.d, #30 // =0x1e
+; CHECK-NEXT:    cmhi v19.2d, v4.2d, v0.2d
+; CHECK-NEXT:    add z18.d, z18.d, #28 // =0x1c
+; CHECK-NEXT:    add z20.d, z20.d, #26 // =0x1a
+; CHECK-NEXT:    add z21.d, z21.d, #24 // =0x18
+; CHECK-NEXT:    cmp x9, #0
+; CHECK-NEXT:    add z22.d, z22.d, #22 // =0x16
+; CHECK-NEXT:    add z23.d, z23.d, #20 // =0x14
+; CHECK-NEXT:    cneg x10, x9, mi
+; CHECK-NEXT:    add z24.d, z24.d, #18 // =0x12
+; CHECK-NEXT:    add z0.d, z0.d, #16 // =0x10
+; CHECK-NEXT:    cmp x9, #1
+; CHECK-NEXT:    add z3.d, z3.d, #2 // =0x2
+; CHECK-NEXT:    cmhi v5.2d, v4.2d, v5.2d
+; CHECK-NEXT:    ccmp x10, #15, #2, ge
+; CHECK-NEXT:    cmhi v6.2d, v4.2d, v6.2d
+; CHECK-NEXT:    cmhi v7.2d, v4.2d, v7.2d
+; CHECK-NEXT:    cset w9, hi
+; CHECK-NEXT:    cmhi v16.2d, v4.2d, v16.2d
+; CHECK-NEXT:    cmhi v17.2d, v4.2d, v17.2d
+; CHECK-NEXT:    cmhi v18.2d, v4.2d, v18.2d
+; CHECK-NEXT:    cmhi v20.2d, v4.2d, v20.2d
+; CHECK-NEXT:    cmhi v21.2d, v4.2d, v21.2d
+; CHECK-NEXT:    cmhi v22.2d, v4.2d, v22.2d
+; CHECK-NEXT:    cmhi v23.2d, v4.2d, v23.2d
+; CHECK-NEXT:    cmhi v24.2d, v4.2d, v24.2d
+; CHECK-NEXT:    cmhi v0.2d, v4.2d, v0.2d
+; CHECK-NEXT:    cmhi v1.2d, v4.2d, v1.2d
+; CHECK-NEXT:    cmhi v2.2d, v4.2d, v2.2d
+; CHECK-NEXT:    cmhi v3.2d, v4.2d, v3.2d
+; CHECK-NEXT:    uzp1 v4.4s, v6.4s, v5.4s
+; CHECK-NEXT:    uzp1 v5.4s, v18.4s, v17.4s
+; CHECK-NEXT:    uzp1 v6.4s, v21.4s, v20.4s
+; CHECK-NEXT:    uzp1 v17.4s, v23.4s, v22.4s
+; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v24.4s
+; CHECK-NEXT:    uzp1 v7.4s, v16.4s, v7.4s
+; CHECK-NEXT:    uzp1 v1.4s, v2.4s, v1.4s
+; CHECK-NEXT:    uzp1 v2.4s, v19.4s, v3.4s
+; CHECK-NEXT:    uzp1 v3.8h, v6.8h, v5.8h
+; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v17.8h
+; CHECK-NEXT:    uzp1 v4.8h, v7.8h, v4.8h
 ; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT:    uzp1 v2.8h, v6.8h, v5.8h
-; CHECK-NEXT:    uzp1 v0.8h, v7.8h, v0.8h
-; CHECK-NEXT:    uzp1 v1.16b, v3.16b, v1.16b
-; CHECK-NEXT:    uzp1 v0.16b, v2.16b, v0.16b
-; CHECK-NEXT:    dup v3.16b, w10
 ; CHECK-NEXT:    dup v2.16b, w9
 ; CHECK-NEXT:    adrp x9, .LCPI14_0
-; CHECK-NEXT:    orr v1.16b, v1.16b, v3.16b
+; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v3.16b
+; CHECK-NEXT:    uzp1 v1.16b, v1.16b, v4.16b
 ; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    orr v1.16b, v1.16b, v2.16b
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI14_0]
-; CHECK-NEXT:    shl v1.16b, v1.16b, #7
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
-; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
+; CHECK-NEXT:    shl v1.16b, v1.16b, #7
 ; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
-; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
 ; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    ext v2.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ext v3.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    zip1 v1.16b, v1.16b, v2.16b
-; CHECK-NEXT:    zip1 v0.16b, v0.16b, v3.16b
-; CHECK-NEXT:    addv h1, v1.8h
+; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    zip1 v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    zip1 v1.16b, v1.16b, v3.16b
 ; CHECK-NEXT:    addv h0, v0.8h
-; CHECK-NEXT:    str h1, [x8]
+; CHECK-NEXT:    addv h1, v1.8h
 ; CHECK-NEXT:    str h0, [x8, #2]
+; CHECK-NEXT:    str h1, [x8]
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <32 x i1> @llvm.loop.dependence.war.mask.v32i1(ptr %a, ptr %b, i64 4)
@@ -488,10 +585,13 @@ define <4 x i1> @whilewr_64_expand(ptr %a, ptr %b) {
 ; CHECK-NEXT:    asr x8, x8, #3
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    dup v2.2d, x8
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x9, x8, mi
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
 ; CHECK-NEXT:    add z1.d, z1.d, #2 // =0x2
+; CHECK-NEXT:    ccmp x9, #3, #2, ge
 ; CHECK-NEXT:    cmhi v0.2d, v2.2d, v0.2d
+; CHECK-NEXT:    cset w8, hi
 ; CHECK-NEXT:    cmhi v1.2d, v2.2d, v1.2d
 ; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    dup v1.4h, w8
@@ -515,12 +615,15 @@ define <8 x i1> @whilewr_64_expand2(ptr %a, ptr %b) {
 ; CHECK-NEXT:    mov z3.d, z0.d
 ; CHECK-NEXT:    mov z4.d, z0.d
 ; CHECK-NEXT:    dup v1.2d, x8
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x9, x8, mi
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
 ; CHECK-NEXT:    add z4.d, z4.d, #6 // =0x6
 ; CHECK-NEXT:    add z2.d, z2.d, #4 // =0x4
 ; CHECK-NEXT:    add z3.d, z3.d, #2 // =0x2
 ; CHECK-NEXT:    cmhi v0.2d, v1.2d, v0.2d
+; CHECK-NEXT:    ccmp x9, #7, #2, ge
+; CHECK-NEXT:    cset w8, hi
 ; CHECK-NEXT:    cmhi v4.2d, v1.2d, v4.2d
 ; CHECK-NEXT:    cmhi v2.2d, v1.2d, v2.2d
 ; CHECK-NEXT:    cmhi v1.2d, v1.2d, v3.2d
@@ -552,7 +655,7 @@ define <16 x i1> @whilewr_64_expand3(ptr %a, ptr %b) {
 ; CHECK-NEXT:    mov z7.d, z0.d
 ; CHECK-NEXT:    mov z16.d, z0.d
 ; CHECK-NEXT:    dup v3.2d, x8
-; CHECK-NEXT:    cmp x8, #1
+; CHECK-NEXT:    cmp x8, #0
 ; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
 ; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
 ; CHECK-NEXT:    add z4.d, z4.d, #8 // =0x8
@@ -561,15 +664,18 @@ define <16 x i1> @whilewr_64_expand3(ptr %a, ptr %b) {
 ; CHECK-NEXT:    add z7.d, z7.d, #2 // =0x2
 ; CHECK-NEXT:    add z16.d, z16.d, #14 // =0xe
 ; CHECK-NEXT:    cmhi v0.2d, v3.2d, v0.2d
-; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    cneg x9, x8, mi
 ; CHECK-NEXT:    cmhi v1.2d, v3.2d, v1.2d
 ; CHECK-NEXT:    cmhi v2.2d, v3.2d, v2.2d
 ; CHECK-NEXT:    cmhi v4.2d, v3.2d, v4.2d
 ; CHECK-NEXT:    cmhi v5.2d, v3.2d, v5.2d
 ; CHECK-NEXT:    cmhi v6.2d, v3.2d, v6.2d
+; CHECK-NEXT:    cmp x8, #1
 ; CHECK-NEXT:    cmhi v16.2d, v3.2d, v16.2d
 ; CHECK-NEXT:    cmhi v3.2d, v3.2d, v7.2d
+; CHECK-NEXT:    ccmp x9, #15, #2, ge
 ; CHECK-NEXT:    uzp1 v2.4s, v4.4s, v2.4s
+; CHECK-NEXT:    cset w8, hi
 ; CHECK-NEXT:    uzp1 v4.4s, v6.4s, v5.4s
 ; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v16.4s
 ; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v3.4s
@@ -587,85 +693,95 @@ entry:
 define <32 x i1> @whilewr_64_expand4(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_64_expand4:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub x10, x1, x0
+; CHECK-NEXT:    subs x9, x1, x0
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    sub x9, x10, #121
-; CHECK-NEXT:    subs x11, x10, #128
-; CHECK-NEXT:    add x12, x10, #7
-; CHECK-NEXT:    csel x9, x9, x11, mi
-; CHECK-NEXT:    asr x11, x9, #3
+; CHECK-NEXT:    add x10, x9, #7
+; CHECK-NEXT:    csel x9, x10, x9, mi
+; CHECK-NEXT:    asr x9, x9, #3
+; CHECK-NEXT:    mov z5.d, z0.d
+; CHECK-NEXT:    mov z6.d, z0.d
+; CHECK-NEXT:    mov z7.d, z0.d
+; CHECK-NEXT:    mov z16.d, z0.d
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    mov z2.d, z0.d
 ; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    cmp x11, #1
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    cset w9, lt
-; CHECK-NEXT:    cmp x10, #0
-; CHECK-NEXT:    mov z6.d, z0.d
-; CHECK-NEXT:    csel x10, x12, x10, mi
-; CHECK-NEXT:    dup v7.2d, x11
-; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
-; CHECK-NEXT:    asr x10, x10, #3
-; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
-; CHECK-NEXT:    add z3.d, z3.d, #8 // =0x8
-; CHECK-NEXT:    add z4.d, z4.d, #6 // =0x6
-; CHECK-NEXT:    add z5.d, z5.d, #4 // =0x4
-; CHECK-NEXT:    add z6.d, z6.d, #2 // =0x2
-; CHECK-NEXT:    dup v16.2d, x10
-; CHECK-NEXT:    cmhi v17.2d, v7.2d, v0.2d
-; CHECK-NEXT:    cmhi v19.2d, v7.2d, v1.2d
-; CHECK-NEXT:    cmhi v20.2d, v7.2d, v2.2d
-; CHECK-NEXT:    cmhi v21.2d, v7.2d, v3.2d
-; CHECK-NEXT:    cmp x10, #1
-; CHECK-NEXT:    cmhi v22.2d, v7.2d, v4.2d
-; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    cmhi v18.2d, v16.2d, v0.2d
-; CHECK-NEXT:    add z0.d, z0.d, #14 // =0xe
-; CHECK-NEXT:    cmhi v1.2d, v16.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v16.2d, v2.2d
-; CHECK-NEXT:    cmhi v3.2d, v16.2d, v3.2d
-; CHECK-NEXT:    cmhi v4.2d, v16.2d, v4.2d
-; CHECK-NEXT:    cmhi v23.2d, v16.2d, v5.2d
-; CHECK-NEXT:    cmhi v24.2d, v16.2d, v6.2d
-; CHECK-NEXT:    cmhi v5.2d, v7.2d, v5.2d
-; CHECK-NEXT:    cmhi v16.2d, v16.2d, v0.2d
-; CHECK-NEXT:    cmhi v6.2d, v7.2d, v6.2d
-; CHECK-NEXT:    cmhi v0.2d, v7.2d, v0.2d
-; CHECK-NEXT:    uzp1 v7.4s, v21.4s, v20.4s
-; CHECK-NEXT:    uzp1 v2.4s, v3.4s, v2.4s
-; CHECK-NEXT:    uzp1 v3.4s, v23.4s, v4.4s
-; CHECK-NEXT:    uzp1 v4.4s, v18.4s, v24.4s
-; CHECK-NEXT:    uzp1 v5.4s, v5.4s, v22.4s
-; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v16.4s
-; CHECK-NEXT:    uzp1 v6.4s, v17.4s, v6.4s
-; CHECK-NEXT:    uzp1 v0.4s, v19.4s, v0.4s
-; CHECK-NEXT:    uzp1 v3.8h, v4.8h, v3.8h
+; CHECK-NEXT:    dup v4.2d, x9
+; CHECK-NEXT:    mov z17.d, z0.d
+; CHECK-NEXT:    mov z18.d, z0.d
+; CHECK-NEXT:    mov z20.d, z0.d
+; CHECK-NEXT:    mov z21.d, z0.d
+; CHECK-NEXT:    mov z22.d, z0.d
+; CHECK-NEXT:    mov z23.d, z0.d
+; CHECK-NEXT:    mov z24.d, z0.d
+; CHECK-NEXT:    add z5.d, z5.d, #14 // =0xe
+; CHECK-NEXT:    add z6.d, z6.d, #12 // =0xc
+; CHECK-NEXT:    add z7.d, z7.d, #10 // =0xa
+; CHECK-NEXT:    add z16.d, z16.d, #8 // =0x8
+; CHECK-NEXT:    add z1.d, z1.d, #6 // =0x6
+; CHECK-NEXT:    add z2.d, z2.d, #4 // =0x4
+; CHECK-NEXT:    add z17.d, z17.d, #30 // =0x1e
+; CHECK-NEXT:    cmhi v19.2d, v4.2d, v0.2d
+; CHECK-NEXT:    add z18.d, z18.d, #28 // =0x1c
+; CHECK-NEXT:    add z20.d, z20.d, #26 // =0x1a
+; CHECK-NEXT:    add z21.d, z21.d, #24 // =0x18
+; CHECK-NEXT:    cmp x9, #0
+; CHECK-NEXT:    add z22.d, z22.d, #22 // =0x16
+; CHECK-NEXT:    add z23.d, z23.d, #20 // =0x14
+; CHECK-NEXT:    cneg x10, x9, mi
+; CHECK-NEXT:    add z24.d, z24.d, #18 // =0x12
+; CHECK-NEXT:    add z0.d, z0.d, #16 // =0x10
+; CHECK-NEXT:    cmp x9, #1
+; CHECK-NEXT:    add z3.d, z3.d, #2 // =0x2
+; CHECK-NEXT:    cmhi v5.2d, v4.2d, v5.2d
+; CHECK-NEXT:    ccmp x10, #15, #2, ge
+; CHECK-NEXT:    cmhi v6.2d, v4.2d, v6.2d
+; CHECK-NEXT:    cmhi v7.2d, v4.2d, v7.2d
+; CHECK-NEXT:    cset w9, hi
+; CHECK-NEXT:    cmhi v16.2d, v4.2d, v16.2d
+; CHECK-NEXT:    cmhi v17.2d, v4.2d, v17.2d
+; CHECK-NEXT:    cmhi v18.2d, v4.2d, v18.2d
+; CHECK-NEXT:    cmhi v20.2d, v4.2d, v20.2d
+; CHECK-NEXT:    cmhi v21.2d, v4.2d, v21.2d
+; CHECK-NEXT:    cmhi v22.2d, v4.2d, v22.2d
+; CHECK-NEXT:    cmhi v23.2d, v4.2d, v23.2d
+; CHECK-NEXT:    cmhi v24.2d, v4.2d, v24.2d
+; CHECK-NEXT:    cmhi v0.2d, v4.2d, v0.2d
+; CHECK-NEXT:    cmhi v1.2d, v4.2d, v1.2d
+; CHECK-NEXT:    cmhi v2.2d, v4.2d, v2.2d
+; CHECK-NEXT:    cmhi v3.2d, v4.2d, v3.2d
+; CHECK-NEXT:    uzp1 v4.4s, v6.4s, v5.4s
+; CHECK-NEXT:    uzp1 v5.4s, v18.4s, v17.4s
+; CHECK-NEXT:    uzp1 v6.4s, v21.4s, v20.4s
+; CHECK-NEXT:    uzp1 v17.4s, v23.4s, v22.4s
+; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v24.4s
+; CHECK-NEXT:    uzp1 v7.4s, v16.4s, v7.4s
+; CHECK-NEXT:    uzp1 v1.4s, v2.4s, v1.4s
+; CHECK-NEXT:    uzp1 v2.4s, v19.4s, v3.4s
+; CHECK-NEXT:    uzp1 v3.8h, v6.8h, v5.8h
+; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v17.8h
+; CHECK-NEXT:    uzp1 v4.8h, v7.8h, v4.8h
 ; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT:    uzp1 v2.8h, v6.8h, v5.8h
-; CHECK-NEXT:    uzp1 v0.8h, v7.8h, v0.8h
-; CHECK-NEXT:    uzp1 v1.16b, v3.16b, v1.16b
-; CHECK-NEXT:    uzp1 v0.16b, v2.16b, v0.16b
-; CHECK-NEXT:    dup v3.16b, w10
 ; CHECK-NEXT:    dup v2.16b, w9
 ; CHECK-NEXT:    adrp x9, .LCPI18_0
-; CHECK-NEXT:    orr v1.16b, v1.16b, v3.16b
+; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v3.16b
+; CHECK-NEXT:    uzp1 v1.16b, v1.16b, v4.16b
 ; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    orr v1.16b, v1.16b, v2.16b
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI18_0]
-; CHECK-NEXT:    shl v1.16b, v1.16b, #7
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
-; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
+; CHECK-NEXT:    shl v1.16b, v1.16b, #7
 ; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
-; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
 ; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    ext v2.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ext v3.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    zip1 v1.16b, v1.16b, v2.16b
-; CHECK-NEXT:    zip1 v0.16b, v0.16b, v3.16b
-; CHECK-NEXT:    addv h1, v1.8h
+; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    zip1 v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    zip1 v1.16b, v1.16b, v3.16b
 ; CHECK-NEXT:    addv h0, v0.8h
-; CHECK-NEXT:    str h1, [x8]
+; CHECK-NEXT:    addv h1, v1.8h
 ; CHECK-NEXT:    str h0, [x8, #2]
+; CHECK-NEXT:    str h1, [x8]
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <32 x i1> @llvm.loop.dependence.war.mask.v32i1(ptr %a, ptr %b, i64 8)
@@ -762,8 +878,9 @@ define <16 x i1> @whilewr_badimm(ptr %a, ptr %b) {
 ; CHECK-NEXT:    dup v3.2d, x8
 ; CHECK-NEXT:    add z16.d, z16.d, #14 // =0xe
 ; CHECK-NEXT:    add z7.d, z7.d, #2 // =0x2
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x9, x8, mi
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
 ; CHECK-NEXT:    cmhi v0.2d, v3.2d, v0.2d
 ; CHECK-NEXT:    cmhi v1.2d, v3.2d, v1.2d
 ; CHECK-NEXT:    cmhi v2.2d, v3.2d, v2.2d
@@ -772,6 +889,8 @@ define <16 x i1> @whilewr_badimm(ptr %a, ptr %b) {
 ; CHECK-NEXT:    cmhi v5.2d, v3.2d, v5.2d
 ; CHECK-NEXT:    cmhi v6.2d, v3.2d, v6.2d
 ; CHECK-NEXT:    cmhi v3.2d, v3.2d, v7.2d
+; CHECK-NEXT:    ccmp x9, #15, #2, ge
+; CHECK-NEXT:    cset w8, hi
 ; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v16.4s
 ; CHECK-NEXT:    uzp1 v2.4s, v4.4s, v2.4s
 ; CHECK-NEXT:    uzp1 v4.4s, v6.4s, v5.4s
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_nosve.ll b/llvm/test/CodeGen/AArch64/alias_mask_nosve.ll
index 922b37c2f2a08..a62adf4eb7e00 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_nosve.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_nosve.ll
@@ -4,41 +4,27 @@
 define <16 x i1> @whilewr_8(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_8:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    adrp x8, .LCPI0_0
+; CHECK-NEXT:    subs x8, x1, x0
+; CHECK-NEXT:    adrp x9, .LCPI0_0
 ; CHECK-NEXT:    adrp x10, .LCPI0_1
-; CHECK-NEXT:    sub x9, x1, x0
-; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI0_0]
-; CHECK-NEXT:    adrp x8, .LCPI0_2
-; CHECK-NEXT:    ldr q1, [x10, :lo12:.LCPI0_1]
-; CHECK-NEXT:    ldr q3, [x8, :lo12:.LCPI0_2]
-; CHECK-NEXT:    adrp x8, .LCPI0_4
-; CHECK-NEXT:    adrp x10, .LCPI0_3
-; CHECK-NEXT:    ldr q5, [x8, :lo12:.LCPI0_4]
-; CHECK-NEXT:    adrp x8, .LCPI0_5
-; CHECK-NEXT:    dup v2.2d, x9
-; CHECK-NEXT:    ldr q4, [x10, :lo12:.LCPI0_3]
-; CHECK-NEXT:    adrp x10, .LCPI0_6
-; CHECK-NEXT:    ldr q6, [x8, :lo12:.LCPI0_5]
-; CHECK-NEXT:    adrp x8, .LCPI0_7
-; CHECK-NEXT:    ldr q7, [x10, :lo12:.LCPI0_6]
-; CHECK-NEXT:    cmp x9, #1
-; CHECK-NEXT:    ldr q16, [x8, :lo12:.LCPI0_7]
-; CHECK-NEXT:    cmhi v0.2d, v2.2d, v0.2d
-; CHECK-NEXT:    cmhi v1.2d, v2.2d, v1.2d
-; CHECK-NEXT:    cmhi v3.2d, v2.2d, v3.2d
-; CHECK-NEXT:    cmhi v4.2d, v2.2d, v4.2d
-; CHECK-NEXT:    cmhi v5.2d, v2.2d, v5.2d
-; CHECK-NEXT:    cmhi v6.2d, v2.2d, v6.2d
-; CHECK-NEXT:    cmhi v7.2d, v2.2d, v7.2d
-; CHECK-NEXT:    cmhi v2.2d, v2.2d, v16.2d
-; CHECK-NEXT:    uzp1 v0.4s, v1.4s, v0.4s
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    uzp1 v1.4s, v4.4s, v3.4s
-; CHECK-NEXT:    uzp1 v3.4s, v6.4s, v5.4s
-; CHECK-NEXT:    uzp1 v2.4s, v2.4s, v7.4s
-; CHECK-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
-; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v3.8h
-; CHECK-NEXT:    uzp1 v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    dup v0.4s, w8
+; CHECK-NEXT:    adrp x11, .LCPI0_2
+; CHECK-NEXT:    ldr q1, [x9, :lo12:.LCPI0_0]
+; CHECK-NEXT:    adrp x9, .LCPI0_3
+; CHECK-NEXT:    ldr q2, [x10, :lo12:.LCPI0_1]
+; CHECK-NEXT:    ldr q3, [x11, :lo12:.LCPI0_2]
+; CHECK-NEXT:    ldr q4, [x9, :lo12:.LCPI0_3]
+; CHECK-NEXT:    cneg x9, x8, mi
+; CHECK-NEXT:    cmp x8, #1
+; CHECK-NEXT:    cmhi v1.4s, v0.4s, v1.4s
+; CHECK-NEXT:    cmhi v2.4s, v0.4s, v2.4s
+; CHECK-NEXT:    cmhi v3.4s, v0.4s, v3.4s
+; CHECK-NEXT:    cmhi v0.4s, v0.4s, v4.4s
+; CHECK-NEXT:    ccmp x9, #15, #2, ge
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
+; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v3.8h
+; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    dup v1.16b, w8
 ; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
index 3435ceca28e17..22440d95d78ca 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
@@ -84,9 +84,33 @@ entry:
 define <vscale x 32 x i1> @whilewr_8_split(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_8_split:
 ; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov w8, #16 // =0x10
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    index z0.s, w8, #1
+; CHECK-NEXT:    sub x8, x1, x0
+; CHECK-NEXT:    mov z3.s, w8
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    mov z2.d, z0.d
+; CHECK-NEXT:    cmphi p1.s, p0/z, z3.s, z0.s
+; CHECK-NEXT:    incw z1.s
+; CHECK-NEXT:    incw z2.s, all, mul #2
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    cmphi p2.s, p0/z, z3.s, z1.s
+; CHECK-NEXT:    cmphi p3.s, p0/z, z3.s, z2.s
+; CHECK-NEXT:    incw z4.s, all, mul #2
+; CHECK-NEXT:    uzp1 p1.h, p1.h, p2.h
+; CHECK-NEXT:    cmphi p0.s, p0/z, z3.s, z4.s
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x9, x8, mi
+; CHECK-NEXT:    cmp x8, #1
+; CHECK-NEXT:    ccmp x9, #15, #2, ge
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    uzp1 p0.h, p3.h, p0.h
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
+; CHECK-NEXT:    whilelo p1.b, xzr, x8
+; CHECK-NEXT:    mov p1.b, p0/m, p0.b
 ; CHECK-NEXT:    whilewr p0.b, x0, x1
-; CHECK-NEXT:    incb x0
-; CHECK-NEXT:    whilewr p1.b, x0, x1
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 32 x i1> @llvm.loop.dependence.war.mask.nxv32i1(ptr %a, ptr %b, i64 1)
@@ -96,14 +120,84 @@ entry:
 define <vscale x 64 x i1> @whilewr_8_split2(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_8_split2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov x8, x0
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Spill
+; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
+; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Spill
+; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Spill
+; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    index z0.s, #0, #1
+; CHECK-NEXT:    sub x8, x1, x0
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    mov z4.s, w8
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    mov z2.d, z0.d
+; CHECK-NEXT:    mov z5.d, z0.d
+; CHECK-NEXT:    incw z1.s
+; CHECK-NEXT:    incw z2.s, all, mul #2
+; CHECK-NEXT:    add z5.s, z5.s, #16 // =0x10
+; CHECK-NEXT:    mov z3.d, z1.d
+; CHECK-NEXT:    mov z6.d, z2.d
+; CHECK-NEXT:    mov z7.d, z1.d
+; CHECK-NEXT:    cmphi p2.s, p0/z, z4.s, z5.s
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    add z2.s, z2.s, #48 // =0x30
+; CHECK-NEXT:    incw z3.s, all, mul #2
+; CHECK-NEXT:    add z6.s, z6.s, #16 // =0x10
+; CHECK-NEXT:    add z7.s, z7.s, #16 // =0x10
+; CHECK-NEXT:    add z5.s, z5.s, #32 // =0x20
+; CHECK-NEXT:    mov z24.d, z3.d
+; CHECK-NEXT:    cmphi p1.s, p0/z, z4.s, z6.s
+; CHECK-NEXT:    cmphi p4.s, p0/z, z4.s, z7.s
+; CHECK-NEXT:    mov z6.d, z3.d
+; CHECK-NEXT:    mov z7.d, z0.d
+; CHECK-NEXT:    add z3.s, z3.s, #48 // =0x30
+; CHECK-NEXT:    add z0.s, z0.s, #48 // =0x30
+; CHECK-NEXT:    add z24.s, z24.s, #16 // =0x10
+; CHECK-NEXT:    add z6.s, z6.s, #32 // =0x20
+; CHECK-NEXT:    add z7.s, z7.s, #32 // =0x20
+; CHECK-NEXT:    uzp1 p2.h, p2.h, p4.h
+; CHECK-NEXT:    cmphi p3.s, p0/z, z4.s, z24.s
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    add z1.s, z1.s, #48 // =0x30
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x9, x8, mi
+; CHECK-NEXT:    cmp x8, #1
+; CHECK-NEXT:    add z24.s, z24.s, #32 // =0x20
+; CHECK-NEXT:    ccmp x9, #15, #2, ge
+; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    cmphi p5.s, p0/z, z4.s, z6.s
+; CHECK-NEXT:    cmphi p6.s, p0/z, z4.s, z5.s
+; CHECK-NEXT:    cmphi p3.s, p0/z, z4.s, z7.s
+; CHECK-NEXT:    cmphi p7.s, p0/z, z4.s, z3.s
+; CHECK-NEXT:    cmphi p4.s, p0/z, z4.s, z24.s
+; CHECK-NEXT:    cmphi p8.s, p0/z, z4.s, z2.s
+; CHECK-NEXT:    uzp1 p1.b, p2.b, p1.b
+; CHECK-NEXT:    cmphi p2.s, p0/z, z4.s, z1.s
+; CHECK-NEXT:    cmphi p0.s, p0/z, z4.s, z0.s
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    uzp1 p5.h, p6.h, p5.h
+; CHECK-NEXT:    uzp1 p3.h, p3.h, p4.h
+; CHECK-NEXT:    uzp1 p4.h, p8.h, p7.h
+; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p2.h, p0.h, p2.h
+; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
+; CHECK-NEXT:    whilelo p6.b, xzr, x8
+; CHECK-NEXT:    uzp1 p3.b, p3.b, p5.b
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p4.b, p2.b, p4.b
+; CHECK-NEXT:    sel p1.b, p1, p1.b, p6.b
+; CHECK-NEXT:    sel p2.b, p3, p3.b, p6.b
+; CHECK-NEXT:    sel p3.b, p4, p4.b, p6.b
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    whilewr p0.b, x0, x1
-; CHECK-NEXT:    addvl x9, x0, #3
-; CHECK-NEXT:    incb x0, all, mul #2
-; CHECK-NEXT:    incb x8
-; CHECK-NEXT:    whilewr p3.b, x9, x1
-; CHECK-NEXT:    whilewr p2.b, x0, x1
-; CHECK-NEXT:    whilewr p1.b, x8, x1
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 64 x i1> @llvm.loop.dependence.war.mask.nxv64i1(ptr %a, ptr %b, i64 1)
@@ -154,11 +248,14 @@ define <vscale x 16 x i1> @whilewr_16_expand(ptr %a, ptr %b) {
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    cmphi p0.d, p0/z, z2.d, z0.d
 ; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x9, x8, mi
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    ccmp x9, #15, #2, ge
 ; CHECK-NEXT:    uzp1 p0.s, p7.s, p0.s
+; CHECK-NEXT:    cset w8, hi
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
 ; CHECK-NEXT:    uzp1 p0.h, p2.h, p0.h
 ; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
 ; CHECK-NEXT:    whilelo p1.b, xzr, x8
@@ -176,7 +273,6 @@ define <vscale x 32 x i1> @whilewr_16_expand2(ptr %a, ptr %b) {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Spill
@@ -186,74 +282,75 @@ define <vscale x 32 x i1> @whilewr_16_expand2(ptr %a, ptr %b) {
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    incb x0, all, mul #2
-; CHECK-NEXT:    add x8, x8, x8, lsr #63
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    add x8, x8, x8, lsr #63
 ; CHECK-NEXT:    asr x8, x8, #1
-; CHECK-NEXT:    sub x9, x1, x0
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    mov z5.d, x8
-; CHECK-NEXT:    add x9, x9, x9, lsr #63
+; CHECK-NEXT:    mov z5.d, z0.d
+; CHECK-NEXT:    mov z3.d, x8
 ; CHECK-NEXT:    incd z1.d
 ; CHECK-NEXT:    incd z2.d, all, mul #2
-; CHECK-NEXT:    incd z3.d, all, mul #4
-; CHECK-NEXT:    cmphi p2.d, p0/z, z5.d, z0.d
-; CHECK-NEXT:    asr x9, x9, #1
+; CHECK-NEXT:    incd z5.d, all, mul #4
+; CHECK-NEXT:    cmphi p5.d, p0/z, z3.d, z0.d
+; CHECK-NEXT:    add z0.d, z0.d, #16 // =0x10
 ; CHECK-NEXT:    mov z4.d, z1.d
-; CHECK-NEXT:    mov z6.d, z1.d
-; CHECK-NEXT:    mov z7.d, z2.d
-; CHECK-NEXT:    cmphi p1.d, p0/z, z5.d, z1.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z3.d
-; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z2.d
+; CHECK-NEXT:    mov z6.d, z2.d
+; CHECK-NEXT:    mov z7.d, z1.d
+; CHECK-NEXT:    cmphi p3.d, p0/z, z3.d, z1.d
+; CHECK-NEXT:    cmphi p1.d, p0/z, z3.d, z2.d
+; CHECK-NEXT:    cmphi p4.d, p0/z, z3.d, z5.d
+; CHECK-NEXT:    add z5.d, z5.d, #16 // =0x10
+; CHECK-NEXT:    add z2.d, z2.d, #16 // =0x10
+; CHECK-NEXT:    add z1.d, z1.d, #16 // =0x10
 ; CHECK-NEXT:    incd z4.d, all, mul #2
 ; CHECK-NEXT:    incd z6.d, all, mul #4
 ; CHECK-NEXT:    incd z7.d, all, mul #4
-; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
+; CHECK-NEXT:    uzp1 p3.s, p5.s, p3.s
 ; CHECK-NEXT:    mov z24.d, z4.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z7.d
+; CHECK-NEXT:    cmphi p2.d, p0/z, z3.d, z4.d
+; CHECK-NEXT:    cmphi p6.d, p0/z, z3.d, z7.d
+; CHECK-NEXT:    cmphi p7.d, p0/z, z3.d, z6.d
+; CHECK-NEXT:    add z6.d, z6.d, #16 // =0x10
+; CHECK-NEXT:    add z7.d, z7.d, #16 // =0x10
+; CHECK-NEXT:    add z4.d, z4.d, #16 // =0x10
 ; CHECK-NEXT:    incd z24.d, all, mul #4
-; CHECK-NEXT:    uzp1 p2.s, p3.s, p4.s
-; CHECK-NEXT:    uzp1 p3.s, p5.s, p6.s
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z24.d
-; CHECK-NEXT:    mov z5.d, x9
+; CHECK-NEXT:    uzp1 p1.s, p1.s, p2.s
+; CHECK-NEXT:    uzp1 p2.s, p4.s, p6.s
+; CHECK-NEXT:    cmphi p5.d, p0/z, z3.d, z24.d
+; CHECK-NEXT:    uzp1 p1.h, p3.h, p1.h
+; CHECK-NEXT:    add z24.d, z24.d, #16 // =0x10
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x9, x8, mi
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z24.d
-; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    uzp1 p7.s, p7.s, p8.s
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z3.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z2.d
+; CHECK-NEXT:    ccmp x9, #15, #2, ge
+; CHECK-NEXT:    uzp1 p3.s, p7.s, p5.s
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    cmphi p4.d, p0/z, z3.d, z24.d
+; CHECK-NEXT:    cmphi p5.d, p0/z, z3.d, z6.d
+; CHECK-NEXT:    cmphi p6.d, p0/z, z3.d, z7.d
+; CHECK-NEXT:    cmphi p7.d, p0/z, z3.d, z5.d
+; CHECK-NEXT:    uzp1 p2.h, p2.h, p3.h
+; CHECK-NEXT:    cmphi p3.d, p0/z, z3.d, z4.d
+; CHECK-NEXT:    cmphi p8.d, p0/z, z3.d, z2.d
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p2.h, p2.h, p7.h
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z1.d
-; CHECK-NEXT:    cmphi p0.d, p0/z, z5.d, z0.d
+; CHECK-NEXT:    uzp1 p1.b, p1.b, p2.b
+; CHECK-NEXT:    cmphi p2.d, p0/z, z3.d, z1.d
+; CHECK-NEXT:    cmphi p0.d, p0/z, z3.d, z0.d
 ; CHECK-NEXT:    uzp1 p4.s, p5.s, p4.s
-; CHECK-NEXT:    uzp1 p5.s, p9.s, p6.s
-; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Reload
-; CHECK-NEXT:    whilelo p6.b, xzr, x8
+; CHECK-NEXT:    uzp1 p5.s, p7.s, p6.s
+; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    uzp1 p3.s, p8.s, p3.s
-; CHECK-NEXT:    cmp x9, #1
 ; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.s, p0.s, p7.s
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p4.h, p5.h, p4.h
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    uzp1 p0.s, p0.s, p2.s
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p2.h, p5.h, p4.h
 ; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    uzp1 p0.h, p0.h, p3.h
-; CHECK-NEXT:    uzp1 p1.b, p1.b, p2.b
-; CHECK-NEXT:    uzp1 p2.b, p0.b, p4.b
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    whilelo p3.b, xzr, x8
-; CHECK-NEXT:    sel p0.b, p1, p1.b, p6.b
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p2.b, p0.b, p2.b
+; CHECK-NEXT:    sel p0.b, p1, p1.b, p3.b
 ; CHECK-NEXT:    sel p1.b, p2, p2.b, p3.b
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -274,21 +371,24 @@ define <vscale x 8 x i1> @whilewr_32_expand(ptr %a, ptr %b) {
 ; CHECK-NEXT:    asr x8, x8, #2
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, x8
+; CHECK-NEXT:    mov z4.d, x8
 ; CHECK-NEXT:    incd z1.d
 ; CHECK-NEXT:    incd z2.d, all, mul #2
-; CHECK-NEXT:    cmphi p1.d, p0/z, z3.d, z0.d
-; CHECK-NEXT:    mov z4.d, z1.d
-; CHECK-NEXT:    cmphi p2.d, p0/z, z3.d, z1.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z3.d, z2.d
-; CHECK-NEXT:    incd z4.d, all, mul #2
-; CHECK-NEXT:    uzp1 p1.s, p1.s, p2.s
-; CHECK-NEXT:    cmphi p0.d, p0/z, z3.d, z4.d
+; CHECK-NEXT:    cmphi p3.d, p0/z, z4.d, z0.d
+; CHECK-NEXT:    mov z3.d, z1.d
+; CHECK-NEXT:    cmphi p1.d, p0/z, z4.d, z2.d
+; CHECK-NEXT:    incd z3.d, all, mul #2
+; CHECK-NEXT:    cmphi p2.d, p0/z, z4.d, z3.d
+; CHECK-NEXT:    cmphi p0.d, p0/z, z4.d, z1.d
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x9, x8, mi
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    ccmp x9, #7, #2, ge
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    uzp1 p1.s, p1.s, p2.s
 ; CHECK-NEXT:    uzp1 p0.s, p3.s, p0.s
-; CHECK-NEXT:    uzp1 p0.h, p1.h, p0.h
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    uzp1 p0.h, p0.h, p1.h
 ; CHECK-NEXT:    whilelo p1.h, xzr, x8
 ; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-NEXT:    ret
@@ -342,11 +442,14 @@ define <vscale x 16 x i1> @whilewr_32_expand2(ptr %a, ptr %b) {
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    cmphi p0.d, p0/z, z2.d, z0.d
 ; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x9, x8, mi
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    ccmp x9, #15, #2, ge
 ; CHECK-NEXT:    uzp1 p0.s, p7.s, p0.s
+; CHECK-NEXT:    cset w8, hi
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
 ; CHECK-NEXT:    uzp1 p0.h, p2.h, p0.h
 ; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
 ; CHECK-NEXT:    whilelo p1.b, xzr, x8
@@ -364,8 +467,6 @@ define <vscale x 32 x i1> @whilewr_32_expand3(ptr %a, ptr %b) {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    str p10, [sp, #1, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Spill
@@ -377,76 +478,75 @@ define <vscale x 32 x i1> @whilewr_32_expand3(ptr %a, ptr %b) {
 ; CHECK-NEXT:    subs x8, x1, x0
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    add x9, x8, #3
-; CHECK-NEXT:    incb x0, all, mul #4
 ; CHECK-NEXT:    csel x8, x9, x8, mi
 ; CHECK-NEXT:    asr x8, x8, #2
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, x8
+; CHECK-NEXT:    mov z5.d, z0.d
+; CHECK-NEXT:    mov z3.d, x8
 ; CHECK-NEXT:    incd z1.d
 ; CHECK-NEXT:    incd z2.d, all, mul #2
-; CHECK-NEXT:    incd z4.d, all, mul #4
-; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z0.d
-; CHECK-NEXT:    mov z3.d, z1.d
+; CHECK-NEXT:    incd z5.d, all, mul #4
+; CHECK-NEXT:    cmphi p5.d, p0/z, z3.d, z0.d
+; CHECK-NEXT:    add z0.d, z0.d, #16 // =0x10
+; CHECK-NEXT:    mov z4.d, z1.d
 ; CHECK-NEXT:    mov z6.d, z2.d
 ; CHECK-NEXT:    mov z7.d, z1.d
-; CHECK-NEXT:    cmphi p2.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z2.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z1.d
-; CHECK-NEXT:    incd z3.d, all, mul #2
+; CHECK-NEXT:    cmphi p3.d, p0/z, z3.d, z1.d
+; CHECK-NEXT:    cmphi p1.d, p0/z, z3.d, z2.d
+; CHECK-NEXT:    cmphi p4.d, p0/z, z3.d, z5.d
+; CHECK-NEXT:    add z5.d, z5.d, #16 // =0x10
+; CHECK-NEXT:    add z2.d, z2.d, #16 // =0x10
+; CHECK-NEXT:    add z1.d, z1.d, #16 // =0x10
+; CHECK-NEXT:    incd z4.d, all, mul #2
 ; CHECK-NEXT:    incd z6.d, all, mul #4
 ; CHECK-NEXT:    incd z7.d, all, mul #4
-; CHECK-NEXT:    uzp1 p4.s, p5.s, p4.s
-; CHECK-NEXT:    mov z24.d, z3.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z3.d
+; CHECK-NEXT:    uzp1 p3.s, p5.s, p3.s
+; CHECK-NEXT:    mov z24.d, z4.d
+; CHECK-NEXT:    cmphi p2.d, p0/z, z3.d, z4.d
+; CHECK-NEXT:    cmphi p6.d, p0/z, z3.d, z7.d
+; CHECK-NEXT:    cmphi p7.d, p0/z, z3.d, z6.d
+; CHECK-NEXT:    add z6.d, z6.d, #16 // =0x10
+; CHECK-NEXT:    add z7.d, z7.d, #16 // =0x10
+; CHECK-NEXT:    add z4.d, z4.d, #16 // =0x10
 ; CHECK-NEXT:    incd z24.d, all, mul #4
-; CHECK-NEXT:    uzp1 p2.s, p2.s, p7.s
-; CHECK-NEXT:    uzp1 p3.s, p3.s, p8.s
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z24.d
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p3.h, p4.h, p3.h
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p6.s, p6.s, p9.s
-; CHECK-NEXT:    whilelo p1.b, xzr, x8
-; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    uzp1 p2.h, p2.h, p6.h
-; CHECK-NEXT:    add x9, x8, #3
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    uzp1 p2.b, p3.b, p2.b
-; CHECK-NEXT:    asr x8, x8, #2
-; CHECK-NEXT:    mov z5.d, x8
-; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z24.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z3.d
-; CHECK-NEXT:    cmphi p10.d, p0/z, z5.d, z2.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z1.d
-; CHECK-NEXT:    cmphi p0.d, p0/z, z5.d, z0.d
+; CHECK-NEXT:    uzp1 p1.s, p1.s, p2.s
+; CHECK-NEXT:    uzp1 p2.s, p4.s, p6.s
+; CHECK-NEXT:    cmphi p5.d, p0/z, z3.d, z24.d
+; CHECK-NEXT:    uzp1 p1.h, p3.h, p1.h
+; CHECK-NEXT:    add z24.d, z24.d, #16 // =0x10
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x9, x8, mi
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p5.s, p7.s, p5.s
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    uzp1 p7.s, p9.s, p8.s
+; CHECK-NEXT:    ccmp x9, #15, #2, ge
+; CHECK-NEXT:    uzp1 p3.s, p7.s, p5.s
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    cmphi p4.d, p0/z, z3.d, z24.d
+; CHECK-NEXT:    cmphi p5.d, p0/z, z3.d, z6.d
+; CHECK-NEXT:    cmphi p6.d, p0/z, z3.d, z7.d
+; CHECK-NEXT:    cmphi p7.d, p0/z, z3.d, z5.d
+; CHECK-NEXT:    uzp1 p2.h, p2.h, p3.h
+; CHECK-NEXT:    cmphi p3.d, p0/z, z3.d, z4.d
+; CHECK-NEXT:    cmphi p8.d, p0/z, z3.d, z2.d
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p4.s, p10.s, p4.s
-; CHECK-NEXT:    ldr p10, [sp, #1, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.s, p0.s, p6.s
-; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p5.h, p7.h, p5.h
+; CHECK-NEXT:    uzp1 p1.b, p1.b, p2.b
+; CHECK-NEXT:    cmphi p2.d, p0/z, z3.d, z1.d
+; CHECK-NEXT:    cmphi p0.d, p0/z, z3.d, z0.d
+; CHECK-NEXT:    uzp1 p4.s, p5.s, p4.s
+; CHECK-NEXT:    uzp1 p5.s, p7.s, p6.s
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.h, p0.h, p4.h
+; CHECK-NEXT:    uzp1 p3.s, p8.s, p3.s
+; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p0.s, p0.s, p2.s
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT:    whilelo p4.b, xzr, x8
-; CHECK-NEXT:    uzp1 p3.b, p0.b, p5.b
+; CHECK-NEXT:    uzp1 p2.h, p5.h, p4.h
 ; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    sel p0.b, p2, p2.b, p1.b
-; CHECK-NEXT:    sel p1.b, p3, p3.b, p4.b
+; CHECK-NEXT:    uzp1 p0.h, p0.h, p3.h
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
+; CHECK-NEXT:    whilelo p3.b, xzr, x8
+; CHECK-NEXT:    uzp1 p2.b, p0.b, p2.b
+; CHECK-NEXT:    sel p0.b, p1, p1.b, p3.b
+; CHECK-NEXT:    sel p1.b, p2, p2.b, p3.b
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -469,10 +569,13 @@ define <vscale x 4 x i1> @whilewr_64_expand(ptr %a, ptr %b) {
 ; CHECK-NEXT:    incd z1.d
 ; CHECK-NEXT:    cmphi p1.d, p0/z, z2.d, z0.d
 ; CHECK-NEXT:    cmphi p0.d, p0/z, z2.d, z1.d
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x9, x8, mi
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    ccmp x9, #3, #2, ge
+; CHECK-NEXT:    cset w8, hi
 ; CHECK-NEXT:    uzp1 p0.s, p1.s, p0.s
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
 ; CHECK-NEXT:    whilelo p1.s, xzr, x8
 ; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-NEXT:    ret
@@ -492,21 +595,24 @@ define <vscale x 8 x i1> @whilewr_64_expand2(ptr %a, ptr %b) {
 ; CHECK-NEXT:    asr x8, x8, #3
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, x8
+; CHECK-NEXT:    mov z4.d, x8
 ; CHECK-NEXT:    incd z1.d
 ; CHECK-NEXT:    incd z2.d, all, mul #2
-; CHECK-NEXT:    cmphi p1.d, p0/z, z3.d, z0.d
-; CHECK-NEXT:    mov z4.d, z1.d
-; CHECK-NEXT:    cmphi p2.d, p0/z, z3.d, z1.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z3.d, z2.d
-; CHECK-NEXT:    incd z4.d, all, mul #2
-; CHECK-NEXT:    uzp1 p1.s, p1.s, p2.s
-; CHECK-NEXT:    cmphi p0.d, p0/z, z3.d, z4.d
+; CHECK-NEXT:    cmphi p3.d, p0/z, z4.d, z0.d
+; CHECK-NEXT:    mov z3.d, z1.d
+; CHECK-NEXT:    cmphi p1.d, p0/z, z4.d, z2.d
+; CHECK-NEXT:    incd z3.d, all, mul #2
+; CHECK-NEXT:    cmphi p2.d, p0/z, z4.d, z3.d
+; CHECK-NEXT:    cmphi p0.d, p0/z, z4.d, z1.d
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x9, x8, mi
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    ccmp x9, #7, #2, ge
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    uzp1 p1.s, p1.s, p2.s
 ; CHECK-NEXT:    uzp1 p0.s, p3.s, p0.s
-; CHECK-NEXT:    uzp1 p0.h, p1.h, p0.h
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    uzp1 p0.h, p0.h, p1.h
 ; CHECK-NEXT:    whilelo p1.h, xzr, x8
 ; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-NEXT:    ret
@@ -560,11 +666,14 @@ define <vscale x 16 x i1> @whilewr_64_expand3(ptr %a, ptr %b) {
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    cmphi p0.d, p0/z, z2.d, z0.d
 ; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x9, x8, mi
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    ccmp x9, #15, #2, ge
 ; CHECK-NEXT:    uzp1 p0.s, p7.s, p0.s
+; CHECK-NEXT:    cset w8, hi
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
 ; CHECK-NEXT:    uzp1 p0.h, p2.h, p0.h
 ; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
 ; CHECK-NEXT:    whilelo p1.b, xzr, x8
@@ -582,8 +691,6 @@ define <vscale x 32 x i1> @whilewr_64_expand4(ptr %a, ptr %b) {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    str p10, [sp, #1, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Spill
@@ -596,75 +703,74 @@ define <vscale x 32 x i1> @whilewr_64_expand4(ptr %a, ptr %b) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    add x9, x8, #7
 ; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    addvl x9, x0, #8
 ; CHECK-NEXT:    asr x8, x8, #3
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, x8
+; CHECK-NEXT:    mov z5.d, z0.d
+; CHECK-NEXT:    mov z3.d, x8
 ; CHECK-NEXT:    incd z1.d
 ; CHECK-NEXT:    incd z2.d, all, mul #2
-; CHECK-NEXT:    incd z4.d, all, mul #4
-; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z0.d
-; CHECK-NEXT:    mov z3.d, z1.d
+; CHECK-NEXT:    incd z5.d, all, mul #4
+; CHECK-NEXT:    cmphi p5.d, p0/z, z3.d, z0.d
+; CHECK-NEXT:    add z0.d, z0.d, #16 // =0x10
+; CHECK-NEXT:    mov z4.d, z1.d
 ; CHECK-NEXT:    mov z6.d, z2.d
 ; CHECK-NEXT:    mov z7.d, z1.d
-; CHECK-NEXT:    cmphi p2.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z2.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z1.d
-; CHECK-NEXT:    incd z3.d, all, mul #2
+; CHECK-NEXT:    cmphi p3.d, p0/z, z3.d, z1.d
+; CHECK-NEXT:    cmphi p1.d, p0/z, z3.d, z2.d
+; CHECK-NEXT:    cmphi p4.d, p0/z, z3.d, z5.d
+; CHECK-NEXT:    add z5.d, z5.d, #16 // =0x10
+; CHECK-NEXT:    add z2.d, z2.d, #16 // =0x10
+; CHECK-NEXT:    add z1.d, z1.d, #16 // =0x10
+; CHECK-NEXT:    incd z4.d, all, mul #2
 ; CHECK-NEXT:    incd z6.d, all, mul #4
 ; CHECK-NEXT:    incd z7.d, all, mul #4
-; CHECK-NEXT:    uzp1 p4.s, p5.s, p4.s
-; CHECK-NEXT:    mov z24.d, z3.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z3.d
+; CHECK-NEXT:    uzp1 p3.s, p5.s, p3.s
+; CHECK-NEXT:    mov z24.d, z4.d
+; CHECK-NEXT:    cmphi p2.d, p0/z, z3.d, z4.d
+; CHECK-NEXT:    cmphi p6.d, p0/z, z3.d, z7.d
+; CHECK-NEXT:    cmphi p7.d, p0/z, z3.d, z6.d
+; CHECK-NEXT:    add z6.d, z6.d, #16 // =0x10
+; CHECK-NEXT:    add z7.d, z7.d, #16 // =0x10
+; CHECK-NEXT:    add z4.d, z4.d, #16 // =0x10
 ; CHECK-NEXT:    incd z24.d, all, mul #4
-; CHECK-NEXT:    uzp1 p2.s, p2.s, p7.s
-; CHECK-NEXT:    uzp1 p3.s, p3.s, p8.s
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z24.d
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p3.h, p4.h, p3.h
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p6.s, p6.s, p9.s
-; CHECK-NEXT:    whilelo p1.b, xzr, x8
-; CHECK-NEXT:    subs x8, x1, x9
-; CHECK-NEXT:    uzp1 p2.h, p2.h, p6.h
-; CHECK-NEXT:    add x9, x8, #7
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    uzp1 p2.b, p3.b, p2.b
-; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    mov z5.d, x8
-; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z24.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z3.d
-; CHECK-NEXT:    cmphi p10.d, p0/z, z5.d, z2.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z1.d
-; CHECK-NEXT:    cmphi p0.d, p0/z, z5.d, z0.d
+; CHECK-NEXT:    uzp1 p1.s, p1.s, p2.s
+; CHECK-NEXT:    uzp1 p2.s, p4.s, p6.s
+; CHECK-NEXT:    cmphi p5.d, p0/z, z3.d, z24.d
+; CHECK-NEXT:    uzp1 p1.h, p3.h, p1.h
+; CHECK-NEXT:    add z24.d, z24.d, #16 // =0x10
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x9, x8, mi
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p5.s, p7.s, p5.s
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    uzp1 p7.s, p9.s, p8.s
+; CHECK-NEXT:    ccmp x9, #15, #2, ge
+; CHECK-NEXT:    uzp1 p3.s, p7.s, p5.s
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    cmphi p4.d, p0/z, z3.d, z24.d
+; CHECK-NEXT:    cmphi p5.d, p0/z, z3.d, z6.d
+; CHECK-NEXT:    cmphi p6.d, p0/z, z3.d, z7.d
+; CHECK-NEXT:    cmphi p7.d, p0/z, z3.d, z5.d
+; CHECK-NEXT:    uzp1 p2.h, p2.h, p3.h
+; CHECK-NEXT:    cmphi p3.d, p0/z, z3.d, z4.d
+; CHECK-NEXT:    cmphi p8.d, p0/z, z3.d, z2.d
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p4.s, p10.s, p4.s
-; CHECK-NEXT:    ldr p10, [sp, #1, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.s, p0.s, p6.s
-; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p5.h, p7.h, p5.h
+; CHECK-NEXT:    uzp1 p1.b, p1.b, p2.b
+; CHECK-NEXT:    cmphi p2.d, p0/z, z3.d, z1.d
+; CHECK-NEXT:    cmphi p0.d, p0/z, z3.d, z0.d
+; CHECK-NEXT:    uzp1 p4.s, p5.s, p4.s
+; CHECK-NEXT:    uzp1 p5.s, p7.s, p6.s
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.h, p0.h, p4.h
+; CHECK-NEXT:    uzp1 p3.s, p8.s, p3.s
+; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p0.s, p0.s, p2.s
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT:    whilelo p4.b, xzr, x8
-; CHECK-NEXT:    uzp1 p3.b, p0.b, p5.b
+; CHECK-NEXT:    uzp1 p2.h, p5.h, p4.h
 ; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    sel p0.b, p2, p2.b, p1.b
-; CHECK-NEXT:    sel p1.b, p3, p3.b, p4.b
+; CHECK-NEXT:    uzp1 p0.h, p0.h, p3.h
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
+; CHECK-NEXT:    whilelo p3.b, xzr, x8
+; CHECK-NEXT:    uzp1 p2.b, p0.b, p2.b
+; CHECK-NEXT:    sel p0.b, p1, p1.b, p3.b
+; CHECK-NEXT:    sel p1.b, p2, p2.b, p3.b
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -749,11 +855,14 @@ define <vscale x 16 x i1> @whilewr_badimm(ptr %a, ptr %b) {
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    cmphi p0.d, p0/z, z2.d, z0.d
 ; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x9, x8, mi
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    ccmp x9, #15, #2, ge
 ; CHECK-NEXT:    uzp1 p0.s, p7.s, p0.s
+; CHECK-NEXT:    cset w8, hi
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
 ; CHECK-NEXT:    uzp1 p0.h, p2.h, p0.h
 ; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
 ; CHECK-NEXT:    whilelo p1.b, xzr, x8
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_scalable_nosve2.ll b/llvm/test/CodeGen/AArch64/alias_mask_scalable_nosve2.ll
index d62d0665dd332..a2bfe6ffe55bd 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_scalable_nosve2.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_scalable_nosve2.ll
@@ -4,54 +4,31 @@
 define <vscale x 16 x i1> @whilewr_8(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_8:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
-; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    index z0.d, #0, #1
+; CHECK-NEXT:    index z0.s, #0, #1
 ; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    mov z2.d, x8
-; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    mov z1.s, w8
+; CHECK-NEXT:    mov z2.d, z0.d
 ; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    cmphi p1.d, p0/z, z2.d, z0.d
-; CHECK-NEXT:    incd z0.d, all, mul #4
-; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    incd z3.d, all, mul #2
-; CHECK-NEXT:    cmphi p5.d, p0/z, z2.d, z0.d
-; CHECK-NEXT:    mov z4.d, z1.d
-; CHECK-NEXT:    cmphi p2.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    incd z1.d, all, mul #4
-; CHECK-NEXT:    cmphi p3.d, p0/z, z2.d, z3.d
-; CHECK-NEXT:    incd z3.d, all, mul #4
-; CHECK-NEXT:    incd z4.d, all, mul #2
-; CHECK-NEXT:    cmphi p6.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z2.d, z3.d
-; CHECK-NEXT:    uzp1 p1.s, p1.s, p2.s
-; CHECK-NEXT:    cmphi p4.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    incd z4.d, all, mul #4
-; CHECK-NEXT:    uzp1 p2.s, p5.s, p6.s
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    cmphi p0.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    uzp1 p3.s, p3.s, p4.s
+; CHECK-NEXT:    cmphi p3.s, p0/z, z1.s, z0.s
+; CHECK-NEXT:    incw z2.s
+; CHECK-NEXT:    incw z3.s, all, mul #2
+; CHECK-NEXT:    mov z4.d, z2.d
+; CHECK-NEXT:    cmphi p1.s, p0/z, z1.s, z3.s
+; CHECK-NEXT:    incw z4.s, all, mul #2
+; CHECK-NEXT:    cmphi p2.s, p0/z, z1.s, z4.s
+; CHECK-NEXT:    cmphi p0.s, p0/z, z1.s, z2.s
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x9, x8, mi
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
+; CHECK-NEXT:    ccmp x9, #15, #2, ge
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    uzp1 p1.h, p1.h, p2.h
+; CHECK-NEXT:    uzp1 p0.h, p3.h, p0.h
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p0.s, p7.s, p0.s
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.h, p2.h, p0.h
-; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
+; CHECK-NEXT:    uzp1 p0.b, p0.b, p1.b
 ; CHECK-NEXT:    whilelo p1.b, xzr, x8
 ; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
-; CHECK-NEXT:    addvl sp, sp, #1
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 16 x i1> @llvm.loop.dependence.war.mask.nxv16i1(ptr %a, ptr %b, i64 1)

>From 71e5d6fde3c5639b5abbfc6bef71b5e4d29d343e Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell <benjamin.maxwell at arm.com>
Date: Thu, 4 Dec 2025 16:14:08 +0000
Subject: [PATCH 2/3] Some fixups

---
 llvm/include/llvm/CodeGen/ISDOpcodes.h        |  13 +
 .../SelectionDAG/LegalizeVectorOps.cpp        | 119 ++-
 .../SelectionDAG/LegalizeVectorTypes.cpp      |  41 +-
 llvm/test/CodeGen/AArch64/alias_mask.ll       | 683 ++++------------
 llvm/test/CodeGen/AArch64/alias_mask_nosve.ll |  29 +-
 .../CodeGen/AArch64/alias_mask_scalable.ll    | 769 +++++-------------
 .../AArch64/alias_mask_scalable_nosve2.ll     |  29 +-
 .../AArch64/loop-dependence-mask-ccmp.ll      |  45 -
 8 files changed, 448 insertions(+), 1280 deletions(-)
 delete mode 100644 llvm/test/CodeGen/AArch64/loop-dependence-mask-ccmp.ll

diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index 8d5f3f6585cbf..ca48c9bb811dc 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -1571,6 +1571,19 @@ enum NodeType {
   // The `llvm.loop.dependence.{war, raw}.mask` intrinsics
   // Operands: Load pointer, Store pointer, Element size, Lane offset
   // Output: Mask
+  //
+  // Note: The semantics of these opcodes differ slightly from the intrinsics.
+  // Wherever "lane" (meaning lane index) occurs in the intrinsic definition, it
+  // is replaced with (lane + lane_offset) for the ISD opcode.
+  //
+  //  E.g., for LOOP_DEPENDENCE_WAR_MASK:
+  //    `(ptrB - ptrA) >= elementSize * lane`
+  //  Becomes:
+  //    `(ptrB - ptrA) >= elementSize * (lane + lane_offset)`
+  //
+  // This is done to allow for trivial splitting of the operation. Note: The
+  // lane offset is always a constant; for scalable masks, it is implicitly
+  // multiplied by vscale.
   LOOP_DEPENDENCE_WAR_MASK,
   LOOP_DEPENDENCE_RAW_MASK,
 
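For readers skimming the patch, a small standalone model may help show why the added lane offset makes splitting trivial. This is only an illustrative sketch of the WAR condition quoted in the comment above (the helper name and concrete pointer values are made up, and the full intrinsic definition has more cases than are modelled here); it is not the SelectionDAG code.

  #include <cassert>
  #include <cstdint>
  #include <vector>

  // Per-lane WAR condition with "lane" replaced by (lane + laneOffset),
  // as described in the ISDOpcodes.h comment above.
  static std::vector<bool> warMask(int64_t ptrA, int64_t ptrB, int64_t eltSize,
                                   unsigned numLanes, unsigned laneOffset) {
    std::vector<bool> mask(numLanes);
    for (unsigned lane = 0; lane != numLanes; ++lane)
      mask[lane] = (ptrB - ptrA) >= eltSize * int64_t(lane + laneOffset);
    return mask;
  }

  int main() {
    // A 32-lane mask with offset 0 equals the concatenation of two 16-lane
    // masks with offsets 0 and 16 -- which is all the splitting code relies on.
    auto whole = warMask(/*ptrA=*/0, /*ptrB=*/20, /*eltSize=*/1, 32, 0);
    auto lo = warMask(0, 20, 1, 16, 0);
    auto hi = warMask(0, 20, 1, 16, 16);
    for (unsigned i = 0; i != 16; ++i)
      assert(whole[i] == lo[i] && whole[i + 16] == hi[i]);
  }
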
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 8c6a4209e1c33..2bee083dc3bb1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -29,6 +29,7 @@
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
 #include "llvm/Analysis/VectorUtils.h"
 #include "llvm/CodeGen/ISDOpcodes.h"
 #include "llvm/CodeGen/SelectionDAG.h"
@@ -42,7 +43,6 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include <cassert>
-#include <cmath>
 #include <cstdint>
 #include <iterator>
 #include <utility>
@@ -1811,85 +1811,84 @@ SDValue VectorLegalizer::ExpandVP_FCOPYSIGN(SDNode *Node) {
   return DAG.getNode(ISD::BITCAST, DL, VT, CopiedSign);
 }
 
-// Expand a loop dependence mask.
-// First the difference is taken between the pointers and divided by the element
-// size, to see how many lanes separate them. That difference is then splat and
-// compared with a step vector to produce a mask with lanes less than the
-// difference active and the rest inactive. To capture the case where the
-// pointers are the same (or the source pointer is greater than the sink pointer
-// for write-after-read), the difference is compared to zero and that result is
-// splat to another mask. Those two masks are then ORed to produce the final
-// loop dependence mask.
 SDValue VectorLegalizer::ExpandLOOP_DEPENDENCE_MASK(SDNode *N) {
   SDLoc DL(N);
+  EVT VT = N->getValueType(0);
   SDValue SourceValue = N->getOperand(0);
   SDValue SinkValue = N->getOperand(1);
-  SDValue EltSize = N->getOperand(2);
-  unsigned EltSizeInBytes = N->getConstantOperandVal(2);
-  unsigned LaneOffset = N->getConstantOperandVal(3);
+  SDValue EltSizeInBytes = N->getOperand(2);
+  const Function &F = DAG.getMachineFunction().getFunction();
+
+  // Note: The lane offset is scalable if the mask is scalable.
+  ElementCount LaneOffset =
+      ElementCount::get(N->getConstantOperandVal(3), VT.isScalableVT());
 
-  bool IsReadAfterWrite = N->getOpcode() == ISD::LOOP_DEPENDENCE_RAW_MASK;
-  EVT VT = N->getValueType(0);
   EVT PtrVT = SourceValue->getValueType(0);
+  bool IsReadAfterWrite = N->getOpcode() == ISD::LOOP_DEPENDENCE_RAW_MASK;
 
+  // Take the difference between the pointers and divide it by the element
+  // size to see how many lanes separate them.
   SDValue Diff = DAG.getNode(ISD::SUB, DL, PtrVT, SinkValue, SourceValue);
   if (IsReadAfterWrite)
     Diff = DAG.getNode(ISD::ABS, DL, PtrVT, Diff);
+  Diff = DAG.getNode(ISD::SDIV, DL, PtrVT, Diff, EltSizeInBytes);
 
-  Diff = DAG.getNode(ISD::SDIV, DL, PtrVT, Diff, EltSize);
-
-  // If the difference is positive then some elements may alias
-  EVT CmpVT = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
-                                     Diff.getValueType());
+  // The pointers do not alias if:
+  //  * Diff <= 0 (WAR_MASK)
+  //  * Diff == 0 (RAW_MASK)
+  EVT CmpVT = VT.getVectorElementType();
   SDValue Zero = DAG.getConstant(0, DL, PtrVT);
   SDValue Cmp = DAG.getSetCC(DL, CmpVT, Diff, Zero,
                              IsReadAfterWrite ? ISD::SETEQ : ISD::SETLE);
 
-  // Create the mask with lanes less than the difference active and the rest
-  // inactive. For optimisation reaons we want to minimise the size of the
-  // integer used to splat the difference and add the lane offset. If we keep it
-  // as a 64 bit value then the splat will use lots of vectors unnecessarily.
-  int SplatBitWidth =
-      std::pow(2, Log2_32(VT.getVectorMinNumElements() * EltSizeInBytes) + 1);
-  SplatBitWidth = std::min(SplatBitWidth, 64);
-  EVT SplatVT = VT.changeElementType(MVT::getIntegerVT(SplatBitWidth));
-
-  // Truncate and splat the diff. If this ends up being an unsafe truncate (i.e.
-  // diff > vector length), then it's ignored later on when it's ORed with
-  // abs(diff) >= vector_length.
-  SDValue DiffTrunc = DAG.getExtOrTrunc(!IsReadAfterWrite, Diff, DL,
-                                        SplatVT.getVectorElementType());
+  // The pointers do not alias within the mask if Diff >= MaxMaskLane, since:
+  //  * `(ptrB - ptrA) >= elementSize * lane` (WAR_MASK)
+  //  * `abs(ptrB - ptrA) >= elementSize * lane` (RAW_MASK)
+  // would then hold for every lane, making the mask all-true.
+  ElementCount MaxMaskLaneEC = LaneOffset + VT.getVectorElementCount();
+  SDValue MaxMaskLane = DAG.getElementCount(DL, PtrVT, MaxMaskLaneEC);
+  Cmp = DAG.getNode(ISD::OR, DL, CmpVT, Cmp,
+                    DAG.getSetCC(DL, CmpVT, Diff, MaxMaskLane, ISD::SETUGE));
+
+  // Attempt to determine the max "meaningful" value of Diff for the comparison
+  // with the lane step_vector. We do not have to consider values that would
+  // result in an "all-true" mask due to any of the above cases. This puts a
+  // fairly low upper bound on the element bitwidth needed for the comparison,
+  // which results in efficient codegen (since fewer vectors are needed). Note:
+  // If the upper bound is scalable, we must know the vscale range (otherwise,
+  // we fall back to a very conservative bound).
+  unsigned MaxMeaningfulDiff = 0;
+  if (MaxMaskLaneEC.isScalable()) {
+    ConstantRange VScaleRange = getVScaleRange(&F, /*BitWidth*/ 64);
+    if (!VScaleRange.isFullSet())
+      MaxMeaningfulDiff = MaxMaskLaneEC.getKnownMinValue() *
+                          VScaleRange.getUpper().getZExtValue();
+  } else {
+    MaxMeaningfulDiff = MaxMaskLaneEC.getFixedValue();
+  }
+
+  // Note: MaxMeaningfulDiff is zero if the upper bound is unknown.
+  unsigned SplatBitWidth =
+      !MaxMeaningfulDiff
+          ? 32 // Surely 2**32 lanes is enough.
+          : std::max<unsigned>(PowerOf2Ceil(Log2_32(MaxMeaningfulDiff) + 1), 8);
+  EVT SplatEltVT = MVT::getIntegerVT(SplatBitWidth);
+  EVT SplatVT = VT.changeElementType(SplatEltVT);
+
+  // Truncate and splat the diff. If this ends up being an unsafe truncate (i.e.,
+  // it does not fit within SplatBitWidth bits), the mask is already all-true.
+  SDValue DiffTrunc =
+      DAG.getExtOrTrunc(!IsReadAfterWrite, Diff, DL, SplatEltVT);
   SDValue DiffSplat = DAG.getSplat(SplatVT, DL, DiffTrunc);
+
   SDValue VectorStep = DAG.getStepVector(DL, SplatVT);
   // Add the lane offset. A non-zero lane offset often comes from a
   // larger-than-legal vector length being split in two.
-  VectorStep = DAG.getNode(
+  SDValue LaneIndices = DAG.getNode(
       ISD::ADD, DL, SplatVT, VectorStep,
-      DAG.getSplat(
-          SplatVT, DL,
-          DAG.getConstant(LaneOffset, DL, SplatVT.getVectorElementType())));
-  EVT MaskVT = VT.changeElementType(MVT::i1);
-  SDValue DiffMask =
-      DAG.getSetCC(DL, MaskVT, VectorStep, DiffSplat, ISD::CondCode::SETULT);
-
-  EVT EltVT = VT.getVectorElementType();
-  // Extend the diff setcc in case the intrinsic has been promoted to a vector
-  // type with elements larger than i1
-  if (EltVT.getScalarSizeInBits() > MaskVT.getScalarSizeInBits())
-    DiffMask = DAG.getNode(ISD::ANY_EXTEND, DL, VT, DiffMask);
-
-  if (CmpVT.getScalarSizeInBits() < EltVT.getScalarSizeInBits())
-    Cmp = DAG.getNode(ISD::ZERO_EXTEND, DL, EltVT, Cmp);
-
-  // If the pointer difference was greater than or equal to the max number of
-  // lanes in the mask, then the truncated pointer difference should be ignored
-  // since the truncate could have been unsafe. Use a mask of all active lanes
-  // instead since a pointer difference >= the number of lanes has no loop
-  // depedencies anyway.
-  SDValue AbsDiff = DAG.getNode(ISD::ABS, DL, PtrVT, Diff);
-  SDValue NumElts = DAG.getConstant(VT.getVectorMinNumElements(), DL, PtrVT);
-  Cmp = DAG.getNode(ISD::OR, DL, CmpVT, Cmp,
-                    DAG.getSetCC(DL, CmpVT, AbsDiff, NumElts, ISD::SETUGE));
+      DAG.getSplat(SplatVT, DL,
+                   DAG.getElementCount(DL, SplatEltVT, LaneOffset)));
+  SDValue DiffMask = DAG.getSetCC(DL, VT, LaneIndices, DiffSplat, ISD::SETULT);
 
   SDValue Splat = DAG.getSplat(VT, DL, Cmp);
   return DAG.getNode(ISD::OR, DL, VT, DiffMask, Splat);
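As a cross-check of the expansion above, here is a scalar model of the WAR case. It is illustrative only: the names are invented, the SplatBitWidth truncation is omitted, and it is not the DAG code itself.

  #include <cstdint>
  #include <vector>

  // Scalar model of the WAR expansion: per-lane compare ORed with the
  // "no alias at all" / "distance covers the whole chunk" cases.
  static std::vector<bool> expandWarMask(int64_t source, int64_t sink,
                                         int64_t eltSize, unsigned numLanes,
                                         unsigned laneOffset) {
    int64_t diff = (sink - source) / eltSize;
    // All lanes are safe if the pointers cannot alias at all, or if the
    // distance already covers every lane of this (possibly offset) chunk.
    bool allTrue =
        diff <= 0 || uint64_t(diff) >= uint64_t(laneOffset) + numLanes;
    std::vector<bool> mask(numLanes);
    for (unsigned lane = 0; lane != numLanes; ++lane)
      // The narrow (truncated) compare in the real code is safe for the same
      // reason: any diff too large to fit is already handled by allTrue.
      mask[lane] = allTrue || uint64_t(lane) + laneOffset < uint64_t(diff);
    return mask;
  }
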
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index f7aec7f25b746..0e1cba4bed74a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -404,26 +404,33 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_MERGE_VALUES(SDNode *N,
 }
 
 SDValue DAGTypeLegalizer::ScalarizeVecRes_LOOP_DEPENDENCE_MASK(SDNode *N) {
+  SDLoc DL(N);
   SDValue SourceValue = N->getOperand(0);
   SDValue SinkValue = N->getOperand(1);
-  SDValue EltSize = N->getOperand(2);
-  SDValue Offset = N->getOperand(3);
-  EVT PtrVT = SourceValue->getValueType(0);
-  SDLoc DL(N);
+  SDValue EltSizeInBytes = N->getOperand(2);
+  SDValue LaneOffset = N->getOperand(3);
 
-  // Increment the source pointer by the lane offset multiplied by the element
-  // size. A non-zero offset is normally used when a larger-than-legal mask has
-  // been split.
-  Offset = DAG.getNode(ISD::MUL, DL, PtrVT, Offset, EltSize);
-  SourceValue = DAG.getNode(ISD::ADD, DL, PtrVT, SourceValue, Offset);
+  EVT PtrVT = SourceValue->getValueType(0);
+  bool IsReadAfterWrite = N->getOpcode() == ISD::LOOP_DEPENDENCE_RAW_MASK;
 
+  // Take the difference between the pointers and divide it by the element
+  // size to see how many lanes separate them.
   SDValue Diff = DAG.getNode(ISD::SUB, DL, PtrVT, SinkValue, SourceValue);
+  if (IsReadAfterWrite)
+    Diff = DAG.getNode(ISD::ABS, DL, PtrVT, Diff);
+  Diff = DAG.getNode(ISD::SDIV, DL, PtrVT, Diff, EltSizeInBytes);
+
+  // The pointers do not alias if:
+  //  * Diff <= 0 || LaneOffset < Diff (WAR_MASK)
+  //  * Diff == 0 || LaneOffset < abs(Diff) (RAW_MASK)
+  // Note: If LaneOffset is zero, both cases will fold to "true".
   EVT CmpVT = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                                      Diff.getValueType());
   SDValue Zero = DAG.getConstant(0, DL, PtrVT);
-  return DAG.getNode(ISD::OR, DL, CmpVT,
-                     DAG.getSetCC(DL, CmpVT, Diff, EltSize, ISD::SETGE),
-                     DAG.getSetCC(DL, CmpVT, Diff, Zero, ISD::SETEQ));
+  SDValue Cmp = DAG.getSetCC(DL, CmpVT, Diff, Zero,
+                             IsReadAfterWrite ? ISD::SETEQ : ISD::SETLE);
+  return DAG.getNode(ISD::OR, DL, CmpVT, Cmp,
+                     DAG.getSetCC(DL, CmpVT, LaneOffset, Diff, ISD::SETULT));
 }
 
 SDValue DAGTypeLegalizer::ScalarizeVecRes_BITCAST(SDNode *N) {
@@ -1699,23 +1706,23 @@ void DAGTypeLegalizer::SplitVecRes_BITCAST(SDNode *N, SDValue &Lo,
   Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
 }
 
-/// Split a loop dependence mask.
-/// This is done by creating a high and low mask, each of half the vector
-/// length. The low mask inherits the lane offset from the original mask, and
-/// the high mask adds half the vector length.
 void DAGTypeLegalizer::SplitVecRes_LOOP_DEPENDENCE_MASK(SDNode *N, SDValue &Lo,
                                                         SDValue &Hi) {
   SDLoc DL(N);
   EVT LoVT, HiVT;
-  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
   SDValue PtrA = N->getOperand(0);
   SDValue PtrB = N->getOperand(1);
+  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
 
+  // The lane offset for the "Lo" half of the mask is unchanged.
   Lo = DAG.getNode(N->getOpcode(), DL, LoVT, PtrA, PtrB,
                    /*ElementSizeInBytes=*/N->getOperand(2),
                    /*LaneOffset=*/N->getOperand(3));
+  // The lane offset for the "Hi" half of the mask is incremented by the number
+  // of elements in the "Lo" half.
   unsigned LaneOffset =
       N->getConstantOperandVal(3) + LoVT.getVectorMinNumElements();
+  // Note: The lane offset is implicitly scalable for scalable masks.
   Hi = DAG.getNode(N->getOpcode(), DL, HiVT, PtrA, PtrB,
                    /*ElementSizeInBytes=*/N->getOperand(2),
                    /*LaneOffset=*/DAG.getConstant(LaneOffset, DL, MVT::i64));
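The single-lane (scalarized) case reduces to a scalar boolean; again this is only a sketch of the formula spelled out in the comments above, with invented names, not the legalizer itself.

  #include <cstdint>

  // One lane of a WAR dependence mask, at index laneOffset.
  static bool scalarWarLane(int64_t source, int64_t sink, int64_t eltSize,
                            uint64_t laneOffset) {
    int64_t diff = (sink - source) / eltSize;
    // Safe if the pointers cannot alias, or if this lane sits below the
    // aliasing distance.
    return diff <= 0 || laneOffset < uint64_t(diff);
  }
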
diff --git a/llvm/test/CodeGen/AArch64/alias_mask.ll b/llvm/test/CodeGen/AArch64/alias_mask.ll
index 3e6ade6278e9d..60a4dcd81f1be 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask.ll
@@ -100,33 +100,20 @@ entry:
 define <32 x i1> @whilewr_8_split(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_8_split:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.s, #0, #1
-; CHECK-NEXT:    subs x9, x1, x0
-; CHECK-NEXT:    dup v1.4s, w9
-; CHECK-NEXT:    cneg x10, x9, mi
-; CHECK-NEXT:    cmp x9, #1
-; CHECK-NEXT:    ccmp x10, #15, #2, ge
+; CHECK-NEXT:    mov w9, #16 // =0x10
+; CHECK-NEXT:    sub x10, x1, x0
+; CHECK-NEXT:    index z0.b, w9, #1
+; CHECK-NEXT:    cmp x10, #1
+; CHECK-NEXT:    dup v1.16b, w10
+; CHECK-NEXT:    ccmp x10, #31, #2, ge
 ; CHECK-NEXT:    cset w9, hi
 ; CHECK-NEXT:    whilewr p0.b, x0, x1
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    add z0.s, z0.s, #16 // =0x10
-; CHECK-NEXT:    add z2.s, z2.s, #28 // =0x1c
-; CHECK-NEXT:    add z3.s, z3.s, #24 // =0x18
-; CHECK-NEXT:    add z4.s, z4.s, #20 // =0x14
-; CHECK-NEXT:    cmhi v0.4s, v1.4s, v0.4s
-; CHECK-NEXT:    cmhi v2.4s, v1.4s, v2.4s
-; CHECK-NEXT:    cmhi v3.4s, v1.4s, v3.4s
-; CHECK-NEXT:    cmhi v4.4s, v1.4s, v4.4s
-; CHECK-NEXT:    uzp1 v1.8h, v3.8h, v2.8h
-; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v4.8h
-; CHECK-NEXT:    mov z2.b, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
-; CHECK-NEXT:    dup v1.16b, w9
+; CHECK-NEXT:    dup v2.16b, w9
 ; CHECK-NEXT:    adrp x9, .LCPI8_0
-; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
-; CHECK-NEXT:    shl v1.16b, v2.16b, #7
+; CHECK-NEXT:    cmhi v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    mov z1.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    shl v1.16b, v1.16b, #7
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI8_0]
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
 ; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
@@ -150,93 +137,63 @@ entry:
 define <64 x i1> @whilewr_8_split2(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_8_split2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z1.s, #0, #1
-; CHECK-NEXT:    subs x9, x1, x0
-; CHECK-NEXT:    dup v0.4s, w9
-; CHECK-NEXT:    cneg x10, x9, mi
+; CHECK-NEXT:    index z0.b, #0, #1
+; CHECK-NEXT:    sub x9, x1, x0
 ; CHECK-NEXT:    cmp x9, #1
-; CHECK-NEXT:    ccmp x10, #15, #2, ge
-; CHECK-NEXT:    cset w9, hi
+; CHECK-NEXT:    dup v1.16b, w9
+; CHECK-NEXT:    cset w10, lt
+; CHECK-NEXT:    cmp x9, #31
+; CHECK-NEXT:    csinc w11, w10, wzr, ls
+; CHECK-NEXT:    cmp x9, #47
+; CHECK-NEXT:    mov z2.d, z0.d
+; CHECK-NEXT:    mov z3.d, z0.d
+; CHECK-NEXT:    add z0.b, z0.b, #48 // =0x30
+; CHECK-NEXT:    csinc w12, w10, wzr, ls
+; CHECK-NEXT:    cmp x9, #63
+; CHECK-NEXT:    dup v6.16b, w11
+; CHECK-NEXT:    csinc w9, w10, wzr, ls
+; CHECK-NEXT:    dup v5.16b, w12
 ; CHECK-NEXT:    whilewr p0.b, x0, x1
-; CHECK-NEXT:    mov z4.d, z1.d
-; CHECK-NEXT:    mov z5.d, z1.d
-; CHECK-NEXT:    mov z2.d, z1.d
-; CHECK-NEXT:    mov z3.d, z1.d
-; CHECK-NEXT:    mov z6.d, z1.d
-; CHECK-NEXT:    mov z7.d, z1.d
-; CHECK-NEXT:    mov z16.d, z1.d
-; CHECK-NEXT:    mov z17.d, z1.d
-; CHECK-NEXT:    mov z18.d, z1.d
-; CHECK-NEXT:    mov z19.d, z1.d
-; CHECK-NEXT:    mov z20.d, z1.d
-; CHECK-NEXT:    add z4.s, z4.s, #28 // =0x1c
-; CHECK-NEXT:    add z5.s, z5.s, #24 // =0x18
-; CHECK-NEXT:    add z1.s, z1.s, #48 // =0x30
-; CHECK-NEXT:    add z2.s, z2.s, #20 // =0x14
-; CHECK-NEXT:    add z6.s, z6.s, #44 // =0x2c
-; CHECK-NEXT:    add z18.s, z18.s, #60 // =0x3c
-; CHECK-NEXT:    add z7.s, z7.s, #40 // =0x28
-; CHECK-NEXT:    add z19.s, z19.s, #56 // =0x38
-; CHECK-NEXT:    add z20.s, z20.s, #52 // =0x34
-; CHECK-NEXT:    add z16.s, z16.s, #36 // =0x24
-; CHECK-NEXT:    add z17.s, z17.s, #32 // =0x20
-; CHECK-NEXT:    add z3.s, z3.s, #16 // =0x10
-; CHECK-NEXT:    cmhi v4.4s, v0.4s, v4.4s
-; CHECK-NEXT:    cmhi v5.4s, v0.4s, v5.4s
-; CHECK-NEXT:    cmhi v6.4s, v0.4s, v6.4s
-; CHECK-NEXT:    cmhi v18.4s, v0.4s, v18.4s
-; CHECK-NEXT:    cmhi v19.4s, v0.4s, v19.4s
-; CHECK-NEXT:    cmhi v20.4s, v0.4s, v20.4s
-; CHECK-NEXT:    cmhi v1.4s, v0.4s, v1.4s
-; CHECK-NEXT:    cmhi v7.4s, v0.4s, v7.4s
-; CHECK-NEXT:    cmhi v16.4s, v0.4s, v16.4s
-; CHECK-NEXT:    cmhi v17.4s, v0.4s, v17.4s
-; CHECK-NEXT:    cmhi v2.4s, v0.4s, v2.4s
-; CHECK-NEXT:    cmhi v0.4s, v0.4s, v3.4s
-; CHECK-NEXT:    uzp1 v4.8h, v5.8h, v4.8h
-; CHECK-NEXT:    uzp1 v3.8h, v19.8h, v18.8h
-; CHECK-NEXT:    uzp1 v1.8h, v1.8h, v20.8h
-; CHECK-NEXT:    uzp1 v5.8h, v7.8h, v6.8h
-; CHECK-NEXT:    uzp1 v6.8h, v17.8h, v16.8h
-; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v2.8h
-; CHECK-NEXT:    dup v2.16b, w9
+; CHECK-NEXT:    add z3.b, z3.b, #32 // =0x20
+; CHECK-NEXT:    add z2.b, z2.b, #16 // =0x10
+; CHECK-NEXT:    dup v4.16b, w9
+; CHECK-NEXT:    cmhi v0.16b, v1.16b, v0.16b
 ; CHECK-NEXT:    adrp x9, .LCPI9_0
-; CHECK-NEXT:    uzp1 v1.16b, v1.16b, v3.16b
-; CHECK-NEXT:    uzp1 v3.16b, v6.16b, v5.16b
-; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v4.16b
-; CHECK-NEXT:    mov z4.b, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    orr v1.16b, v1.16b, v2.16b
-; CHECK-NEXT:    orr v3.16b, v3.16b, v2.16b
-; CHECK-NEXT:    shl v4.16b, v4.16b, #7
-; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    shl v1.16b, v1.16b, #7
-; CHECK-NEXT:    shl v2.16b, v3.16b, #7
-; CHECK-NEXT:    cmlt v3.16b, v4.16b, #0
+; CHECK-NEXT:    cmhi v3.16b, v1.16b, v3.16b
+; CHECK-NEXT:    cmhi v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    mov z2.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    orr v0.16b, v0.16b, v4.16b
 ; CHECK-NEXT:    ldr q4, [x9, :lo12:.LCPI9_0]
+; CHECK-NEXT:    orr v3.16b, v3.16b, v5.16b
+; CHECK-NEXT:    orr v1.16b, v1.16b, v6.16b
+; CHECK-NEXT:    shl v2.16b, v2.16b, #7
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
-; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
+; CHECK-NEXT:    shl v3.16b, v3.16b, #7
+; CHECK-NEXT:    shl v1.16b, v1.16b, #7
 ; CHECK-NEXT:    cmlt v2.16b, v2.16b, #0
-; CHECK-NEXT:    and v3.16b, v3.16b, v4.16b
 ; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
-; CHECK-NEXT:    and v1.16b, v1.16b, v4.16b
+; CHECK-NEXT:    cmlt v3.16b, v3.16b, #0
+; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
 ; CHECK-NEXT:    and v2.16b, v2.16b, v4.16b
-; CHECK-NEXT:    ext v5.16b, v3.16b, v3.16b, #8
 ; CHECK-NEXT:    and v0.16b, v0.16b, v4.16b
-; CHECK-NEXT:    ext v4.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ext v6.16b, v2.16b, v2.16b, #8
-; CHECK-NEXT:    ext v7.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    zip1 v3.16b, v3.16b, v5.16b
-; CHECK-NEXT:    zip1 v1.16b, v1.16b, v4.16b
-; CHECK-NEXT:    zip1 v2.16b, v2.16b, v6.16b
-; CHECK-NEXT:    zip1 v0.16b, v0.16b, v7.16b
-; CHECK-NEXT:    addv h3, v3.8h
-; CHECK-NEXT:    addv h1, v1.8h
+; CHECK-NEXT:    and v3.16b, v3.16b, v4.16b
+; CHECK-NEXT:    and v1.16b, v1.16b, v4.16b
+; CHECK-NEXT:    ext v5.16b, v2.16b, v2.16b, #8
+; CHECK-NEXT:    ext v4.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v6.16b, v3.16b, v3.16b, #8
+; CHECK-NEXT:    ext v7.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    zip1 v2.16b, v2.16b, v5.16b
+; CHECK-NEXT:    zip1 v0.16b, v0.16b, v4.16b
+; CHECK-NEXT:    zip1 v3.16b, v3.16b, v6.16b
+; CHECK-NEXT:    zip1 v1.16b, v1.16b, v7.16b
 ; CHECK-NEXT:    addv h2, v2.8h
-; CHECK-NEXT:    str h3, [x8]
 ; CHECK-NEXT:    addv h0, v0.8h
-; CHECK-NEXT:    str h1, [x8, #6]
-; CHECK-NEXT:    str h2, [x8, #4]
-; CHECK-NEXT:    str h0, [x8, #2]
+; CHECK-NEXT:    addv h3, v3.8h
+; CHECK-NEXT:    addv h1, v1.8h
+; CHECK-NEXT:    str h2, [x8]
+; CHECK-NEXT:    str h0, [x8, #6]
+; CHECK-NEXT:    str h3, [x8, #4]
+; CHECK-NEXT:    str h1, [x8, #2]
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <64 x i1> @llvm.loop.dependence.war.mask.v64i1(ptr %a, ptr %b, i64 1)
@@ -246,47 +203,17 @@ entry:
 define <16 x i1> @whilewr_16_expand(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_16_expand:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    sub x8, x1, x0
+; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    add x8, x8, x8, lsr #63
 ; CHECK-NEXT:    asr x8, x8, #1
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z6.d, z0.d
-; CHECK-NEXT:    mov z7.d, z0.d
-; CHECK-NEXT:    mov z16.d, z0.d
-; CHECK-NEXT:    dup v3.2d, x8
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
-; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
-; CHECK-NEXT:    add z4.d, z4.d, #8 // =0x8
-; CHECK-NEXT:    add z5.d, z5.d, #6 // =0x6
-; CHECK-NEXT:    add z6.d, z6.d, #4 // =0x4
-; CHECK-NEXT:    add z7.d, z7.d, #2 // =0x2
-; CHECK-NEXT:    add z16.d, z16.d, #14 // =0xe
-; CHECK-NEXT:    cmhi v0.2d, v3.2d, v0.2d
-; CHECK-NEXT:    cneg x9, x8, mi
-; CHECK-NEXT:    cmhi v1.2d, v3.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v3.2d, v2.2d
-; CHECK-NEXT:    cmhi v4.2d, v3.2d, v4.2d
-; CHECK-NEXT:    cmhi v5.2d, v3.2d, v5.2d
-; CHECK-NEXT:    cmhi v6.2d, v3.2d, v6.2d
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cmhi v16.2d, v3.2d, v16.2d
-; CHECK-NEXT:    cmhi v3.2d, v3.2d, v7.2d
-; CHECK-NEXT:    ccmp x9, #15, #2, ge
-; CHECK-NEXT:    uzp1 v2.4s, v4.4s, v2.4s
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    uzp1 v4.4s, v6.4s, v5.4s
-; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v16.4s
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v3.4s
-; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v4.8h
-; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    dup v1.16b, w8
-; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    ccmp x8, #15, #2, ge
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    dup v2.16b, w8
+; CHECK-NEXT:    cmhi v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 2)
@@ -297,78 +224,24 @@ define <32 x i1> @whilewr_16_expand2(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_16_expand2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub x9, x1, x0
-; CHECK-NEXT:    index z0.d, #0, #1
+; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    add x9, x9, x9, lsr #63
 ; CHECK-NEXT:    asr x9, x9, #1
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z6.d, z0.d
-; CHECK-NEXT:    mov z7.d, z0.d
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    mov z16.d, z0.d
-; CHECK-NEXT:    dup v17.2d, x9
-; CHECK-NEXT:    mov z18.d, z0.d
-; CHECK-NEXT:    mov z20.d, z0.d
-; CHECK-NEXT:    mov z21.d, z0.d
-; CHECK-NEXT:    mov z22.d, z0.d
-; CHECK-NEXT:    mov z23.d, z0.d
-; CHECK-NEXT:    mov z24.d, z0.d
-; CHECK-NEXT:    add z4.d, z4.d, #14 // =0xe
-; CHECK-NEXT:    add z5.d, z5.d, #12 // =0xc
-; CHECK-NEXT:    add z6.d, z6.d, #10 // =0xa
-; CHECK-NEXT:    add z7.d, z7.d, #8 // =0x8
-; CHECK-NEXT:    add z1.d, z1.d, #6 // =0x6
-; CHECK-NEXT:    add z2.d, z2.d, #4 // =0x4
-; CHECK-NEXT:    add z16.d, z16.d, #30 // =0x1e
-; CHECK-NEXT:    add z18.d, z18.d, #28 // =0x1c
-; CHECK-NEXT:    cmhi v19.2d, v17.2d, v0.2d
-; CHECK-NEXT:    add z20.d, z20.d, #26 // =0x1a
-; CHECK-NEXT:    add z21.d, z21.d, #24 // =0x18
-; CHECK-NEXT:    cmp x9, #0
-; CHECK-NEXT:    add z22.d, z22.d, #22 // =0x16
-; CHECK-NEXT:    add z23.d, z23.d, #20 // =0x14
-; CHECK-NEXT:    cneg x10, x9, mi
-; CHECK-NEXT:    add z24.d, z24.d, #18 // =0x12
-; CHECK-NEXT:    add z0.d, z0.d, #16 // =0x10
+; CHECK-NEXT:    dup v1.16b, w9
 ; CHECK-NEXT:    cmp x9, #1
-; CHECK-NEXT:    add z3.d, z3.d, #2 // =0x2
-; CHECK-NEXT:    cmhi v4.2d, v17.2d, v4.2d
-; CHECK-NEXT:    ccmp x10, #15, #2, ge
-; CHECK-NEXT:    cmhi v5.2d, v17.2d, v5.2d
-; CHECK-NEXT:    cmhi v6.2d, v17.2d, v6.2d
-; CHECK-NEXT:    cset w9, hi
-; CHECK-NEXT:    cmhi v7.2d, v17.2d, v7.2d
-; CHECK-NEXT:    cmhi v16.2d, v17.2d, v16.2d
-; CHECK-NEXT:    cmhi v18.2d, v17.2d, v18.2d
-; CHECK-NEXT:    cmhi v20.2d, v17.2d, v20.2d
-; CHECK-NEXT:    cmhi v21.2d, v17.2d, v21.2d
-; CHECK-NEXT:    cmhi v22.2d, v17.2d, v22.2d
-; CHECK-NEXT:    cmhi v23.2d, v17.2d, v23.2d
-; CHECK-NEXT:    cmhi v24.2d, v17.2d, v24.2d
-; CHECK-NEXT:    cmhi v0.2d, v17.2d, v0.2d
-; CHECK-NEXT:    cmhi v1.2d, v17.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v17.2d, v2.2d
-; CHECK-NEXT:    cmhi v3.2d, v17.2d, v3.2d
-; CHECK-NEXT:    uzp1 v4.4s, v5.4s, v4.4s
-; CHECK-NEXT:    uzp1 v5.4s, v18.4s, v16.4s
-; CHECK-NEXT:    uzp1 v16.4s, v21.4s, v20.4s
-; CHECK-NEXT:    uzp1 v17.4s, v23.4s, v22.4s
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v24.4s
-; CHECK-NEXT:    uzp1 v6.4s, v7.4s, v6.4s
-; CHECK-NEXT:    uzp1 v1.4s, v2.4s, v1.4s
-; CHECK-NEXT:    uzp1 v2.4s, v19.4s, v3.4s
-; CHECK-NEXT:    uzp1 v3.8h, v16.8h, v5.8h
-; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v17.8h
-; CHECK-NEXT:    uzp1 v4.8h, v6.8h, v4.8h
-; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT:    dup v2.16b, w9
+; CHECK-NEXT:    cset w10, lt
+; CHECK-NEXT:    cmp x9, #15
+; CHECK-NEXT:    csinc w11, w10, wzr, ls
+; CHECK-NEXT:    cmp x9, #31
+; CHECK-NEXT:    cmhi v2.16b, v1.16b, v0.16b
+; CHECK-NEXT:    add z0.b, z0.b, #16 // =0x10
+; CHECK-NEXT:    csinc w9, w10, wzr, ls
+; CHECK-NEXT:    dup v3.16b, w9
 ; CHECK-NEXT:    adrp x9, .LCPI11_0
-; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v3.16b
-; CHECK-NEXT:    uzp1 v1.16b, v1.16b, v4.16b
-; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    orr v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    cmhi v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    dup v1.16b, w11
+; CHECK-NEXT:    orr v0.16b, v0.16b, v3.16b
+; CHECK-NEXT:    orr v1.16b, v2.16b, v1.16b
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI11_0]
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
 ; CHECK-NEXT:    shl v1.16b, v1.16b, #7
@@ -393,33 +266,18 @@ entry:
 define <8 x i1> @whilewr_32_expand(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_32_expand:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    subs x8, x1, x0
+; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    add x9, x8, #3
 ; CHECK-NEXT:    csel x8, x9, x8, mi
 ; CHECK-NEXT:    asr x8, x8, #2
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    dup v1.2d, x8
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    cneg x9, x8, mi
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    add z4.d, z4.d, #6 // =0x6
-; CHECK-NEXT:    add z2.d, z2.d, #4 // =0x4
-; CHECK-NEXT:    add z3.d, z3.d, #2 // =0x2
-; CHECK-NEXT:    cmhi v0.2d, v1.2d, v0.2d
-; CHECK-NEXT:    ccmp x9, #7, #2, ge
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    cmhi v4.2d, v1.2d, v4.2d
-; CHECK-NEXT:    cmhi v2.2d, v1.2d, v2.2d
-; CHECK-NEXT:    cmhi v1.2d, v1.2d, v3.2d
-; CHECK-NEXT:    uzp1 v2.4s, v2.4s, v4.4s
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    dup v1.8b, w8
-; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v2.8h
-; CHECK-NEXT:    xtn v0.8b, v0.8h
-; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    ccmp x8, #7, #2, ge
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    dup v2.8b, w8
+; CHECK-NEXT:    cmhi v0.8b, v1.8b, v0.8b
+; CHECK-NEXT:    orr v0.8b, v0.8b, v2.8b
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <8 x i1> @llvm.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 4)
@@ -429,48 +287,18 @@ entry:
 define <16 x i1> @whilewr_32_expand2(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_32_expand2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    subs x8, x1, x0
+; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    add x9, x8, #3
 ; CHECK-NEXT:    csel x8, x9, x8, mi
 ; CHECK-NEXT:    asr x8, x8, #2
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z6.d, z0.d
-; CHECK-NEXT:    mov z7.d, z0.d
-; CHECK-NEXT:    mov z16.d, z0.d
-; CHECK-NEXT:    dup v3.2d, x8
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
-; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
-; CHECK-NEXT:    add z4.d, z4.d, #8 // =0x8
-; CHECK-NEXT:    add z5.d, z5.d, #6 // =0x6
-; CHECK-NEXT:    add z6.d, z6.d, #4 // =0x4
-; CHECK-NEXT:    add z7.d, z7.d, #2 // =0x2
-; CHECK-NEXT:    add z16.d, z16.d, #14 // =0xe
-; CHECK-NEXT:    cmhi v0.2d, v3.2d, v0.2d
-; CHECK-NEXT:    cneg x9, x8, mi
-; CHECK-NEXT:    cmhi v1.2d, v3.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v3.2d, v2.2d
-; CHECK-NEXT:    cmhi v4.2d, v3.2d, v4.2d
-; CHECK-NEXT:    cmhi v5.2d, v3.2d, v5.2d
-; CHECK-NEXT:    cmhi v6.2d, v3.2d, v6.2d
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cmhi v16.2d, v3.2d, v16.2d
-; CHECK-NEXT:    cmhi v3.2d, v3.2d, v7.2d
-; CHECK-NEXT:    ccmp x9, #15, #2, ge
-; CHECK-NEXT:    uzp1 v2.4s, v4.4s, v2.4s
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    uzp1 v4.4s, v6.4s, v5.4s
-; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v16.4s
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v3.4s
-; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v4.8h
-; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    dup v1.16b, w8
-; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    ccmp x8, #15, #2, ge
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    dup v2.16b, w8
+; CHECK-NEXT:    cmhi v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 4)
@@ -481,79 +309,25 @@ define <32 x i1> @whilewr_32_expand3(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_32_expand3:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    subs x9, x1, x0
-; CHECK-NEXT:    index z0.d, #0, #1
+; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    add x10, x9, #3
 ; CHECK-NEXT:    csel x9, x10, x9, mi
 ; CHECK-NEXT:    asr x9, x9, #2
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z6.d, z0.d
-; CHECK-NEXT:    mov z7.d, z0.d
-; CHECK-NEXT:    mov z16.d, z0.d
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    dup v4.2d, x9
-; CHECK-NEXT:    mov z17.d, z0.d
-; CHECK-NEXT:    mov z18.d, z0.d
-; CHECK-NEXT:    mov z20.d, z0.d
-; CHECK-NEXT:    mov z21.d, z0.d
-; CHECK-NEXT:    mov z22.d, z0.d
-; CHECK-NEXT:    mov z23.d, z0.d
-; CHECK-NEXT:    mov z24.d, z0.d
-; CHECK-NEXT:    add z5.d, z5.d, #14 // =0xe
-; CHECK-NEXT:    add z6.d, z6.d, #12 // =0xc
-; CHECK-NEXT:    add z7.d, z7.d, #10 // =0xa
-; CHECK-NEXT:    add z16.d, z16.d, #8 // =0x8
-; CHECK-NEXT:    add z1.d, z1.d, #6 // =0x6
-; CHECK-NEXT:    add z2.d, z2.d, #4 // =0x4
-; CHECK-NEXT:    add z17.d, z17.d, #30 // =0x1e
-; CHECK-NEXT:    cmhi v19.2d, v4.2d, v0.2d
-; CHECK-NEXT:    add z18.d, z18.d, #28 // =0x1c
-; CHECK-NEXT:    add z20.d, z20.d, #26 // =0x1a
-; CHECK-NEXT:    add z21.d, z21.d, #24 // =0x18
-; CHECK-NEXT:    cmp x9, #0
-; CHECK-NEXT:    add z22.d, z22.d, #22 // =0x16
-; CHECK-NEXT:    add z23.d, z23.d, #20 // =0x14
-; CHECK-NEXT:    cneg x10, x9, mi
-; CHECK-NEXT:    add z24.d, z24.d, #18 // =0x12
-; CHECK-NEXT:    add z0.d, z0.d, #16 // =0x10
+; CHECK-NEXT:    dup v1.16b, w9
 ; CHECK-NEXT:    cmp x9, #1
-; CHECK-NEXT:    add z3.d, z3.d, #2 // =0x2
-; CHECK-NEXT:    cmhi v5.2d, v4.2d, v5.2d
-; CHECK-NEXT:    ccmp x10, #15, #2, ge
-; CHECK-NEXT:    cmhi v6.2d, v4.2d, v6.2d
-; CHECK-NEXT:    cmhi v7.2d, v4.2d, v7.2d
-; CHECK-NEXT:    cset w9, hi
-; CHECK-NEXT:    cmhi v16.2d, v4.2d, v16.2d
-; CHECK-NEXT:    cmhi v17.2d, v4.2d, v17.2d
-; CHECK-NEXT:    cmhi v18.2d, v4.2d, v18.2d
-; CHECK-NEXT:    cmhi v20.2d, v4.2d, v20.2d
-; CHECK-NEXT:    cmhi v21.2d, v4.2d, v21.2d
-; CHECK-NEXT:    cmhi v22.2d, v4.2d, v22.2d
-; CHECK-NEXT:    cmhi v23.2d, v4.2d, v23.2d
-; CHECK-NEXT:    cmhi v24.2d, v4.2d, v24.2d
-; CHECK-NEXT:    cmhi v0.2d, v4.2d, v0.2d
-; CHECK-NEXT:    cmhi v1.2d, v4.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v4.2d, v2.2d
-; CHECK-NEXT:    cmhi v3.2d, v4.2d, v3.2d
-; CHECK-NEXT:    uzp1 v4.4s, v6.4s, v5.4s
-; CHECK-NEXT:    uzp1 v5.4s, v18.4s, v17.4s
-; CHECK-NEXT:    uzp1 v6.4s, v21.4s, v20.4s
-; CHECK-NEXT:    uzp1 v17.4s, v23.4s, v22.4s
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v24.4s
-; CHECK-NEXT:    uzp1 v7.4s, v16.4s, v7.4s
-; CHECK-NEXT:    uzp1 v1.4s, v2.4s, v1.4s
-; CHECK-NEXT:    uzp1 v2.4s, v19.4s, v3.4s
-; CHECK-NEXT:    uzp1 v3.8h, v6.8h, v5.8h
-; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v17.8h
-; CHECK-NEXT:    uzp1 v4.8h, v7.8h, v4.8h
-; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT:    dup v2.16b, w9
+; CHECK-NEXT:    cset w10, lt
+; CHECK-NEXT:    cmp x9, #15
+; CHECK-NEXT:    csinc w11, w10, wzr, ls
+; CHECK-NEXT:    cmp x9, #31
+; CHECK-NEXT:    cmhi v2.16b, v1.16b, v0.16b
+; CHECK-NEXT:    add z0.b, z0.b, #16 // =0x10
+; CHECK-NEXT:    csinc w9, w10, wzr, ls
+; CHECK-NEXT:    dup v3.16b, w9
 ; CHECK-NEXT:    adrp x9, .LCPI14_0
-; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v3.16b
-; CHECK-NEXT:    uzp1 v1.16b, v1.16b, v4.16b
-; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    orr v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    cmhi v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    dup v1.16b, w11
+; CHECK-NEXT:    orr v0.16b, v0.16b, v3.16b
+; CHECK-NEXT:    orr v1.16b, v2.16b, v1.16b
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI14_0]
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
 ; CHECK-NEXT:    shl v1.16b, v1.16b, #7
@@ -578,25 +352,19 @@ entry:
 define <4 x i1> @whilewr_64_expand(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_64_expand:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    subs x8, x1, x0
+; CHECK-NEXT:    index z0.h, #0, #1
 ; CHECK-NEXT:    add x9, x8, #7
 ; CHECK-NEXT:    csel x8, x9, x8, mi
 ; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    dup v2.2d, x8
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    cneg x9, x8, mi
+; CHECK-NEXT:    dup v1.4h, w8
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    add z1.d, z1.d, #2 // =0x2
-; CHECK-NEXT:    ccmp x9, #3, #2, ge
-; CHECK-NEXT:    cmhi v0.2d, v2.2d, v0.2d
+; CHECK-NEXT:    ccmp x8, #3, #2, ge
 ; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    cmhi v1.2d, v2.2d, v1.2d
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    dup v1.4h, w8
-; CHECK-NEXT:    xtn v0.4h, v0.4s
-; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    dup v2.4h, w8
+; CHECK-NEXT:    bic v1.4h, #255, lsl #8
+; CHECK-NEXT:    cmhi v0.4h, v1.4h, v0.4h
+; CHECK-NEXT:    orr v0.8b, v0.8b, v2.8b
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <4 x i1> @llvm.loop.dependence.war.mask.v4i1(ptr %a, ptr %b, i64 8)
@@ -606,33 +374,18 @@ entry:
 define <8 x i1> @whilewr_64_expand2(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_64_expand2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    subs x8, x1, x0
+; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    add x9, x8, #7
 ; CHECK-NEXT:    csel x8, x9, x8, mi
 ; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    dup v1.2d, x8
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    cneg x9, x8, mi
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    add z4.d, z4.d, #6 // =0x6
-; CHECK-NEXT:    add z2.d, z2.d, #4 // =0x4
-; CHECK-NEXT:    add z3.d, z3.d, #2 // =0x2
-; CHECK-NEXT:    cmhi v0.2d, v1.2d, v0.2d
-; CHECK-NEXT:    ccmp x9, #7, #2, ge
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    cmhi v4.2d, v1.2d, v4.2d
-; CHECK-NEXT:    cmhi v2.2d, v1.2d, v2.2d
-; CHECK-NEXT:    cmhi v1.2d, v1.2d, v3.2d
-; CHECK-NEXT:    uzp1 v2.4s, v2.4s, v4.4s
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    dup v1.8b, w8
-; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v2.8h
-; CHECK-NEXT:    xtn v0.8b, v0.8h
-; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    ccmp x8, #7, #2, ge
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    dup v2.8b, w8
+; CHECK-NEXT:    cmhi v0.8b, v1.8b, v0.8b
+; CHECK-NEXT:    orr v0.8b, v0.8b, v2.8b
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <8 x i1> @llvm.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 8)
@@ -642,48 +395,18 @@ entry:
 define <16 x i1> @whilewr_64_expand3(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_64_expand3:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    subs x8, x1, x0
+; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    add x9, x8, #7
 ; CHECK-NEXT:    csel x8, x9, x8, mi
 ; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z6.d, z0.d
-; CHECK-NEXT:    mov z7.d, z0.d
-; CHECK-NEXT:    mov z16.d, z0.d
-; CHECK-NEXT:    dup v3.2d, x8
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
-; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
-; CHECK-NEXT:    add z4.d, z4.d, #8 // =0x8
-; CHECK-NEXT:    add z5.d, z5.d, #6 // =0x6
-; CHECK-NEXT:    add z6.d, z6.d, #4 // =0x4
-; CHECK-NEXT:    add z7.d, z7.d, #2 // =0x2
-; CHECK-NEXT:    add z16.d, z16.d, #14 // =0xe
-; CHECK-NEXT:    cmhi v0.2d, v3.2d, v0.2d
-; CHECK-NEXT:    cneg x9, x8, mi
-; CHECK-NEXT:    cmhi v1.2d, v3.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v3.2d, v2.2d
-; CHECK-NEXT:    cmhi v4.2d, v3.2d, v4.2d
-; CHECK-NEXT:    cmhi v5.2d, v3.2d, v5.2d
-; CHECK-NEXT:    cmhi v6.2d, v3.2d, v6.2d
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cmhi v16.2d, v3.2d, v16.2d
-; CHECK-NEXT:    cmhi v3.2d, v3.2d, v7.2d
-; CHECK-NEXT:    ccmp x9, #15, #2, ge
-; CHECK-NEXT:    uzp1 v2.4s, v4.4s, v2.4s
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    uzp1 v4.4s, v6.4s, v5.4s
-; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v16.4s
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v3.4s
-; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v4.8h
-; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    dup v1.16b, w8
-; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    ccmp x8, #15, #2, ge
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    dup v2.16b, w8
+; CHECK-NEXT:    cmhi v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 8)
@@ -694,79 +417,25 @@ define <32 x i1> @whilewr_64_expand4(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_64_expand4:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    subs x9, x1, x0
-; CHECK-NEXT:    index z0.d, #0, #1
+; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    add x10, x9, #7
 ; CHECK-NEXT:    csel x9, x10, x9, mi
 ; CHECK-NEXT:    asr x9, x9, #3
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z6.d, z0.d
-; CHECK-NEXT:    mov z7.d, z0.d
-; CHECK-NEXT:    mov z16.d, z0.d
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    dup v4.2d, x9
-; CHECK-NEXT:    mov z17.d, z0.d
-; CHECK-NEXT:    mov z18.d, z0.d
-; CHECK-NEXT:    mov z20.d, z0.d
-; CHECK-NEXT:    mov z21.d, z0.d
-; CHECK-NEXT:    mov z22.d, z0.d
-; CHECK-NEXT:    mov z23.d, z0.d
-; CHECK-NEXT:    mov z24.d, z0.d
-; CHECK-NEXT:    add z5.d, z5.d, #14 // =0xe
-; CHECK-NEXT:    add z6.d, z6.d, #12 // =0xc
-; CHECK-NEXT:    add z7.d, z7.d, #10 // =0xa
-; CHECK-NEXT:    add z16.d, z16.d, #8 // =0x8
-; CHECK-NEXT:    add z1.d, z1.d, #6 // =0x6
-; CHECK-NEXT:    add z2.d, z2.d, #4 // =0x4
-; CHECK-NEXT:    add z17.d, z17.d, #30 // =0x1e
-; CHECK-NEXT:    cmhi v19.2d, v4.2d, v0.2d
-; CHECK-NEXT:    add z18.d, z18.d, #28 // =0x1c
-; CHECK-NEXT:    add z20.d, z20.d, #26 // =0x1a
-; CHECK-NEXT:    add z21.d, z21.d, #24 // =0x18
-; CHECK-NEXT:    cmp x9, #0
-; CHECK-NEXT:    add z22.d, z22.d, #22 // =0x16
-; CHECK-NEXT:    add z23.d, z23.d, #20 // =0x14
-; CHECK-NEXT:    cneg x10, x9, mi
-; CHECK-NEXT:    add z24.d, z24.d, #18 // =0x12
-; CHECK-NEXT:    add z0.d, z0.d, #16 // =0x10
+; CHECK-NEXT:    dup v1.16b, w9
 ; CHECK-NEXT:    cmp x9, #1
-; CHECK-NEXT:    add z3.d, z3.d, #2 // =0x2
-; CHECK-NEXT:    cmhi v5.2d, v4.2d, v5.2d
-; CHECK-NEXT:    ccmp x10, #15, #2, ge
-; CHECK-NEXT:    cmhi v6.2d, v4.2d, v6.2d
-; CHECK-NEXT:    cmhi v7.2d, v4.2d, v7.2d
-; CHECK-NEXT:    cset w9, hi
-; CHECK-NEXT:    cmhi v16.2d, v4.2d, v16.2d
-; CHECK-NEXT:    cmhi v17.2d, v4.2d, v17.2d
-; CHECK-NEXT:    cmhi v18.2d, v4.2d, v18.2d
-; CHECK-NEXT:    cmhi v20.2d, v4.2d, v20.2d
-; CHECK-NEXT:    cmhi v21.2d, v4.2d, v21.2d
-; CHECK-NEXT:    cmhi v22.2d, v4.2d, v22.2d
-; CHECK-NEXT:    cmhi v23.2d, v4.2d, v23.2d
-; CHECK-NEXT:    cmhi v24.2d, v4.2d, v24.2d
-; CHECK-NEXT:    cmhi v0.2d, v4.2d, v0.2d
-; CHECK-NEXT:    cmhi v1.2d, v4.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v4.2d, v2.2d
-; CHECK-NEXT:    cmhi v3.2d, v4.2d, v3.2d
-; CHECK-NEXT:    uzp1 v4.4s, v6.4s, v5.4s
-; CHECK-NEXT:    uzp1 v5.4s, v18.4s, v17.4s
-; CHECK-NEXT:    uzp1 v6.4s, v21.4s, v20.4s
-; CHECK-NEXT:    uzp1 v17.4s, v23.4s, v22.4s
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v24.4s
-; CHECK-NEXT:    uzp1 v7.4s, v16.4s, v7.4s
-; CHECK-NEXT:    uzp1 v1.4s, v2.4s, v1.4s
-; CHECK-NEXT:    uzp1 v2.4s, v19.4s, v3.4s
-; CHECK-NEXT:    uzp1 v3.8h, v6.8h, v5.8h
-; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v17.8h
-; CHECK-NEXT:    uzp1 v4.8h, v7.8h, v4.8h
-; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT:    dup v2.16b, w9
+; CHECK-NEXT:    cset w10, lt
+; CHECK-NEXT:    cmp x9, #15
+; CHECK-NEXT:    csinc w11, w10, wzr, ls
+; CHECK-NEXT:    cmp x9, #31
+; CHECK-NEXT:    cmhi v2.16b, v1.16b, v0.16b
+; CHECK-NEXT:    add z0.b, z0.b, #16 // =0x10
+; CHECK-NEXT:    csinc w9, w10, wzr, ls
+; CHECK-NEXT:    dup v3.16b, w9
 ; CHECK-NEXT:    adrp x9, .LCPI18_0
-; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v3.16b
-; CHECK-NEXT:    uzp1 v1.16b, v1.16b, v4.16b
-; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    orr v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    cmhi v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    dup v1.16b, w11
+; CHECK-NEXT:    orr v0.16b, v0.16b, v3.16b
+; CHECK-NEXT:    orr v1.16b, v2.16b, v1.16b
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI18_0]
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
 ; CHECK-NEXT:    shl v1.16b, v1.16b, #7
@@ -859,47 +528,17 @@ define <16 x i1> @whilewr_badimm(ptr %a, ptr %b) {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    mov x8, #6148914691236517205 // =0x5555555555555555
 ; CHECK-NEXT:    sub x9, x1, x0
-; CHECK-NEXT:    index z0.d, #0, #1
+; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    movk x8, #21846
 ; CHECK-NEXT:    smulh x8, x9, x8
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z6.d, z0.d
-; CHECK-NEXT:    mov z7.d, z0.d
-; CHECK-NEXT:    mov z16.d, z0.d
 ; CHECK-NEXT:    add x8, x8, x8, lsr #63
-; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
-; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
-; CHECK-NEXT:    add z4.d, z4.d, #8 // =0x8
-; CHECK-NEXT:    add z5.d, z5.d, #6 // =0x6
-; CHECK-NEXT:    add z6.d, z6.d, #4 // =0x4
-; CHECK-NEXT:    dup v3.2d, x8
-; CHECK-NEXT:    add z16.d, z16.d, #14 // =0xe
-; CHECK-NEXT:    add z7.d, z7.d, #2 // =0x2
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    cneg x9, x8, mi
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cmhi v0.2d, v3.2d, v0.2d
-; CHECK-NEXT:    cmhi v1.2d, v3.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v3.2d, v2.2d
-; CHECK-NEXT:    cmhi v4.2d, v3.2d, v4.2d
-; CHECK-NEXT:    cmhi v16.2d, v3.2d, v16.2d
-; CHECK-NEXT:    cmhi v5.2d, v3.2d, v5.2d
-; CHECK-NEXT:    cmhi v6.2d, v3.2d, v6.2d
-; CHECK-NEXT:    cmhi v3.2d, v3.2d, v7.2d
-; CHECK-NEXT:    ccmp x9, #15, #2, ge
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v16.4s
-; CHECK-NEXT:    uzp1 v2.4s, v4.4s, v2.4s
-; CHECK-NEXT:    uzp1 v4.4s, v6.4s, v5.4s
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v3.4s
-; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v4.8h
-; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    dup v1.16b, w8
-; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    ccmp x8, #15, #2, ge
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    dup v2.16b, w8
+; CHECK-NEXT:    cmhi v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 3)
@@ -911,9 +550,7 @@ entry:
 define <1 x i1> @whilewr_8_scalarize(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_8_scalarize:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    cmn x8, #1
-; CHECK-NEXT:    cset w0, gt
+; CHECK-NEXT:    mov w0, #1 // =0x1
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <1 x i1> @llvm.loop.dependence.war.mask.v1i1(ptr %a, ptr %b, i64 1)
@@ -923,10 +560,7 @@ entry:
 define <1 x i1> @whilewr_16_scalarize(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_16_scalarize:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    ccmp x8, #0, #4, le
-; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    mov w0, #1 // =0x1
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <1 x i1> @llvm.loop.dependence.war.mask.v1i1(ptr %a, ptr %b, i64 2)
@@ -936,10 +570,7 @@ entry:
 define <1 x i1> @whilewr_32_scalarize(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_32_scalarize:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    cmp x8, #3
-; CHECK-NEXT:    ccmp x8, #0, #4, le
-; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    mov w0, #1 // =0x1
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <1 x i1> @llvm.loop.dependence.war.mask.v1i1(ptr %a, ptr %b, i64 4)
@@ -949,10 +580,7 @@ entry:
 define <1 x i1> @whilewr_64_scalarize(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_64_scalarize:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    cmp x8, #7
-; CHECK-NEXT:    ccmp x8, #0, #4, le
-; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    mov w0, #1 // =0x1
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <1 x i1> @llvm.loop.dependence.war.mask.v1i1(ptr %a, ptr %b, i64 8)
@@ -962,9 +590,7 @@ entry:
 define <1 x i1> @whilerw_8_scalarize(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilerw_8_scalarize:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    cmn x8, #1
-; CHECK-NEXT:    cset w0, gt
+; CHECK-NEXT:    mov w0, #1 // =0x1
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <1 x i1> @llvm.loop.dependence.raw.mask.v1i1(ptr %a, ptr %b, i64 1)
@@ -974,10 +600,7 @@ entry:
 define <1 x i1> @whilerw_16_scalarize(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilerw_16_scalarize:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    ccmp x8, #0, #4, le
-; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    mov w0, #1 // =0x1
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <1 x i1> @llvm.loop.dependence.raw.mask.v1i1(ptr %a, ptr %b, i64 2)
@@ -987,10 +610,7 @@ entry:
 define <1 x i1> @whilerw_32_scalarize(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilerw_32_scalarize:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    cmp x8, #3
-; CHECK-NEXT:    ccmp x8, #0, #4, le
-; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    mov w0, #1 // =0x1
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <1 x i1> @llvm.loop.dependence.raw.mask.v1i1(ptr %a, ptr %b, i64 4)
@@ -1000,10 +620,7 @@ entry:
 define <1 x i1> @whilerw_64_scalarize(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilerw_64_scalarize:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    cmp x8, #7
-; CHECK-NEXT:    ccmp x8, #0, #4, le
-; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    mov w0, #1 // =0x1
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <1 x i1> @llvm.loop.dependence.raw.mask.v1i1(ptr %a, ptr %b, i64 8)
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_nosve.ll b/llvm/test/CodeGen/AArch64/alias_mask_nosve.ll
index a62adf4eb7e00..3fd1c5f949487 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_nosve.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_nosve.ll
@@ -4,29 +4,16 @@
 define <16 x i1> @whilewr_8(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_8:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    adrp x9, .LCPI0_0
-; CHECK-NEXT:    adrp x10, .LCPI0_1
-; CHECK-NEXT:    dup v0.4s, w8
-; CHECK-NEXT:    adrp x11, .LCPI0_2
-; CHECK-NEXT:    ldr q1, [x9, :lo12:.LCPI0_0]
-; CHECK-NEXT:    adrp x9, .LCPI0_3
-; CHECK-NEXT:    ldr q2, [x10, :lo12:.LCPI0_1]
-; CHECK-NEXT:    ldr q3, [x11, :lo12:.LCPI0_2]
-; CHECK-NEXT:    ldr q4, [x9, :lo12:.LCPI0_3]
-; CHECK-NEXT:    cneg x9, x8, mi
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cmhi v1.4s, v0.4s, v1.4s
-; CHECK-NEXT:    cmhi v2.4s, v0.4s, v2.4s
-; CHECK-NEXT:    cmhi v3.4s, v0.4s, v3.4s
-; CHECK-NEXT:    cmhi v0.4s, v0.4s, v4.4s
+; CHECK-NEXT:    sub x9, x1, x0
+; CHECK-NEXT:    adrp x8, .LCPI0_0
+; CHECK-NEXT:    cmp x9, #1
+; CHECK-NEXT:    dup v0.16b, w9
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI0_0]
 ; CHECK-NEXT:    ccmp x9, #15, #2, ge
 ; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v3.8h
-; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
-; CHECK-NEXT:    dup v1.16b, w8
-; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    dup v2.16b, w8
+; CHECK-NEXT:    cmhi v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 1)
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
index 22440d95d78ca..221086ff77b20 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; RUN: llc -mtriple=aarch64 -mattr=+sve2 %s -o - | FileCheck %s
 
-define <vscale x 16 x i1> @whilewr_8(ptr %a, ptr %b) {
+define <vscale x 16 x i1> @whilewr_8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    whilewr p0.b, x0, x1
@@ -11,7 +11,7 @@ entry:
   ret <vscale x 16 x i1> %0
 }
 
-define <vscale x 8 x i1> @whilewr_16(ptr %a, ptr %b) {
+define <vscale x 8 x i1> @whilewr_16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    whilewr p0.h, x0, x1
@@ -21,7 +21,7 @@ entry:
   ret <vscale x 8 x i1> %0
 }
 
-define <vscale x 4 x i1> @whilewr_32(ptr %a, ptr %b) {
+define <vscale x 4 x i1> @whilewr_32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_32:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    whilewr p0.s, x0, x1
@@ -31,7 +31,7 @@ entry:
   ret <vscale x 4 x i1> %0
 }
 
-define <vscale x 2 x i1> @whilewr_64(ptr %a, ptr %b) {
+define <vscale x 2 x i1> @whilewr_64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    whilewr p0.d, x0, x1
@@ -41,7 +41,7 @@ entry:
   ret <vscale x 2 x i1> %0
 }
 
-define <vscale x 16 x i1> @whilerw_8(ptr %a, ptr %b) {
+define <vscale x 16 x i1> @whilerw_8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilerw_8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    whilerw p0.b, x0, x1
@@ -51,7 +51,7 @@ entry:
   ret <vscale x 16 x i1> %0
 }
 
-define <vscale x 8 x i1> @whilerw_16(ptr %a, ptr %b) {
+define <vscale x 8 x i1> @whilerw_16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilerw_16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    whilerw p0.h, x0, x1
@@ -61,7 +61,7 @@ entry:
   ret <vscale x 8 x i1> %0
 }
 
-define <vscale x 4 x i1> @whilerw_32(ptr %a, ptr %b) {
+define <vscale x 4 x i1> @whilerw_32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilerw_32:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    whilerw p0.s, x0, x1
@@ -71,7 +71,7 @@ entry:
   ret <vscale x 4 x i1> %0
 }
 
-define <vscale x 2 x i1> @whilerw_64(ptr %a, ptr %b) {
+define <vscale x 2 x i1> @whilerw_64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilerw_64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    whilerw p0.d, x0, x1
@@ -81,33 +81,20 @@ entry:
   ret <vscale x 2 x i1> %0
 }
 
-define <vscale x 32 x i1> @whilewr_8_split(ptr %a, ptr %b) {
+define <vscale x 32 x i1> @whilewr_8_split(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_8_split:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #16 // =0x10
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    index z0.s, w8, #1
-; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    mov z3.s, w8
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    cmphi p1.s, p0/z, z3.s, z0.s
-; CHECK-NEXT:    incw z1.s
-; CHECK-NEXT:    incw z2.s, all, mul #2
-; CHECK-NEXT:    mov z4.d, z1.d
-; CHECK-NEXT:    cmphi p2.s, p0/z, z3.s, z1.s
-; CHECK-NEXT:    cmphi p3.s, p0/z, z3.s, z2.s
-; CHECK-NEXT:    incw z4.s, all, mul #2
-; CHECK-NEXT:    uzp1 p1.h, p1.h, p2.h
-; CHECK-NEXT:    cmphi p0.s, p0/z, z3.s, z4.s
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    cneg x9, x8, mi
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    ccmp x9, #15, #2, ge
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    uzp1 p0.h, p3.h, p0.h
+; CHECK-NEXT:    rdvl x8, #1
+; CHECK-NEXT:    sub x9, x1, x0
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    index z0.b, w8, #1
+; CHECK-NEXT:    rdvl x8, #2
+; CHECK-NEXT:    cmp x9, #1
+; CHECK-NEXT:    mov z1.b, w9
+; CHECK-NEXT:    ccmp x9, x8, #2, ge
+; CHECK-NEXT:    cset w8, hs
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
+; CHECK-NEXT:    cmphi p0.b, p0/z, z1.b, z0.b
 ; CHECK-NEXT:    whilelo p1.b, xzr, x8
 ; CHECK-NEXT:    mov p1.b, p0/m, p0.b
 ; CHECK-NEXT:    whilewr p0.b, x0, x1
@@ -117,85 +104,52 @@ entry:
   ret <vscale x 32 x i1> %0
 }
 
-define <vscale x 64 x i1> @whilewr_8_split2(ptr %a, ptr %b) {
+define <vscale x 64 x i1> @whilewr_8_split2(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_8_split2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    index z0.s, #0, #1
+; CHECK-NEXT:    rdvl x8, #1
+; CHECK-NEXT:    index z0.b, #0, #1
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    mov z1.b, w8
 ; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    mov z4.s, w8
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    incw z1.s
-; CHECK-NEXT:    incw z2.s, all, mul #2
-; CHECK-NEXT:    add z5.s, z5.s, #16 // =0x10
-; CHECK-NEXT:    mov z3.d, z1.d
-; CHECK-NEXT:    mov z6.d, z2.d
-; CHECK-NEXT:    mov z7.d, z1.d
-; CHECK-NEXT:    cmphi p2.s, p0/z, z4.s, z5.s
-; CHECK-NEXT:    mov z5.d, z2.d
-; CHECK-NEXT:    add z2.s, z2.s, #48 // =0x30
-; CHECK-NEXT:    incw z3.s, all, mul #2
-; CHECK-NEXT:    add z6.s, z6.s, #16 // =0x10
-; CHECK-NEXT:    add z7.s, z7.s, #16 // =0x10
-; CHECK-NEXT:    add z5.s, z5.s, #32 // =0x20
-; CHECK-NEXT:    mov z24.d, z3.d
-; CHECK-NEXT:    cmphi p1.s, p0/z, z4.s, z6.s
-; CHECK-NEXT:    cmphi p4.s, p0/z, z4.s, z7.s
-; CHECK-NEXT:    mov z6.d, z3.d
-; CHECK-NEXT:    mov z7.d, z0.d
-; CHECK-NEXT:    add z3.s, z3.s, #48 // =0x30
-; CHECK-NEXT:    add z0.s, z0.s, #48 // =0x30
-; CHECK-NEXT:    add z24.s, z24.s, #16 // =0x10
-; CHECK-NEXT:    add z6.s, z6.s, #32 // =0x20
-; CHECK-NEXT:    add z7.s, z7.s, #32 // =0x20
-; CHECK-NEXT:    uzp1 p2.h, p2.h, p4.h
-; CHECK-NEXT:    cmphi p3.s, p0/z, z4.s, z24.s
-; CHECK-NEXT:    mov z24.d, z1.d
-; CHECK-NEXT:    add z1.s, z1.s, #48 // =0x30
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    cneg x9, x8, mi
+; CHECK-NEXT:    rdvl x9, #2
+; CHECK-NEXT:    mov z2.b, w8
+; CHECK-NEXT:    rdvl x11, #3
+; CHECK-NEXT:    mov z3.b, w11
+; CHECK-NEXT:    add z1.b, z0.b, z1.b
+; CHECK-NEXT:    cmphi p1.b, p0/z, z2.b, z1.b
+; CHECK-NEXT:    mov z1.b, w9
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    add z24.s, z24.s, #32 // =0x20
-; CHECK-NEXT:    ccmp x9, #15, #2, ge
-; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    cmphi p5.s, p0/z, z4.s, z6.s
-; CHECK-NEXT:    cmphi p6.s, p0/z, z4.s, z5.s
-; CHECK-NEXT:    cmphi p3.s, p0/z, z4.s, z7.s
-; CHECK-NEXT:    cmphi p7.s, p0/z, z4.s, z3.s
-; CHECK-NEXT:    cmphi p4.s, p0/z, z4.s, z24.s
-; CHECK-NEXT:    cmphi p8.s, p0/z, z4.s, z2.s
-; CHECK-NEXT:    uzp1 p1.b, p2.b, p1.b
-; CHECK-NEXT:    cmphi p2.s, p0/z, z4.s, z1.s
-; CHECK-NEXT:    cmphi p0.s, p0/z, z4.s, z0.s
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p5.h, p6.h, p5.h
-; CHECK-NEXT:    uzp1 p3.h, p3.h, p4.h
-; CHECK-NEXT:    uzp1 p4.h, p8.h, p7.h
-; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p2.h, p0.h, p2.h
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT:    whilelo p6.b, xzr, x8
-; CHECK-NEXT:    uzp1 p3.b, p3.b, p5.b
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p4.b, p2.b, p4.b
-; CHECK-NEXT:    sel p1.b, p1, p1.b, p6.b
-; CHECK-NEXT:    sel p2.b, p3, p3.b, p6.b
-; CHECK-NEXT:    sel p3.b, p4, p4.b, p6.b
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT:    cset w10, lt
+; CHECK-NEXT:    cmp x8, x9
+; CHECK-NEXT:    csinc w9, w10, wzr, lo
+; CHECK-NEXT:    add z1.b, z0.b, z1.b
+; CHECK-NEXT:    add z0.b, z0.b, z3.b
+; CHECK-NEXT:    sbfx x9, x9, #0, #1
+; CHECK-NEXT:    cmphi p2.b, p0/z, z2.b, z1.b
+; CHECK-NEXT:    whilelo p3.b, xzr, x9
+; CHECK-NEXT:    cmp x8, x11
+; CHECK-NEXT:    csinc w9, w10, wzr, lo
+; CHECK-NEXT:    cmphi p5.b, p0/z, z2.b, z0.b
+; CHECK-NEXT:    sel p1.b, p1, p1.b, p3.b
+; CHECK-NEXT:    sbfx x9, x9, #0, #1
+; CHECK-NEXT:    whilelo p4.b, xzr, x9
+; CHECK-NEXT:    rdvl x9, #4
+; CHECK-NEXT:    cmp x8, x9
+; CHECK-NEXT:    csinc w8, w10, wzr, lo
+; CHECK-NEXT:    sel p2.b, p2, p2.b, p4.b
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
 ; CHECK-NEXT:    whilewr p0.b, x0, x1
+; CHECK-NEXT:    whilelo p3.b, xzr, x8
+; CHECK-NEXT:    mov p3.b, p5/m, p5.b
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -204,192 +158,80 @@ entry:
   ret <vscale x 64 x i1> %0
 }
 
-define <vscale x 16 x i1> @whilewr_16_expand(ptr %a, ptr %b) {
+define <vscale x 16 x i1> @whilewr_16_expand(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_16_expand:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
-; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    index z0.b, #0, #1
+; CHECK-NEXT:    rdvl x9, #1
 ; CHECK-NEXT:    add x8, x8, x8, lsr #63
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    asr x8, x8, #1
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z2.d, x8
-; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    incd z4.d, all, mul #2
-; CHECK-NEXT:    incd z5.d, all, mul #4
-; CHECK-NEXT:    cmphi p2.d, p0/z, z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z1.d
-; CHECK-NEXT:    cmphi p1.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    incd z1.d, all, mul #4
-; CHECK-NEXT:    cmphi p3.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    incd z4.d, all, mul #4
-; CHECK-NEXT:    cmphi p4.d, p0/z, z2.d, z5.d
-; CHECK-NEXT:    incd z3.d, all, mul #2
-; CHECK-NEXT:    cmphi p5.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
-; CHECK-NEXT:    mov z0.d, z3.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z2.d, z3.d
-; CHECK-NEXT:    uzp1 p2.s, p4.s, p5.s
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT:    incd z0.d, all, mul #4
-; CHECK-NEXT:    uzp1 p3.s, p3.s, p6.s
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT:    cmphi p0.d, p0/z, z2.d, z0.d
-; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    cneg x9, x8, mi
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    ccmp x9, #15, #2, ge
-; CHECK-NEXT:    uzp1 p0.s, p7.s, p0.s
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
+; CHECK-NEXT:    mov z1.b, w8
+; CHECK-NEXT:    ccmp x8, x9, #2, ge
+; CHECK-NEXT:    cset w8, hs
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p0.h, p2.h, p0.h
-; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
+; CHECK-NEXT:    cmphi p0.b, p0/z, z1.b, z0.b
 ; CHECK-NEXT:    whilelo p1.b, xzr, x8
 ; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
-; CHECK-NEXT:    addvl sp, sp, #1
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 16 x i1> @llvm.loop.dependence.war.mask.nxv16i1(ptr %a, ptr %b, i64 2)
   ret <vscale x 16 x i1> %0
 }
 
-define <vscale x 32 x i1> @whilewr_16_expand2(ptr %a, ptr %b) {
+define <vscale x 32 x i1> @whilewr_16_expand2(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_16_expand2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
-; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    rdvl x9, #1
+; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    add x8, x8, x8, lsr #63
+; CHECK-NEXT:    mov z1.b, w9
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    rdvl x10, #2
 ; CHECK-NEXT:    asr x8, x8, #1
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z3.d, x8
-; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    incd z2.d, all, mul #2
-; CHECK-NEXT:    incd z5.d, all, mul #4
-; CHECK-NEXT:    cmphi p5.d, p0/z, z3.d, z0.d
-; CHECK-NEXT:    add z0.d, z0.d, #16 // =0x10
-; CHECK-NEXT:    mov z4.d, z1.d
-; CHECK-NEXT:    mov z6.d, z2.d
-; CHECK-NEXT:    mov z7.d, z1.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z3.d, z1.d
-; CHECK-NEXT:    cmphi p1.d, p0/z, z3.d, z2.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z3.d, z5.d
-; CHECK-NEXT:    add z5.d, z5.d, #16 // =0x10
-; CHECK-NEXT:    add z2.d, z2.d, #16 // =0x10
-; CHECK-NEXT:    add z1.d, z1.d, #16 // =0x10
-; CHECK-NEXT:    incd z4.d, all, mul #2
-; CHECK-NEXT:    incd z6.d, all, mul #4
-; CHECK-NEXT:    incd z7.d, all, mul #4
-; CHECK-NEXT:    uzp1 p3.s, p5.s, p3.s
-; CHECK-NEXT:    mov z24.d, z4.d
-; CHECK-NEXT:    cmphi p2.d, p0/z, z3.d, z4.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z3.d, z7.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z3.d, z6.d
-; CHECK-NEXT:    add z6.d, z6.d, #16 // =0x10
-; CHECK-NEXT:    add z7.d, z7.d, #16 // =0x10
-; CHECK-NEXT:    add z4.d, z4.d, #16 // =0x10
-; CHECK-NEXT:    incd z24.d, all, mul #4
-; CHECK-NEXT:    uzp1 p1.s, p1.s, p2.s
-; CHECK-NEXT:    uzp1 p2.s, p4.s, p6.s
-; CHECK-NEXT:    cmphi p5.d, p0/z, z3.d, z24.d
-; CHECK-NEXT:    uzp1 p1.h, p3.h, p1.h
-; CHECK-NEXT:    add z24.d, z24.d, #16 // =0x10
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    cneg x9, x8, mi
+; CHECK-NEXT:    add z1.b, z0.b, z1.b
+; CHECK-NEXT:    mov z2.b, w8
+; CHECK-NEXT:    cmphi p1.b, p0/z, z2.b, z1.b
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    ccmp x9, #15, #2, ge
-; CHECK-NEXT:    uzp1 p3.s, p7.s, p5.s
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    cmphi p4.d, p0/z, z3.d, z24.d
-; CHECK-NEXT:    cmphi p5.d, p0/z, z3.d, z6.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z3.d, z7.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z3.d, z5.d
-; CHECK-NEXT:    uzp1 p2.h, p2.h, p3.h
-; CHECK-NEXT:    cmphi p3.d, p0/z, z3.d, z4.d
-; CHECK-NEXT:    cmphi p8.d, p0/z, z3.d, z2.d
+; CHECK-NEXT:    cset w11, lt
+; CHECK-NEXT:    cmp x8, x10
+; CHECK-NEXT:    csinc w10, w11, wzr, lo
+; CHECK-NEXT:    sbfx x10, x10, #0, #1
+; CHECK-NEXT:    whilelo p2.b, xzr, x10
+; CHECK-NEXT:    cmp x8, x9
+; CHECK-NEXT:    csinc w8, w11, wzr, lo
+; CHECK-NEXT:    cmphi p0.b, p0/z, z2.b, z0.b
+; CHECK-NEXT:    sel p1.b, p1, p1.b, p2.b
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p1.b, p1.b, p2.b
-; CHECK-NEXT:    cmphi p2.d, p0/z, z3.d, z1.d
-; CHECK-NEXT:    cmphi p0.d, p0/z, z3.d, z0.d
-; CHECK-NEXT:    uzp1 p4.s, p5.s, p4.s
-; CHECK-NEXT:    uzp1 p5.s, p7.s, p6.s
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p3.s, p8.s, p3.s
-; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.s, p0.s, p2.s
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p2.h, p5.h, p4.h
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.h, p0.h, p3.h
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    whilelo p3.b, xzr, x8
-; CHECK-NEXT:    uzp1 p2.b, p0.b, p2.b
-; CHECK-NEXT:    sel p0.b, p1, p1.b, p3.b
-; CHECK-NEXT:    sel p1.b, p2, p2.b, p3.b
-; CHECK-NEXT:    addvl sp, sp, #1
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    sel p0.b, p0, p0.b, p3.b
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 32 x i1> @llvm.loop.dependence.war.mask.nxv32i1(ptr %a, ptr %b, i64 2)
   ret <vscale x 32 x i1> %0
 }
 
-define <vscale x 8 x i1> @whilewr_32_expand(ptr %a, ptr %b) {
+define <vscale x 8 x i1> @whilewr_32_expand(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_32_expand:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    index z1.h, #0, #1
+; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    add x9, x8, #3
 ; CHECK-NEXT:    csel x8, x9, x8, mi
+; CHECK-NEXT:    cnth x9
 ; CHECK-NEXT:    asr x8, x8, #2
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z4.d, x8
-; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    incd z2.d, all, mul #2
-; CHECK-NEXT:    cmphi p3.d, p0/z, z4.d, z0.d
-; CHECK-NEXT:    mov z3.d, z1.d
-; CHECK-NEXT:    cmphi p1.d, p0/z, z4.d, z2.d
-; CHECK-NEXT:    incd z3.d, all, mul #2
-; CHECK-NEXT:    cmphi p2.d, p0/z, z4.d, z3.d
-; CHECK-NEXT:    cmphi p0.d, p0/z, z4.d, z1.d
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    cneg x9, x8, mi
+; CHECK-NEXT:    mov z0.h, w8
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    ccmp x9, #7, #2, ge
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    uzp1 p1.s, p1.s, p2.s
-; CHECK-NEXT:    uzp1 p0.s, p3.s, p0.s
+; CHECK-NEXT:    ccmp x8, x9, #2, ge
+; CHECK-NEXT:    cset w8, hs
+; CHECK-NEXT:    and z0.h, z0.h, #0xff
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p0.h, p0.h, p1.h
 ; CHECK-NEXT:    whilelo p1.h, xzr, x8
+; CHECK-NEXT:    cmphi p0.h, p0/z, z0.h, z1.h
 ; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-NEXT:    ret
 entry:
@@ -397,156 +239,64 @@ entry:
   ret <vscale x 8 x i1> %0
 }
 
-define <vscale x 16 x i1> @whilewr_32_expand2(ptr %a, ptr %b) {
+define <vscale x 16 x i1> @whilewr_32_expand2(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_32_expand2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
-; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    index z0.b, #0, #1
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    add x9, x8, #3
 ; CHECK-NEXT:    csel x8, x9, x8, mi
+; CHECK-NEXT:    rdvl x9, #1
 ; CHECK-NEXT:    asr x8, x8, #2
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z2.d, x8
-; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    incd z4.d, all, mul #2
-; CHECK-NEXT:    incd z5.d, all, mul #4
-; CHECK-NEXT:    cmphi p2.d, p0/z, z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z1.d
-; CHECK-NEXT:    cmphi p1.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    incd z1.d, all, mul #4
-; CHECK-NEXT:    cmphi p3.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    incd z4.d, all, mul #4
-; CHECK-NEXT:    cmphi p4.d, p0/z, z2.d, z5.d
-; CHECK-NEXT:    incd z3.d, all, mul #2
-; CHECK-NEXT:    cmphi p5.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
-; CHECK-NEXT:    mov z0.d, z3.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z2.d, z3.d
-; CHECK-NEXT:    uzp1 p2.s, p4.s, p5.s
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT:    incd z0.d, all, mul #4
-; CHECK-NEXT:    uzp1 p3.s, p3.s, p6.s
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT:    cmphi p0.d, p0/z, z2.d, z0.d
-; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    cneg x9, x8, mi
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    ccmp x9, #15, #2, ge
-; CHECK-NEXT:    uzp1 p0.s, p7.s, p0.s
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
+; CHECK-NEXT:    mov z1.b, w8
+; CHECK-NEXT:    ccmp x8, x9, #2, ge
+; CHECK-NEXT:    cset w8, hs
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p0.h, p2.h, p0.h
-; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
+; CHECK-NEXT:    cmphi p0.b, p0/z, z1.b, z0.b
 ; CHECK-NEXT:    whilelo p1.b, xzr, x8
 ; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
-; CHECK-NEXT:    addvl sp, sp, #1
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 16 x i1> @llvm.loop.dependence.war.mask.nxv16i1(ptr %a, ptr %b, i64 4)
   ret <vscale x 16 x i1> %0
 }
 
-define <vscale x 32 x i1> @whilewr_32_expand3(ptr %a, ptr %b) {
+define <vscale x 32 x i1> @whilewr_32_expand3(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_32_expand3:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    add x9, x8, #3
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    asr x8, x8, #2
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z3.d, x8
-; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    incd z2.d, all, mul #2
-; CHECK-NEXT:    incd z5.d, all, mul #4
-; CHECK-NEXT:    cmphi p5.d, p0/z, z3.d, z0.d
-; CHECK-NEXT:    add z0.d, z0.d, #16 // =0x10
-; CHECK-NEXT:    mov z4.d, z1.d
-; CHECK-NEXT:    mov z6.d, z2.d
-; CHECK-NEXT:    mov z7.d, z1.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z3.d, z1.d
-; CHECK-NEXT:    cmphi p1.d, p0/z, z3.d, z2.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z3.d, z5.d
-; CHECK-NEXT:    add z5.d, z5.d, #16 // =0x10
-; CHECK-NEXT:    add z2.d, z2.d, #16 // =0x10
-; CHECK-NEXT:    add z1.d, z1.d, #16 // =0x10
-; CHECK-NEXT:    incd z4.d, all, mul #2
-; CHECK-NEXT:    incd z6.d, all, mul #4
-; CHECK-NEXT:    incd z7.d, all, mul #4
-; CHECK-NEXT:    uzp1 p3.s, p5.s, p3.s
-; CHECK-NEXT:    mov z24.d, z4.d
-; CHECK-NEXT:    cmphi p2.d, p0/z, z3.d, z4.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z3.d, z7.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z3.d, z6.d
-; CHECK-NEXT:    add z6.d, z6.d, #16 // =0x10
-; CHECK-NEXT:    add z7.d, z7.d, #16 // =0x10
-; CHECK-NEXT:    add z4.d, z4.d, #16 // =0x10
-; CHECK-NEXT:    incd z24.d, all, mul #4
-; CHECK-NEXT:    uzp1 p1.s, p1.s, p2.s
-; CHECK-NEXT:    uzp1 p2.s, p4.s, p6.s
-; CHECK-NEXT:    cmphi p5.d, p0/z, z3.d, z24.d
-; CHECK-NEXT:    uzp1 p1.h, p3.h, p1.h
-; CHECK-NEXT:    add z24.d, z24.d, #16 // =0x10
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    cneg x9, x8, mi
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    ccmp x9, #15, #2, ge
-; CHECK-NEXT:    uzp1 p3.s, p7.s, p5.s
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    cmphi p4.d, p0/z, z3.d, z24.d
-; CHECK-NEXT:    cmphi p5.d, p0/z, z3.d, z6.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z3.d, z7.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z3.d, z5.d
-; CHECK-NEXT:    uzp1 p2.h, p2.h, p3.h
-; CHECK-NEXT:    cmphi p3.d, p0/z, z3.d, z4.d
-; CHECK-NEXT:    cmphi p8.d, p0/z, z3.d, z2.d
+; CHECK-NEXT:    subs x9, x1, x0
+; CHECK-NEXT:    rdvl x8, #1
+; CHECK-NEXT:    index z0.b, #0, #1
+; CHECK-NEXT:    add x10, x9, #3
+; CHECK-NEXT:    mov z1.b, w8
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    csel x9, x10, x9, mi
+; CHECK-NEXT:    asr x9, x9, #2
+; CHECK-NEXT:    cmp x9, #1
+; CHECK-NEXT:    mov z2.b, w9
+; CHECK-NEXT:    cset w10, lt
+; CHECK-NEXT:    cmp x9, x8
+; CHECK-NEXT:    csinc w11, w10, wzr, lo
+; CHECK-NEXT:    sbfx x8, x11, #0, #1
+; CHECK-NEXT:    cmphi p2.b, p0/z, z2.b, z0.b
+; CHECK-NEXT:    add z0.b, z0.b, z1.b
+; CHECK-NEXT:    whilelo p1.b, xzr, x8
+; CHECK-NEXT:    rdvl x8, #2
+; CHECK-NEXT:    cmp x9, x8
+; CHECK-NEXT:    csinc w8, w10, wzr, lo
+; CHECK-NEXT:    cmphi p3.b, p0/z, z2.b, z0.b
+; CHECK-NEXT:    sel p0.b, p2, p2.b, p1.b
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p1.b, p1.b, p2.b
-; CHECK-NEXT:    cmphi p2.d, p0/z, z3.d, z1.d
-; CHECK-NEXT:    cmphi p0.d, p0/z, z3.d, z0.d
-; CHECK-NEXT:    uzp1 p4.s, p5.s, p4.s
-; CHECK-NEXT:    uzp1 p5.s, p7.s, p6.s
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p3.s, p8.s, p3.s
-; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.s, p0.s, p2.s
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p2.h, p5.h, p4.h
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.h, p0.h, p3.h
+; CHECK-NEXT:    whilelo p4.b, xzr, x8
+; CHECK-NEXT:    sel p1.b, p3, p3.b, p4.b
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT:    whilelo p3.b, xzr, x8
-; CHECK-NEXT:    uzp1 p2.b, p0.b, p2.b
-; CHECK-NEXT:    sel p0.b, p1, p1.b, p3.b
-; CHECK-NEXT:    sel p1.b, p2, p2.b, p3.b
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -555,27 +305,23 @@ entry:
   ret <vscale x 32 x i1> %0
 }
 
-define <vscale x 4 x i1> @whilewr_64_expand(ptr %a, ptr %b) {
+define <vscale x 4 x i1> @whilewr_64_expand(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_64_expand:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    index z0.s, #0, #1
+; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    add x9, x8, #7
 ; CHECK-NEXT:    csel x8, x9, x8, mi
 ; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, x8
-; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    cmphi p1.d, p0/z, z2.d, z0.d
-; CHECK-NEXT:    cmphi p0.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    cneg x9, x8, mi
+; CHECK-NEXT:    and w9, w8, #0xff
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    ccmp x9, #3, #2, ge
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    uzp1 p0.s, p1.s, p0.s
+; CHECK-NEXT:    mov z1.s, w9
+; CHECK-NEXT:    cntw x9
+; CHECK-NEXT:    ccmp x8, x9, #2, ge
+; CHECK-NEXT:    cset w8, hs
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    cmphi p0.s, p0/z, z1.s, z0.s
 ; CHECK-NEXT:    whilelo p1.s, xzr, x8
 ; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-NEXT:    ret
@@ -584,36 +330,24 @@ entry:
   ret <vscale x 4 x i1> %0
 }
 
-define <vscale x 8 x i1> @whilewr_64_expand2(ptr %a, ptr %b) {
+define <vscale x 8 x i1> @whilewr_64_expand2(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_64_expand2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    index z1.h, #0, #1
+; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    add x9, x8, #7
 ; CHECK-NEXT:    csel x8, x9, x8, mi
+; CHECK-NEXT:    cnth x9
 ; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z4.d, x8
-; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    incd z2.d, all, mul #2
-; CHECK-NEXT:    cmphi p3.d, p0/z, z4.d, z0.d
-; CHECK-NEXT:    mov z3.d, z1.d
-; CHECK-NEXT:    cmphi p1.d, p0/z, z4.d, z2.d
-; CHECK-NEXT:    incd z3.d, all, mul #2
-; CHECK-NEXT:    cmphi p2.d, p0/z, z4.d, z3.d
-; CHECK-NEXT:    cmphi p0.d, p0/z, z4.d, z1.d
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    cneg x9, x8, mi
+; CHECK-NEXT:    mov z0.h, w8
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    ccmp x9, #7, #2, ge
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    uzp1 p1.s, p1.s, p2.s
-; CHECK-NEXT:    uzp1 p0.s, p3.s, p0.s
+; CHECK-NEXT:    ccmp x8, x9, #2, ge
+; CHECK-NEXT:    cset w8, hs
+; CHECK-NEXT:    and z0.h, z0.h, #0xff
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p0.h, p0.h, p1.h
 ; CHECK-NEXT:    whilelo p1.h, xzr, x8
+; CHECK-NEXT:    cmphi p0.h, p0/z, z0.h, z1.h
 ; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-NEXT:    ret
 entry:
@@ -621,156 +355,64 @@ entry:
   ret <vscale x 8 x i1> %0
 }
 
-define <vscale x 16 x i1> @whilewr_64_expand3(ptr %a, ptr %b) {
+define <vscale x 16 x i1> @whilewr_64_expand3(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_64_expand3:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
-; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    index z0.b, #0, #1
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    add x9, x8, #7
 ; CHECK-NEXT:    csel x8, x9, x8, mi
+; CHECK-NEXT:    rdvl x9, #1
 ; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z2.d, x8
-; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    incd z4.d, all, mul #2
-; CHECK-NEXT:    incd z5.d, all, mul #4
-; CHECK-NEXT:    cmphi p2.d, p0/z, z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z1.d
-; CHECK-NEXT:    cmphi p1.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    incd z1.d, all, mul #4
-; CHECK-NEXT:    cmphi p3.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    incd z4.d, all, mul #4
-; CHECK-NEXT:    cmphi p4.d, p0/z, z2.d, z5.d
-; CHECK-NEXT:    incd z3.d, all, mul #2
-; CHECK-NEXT:    cmphi p5.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
-; CHECK-NEXT:    mov z0.d, z3.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z2.d, z3.d
-; CHECK-NEXT:    uzp1 p2.s, p4.s, p5.s
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT:    incd z0.d, all, mul #4
-; CHECK-NEXT:    uzp1 p3.s, p3.s, p6.s
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT:    cmphi p0.d, p0/z, z2.d, z0.d
-; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    cneg x9, x8, mi
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    ccmp x9, #15, #2, ge
-; CHECK-NEXT:    uzp1 p0.s, p7.s, p0.s
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
+; CHECK-NEXT:    mov z1.b, w8
+; CHECK-NEXT:    ccmp x8, x9, #2, ge
+; CHECK-NEXT:    cset w8, hs
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p0.h, p2.h, p0.h
-; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
+; CHECK-NEXT:    cmphi p0.b, p0/z, z1.b, z0.b
 ; CHECK-NEXT:    whilelo p1.b, xzr, x8
 ; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
-; CHECK-NEXT:    addvl sp, sp, #1
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 16 x i1> @llvm.loop.dependence.war.mask.nxv16i1(ptr %a, ptr %b, i64 8)
   ret <vscale x 16 x i1> %0
 }
 
-define <vscale x 32 x i1> @whilewr_64_expand4(ptr %a, ptr %b) {
+define <vscale x 32 x i1> @whilewr_64_expand4(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_64_expand4:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    add x9, x8, #7
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z3.d, x8
-; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    incd z2.d, all, mul #2
-; CHECK-NEXT:    incd z5.d, all, mul #4
-; CHECK-NEXT:    cmphi p5.d, p0/z, z3.d, z0.d
-; CHECK-NEXT:    add z0.d, z0.d, #16 // =0x10
-; CHECK-NEXT:    mov z4.d, z1.d
-; CHECK-NEXT:    mov z6.d, z2.d
-; CHECK-NEXT:    mov z7.d, z1.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z3.d, z1.d
-; CHECK-NEXT:    cmphi p1.d, p0/z, z3.d, z2.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z3.d, z5.d
-; CHECK-NEXT:    add z5.d, z5.d, #16 // =0x10
-; CHECK-NEXT:    add z2.d, z2.d, #16 // =0x10
-; CHECK-NEXT:    add z1.d, z1.d, #16 // =0x10
-; CHECK-NEXT:    incd z4.d, all, mul #2
-; CHECK-NEXT:    incd z6.d, all, mul #4
-; CHECK-NEXT:    incd z7.d, all, mul #4
-; CHECK-NEXT:    uzp1 p3.s, p5.s, p3.s
-; CHECK-NEXT:    mov z24.d, z4.d
-; CHECK-NEXT:    cmphi p2.d, p0/z, z3.d, z4.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z3.d, z7.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z3.d, z6.d
-; CHECK-NEXT:    add z6.d, z6.d, #16 // =0x10
-; CHECK-NEXT:    add z7.d, z7.d, #16 // =0x10
-; CHECK-NEXT:    add z4.d, z4.d, #16 // =0x10
-; CHECK-NEXT:    incd z24.d, all, mul #4
-; CHECK-NEXT:    uzp1 p1.s, p1.s, p2.s
-; CHECK-NEXT:    uzp1 p2.s, p4.s, p6.s
-; CHECK-NEXT:    cmphi p5.d, p0/z, z3.d, z24.d
-; CHECK-NEXT:    uzp1 p1.h, p3.h, p1.h
-; CHECK-NEXT:    add z24.d, z24.d, #16 // =0x10
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    cneg x9, x8, mi
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    ccmp x9, #15, #2, ge
-; CHECK-NEXT:    uzp1 p3.s, p7.s, p5.s
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    cmphi p4.d, p0/z, z3.d, z24.d
-; CHECK-NEXT:    cmphi p5.d, p0/z, z3.d, z6.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z3.d, z7.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z3.d, z5.d
-; CHECK-NEXT:    uzp1 p2.h, p2.h, p3.h
-; CHECK-NEXT:    cmphi p3.d, p0/z, z3.d, z4.d
-; CHECK-NEXT:    cmphi p8.d, p0/z, z3.d, z2.d
+; CHECK-NEXT:    subs x9, x1, x0
+; CHECK-NEXT:    rdvl x8, #1
+; CHECK-NEXT:    index z0.b, #0, #1
+; CHECK-NEXT:    add x10, x9, #7
+; CHECK-NEXT:    mov z1.b, w8
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    csel x9, x10, x9, mi
+; CHECK-NEXT:    asr x9, x9, #3
+; CHECK-NEXT:    cmp x9, #1
+; CHECK-NEXT:    mov z2.b, w9
+; CHECK-NEXT:    cset w10, lt
+; CHECK-NEXT:    cmp x9, x8
+; CHECK-NEXT:    csinc w11, w10, wzr, lo
+; CHECK-NEXT:    sbfx x8, x11, #0, #1
+; CHECK-NEXT:    cmphi p2.b, p0/z, z2.b, z0.b
+; CHECK-NEXT:    add z0.b, z0.b, z1.b
+; CHECK-NEXT:    whilelo p1.b, xzr, x8
+; CHECK-NEXT:    rdvl x8, #2
+; CHECK-NEXT:    cmp x9, x8
+; CHECK-NEXT:    csinc w8, w10, wzr, lo
+; CHECK-NEXT:    cmphi p3.b, p0/z, z2.b, z0.b
+; CHECK-NEXT:    sel p0.b, p2, p2.b, p1.b
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p1.b, p1.b, p2.b
-; CHECK-NEXT:    cmphi p2.d, p0/z, z3.d, z1.d
-; CHECK-NEXT:    cmphi p0.d, p0/z, z3.d, z0.d
-; CHECK-NEXT:    uzp1 p4.s, p5.s, p4.s
-; CHECK-NEXT:    uzp1 p5.s, p7.s, p6.s
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p3.s, p8.s, p3.s
-; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.s, p0.s, p2.s
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p2.h, p5.h, p4.h
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.h, p0.h, p3.h
+; CHECK-NEXT:    whilelo p4.b, xzr, x8
+; CHECK-NEXT:    sel p1.b, p3, p3.b, p4.b
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT:    whilelo p3.b, xzr, x8
-; CHECK-NEXT:    uzp1 p2.b, p0.b, p2.b
-; CHECK-NEXT:    sel p0.b, p1, p1.b, p3.b
-; CHECK-NEXT:    sel p1.b, p2, p2.b, p3.b
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -779,7 +421,7 @@ entry:
   ret <vscale x 32 x i1> %0
 }
 
-define <vscale x 9 x i1> @whilewr_8_widen(ptr %a, ptr %b) {
+define <vscale x 9 x i1> @whilewr_8_widen(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_8_widen:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    whilewr p0.b, x0, x1
@@ -789,7 +431,7 @@ entry:
   ret <vscale x 9 x i1> %0
 }
 
-define <vscale x 7 x i1> @whilewr_16_widen(ptr %a, ptr %b) {
+define <vscale x 7 x i1> @whilewr_16_widen(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_16_widen:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    whilewr p0.h, x0, x1
@@ -799,7 +441,7 @@ entry:
   ret <vscale x 7 x i1> %0
 }
 
-define <vscale x 3 x i1> @whilewr_32_widen(ptr %a, ptr %b) {
+define <vscale x 3 x i1> @whilewr_32_widen(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_32_widen:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    whilewr p0.s, x0, x1
@@ -809,68 +451,29 @@ entry:
   ret <vscale x 3 x i1> %0
 }
 
-define <vscale x 16 x i1> @whilewr_badimm(ptr %a, ptr %b) {
+define <vscale x 16 x i1> @whilewr_badimm(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_badimm:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
-; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    mov x8, #6148914691236517205 // =0x5555555555555555
 ; CHECK-NEXT:    sub x9, x1, x0
+; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    movk x8, #21846
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    smulh x8, x9, x8
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    incd z1.d
+; CHECK-NEXT:    rdvl x9, #1
 ; CHECK-NEXT:    add x8, x8, x8, lsr #63
-; CHECK-NEXT:    incd z4.d, all, mul #2
-; CHECK-NEXT:    incd z5.d, all, mul #4
-; CHECK-NEXT:    mov z2.d, x8
-; CHECK-NEXT:    mov z3.d, z1.d
-; CHECK-NEXT:    cmphi p2.d, p0/z, z2.d, z0.d
-; CHECK-NEXT:    cmphi p1.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    incd z1.d, all, mul #4
-; CHECK-NEXT:    incd z3.d, all, mul #2
-; CHECK-NEXT:    cmphi p3.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    incd z4.d, all, mul #4
-; CHECK-NEXT:    cmphi p4.d, p0/z, z2.d, z5.d
-; CHECK-NEXT:    cmphi p5.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    mov z0.d, z3.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z2.d, z3.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
-; CHECK-NEXT:    incd z0.d, all, mul #4
-; CHECK-NEXT:    uzp1 p2.s, p4.s, p5.s
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p3.s, p3.s, p6.s
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT:    cmphi p0.d, p0/z, z2.d, z0.d
-; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    cneg x9, x8, mi
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    ccmp x9, #15, #2, ge
-; CHECK-NEXT:    uzp1 p0.s, p7.s, p0.s
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
+; CHECK-NEXT:    mov z1.b, w8
+; CHECK-NEXT:    ccmp x8, x9, #2, ge
+; CHECK-NEXT:    cset w8, hs
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p0.h, p2.h, p0.h
-; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
+; CHECK-NEXT:    cmphi p0.b, p0/z, z1.b, z0.b
 ; CHECK-NEXT:    whilelo p1.b, xzr, x8
 ; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
-; CHECK-NEXT:    addvl sp, sp, #1
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 16 x i1> @llvm.loop.dependence.war.mask.nxv16i1(ptr %a, ptr %b, i64 3)
   ret <vscale x 16 x i1> %0
 }
+
+attributes #0 = { vscale_range(1, 2) }
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_scalable_nosve2.ll b/llvm/test/CodeGen/AArch64/alias_mask_scalable_nosve2.ll
index a2bfe6ffe55bd..dc6ffb5154d3f 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_scalable_nosve2.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_scalable_nosve2.ll
@@ -1,32 +1,19 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; RUN: llc -mtriple=aarch64 -mattr=+sve %s -o - | FileCheck %s
 
-define <vscale x 16 x i1> @whilewr_8(ptr %a, ptr %b) {
+define <vscale x 16 x i1> @whilewr_8(ptr %a, ptr %b) vscale_range(1, 4) {
 ; CHECK-LABEL: whilewr_8:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.s, #0, #1
+; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    mov z1.s, w8
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    cmphi p3.s, p0/z, z1.s, z0.s
-; CHECK-NEXT:    incw z2.s
-; CHECK-NEXT:    incw z3.s, all, mul #2
-; CHECK-NEXT:    mov z4.d, z2.d
-; CHECK-NEXT:    cmphi p1.s, p0/z, z1.s, z3.s
-; CHECK-NEXT:    incw z4.s, all, mul #2
-; CHECK-NEXT:    cmphi p2.s, p0/z, z1.s, z4.s
-; CHECK-NEXT:    cmphi p0.s, p0/z, z1.s, z2.s
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    cneg x9, x8, mi
+; CHECK-NEXT:    rdvl x9, #1
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    ccmp x9, #15, #2, ge
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    uzp1 p1.h, p1.h, p2.h
-; CHECK-NEXT:    uzp1 p0.h, p3.h, p0.h
+; CHECK-NEXT:    mov z1.b, w8
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    ccmp x8, x9, #2, ge
+; CHECK-NEXT:    cset w8, hs
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p0.b, p0.b, p1.b
+; CHECK-NEXT:    cmphi p0.b, p0/z, z1.b, z0.b
 ; CHECK-NEXT:    whilelo p1.b, xzr, x8
 ; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/loop-dependence-mask-ccmp.ll b/llvm/test/CodeGen/AArch64/loop-dependence-mask-ccmp.ll
deleted file mode 100644
index 2c5e351ee9ba7..0000000000000
--- a/llvm/test/CodeGen/AArch64/loop-dependence-mask-ccmp.ll
+++ /dev/null
@@ -1,45 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=aarch64 -mattr=+sve2 -verify-machineinstrs -stop-after=finalize-isel %s -o - | FileCheck %s
-
-; Regression test for a bug where getTargetConstant(0) was used instead of
-; getConstant(0) in ScalarizeVecRes_LOOP_DEPENDENCE_MASK, causing instruction
-; selection to incorrectly generate CCMPXr (register form) with an immediate
-; operand instead of CCMPXi (immediate form).
-;
-
-define <1 x i1> @test_war_mask_ccmp(ptr %a, ptr %b) {
-  ; CHECK-LABEL: name: test_war_mask_ccmp
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x0, $x1
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr64 = COPY $x1
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr64 = COPY $x0
-  ; CHECK-NEXT:   [[SUBSXrr:%[0-9]+]]:gpr64common = SUBSXrr [[COPY]], [[COPY1]], implicit-def dead $nzcv
-  ; CHECK-NEXT:   [[ADDSXri:%[0-9]+]]:gpr64 = ADDSXri killed [[SUBSXrr]], 1, 0, implicit-def $nzcv
-  ; CHECK-NEXT:   [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
-  ; CHECK-NEXT:   $w0 = COPY [[CSINCWr]]
-  ; CHECK-NEXT:   RET_ReallyLR implicit $w0
-entry:
-  %0 = call <1 x i1> @llvm.loop.dependence.war.mask.v1i1(ptr %a, ptr %b, i64 1)
-  ret <1 x i1> %0
-}
-
-define <1 x i1> @test_raw_mask_ccmp(ptr %a, ptr %b) {
-  ; CHECK-LABEL: name: test_raw_mask_ccmp
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x0, $x1
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr64 = COPY $x1
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr64 = COPY $x0
-  ; CHECK-NEXT:   [[SUBSXrr:%[0-9]+]]:gpr64common = SUBSXrr [[COPY]], [[COPY1]], implicit-def dead $nzcv
-  ; CHECK-NEXT:   [[ADDSXri:%[0-9]+]]:gpr64 = ADDSXri killed [[SUBSXrr]], 1, 0, implicit-def $nzcv
-  ; CHECK-NEXT:   [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
-  ; CHECK-NEXT:   $w0 = COPY [[CSINCWr]]
-  ; CHECK-NEXT:   RET_ReallyLR implicit $w0
-entry:
-  %0 = call <1 x i1> @llvm.loop.dependence.raw.mask.v1i1(ptr %a, ptr %b, i64 1)
-  ret <1 x i1> %0
-}
-
-declare <1 x i1> @llvm.loop.dependence.war.mask.v1i1(ptr, ptr, i64)
-declare <1 x i1> @llvm.loop.dependence.raw.mask.v1i1(ptr, ptr, i64)

>From 270d2e73fc88eee9b9228a978d09fa7f02f9e226 Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell <benjamin.maxwell at arm.com>
Date: Wed, 10 Dec 2025 10:47:58 +0000
Subject: [PATCH 3/3] Simplify lowering/expansion

---
 llvm/include/llvm/CodeGen/ISDOpcodes.h        |   4 +-
 .../SelectionDAG/LegalizeVectorOps.cpp        |  63 +----
 .../Target/AArch64/AArch64ISelLowering.cpp    |   1 -
 llvm/test/CodeGen/AArch64/alias_mask.ll       | 245 +++++++-----------
 llvm/test/CodeGen/AArch64/alias_mask_nosve.ll |  16 +-
 .../CodeGen/AArch64/alias_mask_scalable.ll    | 244 +++--------------
 .../AArch64/alias_mask_scalable_nosve2.ll     |  12 +-
 7 files changed, 146 insertions(+), 439 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index ca48c9bb811dc..22b7de44262ac 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -1577,9 +1577,9 @@ enum NodeType {
   // is replaced with (lane + lane_offset) for the ISD opcode.
   //
   //  E.g., for LOOP_DEPENDENCE_WAR_MASK:
-  //    `(ptrB - ptrA) >= elementSize * lane`
+  //    `(ptrB - ptrA) > elementSize * lane`
   //  Becomes:
-  //    `(ptrB - ptrA) >= elementSize * (lane + lane_offset)`
+  //    `(ptrB - ptrA) > elementSize * (lane + lane_offset)`
   //
   // This is done to allow for trivial splitting of the operation. Note: The
   // lane offset is always a constant, for scalable masks, it is implicitly
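
As a concrete illustration of the rewritten comment (example numbers of my own, not taken from the patch): a 32-lane WAR mask with elementSize = 1 that gets split into two 16-lane halves gives the high half lane_offset = 16, so for ptrB - ptrA = 20 each high-half lane is tested with

    20 > 1 * (lane + 16)   =>   lane < 4

i.e. only the first four lanes of the high half are active, while the low half (lane_offset = 0) has all 16 of its lanes active.
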
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 2bee083dc3bb1..4ac8b652d589b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -29,7 +29,6 @@
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
-#include "llvm/Analysis/ValueTracking.h"
 #include "llvm/Analysis/VectorUtils.h"
 #include "llvm/CodeGen/ISDOpcodes.h"
 #include "llvm/CodeGen/SelectionDAG.h"
@@ -1817,10 +1816,9 @@ SDValue VectorLegalizer::ExpandLOOP_DEPENDENCE_MASK(SDNode *N) {
   SDValue SourceValue = N->getOperand(0);
   SDValue SinkValue = N->getOperand(1);
   SDValue EltSizeInBytes = N->getOperand(2);
-  const Function &F = DAG.getMachineFunction().getFunction();
 
   // Note: The lane offset is scalable if the mask is scalable.
-  ElementCount LaneOffset =
+  ElementCount LaneOffsetEC =
       ElementCount::get(N->getConstantOperandVal(3), VT.isScalableVT());
 
   EVT PtrVT = SourceValue->getValueType(0);
@@ -1836,62 +1834,19 @@ SDValue VectorLegalizer::ExpandLOOP_DEPENDENCE_MASK(SDNode *N) {
   // The pointers do not alias if:
   //  * Diff <= 0 (WAR_MASK)
   //  * Diff == 0 (RAW_MASK)
-  EVT CmpVT = VT.getVectorElementType();
+  EVT CmpVT =
+      TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), PtrVT);
   SDValue Zero = DAG.getConstant(0, DL, PtrVT);
   SDValue Cmp = DAG.getSetCC(DL, CmpVT, Diff, Zero,
                              IsReadAfterWrite ? ISD::SETEQ : ISD::SETLE);
 
-  // The pointers do not alias within the mask if Diff >= MaxMaskLane. As:
-  //  * `(ptrB - ptrA) >= elementSize * lane` (WAR_MASK)
-  //  * `(ptrB - ptrA) >= elementSize * lane` (RAW_MASK)
-  // Would both be all true.
-  ElementCount MaxMaskLaneEC = LaneOffset + VT.getVectorElementCount();
-  SDValue MaxMaskLane = DAG.getElementCount(DL, PtrVT, MaxMaskLaneEC);
-  Cmp = DAG.getNode(ISD::OR, DL, CmpVT, Cmp,
-                    DAG.getSetCC(DL, CmpVT, Diff, MaxMaskLane, ISD::SETUGE));
-
-  // Attempt to determine the max "meaningful" value of Diff for the comparison
-  // with the lane step_vector. We do not have to consider values that would
-  // result in an "all-true" mask due to any of the above cases. This puts a
-  // fairly low upper bound on the element bitwidth needed for the comparison,
-  // which results in efficient codegen (since fewer vectors are needed). Note:
-  // If the upper bound is scalable, we must know the vscale range (otherwise,
-  // we fall back to a very conservative bound).
-  unsigned MaxMeaningfulDiff = 0;
-  if (MaxMaskLaneEC.isScalable()) {
-    ConstantRange VScaleRange = getVScaleRange(&F, /*BitWidth*/ 64);
-    if (!VScaleRange.isFullSet())
-      MaxMeaningfulDiff = MaxMaskLaneEC.getKnownMinValue() *
-                          VScaleRange.getUpper().getZExtValue();
-  } else {
-    MaxMeaningfulDiff = MaxMaskLaneEC.getFixedValue();
-  }
+  // The pointers do not alias if:
+  // Lane + LaneOffset < Diff (WAR/RAW_MASK)
+  SDValue LaneOffset = DAG.getElementCount(DL, PtrVT, LaneOffsetEC);
+  SDValue MaskN =
+      DAG.getSelect(DL, PtrVT, Cmp, DAG.getConstant(-1, DL, PtrVT), Diff);
 
-  // Note: MaxMeaningfulDiff is zero if the upper bound is unknown.
-  unsigned SplatBitWidth =
-      !MaxMeaningfulDiff
-          ? 32 // Surely 2**32 lanes is enough.
-          : std::max<unsigned>(PowerOf2Ceil(Log2_32(MaxMeaningfulDiff) + 1), 8);
-  EVT SplatEltVT = MVT::getIntegerVT(SplatBitWidth);
-  EVT SplatVT = VT.changeElementType(SplatEltVT);
-
-  // Truncate and splat the diff. If this ends up being an unsafe truncate (i.e,
-  // it does not fit within SplatBitWidth bits), the mask is already all-true.
-  SDValue DiffTrunc =
-      DAG.getExtOrTrunc(!IsReadAfterWrite, Diff, DL, SplatEltVT);
-  SDValue DiffSplat = DAG.getSplat(SplatVT, DL, DiffTrunc);
-
-  SDValue VectorStep = DAG.getStepVector(DL, SplatVT);
-  // Add the lane offset. A non-zero lane offset often comes from a
-  // larger-than-legal vector length being split in two.
-  SDValue LaneIndices = DAG.getNode(
-      ISD::ADD, DL, SplatVT, VectorStep,
-      DAG.getSplat(SplatVT, DL,
-                   DAG.getElementCount(DL, SplatEltVT, LaneOffset)));
-  SDValue DiffMask = DAG.getSetCC(DL, VT, LaneIndices, DiffSplat, ISD::SETULT);
-
-  SDValue Splat = DAG.getSplat(VT, DL, Cmp);
-  return DAG.getNode(ISD::OR, DL, VT, DiffMask, Splat);
+  return DAG.getNode(ISD::GET_ACTIVE_LANE_MASK, DL, VT, LaneOffset, MaskN);
 }
 
 void VectorLegalizer::ExpandFP_TO_UINT(SDNode *Node,
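
For reference, a rough IR-level analogue of what the simplified expansion computes (my own sketch, assuming element size 1 and lane offset 0; the sub, the select of all-ones, and the get.active.lane.mask call correspond to Diff, MaskN, and the GET_ACTIVE_LANE_MASK node above):

    ; illustrative sketch only, not emitted by the patch
    %a.i  = ptrtoint ptr %a to i64
    %b.i  = ptrtoint ptr %b to i64
    %diff = sub i64 %b.i, %a.i
    %safe = icmp sle i64 %diff, 0                      ; WAR: no aliasing hazard when diff <= 0
    %n    = select i1 %safe, i64 -1, i64 %diff         ; all-ones makes every lane active
    %mask = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i64(i64 0, i64 %n)
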
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 7fb98c3d7ed5c..9da79890a8aca 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5461,7 +5461,6 @@ AArch64TargetLowering::LowerLOOP_DEPENDENCE_MASK(SDValue Op,
     return SDValue();
   }
 
-  // TODO: Support split masks
   unsigned LaneOffset = Op.getConstantOperandVal(3);
   if (LaneOffset != 0)
     return SDValue();
diff --git a/llvm/test/CodeGen/AArch64/alias_mask.ll b/llvm/test/CodeGen/AArch64/alias_mask.ll
index 60a4dcd81f1be..bf393b6e87710 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask.ll
@@ -100,34 +100,30 @@ entry:
 define <32 x i1> @whilewr_8_split(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_8_split:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w9, #16 // =0x10
-; CHECK-NEXT:    sub x10, x1, x0
-; CHECK-NEXT:    index z0.b, w9, #1
-; CHECK-NEXT:    cmp x10, #1
-; CHECK-NEXT:    dup v1.16b, w10
-; CHECK-NEXT:    ccmp x10, #31, #2, ge
-; CHECK-NEXT:    cset w9, hi
+; CHECK-NEXT:    sub x9, x1, x0
+; CHECK-NEXT:    mov w10, #16 // =0x10
+; CHECK-NEXT:    cmp x9, #1
+; CHECK-NEXT:    csinv x9, x9, xzr, ge
 ; CHECK-NEXT:    whilewr p0.b, x0, x1
-; CHECK-NEXT:    dup v2.16b, w9
+; CHECK-NEXT:    whilelo p1.b, x10, x9
 ; CHECK-NEXT:    adrp x9, .LCPI8_0
-; CHECK-NEXT:    cmhi v0.16b, v1.16b, v0.16b
-; CHECK-NEXT:    mov z1.b, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    shl v1.16b, v1.16b, #7
+; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI8_0]
+; CHECK-NEXT:    mov z1.b, p1/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
-; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
+; CHECK-NEXT:    shl v1.16b, v1.16b, #7
 ; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
-; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
 ; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    ext v2.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ext v3.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    zip1 v1.16b, v1.16b, v2.16b
-; CHECK-NEXT:    zip1 v0.16b, v0.16b, v3.16b
-; CHECK-NEXT:    addv h1, v1.8h
+; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    zip1 v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    zip1 v1.16b, v1.16b, v3.16b
 ; CHECK-NEXT:    addv h0, v0.8h
-; CHECK-NEXT:    str h1, [x8]
-; CHECK-NEXT:    str h0, [x8, #2]
+; CHECK-NEXT:    addv h1, v1.8h
+; CHECK-NEXT:    str h0, [x8]
+; CHECK-NEXT:    str h1, [x8, #2]
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <32 x i1> @llvm.loop.dependence.war.mask.v32i1(ptr %a, ptr %b, i64 1)
@@ -137,63 +133,50 @@ entry:
 define <64 x i1> @whilewr_8_split2(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_8_split2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    sub x9, x1, x0
+; CHECK-NEXT:    mov w10, #48 // =0x30
+; CHECK-NEXT:    mov w11, #32 // =0x20
 ; CHECK-NEXT:    cmp x9, #1
-; CHECK-NEXT:    dup v1.16b, w9
-; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    cmp x9, #31
-; CHECK-NEXT:    csinc w11, w10, wzr, ls
-; CHECK-NEXT:    cmp x9, #47
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    add z0.b, z0.b, #48 // =0x30
-; CHECK-NEXT:    csinc w12, w10, wzr, ls
-; CHECK-NEXT:    cmp x9, #63
-; CHECK-NEXT:    dup v6.16b, w11
-; CHECK-NEXT:    csinc w9, w10, wzr, ls
-; CHECK-NEXT:    dup v5.16b, w12
+; CHECK-NEXT:    csinv x9, x9, xzr, ge
 ; CHECK-NEXT:    whilewr p0.b, x0, x1
-; CHECK-NEXT:    add z3.b, z3.b, #32 // =0x20
-; CHECK-NEXT:    add z2.b, z2.b, #16 // =0x10
-; CHECK-NEXT:    dup v4.16b, w9
-; CHECK-NEXT:    cmhi v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    whilelo p1.b, x10, x9
+; CHECK-NEXT:    mov w10, #16 // =0x10
+; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    whilelo p0.b, x11, x9
+; CHECK-NEXT:    mov z1.b, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    whilelo p1.b, x10, x9
 ; CHECK-NEXT:    adrp x9, .LCPI9_0
-; CHECK-NEXT:    cmhi v3.16b, v1.16b, v3.16b
-; CHECK-NEXT:    cmhi v1.16b, v1.16b, v2.16b
 ; CHECK-NEXT:    mov z2.b, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    orr v0.16b, v0.16b, v4.16b
 ; CHECK-NEXT:    ldr q4, [x9, :lo12:.LCPI9_0]
-; CHECK-NEXT:    orr v3.16b, v3.16b, v5.16b
-; CHECK-NEXT:    orr v1.16b, v1.16b, v6.16b
-; CHECK-NEXT:    shl v2.16b, v2.16b, #7
+; CHECK-NEXT:    mov z3.b, p1/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
-; CHECK-NEXT:    shl v3.16b, v3.16b, #7
 ; CHECK-NEXT:    shl v1.16b, v1.16b, #7
-; CHECK-NEXT:    cmlt v2.16b, v2.16b, #0
+; CHECK-NEXT:    shl v2.16b, v2.16b, #7
+; CHECK-NEXT:    shl v3.16b, v3.16b, #7
 ; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
-; CHECK-NEXT:    cmlt v3.16b, v3.16b, #0
 ; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
-; CHECK-NEXT:    and v2.16b, v2.16b, v4.16b
+; CHECK-NEXT:    cmlt v2.16b, v2.16b, #0
+; CHECK-NEXT:    cmlt v3.16b, v3.16b, #0
 ; CHECK-NEXT:    and v0.16b, v0.16b, v4.16b
-; CHECK-NEXT:    and v3.16b, v3.16b, v4.16b
 ; CHECK-NEXT:    and v1.16b, v1.16b, v4.16b
-; CHECK-NEXT:    ext v5.16b, v2.16b, v2.16b, #8
+; CHECK-NEXT:    and v2.16b, v2.16b, v4.16b
+; CHECK-NEXT:    and v3.16b, v3.16b, v4.16b
 ; CHECK-NEXT:    ext v4.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    ext v6.16b, v3.16b, v3.16b, #8
-; CHECK-NEXT:    ext v7.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    zip1 v2.16b, v2.16b, v5.16b
+; CHECK-NEXT:    ext v5.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    ext v6.16b, v2.16b, v2.16b, #8
+; CHECK-NEXT:    ext v7.16b, v3.16b, v3.16b, #8
 ; CHECK-NEXT:    zip1 v0.16b, v0.16b, v4.16b
-; CHECK-NEXT:    zip1 v3.16b, v3.16b, v6.16b
-; CHECK-NEXT:    zip1 v1.16b, v1.16b, v7.16b
-; CHECK-NEXT:    addv h2, v2.8h
+; CHECK-NEXT:    zip1 v1.16b, v1.16b, v5.16b
+; CHECK-NEXT:    zip1 v2.16b, v2.16b, v6.16b
+; CHECK-NEXT:    zip1 v3.16b, v3.16b, v7.16b
 ; CHECK-NEXT:    addv h0, v0.8h
-; CHECK-NEXT:    addv h3, v3.8h
 ; CHECK-NEXT:    addv h1, v1.8h
-; CHECK-NEXT:    str h2, [x8]
-; CHECK-NEXT:    str h0, [x8, #6]
-; CHECK-NEXT:    str h3, [x8, #4]
-; CHECK-NEXT:    str h1, [x8, #2]
+; CHECK-NEXT:    addv h2, v2.8h
+; CHECK-NEXT:    addv h3, v3.8h
+; CHECK-NEXT:    str h0, [x8]
+; CHECK-NEXT:    str h1, [x8, #6]
+; CHECK-NEXT:    str h2, [x8, #4]
+; CHECK-NEXT:    str h3, [x8, #2]
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <64 x i1> @llvm.loop.dependence.war.mask.v64i1(ptr %a, ptr %b, i64 1)
@@ -204,16 +187,13 @@ define <16 x i1> @whilewr_16_expand(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_16_expand:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    add x8, x8, x8, lsr #63
 ; CHECK-NEXT:    asr x8, x8, #1
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    dup v1.16b, w8
-; CHECK-NEXT:    ccmp x8, #15, #2, ge
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    dup v2.16b, w8
-; CHECK-NEXT:    cmhi v0.16b, v1.16b, v0.16b
-; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    csinv x8, x8, xzr, ge
+; CHECK-NEXT:    whilelo p0.b, xzr, x8
+; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 2)
@@ -224,25 +204,17 @@ define <32 x i1> @whilewr_16_expand2(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_16_expand2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub x9, x1, x0
-; CHECK-NEXT:    index z0.b, #0, #1
+; CHECK-NEXT:    mov w10, #16 // =0x10
 ; CHECK-NEXT:    add x9, x9, x9, lsr #63
 ; CHECK-NEXT:    asr x9, x9, #1
-; CHECK-NEXT:    dup v1.16b, w9
 ; CHECK-NEXT:    cmp x9, #1
-; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    cmp x9, #15
-; CHECK-NEXT:    csinc w11, w10, wzr, ls
-; CHECK-NEXT:    cmp x9, #31
-; CHECK-NEXT:    cmhi v2.16b, v1.16b, v0.16b
-; CHECK-NEXT:    add z0.b, z0.b, #16 // =0x10
-; CHECK-NEXT:    csinc w9, w10, wzr, ls
-; CHECK-NEXT:    dup v3.16b, w9
+; CHECK-NEXT:    csinv x9, x9, xzr, ge
+; CHECK-NEXT:    whilelo p0.b, x10, x9
+; CHECK-NEXT:    whilelo p1.b, xzr, x9
 ; CHECK-NEXT:    adrp x9, .LCPI11_0
-; CHECK-NEXT:    cmhi v0.16b, v1.16b, v0.16b
-; CHECK-NEXT:    dup v1.16b, w11
-; CHECK-NEXT:    orr v0.16b, v0.16b, v3.16b
-; CHECK-NEXT:    orr v1.16b, v2.16b, v1.16b
+; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI11_0]
+; CHECK-NEXT:    mov z1.b, p1/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
 ; CHECK-NEXT:    shl v1.16b, v1.16b, #7
 ; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
@@ -267,17 +239,14 @@ define <8 x i1> @whilewr_32_expand(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_32_expand:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    add x9, x8, #3
 ; CHECK-NEXT:    csel x8, x9, x8, mi
 ; CHECK-NEXT:    asr x8, x8, #2
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    dup v1.8b, w8
-; CHECK-NEXT:    ccmp x8, #7, #2, ge
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    dup v2.8b, w8
-; CHECK-NEXT:    cmhi v0.8b, v1.8b, v0.8b
-; CHECK-NEXT:    orr v0.8b, v0.8b, v2.8b
+; CHECK-NEXT:    csinv x8, x8, xzr, ge
+; CHECK-NEXT:    whilelo p0.b, xzr, x8
+; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <8 x i1> @llvm.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 4)
@@ -288,17 +257,14 @@ define <16 x i1> @whilewr_32_expand2(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_32_expand2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    add x9, x8, #3
 ; CHECK-NEXT:    csel x8, x9, x8, mi
 ; CHECK-NEXT:    asr x8, x8, #2
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    dup v1.16b, w8
-; CHECK-NEXT:    ccmp x8, #15, #2, ge
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    dup v2.16b, w8
-; CHECK-NEXT:    cmhi v0.16b, v1.16b, v0.16b
-; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    csinv x8, x8, xzr, ge
+; CHECK-NEXT:    whilelo p0.b, xzr, x8
+; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 4)
@@ -309,26 +275,18 @@ define <32 x i1> @whilewr_32_expand3(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_32_expand3:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    subs x9, x1, x0
-; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    add x10, x9, #3
 ; CHECK-NEXT:    csel x9, x10, x9, mi
+; CHECK-NEXT:    mov w10, #16 // =0x10
 ; CHECK-NEXT:    asr x9, x9, #2
-; CHECK-NEXT:    dup v1.16b, w9
 ; CHECK-NEXT:    cmp x9, #1
-; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    cmp x9, #15
-; CHECK-NEXT:    csinc w11, w10, wzr, ls
-; CHECK-NEXT:    cmp x9, #31
-; CHECK-NEXT:    cmhi v2.16b, v1.16b, v0.16b
-; CHECK-NEXT:    add z0.b, z0.b, #16 // =0x10
-; CHECK-NEXT:    csinc w9, w10, wzr, ls
-; CHECK-NEXT:    dup v3.16b, w9
+; CHECK-NEXT:    csinv x9, x9, xzr, ge
+; CHECK-NEXT:    whilelo p0.b, x10, x9
+; CHECK-NEXT:    whilelo p1.b, xzr, x9
 ; CHECK-NEXT:    adrp x9, .LCPI14_0
-; CHECK-NEXT:    cmhi v0.16b, v1.16b, v0.16b
-; CHECK-NEXT:    dup v1.16b, w11
-; CHECK-NEXT:    orr v0.16b, v0.16b, v3.16b
-; CHECK-NEXT:    orr v1.16b, v2.16b, v1.16b
+; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI14_0]
+; CHECK-NEXT:    mov z1.b, p1/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
 ; CHECK-NEXT:    shl v1.16b, v1.16b, #7
 ; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
@@ -353,18 +311,14 @@ define <4 x i1> @whilewr_64_expand(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_64_expand:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    index z0.h, #0, #1
 ; CHECK-NEXT:    add x9, x8, #7
 ; CHECK-NEXT:    csel x8, x9, x8, mi
 ; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    dup v1.4h, w8
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    ccmp x8, #3, #2, ge
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    dup v2.4h, w8
-; CHECK-NEXT:    bic v1.4h, #255, lsl #8
-; CHECK-NEXT:    cmhi v0.4h, v1.4h, v0.4h
-; CHECK-NEXT:    orr v0.8b, v0.8b, v2.8b
+; CHECK-NEXT:    csinv x8, x8, xzr, ge
+; CHECK-NEXT:    whilelo p0.h, xzr, x8
+; CHECK-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <4 x i1> @llvm.loop.dependence.war.mask.v4i1(ptr %a, ptr %b, i64 8)
@@ -375,17 +329,14 @@ define <8 x i1> @whilewr_64_expand2(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_64_expand2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    add x9, x8, #7
 ; CHECK-NEXT:    csel x8, x9, x8, mi
 ; CHECK-NEXT:    asr x8, x8, #3
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    dup v1.8b, w8
-; CHECK-NEXT:    ccmp x8, #7, #2, ge
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    dup v2.8b, w8
-; CHECK-NEXT:    cmhi v0.8b, v1.8b, v0.8b
-; CHECK-NEXT:    orr v0.8b, v0.8b, v2.8b
+; CHECK-NEXT:    csinv x8, x8, xzr, ge
+; CHECK-NEXT:    whilelo p0.b, xzr, x8
+; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <8 x i1> @llvm.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 8)
@@ -396,17 +347,14 @@ define <16 x i1> @whilewr_64_expand3(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_64_expand3:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    add x9, x8, #7
 ; CHECK-NEXT:    csel x8, x9, x8, mi
 ; CHECK-NEXT:    asr x8, x8, #3
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    dup v1.16b, w8
-; CHECK-NEXT:    ccmp x8, #15, #2, ge
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    dup v2.16b, w8
-; CHECK-NEXT:    cmhi v0.16b, v1.16b, v0.16b
-; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    csinv x8, x8, xzr, ge
+; CHECK-NEXT:    whilelo p0.b, xzr, x8
+; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 8)
@@ -417,26 +365,18 @@ define <32 x i1> @whilewr_64_expand4(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_64_expand4:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    subs x9, x1, x0
-; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    add x10, x9, #7
 ; CHECK-NEXT:    csel x9, x10, x9, mi
+; CHECK-NEXT:    mov w10, #16 // =0x10
 ; CHECK-NEXT:    asr x9, x9, #3
-; CHECK-NEXT:    dup v1.16b, w9
 ; CHECK-NEXT:    cmp x9, #1
-; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    cmp x9, #15
-; CHECK-NEXT:    csinc w11, w10, wzr, ls
-; CHECK-NEXT:    cmp x9, #31
-; CHECK-NEXT:    cmhi v2.16b, v1.16b, v0.16b
-; CHECK-NEXT:    add z0.b, z0.b, #16 // =0x10
-; CHECK-NEXT:    csinc w9, w10, wzr, ls
-; CHECK-NEXT:    dup v3.16b, w9
+; CHECK-NEXT:    csinv x9, x9, xzr, ge
+; CHECK-NEXT:    whilelo p0.b, x10, x9
+; CHECK-NEXT:    whilelo p1.b, xzr, x9
 ; CHECK-NEXT:    adrp x9, .LCPI18_0
-; CHECK-NEXT:    cmhi v0.16b, v1.16b, v0.16b
-; CHECK-NEXT:    dup v1.16b, w11
-; CHECK-NEXT:    orr v0.16b, v0.16b, v3.16b
-; CHECK-NEXT:    orr v1.16b, v2.16b, v1.16b
+; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI18_0]
+; CHECK-NEXT:    mov z1.b, p1/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
 ; CHECK-NEXT:    shl v1.16b, v1.16b, #7
 ; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
@@ -528,17 +468,14 @@ define <16 x i1> @whilewr_badimm(ptr %a, ptr %b) {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    mov x8, #6148914691236517205 // =0x5555555555555555
 ; CHECK-NEXT:    sub x9, x1, x0
-; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    movk x8, #21846
 ; CHECK-NEXT:    smulh x8, x9, x8
 ; CHECK-NEXT:    add x8, x8, x8, lsr #63
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    dup v1.16b, w8
-; CHECK-NEXT:    ccmp x8, #15, #2, ge
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    dup v2.16b, w8
-; CHECK-NEXT:    cmhi v0.16b, v1.16b, v0.16b
-; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    csinv x8, x8, xzr, ge
+; CHECK-NEXT:    whilelo p0.b, xzr, x8
+; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 3)
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_nosve.ll b/llvm/test/CodeGen/AArch64/alias_mask_nosve.ll
index 3fd1c5f949487..0b1221244a757 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_nosve.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_nosve.ll
@@ -1,20 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s
 
+; TODO: Currently lowering get_active_lane_mask requires +sve
+; XFAIL: *
+
 define <16 x i1> @whilewr_8(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_8:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub x9, x1, x0
-; CHECK-NEXT:    adrp x8, .LCPI0_0
-; CHECK-NEXT:    cmp x9, #1
-; CHECK-NEXT:    dup v0.16b, w9
-; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI0_0]
-; CHECK-NEXT:    ccmp x9, #15, #2, ge
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    dup v2.16b, w8
-; CHECK-NEXT:    cmhi v0.16b, v0.16b, v1.16b
-; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    ret
 entry:
   %0 = call <16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 1)
   ret <16 x i1> %0
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
index 221086ff77b20..c7c372e17b7f2 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
@@ -84,20 +84,12 @@ entry:
 define <vscale x 32 x i1> @whilewr_8_split(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_8_split:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    rdvl x8, #1
 ; CHECK-NEXT:    sub x9, x1, x0
-; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    index z0.b, w8, #1
-; CHECK-NEXT:    rdvl x8, #2
+; CHECK-NEXT:    rdvl x8, #1
 ; CHECK-NEXT:    cmp x9, #1
-; CHECK-NEXT:    mov z1.b, w9
-; CHECK-NEXT:    ccmp x9, x8, #2, ge
-; CHECK-NEXT:    cset w8, hs
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    cmphi p0.b, p0/z, z1.b, z0.b
-; CHECK-NEXT:    whilelo p1.b, xzr, x8
-; CHECK-NEXT:    mov p1.b, p0/m, p0.b
+; CHECK-NEXT:    csinv x9, x9, xzr, ge
 ; CHECK-NEXT:    whilewr p0.b, x0, x1
+; CHECK-NEXT:    whilelo p1.b, x8, x9
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 32 x i1> @llvm.loop.dependence.war.mask.nxv32i1(ptr %a, ptr %b, i64 1)
@@ -107,51 +99,16 @@ entry:
 define <vscale x 64 x i1> @whilewr_8_split2(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_8_split2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
-; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    sub x9, x1, x0
 ; CHECK-NEXT:    rdvl x8, #1
-; CHECK-NEXT:    index z0.b, #0, #1
-; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    mov z1.b, w8
-; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    rdvl x9, #2
-; CHECK-NEXT:    mov z2.b, w8
-; CHECK-NEXT:    rdvl x11, #3
-; CHECK-NEXT:    mov z3.b, w11
-; CHECK-NEXT:    add z1.b, z0.b, z1.b
-; CHECK-NEXT:    cmphi p1.b, p0/z, z2.b, z1.b
-; CHECK-NEXT:    mov z1.b, w9
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    cmp x8, x9
-; CHECK-NEXT:    csinc w9, w10, wzr, lo
-; CHECK-NEXT:    add z1.b, z0.b, z1.b
-; CHECK-NEXT:    add z0.b, z0.b, z3.b
-; CHECK-NEXT:    sbfx x9, x9, #0, #1
-; CHECK-NEXT:    cmphi p2.b, p0/z, z2.b, z1.b
-; CHECK-NEXT:    whilelo p3.b, xzr, x9
-; CHECK-NEXT:    cmp x8, x11
-; CHECK-NEXT:    csinc w9, w10, wzr, lo
-; CHECK-NEXT:    cmphi p5.b, p0/z, z2.b, z0.b
-; CHECK-NEXT:    sel p1.b, p1, p1.b, p3.b
-; CHECK-NEXT:    sbfx x9, x9, #0, #1
-; CHECK-NEXT:    whilelo p4.b, xzr, x9
-; CHECK-NEXT:    rdvl x9, #4
-; CHECK-NEXT:    cmp x8, x9
-; CHECK-NEXT:    csinc w8, w10, wzr, lo
-; CHECK-NEXT:    sel p2.b, p2, p2.b, p4.b
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    rdvl x10, #2
+; CHECK-NEXT:    cmp x9, #1
+; CHECK-NEXT:    csinv x9, x9, xzr, ge
 ; CHECK-NEXT:    whilewr p0.b, x0, x1
-; CHECK-NEXT:    whilelo p3.b, xzr, x8
-; CHECK-NEXT:    mov p3.b, p5/m, p5.b
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    addvl sp, sp, #1
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    whilelo p1.b, x8, x9
+; CHECK-NEXT:    rdvl x8, #3
+; CHECK-NEXT:    whilelo p2.b, x10, x9
+; CHECK-NEXT:    whilelo p3.b, x8, x9
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 64 x i1> @llvm.loop.dependence.war.mask.nxv64i1(ptr %a, ptr %b, i64 1)
@@ -162,19 +119,11 @@ define <vscale x 16 x i1> @whilewr_16_expand(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_16_expand:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    index z0.b, #0, #1
-; CHECK-NEXT:    rdvl x9, #1
 ; CHECK-NEXT:    add x8, x8, x8, lsr #63
-; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    asr x8, x8, #1
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    mov z1.b, w8
-; CHECK-NEXT:    ccmp x8, x9, #2, ge
-; CHECK-NEXT:    cset w8, hs
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    cmphi p0.b, p0/z, z1.b, z0.b
-; CHECK-NEXT:    whilelo p1.b, xzr, x8
-; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
+; CHECK-NEXT:    csinv x8, x8, xzr, ge
+; CHECK-NEXT:    whilelo p0.b, xzr, x8
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 16 x i1> @llvm.loop.dependence.war.mask.nxv16i1(ptr %a, ptr %b, i64 2)
@@ -184,30 +133,14 @@ entry:
 define <vscale x 32 x i1> @whilewr_16_expand2(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_16_expand2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    rdvl x9, #1
-; CHECK-NEXT:    index z0.b, #0, #1
-; CHECK-NEXT:    add x8, x8, x8, lsr #63
-; CHECK-NEXT:    mov z1.b, w9
-; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    rdvl x10, #2
-; CHECK-NEXT:    asr x8, x8, #1
-; CHECK-NEXT:    add z1.b, z0.b, z1.b
-; CHECK-NEXT:    mov z2.b, w8
-; CHECK-NEXT:    cmphi p1.b, p0/z, z2.b, z1.b
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w11, lt
-; CHECK-NEXT:    cmp x8, x10
-; CHECK-NEXT:    csinc w10, w11, wzr, lo
-; CHECK-NEXT:    sbfx x10, x10, #0, #1
-; CHECK-NEXT:    whilelo p2.b, xzr, x10
-; CHECK-NEXT:    cmp x8, x9
-; CHECK-NEXT:    csinc w8, w11, wzr, lo
-; CHECK-NEXT:    cmphi p0.b, p0/z, z2.b, z0.b
-; CHECK-NEXT:    sel p1.b, p1, p1.b, p2.b
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    whilelo p3.b, xzr, x8
-; CHECK-NEXT:    sel p0.b, p0, p0.b, p3.b
+; CHECK-NEXT:    sub x9, x1, x0
+; CHECK-NEXT:    rdvl x8, #1
+; CHECK-NEXT:    add x9, x9, x9, lsr #63
+; CHECK-NEXT:    asr x9, x9, #1
+; CHECK-NEXT:    cmp x9, #1
+; CHECK-NEXT:    csinv x9, x9, xzr, ge
+; CHECK-NEXT:    whilelo p0.b, xzr, x9
+; CHECK-NEXT:    whilelo p1.b, x8, x9
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 32 x i1> @llvm.loop.dependence.war.mask.nxv32i1(ptr %a, ptr %b, i64 2)
@@ -218,21 +151,12 @@ define <vscale x 8 x i1> @whilewr_32_expand(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_32_expand:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    index z1.h, #0, #1
-; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    add x9, x8, #3
 ; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    cnth x9
 ; CHECK-NEXT:    asr x8, x8, #2
-; CHECK-NEXT:    mov z0.h, w8
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    ccmp x8, x9, #2, ge
-; CHECK-NEXT:    cset w8, hs
-; CHECK-NEXT:    and z0.h, z0.h, #0xff
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    whilelo p1.h, xzr, x8
-; CHECK-NEXT:    cmphi p0.h, p0/z, z0.h, z1.h
-; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
+; CHECK-NEXT:    csinv x8, x8, xzr, ge
+; CHECK-NEXT:    whilelo p0.h, xzr, x8
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 8 x i1> @llvm.loop.dependence.war.mask.nxv8i1(ptr %a, ptr %b, i64 4)
@@ -243,20 +167,12 @@ define <vscale x 16 x i1> @whilewr_32_expand2(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_32_expand2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    index z0.b, #0, #1
-; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    add x9, x8, #3
 ; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    rdvl x9, #1
 ; CHECK-NEXT:    asr x8, x8, #2
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    mov z1.b, w8
-; CHECK-NEXT:    ccmp x8, x9, #2, ge
-; CHECK-NEXT:    cset w8, hs
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    cmphi p0.b, p0/z, z1.b, z0.b
-; CHECK-NEXT:    whilelo p1.b, xzr, x8
-; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
+; CHECK-NEXT:    csinv x8, x8, xzr, ge
+; CHECK-NEXT:    whilelo p0.b, xzr, x8
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 16 x i1> @llvm.loop.dependence.war.mask.nxv16i1(ptr %a, ptr %b, i64 4)
@@ -266,39 +182,15 @@ entry:
 define <vscale x 32 x i1> @whilewr_32_expand3(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_32_expand3:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    subs x9, x1, x0
 ; CHECK-NEXT:    rdvl x8, #1
-; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    add x10, x9, #3
-; CHECK-NEXT:    mov z1.b, w8
-; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    csel x9, x10, x9, mi
 ; CHECK-NEXT:    asr x9, x9, #2
 ; CHECK-NEXT:    cmp x9, #1
-; CHECK-NEXT:    mov z2.b, w9
-; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    cmp x9, x8
-; CHECK-NEXT:    csinc w11, w10, wzr, lo
-; CHECK-NEXT:    sbfx x8, x11, #0, #1
-; CHECK-NEXT:    cmphi p2.b, p0/z, z2.b, z0.b
-; CHECK-NEXT:    add z0.b, z0.b, z1.b
-; CHECK-NEXT:    whilelo p1.b, xzr, x8
-; CHECK-NEXT:    rdvl x8, #2
-; CHECK-NEXT:    cmp x9, x8
-; CHECK-NEXT:    csinc w8, w10, wzr, lo
-; CHECK-NEXT:    cmphi p3.b, p0/z, z2.b, z0.b
-; CHECK-NEXT:    sel p0.b, p2, p2.b, p1.b
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    whilelo p4.b, xzr, x8
-; CHECK-NEXT:    sel p1.b, p3, p3.b, p4.b
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT:    addvl sp, sp, #1
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    csinv x9, x9, xzr, ge
+; CHECK-NEXT:    whilelo p0.b, xzr, x9
+; CHECK-NEXT:    whilelo p1.b, x8, x9
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 32 x i1> @llvm.loop.dependence.war.mask.nxv32i1(ptr %a, ptr %b, i64 4)
@@ -309,21 +201,12 @@ define <vscale x 4 x i1> @whilewr_64_expand(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_64_expand:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    index z0.s, #0, #1
-; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    add x9, x8, #7
 ; CHECK-NEXT:    csel x8, x9, x8, mi
 ; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    and w9, w8, #0xff
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    mov z1.s, w9
-; CHECK-NEXT:    cntw x9
-; CHECK-NEXT:    ccmp x8, x9, #2, ge
-; CHECK-NEXT:    cset w8, hs
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    cmphi p0.s, p0/z, z1.s, z0.s
-; CHECK-NEXT:    whilelo p1.s, xzr, x8
-; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
+; CHECK-NEXT:    csinv x8, x8, xzr, ge
+; CHECK-NEXT:    whilelo p0.s, xzr, x8
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 4 x i1> @llvm.loop.dependence.war.mask.nxv4i1(ptr %a, ptr %b, i64 8)
@@ -334,21 +217,12 @@ define <vscale x 8 x i1> @whilewr_64_expand2(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_64_expand2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    index z1.h, #0, #1
-; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    add x9, x8, #7
 ; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    cnth x9
 ; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    mov z0.h, w8
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    ccmp x8, x9, #2, ge
-; CHECK-NEXT:    cset w8, hs
-; CHECK-NEXT:    and z0.h, z0.h, #0xff
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    whilelo p1.h, xzr, x8
-; CHECK-NEXT:    cmphi p0.h, p0/z, z0.h, z1.h
-; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
+; CHECK-NEXT:    csinv x8, x8, xzr, ge
+; CHECK-NEXT:    whilelo p0.h, xzr, x8
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 8 x i1> @llvm.loop.dependence.war.mask.nxv8i1(ptr %a, ptr %b, i64 8)
@@ -359,20 +233,12 @@ define <vscale x 16 x i1> @whilewr_64_expand3(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_64_expand3:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    index z0.b, #0, #1
-; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    add x9, x8, #7
 ; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    rdvl x9, #1
 ; CHECK-NEXT:    asr x8, x8, #3
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    mov z1.b, w8
-; CHECK-NEXT:    ccmp x8, x9, #2, ge
-; CHECK-NEXT:    cset w8, hs
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    cmphi p0.b, p0/z, z1.b, z0.b
-; CHECK-NEXT:    whilelo p1.b, xzr, x8
-; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
+; CHECK-NEXT:    csinv x8, x8, xzr, ge
+; CHECK-NEXT:    whilelo p0.b, xzr, x8
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 16 x i1> @llvm.loop.dependence.war.mask.nxv16i1(ptr %a, ptr %b, i64 8)
@@ -382,39 +248,15 @@ entry:
 define <vscale x 32 x i1> @whilewr_64_expand4(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: whilewr_64_expand4:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    subs x9, x1, x0
 ; CHECK-NEXT:    rdvl x8, #1
-; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    add x10, x9, #7
-; CHECK-NEXT:    mov z1.b, w8
-; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    csel x9, x10, x9, mi
 ; CHECK-NEXT:    asr x9, x9, #3
 ; CHECK-NEXT:    cmp x9, #1
-; CHECK-NEXT:    mov z2.b, w9
-; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    cmp x9, x8
-; CHECK-NEXT:    csinc w11, w10, wzr, lo
-; CHECK-NEXT:    sbfx x8, x11, #0, #1
-; CHECK-NEXT:    cmphi p2.b, p0/z, z2.b, z0.b
-; CHECK-NEXT:    add z0.b, z0.b, z1.b
-; CHECK-NEXT:    whilelo p1.b, xzr, x8
-; CHECK-NEXT:    rdvl x8, #2
-; CHECK-NEXT:    cmp x9, x8
-; CHECK-NEXT:    csinc w8, w10, wzr, lo
-; CHECK-NEXT:    cmphi p3.b, p0/z, z2.b, z0.b
-; CHECK-NEXT:    sel p0.b, p2, p2.b, p1.b
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    whilelo p4.b, xzr, x8
-; CHECK-NEXT:    sel p1.b, p3, p3.b, p4.b
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT:    addvl sp, sp, #1
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    csinv x9, x9, xzr, ge
+; CHECK-NEXT:    whilelo p0.b, xzr, x9
+; CHECK-NEXT:    whilelo p1.b, x8, x9
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 32 x i1> @llvm.loop.dependence.war.mask.nxv32i1(ptr %a, ptr %b, i64 8)
@@ -456,20 +298,12 @@ define <vscale x 16 x i1> @whilewr_badimm(ptr %a, ptr %b) #0 {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    mov x8, #6148914691236517205 // =0x5555555555555555
 ; CHECK-NEXT:    sub x9, x1, x0
-; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    movk x8, #21846
-; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    smulh x8, x9, x8
-; CHECK-NEXT:    rdvl x9, #1
 ; CHECK-NEXT:    add x8, x8, x8, lsr #63
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    mov z1.b, w8
-; CHECK-NEXT:    ccmp x8, x9, #2, ge
-; CHECK-NEXT:    cset w8, hs
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    cmphi p0.b, p0/z, z1.b, z0.b
-; CHECK-NEXT:    whilelo p1.b, xzr, x8
-; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
+; CHECK-NEXT:    csinv x8, x8, xzr, ge
+; CHECK-NEXT:    whilelo p0.b, xzr, x8
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 16 x i1> @llvm.loop.dependence.war.mask.nxv16i1(ptr %a, ptr %b, i64 3)
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_scalable_nosve2.ll b/llvm/test/CodeGen/AArch64/alias_mask_scalable_nosve2.ll
index dc6ffb5154d3f..972afd70fab45 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_scalable_nosve2.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_scalable_nosve2.ll
@@ -4,18 +4,10 @@
 define <vscale x 16 x i1> @whilewr_8(ptr %a, ptr %b) vscale_range(1, 4) {
 ; CHECK-LABEL: whilewr_8:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    rdvl x9, #1
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    mov z1.b, w8
-; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    ccmp x8, x9, #2, ge
-; CHECK-NEXT:    cset w8, hs
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    cmphi p0.b, p0/z, z1.b, z0.b
-; CHECK-NEXT:    whilelo p1.b, xzr, x8
-; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
+; CHECK-NEXT:    csinv x8, x8, xzr, ge
+; CHECK-NEXT:    whilelo p0.b, xzr, x8
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 16 x i1> @llvm.loop.dependence.war.mask.nxv16i1(ptr %a, ptr %b, i64 1)


