[llvm] [Intrinsics][AArch64] Add intrinsic to mask off aliasing vector lanes (PR #117007)

Sam Tebbs via llvm-commits llvm-commits at lists.llvm.org
Fri Aug 8 09:41:51 PDT 2025


https://github.com/SamTebbs33 updated https://github.com/llvm/llvm-project/pull/117007

>From 426120a0dc6c3c35897b6535069fc138e9900d3b Mon Sep 17 00:00:00 2001
From: Sam Tebbs <samuel.tebbs at arm.com>
Date: Fri, 15 Nov 2024 10:24:46 +0000
Subject: [PATCH 01/29] [Intrinsics][AArch64] Add intrinsic to mask off
 aliasing vector lanes

It can be unsafe to load a vector from an address and write a vector to
an address if those two addresses have overlapping lanes within a
vectorised loop iteration.

This PR adds an intrinsic designed to create a mask with lanes disabled
if they overlap between the two pointer arguments, so that only safe
lanes are loaded, operated on and stored.

Along with the two pointer parameters, the intrinsic also takes an
immediate that represents the size in bytes of the vector element
type, as well as an immediate i1 that is true if there is a
write-after-read hazard or false if there is a read-after-write hazard.

This will be used by #100579 and replaces the existing lowering for
whilewr, since that isn't needed now that we have the intrinsic.
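
For illustration (not part of this patch), a guarded vectorised loop
body might use the mask like the following sketch, where the pointer
names and the i32 4 element size are assumptions for a <4 x i32> loop:

  %mask = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i32(i64 %a.int, i64 %b.int, i32 4, i1 1)
  %v = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %a, i32 4, <4 x i1> %mask, <4 x i32> poison)
  %add = add <4 x i32> %v, <i32 1, i32 1, i32 1, i32 1>
  call void @llvm.masked.store.v4i32.p0(<4 x i32> %add, ptr %b, i32 4, <4 x i1> %mask)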
---
 llvm/docs/LangRef.rst                         |  84 ++++
 llvm/include/llvm/CodeGen/TargetLowering.h    |   7 +
 llvm/include/llvm/IR/Intrinsics.td            |   5 +
 .../SelectionDAG/SelectionDAGBuilder.cpp      |  50 +++
 .../Target/AArch64/AArch64ISelLowering.cpp    |  85 +++-
 llvm/lib/Target/AArch64/AArch64ISelLowering.h |   3 +
 .../lib/Target/AArch64/AArch64SVEInstrInfo.td |  11 +-
 llvm/lib/Target/AArch64/SVEInstrFormats.td    |  10 +-
 llvm/test/CodeGen/AArch64/alias_mask.ll       | 421 ++++++++++++++++++
 .../CodeGen/AArch64/alias_mask_scalable.ll    | 195 ++++++++
 10 files changed, 861 insertions(+), 10 deletions(-)
 create mode 100644 llvm/test/CodeGen/AArch64/alias_mask.ll
 create mode 100644 llvm/test/CodeGen/AArch64/alias_mask_scalable.ll

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index d8cd3b894cda4..f41e50d36a984 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -24045,6 +24045,90 @@ Examples:
       %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 %elem0, i64 429)
       %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %3, i32 4, <4 x i1> %active.lane.mask, <4 x i32> poison)
 
+.. _int_experimental_get_alias_lane_mask:
+
+'``llvm.experimental.get.alias.lane.mask.*``' Intrinsics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+This is an overloaded intrinsic.
+
+::
+
+      declare <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i64(i64 %ptrA, i64 %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+      declare <8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %ptrA, i64 %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+      declare <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1.i64.i32(i64 %ptrA, i64 %ptrB, i32 immarg %elementSize, i1 immarg %writeAfterRead)
+      declare <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.nxv16i1.i64.i32(i64 %ptrA, i64 %ptrB, i32 immarg %elementSize, i1 immarg %writeAfterRead)
+
+
+Overview:
+"""""""""
+
+Create a mask representing lanes that do or do not overlap between two
+pointers across one vector loop iteration.
+
+
+Arguments:
+""""""""""
+
+The first two arguments are the pointers, which have the same scalar integer type.
+The final two arguments are immediates, and the result is a vector with the i1 element type.
+
+Semantics:
+""""""""""
+
+The intrinsic will return poison if ``%ptrA`` and ``%ptrB`` are within
+VF * ``%elementSize`` of each other and ``%ptrA`` + VF * ``%elementSize`` wraps.
+In other cases when ``%writeAfterRead`` is true, the
+'``llvm.experimental.get.alias.lane.mask.*``' intrinsics are semantically
+equivalent to:
+
+::
+
+      %diff = (%ptrB - %ptrA) / %elementSize
+      %m[i] = (icmp ult i, %diff) || (%diff <= 0)
+
+When the return value is not poison and ``%writeAfterRead`` is false, the
+'``llvm.experimental.get.alias.lane.mask.*``' intrinsics are semantically
+equivalent to:
+
+::
+
+      %diff = abs(%ptrB - %ptrA) / %elementSize
+      %m[i] = (icmp ult i, %diff) || (%diff == 0)
+
+where ``%m`` is a vector (mask) of active/inactive lanes with its elements
+indexed by ``i``, and ``%ptrA`` and ``%ptrB`` are the two i64 arguments to
+``llvm.experimental.get.alias.lane.mask.*``, and ``%elementSize`` is the first
+immediate argument. The ``%writeAfterRead`` argument is expected to be true if
+``%ptrB`` is stored to after ``%ptrA`` is read from.
+The above is equivalent to:
+
+::
+
+      %m = @llvm.experimental.get.alias.lane.mask(%ptrA, %ptrB, %elementSize, %writeAfterRead)
+
+This can, for example, be emitted by the loop vectorizer, in which case
+``%ptrA`` is a pointer that is read from within the loop, and ``%ptrB`` is a
+pointer that is stored to within the loop.
+If the difference between these pointers is less than the vector factor, then
+they overlap (alias) within a loop iteration.
+For example, if ``%ptrA`` is 20 and ``%ptrB`` is 23 with a vector factor of 8,
+then lanes 3, 4, 5, 6 and 7 of the vector loaded from ``%ptrA``
+share addresses with lanes 0, 1, 2, 3 and 4 of the vector stored to at
+``%ptrB``.
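+
+For the example above, taking the element size to be 1 byte and
+``%writeAfterRead`` to be true, ``%diff`` is (23 - 20) / 1 = 3, so only the
+first three lanes are safe to operate on. A sketch of the resulting mask
+value (illustrative, not normative output):
+
+::
+
+      %m = <i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false>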
+
+
+Examples:
+"""""""""
+
+.. code-block:: llvm
+
+      %alias.lane.mask = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i32(i64 %ptrA, i64 %ptrB, i32 4, i1 1)
+      %vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptrA, i32 4, <4 x i1> %alias.lane.mask, <4 x i32> poison)
+      [...]
+      call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, <4 x i32>* %ptrB, i32 4, <4 x i1> %alias.lane.mask)
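+
+A read-after-write hazard, where the store precedes the load, would instead
+pass false as the final immediate. A minimal sketch, assuming here that
+``%ptrA`` is the pointer stored to and ``%ptrB`` the one loaded from:
+
+.. code-block:: llvm
+
+      %alias.lane.mask = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i32(i64 %ptrA, i64 %ptrB, i32 4, i1 0)
+      call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, <4 x i32>* %ptrA, i32 4, <4 x i1> %alias.lane.mask)
+      [...]
+      %vecB = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptrB, i32 4, <4 x i1> %alias.lane.mask, <4 x i32> poison)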
 
 .. _int_experimental_vp_splice:
 
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 52729e9e7cee7..7d1b9f403ba1a 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -482,6 +482,13 @@ class LLVM_ABI TargetLoweringBase {
     return true;
   }
 
+  /// Return true if the @llvm.experimental.get.alias.lane.mask intrinsic should
+  /// be expanded using generic code in SelectionDAGBuilder.
+  virtual bool shouldExpandGetAliasLaneMask(EVT VT, EVT PtrVT,
+                                            unsigned EltSize) const {
+    return true;
+  }
+
   virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF,
                                            bool IsScalable) const {
     return true;
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index bd6f94ac1286c..e3fb9cf1b7a49 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -2418,6 +2418,11 @@ let IntrProperties = [IntrNoMem, ImmArg<ArgIndex<1>>] in {
                                     llvm_i32_ty]>;
 }
 
+def int_experimental_get_alias_lane_mask:
+  DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+            [llvm_anyint_ty, LLVMMatchType<1>, llvm_anyint_ty, llvm_i1_ty],
+            [IntrNoMem, IntrNoSync, IntrWillReturn, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
+
 def int_get_active_lane_mask:
   DefaultAttrsIntrinsic<[llvm_anyvector_ty],
             [llvm_anyint_ty, LLVMMatchType<1>],
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index ac0440fef5f60..fad88905610dd 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -8271,6 +8271,56 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
     visitVectorExtractLastActive(I, Intrinsic);
     return;
   }
+  case Intrinsic::experimental_get_alias_lane_mask: {
+    SDValue SourceValue = getValue(I.getOperand(0));
+    SDValue SinkValue = getValue(I.getOperand(1));
+    SDValue EltSize = getValue(I.getOperand(2));
+    bool IsWriteAfterRead =
+        cast<ConstantSDNode>(getValue(I.getOperand(3)))->getZExtValue() != 0;
+    auto IntrinsicVT = EVT::getEVT(I.getType());
+    auto PtrVT = SourceValue->getValueType(0);
+
+    if (!TLI.shouldExpandGetAliasLaneMask(
+            IntrinsicVT, PtrVT,
+            cast<ConstantSDNode>(EltSize)->getSExtValue())) {
+      visitTargetIntrinsic(I, Intrinsic);
+      return;
+    }
+
+    SDValue Diff = DAG.getNode(ISD::SUB, sdl, PtrVT, SinkValue, SourceValue);
+    if (!IsWriteAfterRead)
+      Diff = DAG.getNode(ISD::ABS, sdl, PtrVT, Diff);
+
+    Diff = DAG.getNode(ISD::SDIV, sdl, PtrVT, Diff, EltSize);
+    SDValue Zero = DAG.getTargetConstant(0, sdl, PtrVT);
+
+    // A non-positive difference means no lanes can alias, so the mask is all-true
+    auto CmpVT =
+        TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), PtrVT);
+    SDValue Cmp = DAG.getSetCC(sdl, CmpVT, Diff, Zero,
+                               IsWriteAfterRead ? ISD::SETLE : ISD::SETEQ);
+
+    // Splat the compare result then OR it with a lane mask
+    SDValue Splat = DAG.getSplat(IntrinsicVT, sdl, Cmp);
+
+    SDValue DiffMask;
+    // Don't emit an active lane mask if the target doesn't support it
+    if (TLI.shouldExpandGetActiveLaneMask(IntrinsicVT, PtrVT)) {
+      EVT VecTy = EVT::getVectorVT(*DAG.getContext(), PtrVT,
+                                   IntrinsicVT.getVectorElementCount());
+      SDValue DiffSplat = DAG.getSplat(VecTy, sdl, Diff);
+      SDValue VectorStep = DAG.getStepVector(sdl, VecTy);
+      DiffMask = DAG.getSetCC(sdl, IntrinsicVT, VectorStep, DiffSplat,
+                              ISD::CondCode::SETULT);
+    } else {
+      DiffMask = DAG.getNode(
+          ISD::INTRINSIC_WO_CHAIN, sdl, IntrinsicVT,
+          DAG.getTargetConstant(Intrinsic::get_active_lane_mask, sdl, MVT::i64),
+          Zero, Diff);
+    }
+    SDValue Or = DAG.getNode(ISD::OR, sdl, IntrinsicVT, DiffMask, Splat);
+    setValue(&I, Or);
+  }
   }
 }
 
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 2b6ea86ee1af5..a0b1b1959ebc9 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2160,6 +2160,25 @@ bool AArch64TargetLowering::shouldExpandGetActiveLaneMask(EVT ResVT,
   return false;
 }
 
+bool AArch64TargetLowering::shouldExpandGetAliasLaneMask(
+    EVT VT, EVT PtrVT, unsigned EltSize) const {
+  if (!Subtarget->hasSVE2())
+    return true;
+
+  if (PtrVT != MVT::i64)
+    return true;
+
+  if (VT == MVT::v2i1 || VT == MVT::nxv2i1)
+    return EltSize != 8;
+  if (VT == MVT::v4i1 || VT == MVT::nxv4i1)
+    return EltSize != 4;
+  if (VT == MVT::v8i1 || VT == MVT::nxv8i1)
+    return EltSize != 2;
+  if (VT == MVT::v16i1 || VT == MVT::nxv16i1)
+    return EltSize != 1;
+  return true;
+}
+
 bool AArch64TargetLowering::shouldExpandPartialReductionIntrinsic(
     const IntrinsicInst *I) const {
   assert(I->getIntrinsicID() ==
@@ -5959,6 +5978,18 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     EVT PtrVT = getPointerTy(DAG.getDataLayout());
     return DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT);
   }
+  case Intrinsic::aarch64_sve_whilewr_b:
+  case Intrinsic::aarch64_sve_whilewr_h:
+  case Intrinsic::aarch64_sve_whilewr_s:
+  case Intrinsic::aarch64_sve_whilewr_d:
+    return DAG.getNode(AArch64ISD::WHILEWR, dl, Op.getValueType(),
+                       Op.getOperand(1), Op.getOperand(2));
+  case Intrinsic::aarch64_sve_whilerw_b:
+  case Intrinsic::aarch64_sve_whilerw_h:
+  case Intrinsic::aarch64_sve_whilerw_s:
+  case Intrinsic::aarch64_sve_whilerw_d:
+    return DAG.getNode(AArch64ISD::WHILERW, dl, Op.getValueType(),
+                       Op.getOperand(1), Op.getOperand(2));
   case Intrinsic::aarch64_neon_abs: {
     EVT Ty = Op.getValueType();
     if (Ty == MVT::i64) {
@@ -6433,6 +6464,52 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     return DAG.getNode(AArch64ISD::USDOT, DL, Op.getValueType(),
                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
   }
+  case Intrinsic::experimental_get_alias_lane_mask: {
+    unsigned IntrinsicID = 0;
+    uint64_t EltSize = Op.getOperand(3)->getAsZExtVal();
+    bool IsWriteAfterRead = Op.getOperand(4)->getAsZExtVal() == 1;
+    switch (EltSize) {
+    case 1:
+      IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_b
+                                     : Intrinsic::aarch64_sve_whilerw_b;
+      break;
+    case 2:
+      IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_h
+                                     : Intrinsic::aarch64_sve_whilerw_h;
+      break;
+    case 4:
+      IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_s
+                                     : Intrinsic::aarch64_sve_whilerw_s;
+      break;
+    case 8:
+      IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_d
+                                     : Intrinsic::aarch64_sve_whilerw_d;
+      break;
+    default:
+      llvm_unreachable("Unexpected element size for get.alias.lane.mask");
+      break;
+    }
+    SDValue ID = DAG.getTargetConstant(IntrinsicID, dl, MVT::i64);
+
+    EVT VT = Op.getValueType();
+    if (VT.isScalableVector())
+      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, ID, Op.getOperand(1),
+                         Op.getOperand(2));
+
+    // We can use the SVE whilewr/whilerw instruction to lower this
+    // intrinsic by creating the appropriate sequence of scalable vector
+    // operations and then extracting a fixed-width subvector from the scalable
+    // vector.
+
+    EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
+    EVT WhileVT = ContainerVT.changeElementType(MVT::i1);
+
+    SDValue Mask = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, WhileVT, ID,
+                               Op.getOperand(1), Op.getOperand(2));
+    SDValue MaskAsInt = DAG.getNode(ISD::SIGN_EXTEND, dl, ContainerVT, Mask);
+    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, MaskAsInt,
+                       DAG.getVectorIdxConstant(0, dl));
+  }
   case Intrinsic::aarch64_neon_saddlv:
   case Intrinsic::aarch64_neon_uaddlv: {
     EVT OpVT = Op.getOperand(1).getValueType();
@@ -19925,7 +20002,10 @@ static bool isPredicateCCSettingOp(SDValue N) {
         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilele ||
         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelo ||
         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilels ||
-        N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelt)))
+        N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelt ||
+        // get_alias_lane_mask is lowered to a whilewr/rw instruction.
+        N.getConstantOperandVal(0) ==
+            Intrinsic::experimental_get_alias_lane_mask)))
     return true;
 
   return false;
@@ -28172,7 +28252,8 @@ void AArch64TargetLowering::ReplaceNodeResults(
           DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, RuntimePStateSM));
       return;
     }
-    case Intrinsic::experimental_vector_match: {
+    case Intrinsic::experimental_vector_match:
+    case Intrinsic::experimental_get_alias_lane_mask: {
       if (!VT.isFixedLengthVector() || VT.getVectorElementType() != MVT::i1)
         return;
 
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 88876570ac811..4f59b4a9f497c 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -511,6 +511,9 @@ class AArch64TargetLowering : public TargetLowering {
 
   bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const override;
 
+  bool shouldExpandGetAliasLaneMask(EVT VT, EVT PtrVT,
+                                    unsigned EltSize) const override;
+
   bool
   shouldExpandPartialReductionIntrinsic(const IntrinsicInst *I) const override;
 
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 0c4b4f4c3ed88..ad7c234daa951 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -167,6 +167,11 @@ def AArch64st1q_scatter : SDNode<"AArch64ISD::SST1Q_PRED", SDT_AArch64_SCATTER_V
 // AArch64 SVE/SVE2 - the remaining node definitions
 //
 
+// Alias masks
+def SDT_AArch64Mask : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisInt<1>, SDTCisSameAs<2, 1>, SDTCVecEltisVT<0,i1>]>;
+def AArch64whilewr : SDNode<"AArch64ISD::WHILEWR", SDT_AArch64Mask>;
+def AArch64whilerw : SDNode<"AArch64ISD::WHILERW", SDT_AArch64Mask>;
+
 // SVE CNT/INC/RDVL
 def sve_rdvl_imm : ComplexPattern<i64, 1, "SelectRDVLImm<-32, 31, 16>">;
 def sve_cnth_imm : ComplexPattern<i64, 1, "SelectRDVLImm<1, 16, 8>">;
@@ -4123,9 +4128,9 @@ let Predicates = [HasSVE2_or_SME] in {
   defm WHILEHI_PXX : sve_int_while8_rr<0b101, "whilehi", int_aarch64_sve_whilehi, get_active_lane_mask>;
 
   // SVE2 pointer conflict compare
-  defm WHILEWR_PXX : sve2_int_while_rr<0b0, "whilewr", "int_aarch64_sve_whilewr">;
-  defm WHILERW_PXX : sve2_int_while_rr<0b1, "whilerw", "int_aarch64_sve_whilerw">;
-} // End HasSVE2_or_SME
+  defm WHILEWR_PXX : sve2_int_while_rr<0b0, "whilewr", AArch64whilewr>;
+  defm WHILERW_PXX : sve2_int_while_rr<0b1, "whilerw", AArch64whilerw>;
+} // End HasSVE2_or_SME
 
 let Predicates = [HasSVEAES, HasNonStreamingSVE_or_SSVE_AES] in {
   // SVE2 crypto destructive binary operations
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index a0320f919e8c5..e88758bf5133a 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -5940,16 +5940,16 @@ class sve2_int_while_rr<bits<2> sz8_64, bits<1> rw, string asm,
   let isWhile = 1;
 }
 
-multiclass sve2_int_while_rr<bits<1> rw, string asm, string op> {
+multiclass sve2_int_while_rr<bits<1> rw, string asm, SDPatternOperator op> {
   def _B : sve2_int_while_rr<0b00, rw, asm, PPR8>;
   def _H : sve2_int_while_rr<0b01, rw, asm, PPR16>;
   def _S : sve2_int_while_rr<0b10, rw, asm, PPR32>;
   def _D : sve2_int_while_rr<0b11, rw, asm, PPR64>;
 
-  def : SVE_2_Op_Pat<nxv16i1, !cast<SDPatternOperator>(op # _b), i64, i64, !cast<Instruction>(NAME # _B)>;
-  def : SVE_2_Op_Pat<nxv8i1,  !cast<SDPatternOperator>(op # _h), i64, i64, !cast<Instruction>(NAME # _H)>;
-  def : SVE_2_Op_Pat<nxv4i1,  !cast<SDPatternOperator>(op # _s), i64, i64, !cast<Instruction>(NAME # _S)>;
-  def : SVE_2_Op_Pat<nxv2i1,  !cast<SDPatternOperator>(op # _d), i64, i64, !cast<Instruction>(NAME # _D)>;
+  def : SVE_2_Op_Pat<nxv16i1, op, i64, i64, !cast<Instruction>(NAME # _B)>;
+  def : SVE_2_Op_Pat<nxv8i1,  op, i64, i64, !cast<Instruction>(NAME # _H)>;
+  def : SVE_2_Op_Pat<nxv4i1,  op, i64, i64, !cast<Instruction>(NAME # _S)>;
+  def : SVE_2_Op_Pat<nxv2i1,  op, i64, i64, !cast<Instruction>(NAME # _D)>;
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/AArch64/alias_mask.ll b/llvm/test/CodeGen/AArch64/alias_mask.ll
new file mode 100644
index 0000000000000..84a22822f1702
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/alias_mask.ll
@@ -0,0 +1,421 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=aarch64 -mattr=+sve2 %s -o - | FileCheck %s --check-prefix=CHECK-SVE
+; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s --check-prefix=CHECK-NOSVE
+
+define <16 x i1> @whilewr_8(i64 %a, i64 %b) {
+; CHECK-SVE-LABEL: whilewr_8:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    whilewr p0.b, x0, x1
+; CHECK-SVE-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-SVE-NEXT:    ret
+;
+; CHECK-NOSVE-LABEL: whilewr_8:
+; CHECK-NOSVE:       // %bb.0: // %entry
+; CHECK-NOSVE-NEXT:    adrp x8, .LCPI0_0
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI0_1
+; CHECK-NOSVE-NEXT:    sub x9, x1, x0
+; CHECK-NOSVE-NEXT:    ldr q0, [x8, :lo12:.LCPI0_0]
+; CHECK-NOSVE-NEXT:    adrp x8, .LCPI0_2
+; CHECK-NOSVE-NEXT:    ldr q1, [x10, :lo12:.LCPI0_1]
+; CHECK-NOSVE-NEXT:    ldr q3, [x8, :lo12:.LCPI0_2]
+; CHECK-NOSVE-NEXT:    adrp x8, .LCPI0_4
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI0_3
+; CHECK-NOSVE-NEXT:    ldr q5, [x8, :lo12:.LCPI0_4]
+; CHECK-NOSVE-NEXT:    adrp x8, .LCPI0_5
+; CHECK-NOSVE-NEXT:    dup v2.2d, x9
+; CHECK-NOSVE-NEXT:    ldr q4, [x10, :lo12:.LCPI0_3]
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI0_6
+; CHECK-NOSVE-NEXT:    ldr q6, [x8, :lo12:.LCPI0_5]
+; CHECK-NOSVE-NEXT:    adrp x8, .LCPI0_7
+; CHECK-NOSVE-NEXT:    ldr q7, [x10, :lo12:.LCPI0_6]
+; CHECK-NOSVE-NEXT:    cmp x9, #1
+; CHECK-NOSVE-NEXT:    ldr q16, [x8, :lo12:.LCPI0_7]
+; CHECK-NOSVE-NEXT:    cmhi v0.2d, v2.2d, v0.2d
+; CHECK-NOSVE-NEXT:    cmhi v1.2d, v2.2d, v1.2d
+; CHECK-NOSVE-NEXT:    cmhi v3.2d, v2.2d, v3.2d
+; CHECK-NOSVE-NEXT:    cmhi v4.2d, v2.2d, v4.2d
+; CHECK-NOSVE-NEXT:    cmhi v5.2d, v2.2d, v5.2d
+; CHECK-NOSVE-NEXT:    cmhi v6.2d, v2.2d, v6.2d
+; CHECK-NOSVE-NEXT:    cmhi v7.2d, v2.2d, v7.2d
+; CHECK-NOSVE-NEXT:    cmhi v2.2d, v2.2d, v16.2d
+; CHECK-NOSVE-NEXT:    uzp1 v0.4s, v1.4s, v0.4s
+; CHECK-NOSVE-NEXT:    cset w8, lt
+; CHECK-NOSVE-NEXT:    uzp1 v1.4s, v4.4s, v3.4s
+; CHECK-NOSVE-NEXT:    uzp1 v3.4s, v6.4s, v5.4s
+; CHECK-NOSVE-NEXT:    uzp1 v2.4s, v2.4s, v7.4s
+; CHECK-NOSVE-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NOSVE-NEXT:    uzp1 v1.8h, v2.8h, v3.8h
+; CHECK-NOSVE-NEXT:    uzp1 v0.16b, v1.16b, v0.16b
+; CHECK-NOSVE-NEXT:    dup v1.16b, w8
+; CHECK-NOSVE-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NOSVE-NEXT:    ret
+entry:
+  %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1.i64.i64(i64 %a, i64 %b, i64 1, i1 1)
+  ret <16 x i1> %0
+}
+
+define <8 x i1> @whilewr_16(i64 %a, i64 %b) {
+; CHECK-SVE-LABEL: whilewr_16:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    whilewr p0.b, x0, x1
+; CHECK-SVE-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-SVE-NEXT:    ret
+;
+; CHECK-NOSVE-LABEL: whilewr_16:
+; CHECK-NOSVE:       // %bb.0: // %entry
+; CHECK-NOSVE-NEXT:    sub x8, x1, x0
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI1_0
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI1_1
+; CHECK-NOSVE-NEXT:    add x8, x8, x8, lsr #63
+; CHECK-NOSVE-NEXT:    adrp x11, .LCPI1_2
+; CHECK-NOSVE-NEXT:    ldr q1, [x9, :lo12:.LCPI1_0]
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI1_3
+; CHECK-NOSVE-NEXT:    ldr q2, [x10, :lo12:.LCPI1_1]
+; CHECK-NOSVE-NEXT:    ldr q3, [x11, :lo12:.LCPI1_2]
+; CHECK-NOSVE-NEXT:    asr x8, x8, #1
+; CHECK-NOSVE-NEXT:    ldr q4, [x9, :lo12:.LCPI1_3]
+; CHECK-NOSVE-NEXT:    dup v0.2d, x8
+; CHECK-NOSVE-NEXT:    cmp x8, #1
+; CHECK-NOSVE-NEXT:    cset w8, lt
+; CHECK-NOSVE-NEXT:    cmhi v1.2d, v0.2d, v1.2d
+; CHECK-NOSVE-NEXT:    cmhi v2.2d, v0.2d, v2.2d
+; CHECK-NOSVE-NEXT:    cmhi v3.2d, v0.2d, v3.2d
+; CHECK-NOSVE-NEXT:    cmhi v0.2d, v0.2d, v4.2d
+; CHECK-NOSVE-NEXT:    uzp1 v1.4s, v2.4s, v1.4s
+; CHECK-NOSVE-NEXT:    uzp1 v0.4s, v0.4s, v3.4s
+; CHECK-NOSVE-NEXT:    uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-NOSVE-NEXT:    dup v1.8b, w8
+; CHECK-NOSVE-NEXT:    xtn v0.8b, v0.8h
+; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NOSVE-NEXT:    ret
+entry:
+  %0 = call <8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 2, i1 1)
+  ret <8 x i1> %0
+}
+
+define <4 x i1> @whilewr_32(i64 %a, i64 %b) {
+; CHECK-SVE-LABEL: whilewr_32:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    whilewr p0.h, x0, x1
+; CHECK-SVE-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-SVE-NEXT:    ret
+;
+; CHECK-NOSVE-LABEL: whilewr_32:
+; CHECK-NOSVE:       // %bb.0: // %entry
+; CHECK-NOSVE-NEXT:    sub x9, x1, x0
+; CHECK-NOSVE-NEXT:    adrp x8, .LCPI2_0
+; CHECK-NOSVE-NEXT:    add x10, x9, #3
+; CHECK-NOSVE-NEXT:    cmp x9, #0
+; CHECK-NOSVE-NEXT:    ldr q1, [x8, :lo12:.LCPI2_0]
+; CHECK-NOSVE-NEXT:    csel x9, x10, x9, lt
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI2_1
+; CHECK-NOSVE-NEXT:    asr x9, x9, #2
+; CHECK-NOSVE-NEXT:    ldr q2, [x10, :lo12:.LCPI2_1]
+; CHECK-NOSVE-NEXT:    dup v0.2d, x9
+; CHECK-NOSVE-NEXT:    cmp x9, #1
+; CHECK-NOSVE-NEXT:    cset w8, lt
+; CHECK-NOSVE-NEXT:    cmhi v1.2d, v0.2d, v1.2d
+; CHECK-NOSVE-NEXT:    cmhi v0.2d, v0.2d, v2.2d
+; CHECK-NOSVE-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-NOSVE-NEXT:    dup v1.4h, w8
+; CHECK-NOSVE-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NOSVE-NEXT:    ret
+entry:
+  %0 = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i64(i64 %a, i64 %b, i64 4, i1 1)
+  ret <4 x i1> %0
+}
+
+define <2 x i1> @whilewr_64(i64 %a, i64 %b) {
+; CHECK-SVE-LABEL: whilewr_64:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    whilewr p0.s, x0, x1
+; CHECK-SVE-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-SVE-NEXT:    ret
+;
+; CHECK-NOSVE-LABEL: whilewr_64:
+; CHECK-NOSVE:       // %bb.0: // %entry
+; CHECK-NOSVE-NEXT:    sub x9, x1, x0
+; CHECK-NOSVE-NEXT:    adrp x8, .LCPI3_0
+; CHECK-NOSVE-NEXT:    add x10, x9, #7
+; CHECK-NOSVE-NEXT:    cmp x9, #0
+; CHECK-NOSVE-NEXT:    ldr q1, [x8, :lo12:.LCPI3_0]
+; CHECK-NOSVE-NEXT:    csel x9, x10, x9, lt
+; CHECK-NOSVE-NEXT:    asr x9, x9, #3
+; CHECK-NOSVE-NEXT:    dup v0.2d, x9
+; CHECK-NOSVE-NEXT:    cmp x9, #1
+; CHECK-NOSVE-NEXT:    cset w8, lt
+; CHECK-NOSVE-NEXT:    cmhi v0.2d, v0.2d, v1.2d
+; CHECK-NOSVE-NEXT:    dup v1.2s, w8
+; CHECK-NOSVE-NEXT:    xtn v0.2s, v0.2d
+; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NOSVE-NEXT:    ret
+entry:
+  %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1.i64.i64(i64 %a, i64 %b, i64 8, i1 1)
+  ret <2 x i1> %0
+}
+
+define <16 x i1> @whilerw_8(i64 %a, i64 %b) {
+; CHECK-SVE-LABEL: whilerw_8:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    whilerw p0.b, x0, x1
+; CHECK-SVE-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-SVE-NEXT:    ret
+;
+; CHECK-NOSVE-LABEL: whilerw_8:
+; CHECK-NOSVE:       // %bb.0: // %entry
+; CHECK-NOSVE-NEXT:    adrp x8, .LCPI4_0
+; CHECK-NOSVE-NEXT:    subs x9, x1, x0
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI4_1
+; CHECK-NOSVE-NEXT:    ldr q0, [x8, :lo12:.LCPI4_0]
+; CHECK-NOSVE-NEXT:    adrp x8, .LCPI4_2
+; CHECK-NOSVE-NEXT:    cneg x9, x9, mi
+; CHECK-NOSVE-NEXT:    ldr q2, [x8, :lo12:.LCPI4_2]
+; CHECK-NOSVE-NEXT:    adrp x8, .LCPI4_3
+; CHECK-NOSVE-NEXT:    ldr q1, [x10, :lo12:.LCPI4_1]
+; CHECK-NOSVE-NEXT:    ldr q4, [x8, :lo12:.LCPI4_3]
+; CHECK-NOSVE-NEXT:    adrp x8, .LCPI4_4
+; CHECK-NOSVE-NEXT:    dup v3.2d, x9
+; CHECK-NOSVE-NEXT:    ldr q5, [x8, :lo12:.LCPI4_4]
+; CHECK-NOSVE-NEXT:    adrp x8, .LCPI4_5
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI4_6
+; CHECK-NOSVE-NEXT:    ldr q6, [x8, :lo12:.LCPI4_5]
+; CHECK-NOSVE-NEXT:    adrp x8, .LCPI4_7
+; CHECK-NOSVE-NEXT:    ldr q7, [x10, :lo12:.LCPI4_6]
+; CHECK-NOSVE-NEXT:    ldr q16, [x8, :lo12:.LCPI4_7]
+; CHECK-NOSVE-NEXT:    cmhi v0.2d, v3.2d, v0.2d
+; CHECK-NOSVE-NEXT:    cmhi v1.2d, v3.2d, v1.2d
+; CHECK-NOSVE-NEXT:    cmhi v2.2d, v3.2d, v2.2d
+; CHECK-NOSVE-NEXT:    cmhi v4.2d, v3.2d, v4.2d
+; CHECK-NOSVE-NEXT:    cmhi v5.2d, v3.2d, v5.2d
+; CHECK-NOSVE-NEXT:    cmhi v6.2d, v3.2d, v6.2d
+; CHECK-NOSVE-NEXT:    cmhi v7.2d, v3.2d, v7.2d
+; CHECK-NOSVE-NEXT:    cmhi v3.2d, v3.2d, v16.2d
+; CHECK-NOSVE-NEXT:    uzp1 v0.4s, v1.4s, v0.4s
+; CHECK-NOSVE-NEXT:    cmp x9, #0
+; CHECK-NOSVE-NEXT:    uzp1 v1.4s, v4.4s, v2.4s
+; CHECK-NOSVE-NEXT:    cset w8, eq
+; CHECK-NOSVE-NEXT:    uzp1 v2.4s, v6.4s, v5.4s
+; CHECK-NOSVE-NEXT:    uzp1 v3.4s, v3.4s, v7.4s
+; CHECK-NOSVE-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NOSVE-NEXT:    uzp1 v1.8h, v3.8h, v2.8h
+; CHECK-NOSVE-NEXT:    uzp1 v0.16b, v1.16b, v0.16b
+; CHECK-NOSVE-NEXT:    dup v1.16b, w8
+; CHECK-NOSVE-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NOSVE-NEXT:    ret
+entry:
+  %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1.i64.i64(i64 %a, i64 %b, i64 1, i1 0)
+  ret <16 x i1> %0
+}
+
+define <8 x i1> @whilerw_16(i64 %a, i64 %b) {
+; CHECK-SVE-LABEL: whilerw_16:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    whilerw p0.b, x0, x1
+; CHECK-SVE-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-SVE-NEXT:    ret
+;
+; CHECK-NOSVE-LABEL: whilerw_16:
+; CHECK-NOSVE:       // %bb.0: // %entry
+; CHECK-NOSVE-NEXT:    subs x8, x1, x0
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI5_0
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI5_1
+; CHECK-NOSVE-NEXT:    cneg x8, x8, mi
+; CHECK-NOSVE-NEXT:    adrp x11, .LCPI5_2
+; CHECK-NOSVE-NEXT:    ldr q1, [x9, :lo12:.LCPI5_0]
+; CHECK-NOSVE-NEXT:    add x8, x8, x8, lsr #63
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI5_3
+; CHECK-NOSVE-NEXT:    ldr q2, [x10, :lo12:.LCPI5_1]
+; CHECK-NOSVE-NEXT:    ldr q3, [x11, :lo12:.LCPI5_2]
+; CHECK-NOSVE-NEXT:    ldr q4, [x9, :lo12:.LCPI5_3]
+; CHECK-NOSVE-NEXT:    asr x8, x8, #1
+; CHECK-NOSVE-NEXT:    dup v0.2d, x8
+; CHECK-NOSVE-NEXT:    cmp x8, #0
+; CHECK-NOSVE-NEXT:    cset w8, eq
+; CHECK-NOSVE-NEXT:    cmhi v1.2d, v0.2d, v1.2d
+; CHECK-NOSVE-NEXT:    cmhi v2.2d, v0.2d, v2.2d
+; CHECK-NOSVE-NEXT:    cmhi v3.2d, v0.2d, v3.2d
+; CHECK-NOSVE-NEXT:    cmhi v0.2d, v0.2d, v4.2d
+; CHECK-NOSVE-NEXT:    uzp1 v1.4s, v2.4s, v1.4s
+; CHECK-NOSVE-NEXT:    uzp1 v0.4s, v0.4s, v3.4s
+; CHECK-NOSVE-NEXT:    uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-NOSVE-NEXT:    dup v1.8b, w8
+; CHECK-NOSVE-NEXT:    xtn v0.8b, v0.8h
+; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NOSVE-NEXT:    ret
+entry:
+  %0 = call <8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 2, i1 0)
+  ret <8 x i1> %0
+}
+
+define <4 x i1> @whilerw_32(i64 %a, i64 %b) {
+; CHECK-SVE-LABEL: whilerw_32:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    whilerw p0.h, x0, x1
+; CHECK-SVE-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-SVE-NEXT:    ret
+;
+; CHECK-NOSVE-LABEL: whilerw_32:
+; CHECK-NOSVE:       // %bb.0: // %entry
+; CHECK-NOSVE-NEXT:    subs x9, x1, x0
+; CHECK-NOSVE-NEXT:    adrp x8, .LCPI6_0
+; CHECK-NOSVE-NEXT:    cneg x9, x9, mi
+; CHECK-NOSVE-NEXT:    ldr q1, [x8, :lo12:.LCPI6_0]
+; CHECK-NOSVE-NEXT:    add x10, x9, #3
+; CHECK-NOSVE-NEXT:    cmp x9, #0
+; CHECK-NOSVE-NEXT:    csel x9, x10, x9, lt
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI6_1
+; CHECK-NOSVE-NEXT:    asr x9, x9, #2
+; CHECK-NOSVE-NEXT:    ldr q2, [x10, :lo12:.LCPI6_1]
+; CHECK-NOSVE-NEXT:    dup v0.2d, x9
+; CHECK-NOSVE-NEXT:    cmp x9, #0
+; CHECK-NOSVE-NEXT:    cset w8, eq
+; CHECK-NOSVE-NEXT:    cmhi v1.2d, v0.2d, v1.2d
+; CHECK-NOSVE-NEXT:    cmhi v0.2d, v0.2d, v2.2d
+; CHECK-NOSVE-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-NOSVE-NEXT:    dup v1.4h, w8
+; CHECK-NOSVE-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NOSVE-NEXT:    ret
+entry:
+  %0 = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i64(i64 %a, i64 %b, i64 4, i1 0)
+  ret <4 x i1> %0
+}
+
+define <2 x i1> @whilerw_64(i64 %a, i64 %b) {
+; CHECK-SVE-LABEL: whilerw_64:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    whilerw p0.s, x0, x1
+; CHECK-SVE-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-SVE-NEXT:    ret
+;
+; CHECK-NOSVE-LABEL: whilerw_64:
+; CHECK-NOSVE:       // %bb.0: // %entry
+; CHECK-NOSVE-NEXT:    subs x9, x1, x0
+; CHECK-NOSVE-NEXT:    adrp x8, .LCPI7_0
+; CHECK-NOSVE-NEXT:    cneg x9, x9, mi
+; CHECK-NOSVE-NEXT:    ldr q1, [x8, :lo12:.LCPI7_0]
+; CHECK-NOSVE-NEXT:    add x10, x9, #7
+; CHECK-NOSVE-NEXT:    cmp x9, #0
+; CHECK-NOSVE-NEXT:    csel x9, x10, x9, lt
+; CHECK-NOSVE-NEXT:    asr x9, x9, #3
+; CHECK-NOSVE-NEXT:    dup v0.2d, x9
+; CHECK-NOSVE-NEXT:    cmp x9, #0
+; CHECK-NOSVE-NEXT:    cset w8, eq
+; CHECK-NOSVE-NEXT:    cmhi v0.2d, v0.2d, v1.2d
+; CHECK-NOSVE-NEXT:    dup v1.2s, w8
+; CHECK-NOSVE-NEXT:    xtn v0.2s, v0.2d
+; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NOSVE-NEXT:    ret
+entry:
+  %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1.i64.i64(i64 %a, i64 %b, i64 8, i1 0)
+  ret <2 x i1> %0
+}
+
+define <16 x i1> @not_whilewr_wrong_eltsize(i64 %a, i64 %b) {
+; CHECK-SVE-LABEL: not_whilewr_wrong_eltsize:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    sub x8, x1, x0
+; CHECK-SVE-NEXT:    add x8, x8, x8, lsr #63
+; CHECK-SVE-NEXT:    asr x8, x8, #1
+; CHECK-SVE-NEXT:    cmp x8, #1
+; CHECK-SVE-NEXT:    cset w9, lt
+; CHECK-SVE-NEXT:    whilelo p0.b, #0, x8
+; CHECK-SVE-NEXT:    dup v0.16b, w9
+; CHECK-SVE-NEXT:    mov z1.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    orr v0.16b, v1.16b, v0.16b
+; CHECK-SVE-NEXT:    ret
+;
+; CHECK-NOSVE-LABEL: not_whilewr_wrong_eltsize:
+; CHECK-NOSVE:       // %bb.0: // %entry
+; CHECK-NOSVE-NEXT:    sub x8, x1, x0
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI8_0
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI8_1
+; CHECK-NOSVE-NEXT:    add x8, x8, x8, lsr #63
+; CHECK-NOSVE-NEXT:    ldr q0, [x9, :lo12:.LCPI8_0]
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI8_2
+; CHECK-NOSVE-NEXT:    ldr q2, [x9, :lo12:.LCPI8_2]
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI8_4
+; CHECK-NOSVE-NEXT:    ldr q1, [x10, :lo12:.LCPI8_1]
+; CHECK-NOSVE-NEXT:    asr x8, x8, #1
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI8_3
+; CHECK-NOSVE-NEXT:    ldr q5, [x9, :lo12:.LCPI8_4]
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI8_6
+; CHECK-NOSVE-NEXT:    ldr q3, [x10, :lo12:.LCPI8_3]
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI8_5
+; CHECK-NOSVE-NEXT:    dup v4.2d, x8
+; CHECK-NOSVE-NEXT:    ldr q7, [x9, :lo12:.LCPI8_6]
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI8_7
+; CHECK-NOSVE-NEXT:    ldr q6, [x10, :lo12:.LCPI8_5]
+; CHECK-NOSVE-NEXT:    ldr q16, [x9, :lo12:.LCPI8_7]
+; CHECK-NOSVE-NEXT:    cmp x8, #1
+; CHECK-NOSVE-NEXT:    cset w8, lt
+; CHECK-NOSVE-NEXT:    cmhi v0.2d, v4.2d, v0.2d
+; CHECK-NOSVE-NEXT:    cmhi v1.2d, v4.2d, v1.2d
+; CHECK-NOSVE-NEXT:    cmhi v2.2d, v4.2d, v2.2d
+; CHECK-NOSVE-NEXT:    cmhi v3.2d, v4.2d, v3.2d
+; CHECK-NOSVE-NEXT:    cmhi v5.2d, v4.2d, v5.2d
+; CHECK-NOSVE-NEXT:    cmhi v6.2d, v4.2d, v6.2d
+; CHECK-NOSVE-NEXT:    cmhi v7.2d, v4.2d, v7.2d
+; CHECK-NOSVE-NEXT:    cmhi v4.2d, v4.2d, v16.2d
+; CHECK-NOSVE-NEXT:    uzp1 v0.4s, v1.4s, v0.4s
+; CHECK-NOSVE-NEXT:    uzp1 v1.4s, v3.4s, v2.4s
+; CHECK-NOSVE-NEXT:    uzp1 v2.4s, v6.4s, v5.4s
+; CHECK-NOSVE-NEXT:    uzp1 v3.4s, v4.4s, v7.4s
+; CHECK-NOSVE-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NOSVE-NEXT:    uzp1 v1.8h, v3.8h, v2.8h
+; CHECK-NOSVE-NEXT:    uzp1 v0.16b, v1.16b, v0.16b
+; CHECK-NOSVE-NEXT:    dup v1.16b, w8
+; CHECK-NOSVE-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NOSVE-NEXT:    ret
+entry:
+  %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1.i64.i64(i64 %a, i64 %b, i64 2, i1 1)
+  ret <16 x i1> %0
+}
+
+define <2 x i1> @not_whilerw_ptr32(i32 %a, i32 %b) {
+; CHECK-SVE-LABEL: not_whilerw_ptr32:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    subs w8, w1, w0
+; CHECK-SVE-NEXT:    cneg w8, w8, mi
+; CHECK-SVE-NEXT:    add w9, w8, #7
+; CHECK-SVE-NEXT:    cmp w8, #0
+; CHECK-SVE-NEXT:    csel w8, w9, w8, lt
+; CHECK-SVE-NEXT:    asr w8, w8, #3
+; CHECK-SVE-NEXT:    cmp w8, #0
+; CHECK-SVE-NEXT:    cset w9, eq
+; CHECK-SVE-NEXT:    whilelo p0.s, #0, w8
+; CHECK-SVE-NEXT:    dup v0.2s, w9
+; CHECK-SVE-NEXT:    mov z1.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    orr v0.8b, v1.8b, v0.8b
+; CHECK-SVE-NEXT:    ret
+;
+; CHECK-NOSVE-LABEL: not_whilerw_ptr32:
+; CHECK-NOSVE:       // %bb.0: // %entry
+; CHECK-NOSVE-NEXT:    subs w9, w1, w0
+; CHECK-NOSVE-NEXT:    adrp x8, .LCPI9_0
+; CHECK-NOSVE-NEXT:    cneg w9, w9, mi
+; CHECK-NOSVE-NEXT:    ldr d1, [x8, :lo12:.LCPI9_0]
+; CHECK-NOSVE-NEXT:    add w10, w9, #7
+; CHECK-NOSVE-NEXT:    cmp w9, #0
+; CHECK-NOSVE-NEXT:    csel w9, w10, w9, lt
+; CHECK-NOSVE-NEXT:    asr w9, w9, #3
+; CHECK-NOSVE-NEXT:    dup v0.2s, w9
+; CHECK-NOSVE-NEXT:    cmp w9, #0
+; CHECK-NOSVE-NEXT:    cset w8, eq
+; CHECK-NOSVE-NEXT:    dup v2.2s, w8
+; CHECK-NOSVE-NEXT:    cmhi v0.2s, v0.2s, v1.2s
+; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v2.8b
+; CHECK-NOSVE-NEXT:    ret
+entry:
+  %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1.i32.i32(i32 %a, i32 %b, i32 8, i1 0)
+  ret <2 x i1> %0
+}
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
new file mode 100644
index 0000000000000..be5ec8b2a82bf
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
@@ -0,0 +1,195 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=aarch64 -mattr=+sve2 %s -o - | FileCheck %s --check-prefix=CHECK-SVE2
+; RUN: llc -mtriple=aarch64 -mattr=+sve %s -o - | FileCheck %s --check-prefix=CHECK-SVE
+
+define <vscale x 16 x i1> @whilewr_8(i64 %a, i64 %b) {
+; CHECK-SVE2-LABEL: whilewr_8:
+; CHECK-SVE2:       // %bb.0: // %entry
+; CHECK-SVE2-NEXT:    whilewr p0.b, x0, x1
+; CHECK-SVE2-NEXT:    ret
+;
+; CHECK-SVE-LABEL: whilewr_8:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    sub x8, x1, x0
+; CHECK-SVE-NEXT:    cmp x8, #1
+; CHECK-SVE-NEXT:    cset w9, lt
+; CHECK-SVE-NEXT:    whilelo p0.b, #0, x8
+; CHECK-SVE-NEXT:    sbfx x8, x9, #0, #1
+; CHECK-SVE-NEXT:    whilelo p1.b, xzr, x8
+; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
+; CHECK-SVE-NEXT:    ret
+entry:
+  %0 = call <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.nxv16i1.i64.i64(i64 %a, i64 %b, i64 1, i1 1)
+  ret <vscale x 16 x i1> %0
+}
+
+define <vscale x 8 x i1> @whilewr_16(i64 %a, i64 %b) {
+; CHECK-SVE2-LABEL: whilewr_16:
+; CHECK-SVE2:       // %bb.0: // %entry
+; CHECK-SVE2-NEXT:    whilewr p0.h, x0, x1
+; CHECK-SVE2-NEXT:    ret
+;
+; CHECK-SVE-LABEL: whilewr_16:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    sub x8, x1, x0
+; CHECK-SVE-NEXT:    add x8, x8, x8, lsr #63
+; CHECK-SVE-NEXT:    asr x8, x8, #1
+; CHECK-SVE-NEXT:    cmp x8, #1
+; CHECK-SVE-NEXT:    cset w9, lt
+; CHECK-SVE-NEXT:    whilelo p0.h, #0, x8
+; CHECK-SVE-NEXT:    sbfx x8, x9, #0, #1
+; CHECK-SVE-NEXT:    whilelo p1.h, xzr, x8
+; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
+; CHECK-SVE-NEXT:    ret
+entry:
+  %0 = call <vscale x 8 x i1> @llvm.experimental.get.alias.lane.mask.nxv8i1.i64.i64(i64 %a, i64 %b, i64 2, i1 1)
+  ret <vscale x 8 x i1> %0
+}
+
+define <vscale x 4 x i1> @whilewr_32(i64 %a, i64 %b) {
+; CHECK-SVE2-LABEL: whilewr_32:
+; CHECK-SVE2:       // %bb.0: // %entry
+; CHECK-SVE2-NEXT:    whilewr p0.s, x0, x1
+; CHECK-SVE2-NEXT:    ret
+;
+; CHECK-SVE-LABEL: whilewr_32:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    sub x8, x1, x0
+; CHECK-SVE-NEXT:    add x9, x8, #3
+; CHECK-SVE-NEXT:    cmp x8, #0
+; CHECK-SVE-NEXT:    csel x8, x9, x8, lt
+; CHECK-SVE-NEXT:    asr x8, x8, #2
+; CHECK-SVE-NEXT:    cmp x8, #1
+; CHECK-SVE-NEXT:    cset w9, lt
+; CHECK-SVE-NEXT:    whilelo p1.s, #0, x8
+; CHECK-SVE-NEXT:    sbfx x9, x9, #0, #1
+; CHECK-SVE-NEXT:    whilelo p0.s, xzr, x9
+; CHECK-SVE-NEXT:    mov p0.b, p1/m, p1.b
+; CHECK-SVE-NEXT:    ret
+entry:
+  %0 = call <vscale x 4 x i1> @llvm.experimental.get.alias.lane.mask.nxv4i1.i64.i64(i64 %a, i64 %b, i64 4, i1 1)
+  ret <vscale x 4 x i1> %0
+}
+
+define <vscale x 2 x i1> @whilewr_64(i64 %a, i64 %b) {
+; CHECK-SVE2-LABEL: whilewr_64:
+; CHECK-SVE2:       // %bb.0: // %entry
+; CHECK-SVE2-NEXT:    whilewr p0.d, x0, x1
+; CHECK-SVE2-NEXT:    ret
+;
+; CHECK-SVE-LABEL: whilewr_64:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    sub x8, x1, x0
+; CHECK-SVE-NEXT:    add x9, x8, #7
+; CHECK-SVE-NEXT:    cmp x8, #0
+; CHECK-SVE-NEXT:    csel x8, x9, x8, lt
+; CHECK-SVE-NEXT:    asr x8, x8, #3
+; CHECK-SVE-NEXT:    cmp x8, #1
+; CHECK-SVE-NEXT:    cset w9, lt
+; CHECK-SVE-NEXT:    whilelo p1.d, #0, x8
+; CHECK-SVE-NEXT:    sbfx x9, x9, #0, #1
+; CHECK-SVE-NEXT:    whilelo p0.d, xzr, x9
+; CHECK-SVE-NEXT:    mov p0.b, p1/m, p1.b
+; CHECK-SVE-NEXT:    ret
+entry:
+  %0 = call <vscale x 2 x i1> @llvm.experimental.get.alias.lane.mask.nxv2i1.i64.i64(i64 %a, i64 %b, i64 8, i1 1)
+  ret <vscale x 2 x i1> %0
+}
+
+define <vscale x 16 x i1> @whilerw_8(i64 %a, i64 %b) {
+; CHECK-SVE2-LABEL: whilerw_8:
+; CHECK-SVE2:       // %bb.0: // %entry
+; CHECK-SVE2-NEXT:    whilerw p0.b, x0, x1
+; CHECK-SVE2-NEXT:    ret
+;
+; CHECK-SVE-LABEL: whilerw_8:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    subs x8, x1, x0
+; CHECK-SVE-NEXT:    cneg x8, x8, mi
+; CHECK-SVE-NEXT:    cmp x8, #0
+; CHECK-SVE-NEXT:    cset w9, eq
+; CHECK-SVE-NEXT:    whilelo p0.b, #0, x8
+; CHECK-SVE-NEXT:    sbfx x8, x9, #0, #1
+; CHECK-SVE-NEXT:    whilelo p1.b, xzr, x8
+; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
+; CHECK-SVE-NEXT:    ret
+entry:
+  %0 = call <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.nxv16i1.i64.i64(i64 %a, i64 %b, i64 1, i1 0)
+  ret <vscale x 16 x i1> %0
+}
+
+define <vscale x 8 x i1> @whilerw_16(i64 %a, i64 %b) {
+; CHECK-SVE2-LABEL: whilerw_16:
+; CHECK-SVE2:       // %bb.0: // %entry
+; CHECK-SVE2-NEXT:    whilerw p0.h, x0, x1
+; CHECK-SVE2-NEXT:    ret
+;
+; CHECK-SVE-LABEL: whilerw_16:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    subs x8, x1, x0
+; CHECK-SVE-NEXT:    cneg x8, x8, mi
+; CHECK-SVE-NEXT:    add x8, x8, x8, lsr #63
+; CHECK-SVE-NEXT:    asr x8, x8, #1
+; CHECK-SVE-NEXT:    cmp x8, #0
+; CHECK-SVE-NEXT:    cset w9, eq
+; CHECK-SVE-NEXT:    whilelo p0.h, #0, x8
+; CHECK-SVE-NEXT:    sbfx x8, x9, #0, #1
+; CHECK-SVE-NEXT:    whilelo p1.h, xzr, x8
+; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
+; CHECK-SVE-NEXT:    ret
+entry:
+  %0 = call <vscale x 8 x i1> @llvm.experimental.get.alias.lane.mask.nxv8i1.i64.i64(i64 %a, i64 %b, i64 2, i1 0)
+  ret <vscale x 8 x i1> %0
+}
+
+define <vscale x 4 x i1> @whilerw_32(i64 %a, i64 %b) {
+; CHECK-SVE2-LABEL: whilerw_32:
+; CHECK-SVE2:       // %bb.0: // %entry
+; CHECK-SVE2-NEXT:    whilerw p0.s, x0, x1
+; CHECK-SVE2-NEXT:    ret
+;
+; CHECK-SVE-LABEL: whilerw_32:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    subs x8, x1, x0
+; CHECK-SVE-NEXT:    cneg x8, x8, mi
+; CHECK-SVE-NEXT:    add x9, x8, #3
+; CHECK-SVE-NEXT:    cmp x8, #0
+; CHECK-SVE-NEXT:    csel x8, x9, x8, lt
+; CHECK-SVE-NEXT:    asr x8, x8, #2
+; CHECK-SVE-NEXT:    cmp x8, #0
+; CHECK-SVE-NEXT:    cset w9, eq
+; CHECK-SVE-NEXT:    whilelo p1.s, #0, x8
+; CHECK-SVE-NEXT:    sbfx x9, x9, #0, #1
+; CHECK-SVE-NEXT:    whilelo p0.s, xzr, x9
+; CHECK-SVE-NEXT:    mov p0.b, p1/m, p1.b
+; CHECK-SVE-NEXT:    ret
+entry:
+  %0 = call <vscale x 4 x i1> @llvm.experimental.get.alias.lane.mask.nxv4i1.i64.i64(i64 %a, i64 %b, i64 4, i1 0)
+  ret <vscale x 4 x i1> %0
+}
+
+define <vscale x 2 x i1> @whilerw_64(i64 %a, i64 %b) {
+; CHECK-SVE2-LABEL: whilerw_64:
+; CHECK-SVE2:       // %bb.0: // %entry
+; CHECK-SVE2-NEXT:    whilerw p0.d, x0, x1
+; CHECK-SVE2-NEXT:    ret
+;
+; CHECK-SVE-LABEL: whilerw_64:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    subs x8, x1, x0
+; CHECK-SVE-NEXT:    cneg x8, x8, mi
+; CHECK-SVE-NEXT:    add x9, x8, #7
+; CHECK-SVE-NEXT:    cmp x8, #0
+; CHECK-SVE-NEXT:    csel x8, x9, x8, lt
+; CHECK-SVE-NEXT:    asr x8, x8, #3
+; CHECK-SVE-NEXT:    cmp x8, #0
+; CHECK-SVE-NEXT:    cset w9, eq
+; CHECK-SVE-NEXT:    whilelo p1.d, #0, x8
+; CHECK-SVE-NEXT:    sbfx x9, x9, #0, #1
+; CHECK-SVE-NEXT:    whilelo p0.d, xzr, x9
+; CHECK-SVE-NEXT:    mov p0.b, p1/m, p1.b
+; CHECK-SVE-NEXT:    ret
+entry:
+  %0 = call <vscale x 2 x i1> @llvm.experimental.get.alias.lane.mask.nxv2i1.i64.i64(i64 %a, i64 %b, i64 8, i1 0)
+  ret <vscale x 2 x i1> %0
+}

>From 19dd02933c8f40047baeb3044718a64f652e1e94 Mon Sep 17 00:00:00 2001
From: Sam Tebbs <samuel.tebbs at arm.com>
Date: Fri, 10 Jan 2025 11:37:37 +0000
Subject: [PATCH 02/29] Rework lowering location

---
 llvm/include/llvm/CodeGen/ISDOpcodes.h        |   5 +
 .../SelectionDAG/LegalizeIntegerTypes.cpp     |  22 ++
 llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h |   2 +
 .../SelectionDAG/LegalizeVectorOps.cpp        |  41 ++++
 .../SelectionDAG/SelectionDAGBuilder.cpp      |  53 +----
 .../SelectionDAG/SelectionDAGDumper.cpp       |   2 +
 llvm/lib/CodeGen/TargetLoweringBase.cpp       |   3 +
 .../Target/AArch64/AArch64ISelLowering.cpp    |  96 +++++++-
 llvm/lib/Target/AArch64/AArch64ISelLowering.h |   1 +
 llvm/test/CodeGen/AArch64/alias_mask.ll       | 120 ++--------
 .../CodeGen/AArch64/alias_mask_scalable.ll    | 210 ++++++++++++++----
 11 files changed, 358 insertions(+), 197 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index 465e4a0a9d0d8..3acedb67fc7c0 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -1558,6 +1558,11 @@ enum NodeType {
   // bits conform to getBooleanContents similar to the SETCC operator.
   GET_ACTIVE_LANE_MASK,
 
+  // The `llvm.experimental.get.alias.lane.mask.*` intrinsics
+  // Operands: Load pointer, Store pointer, Element size, Write after read
+  // Output: Mask
+  EXPERIMENTAL_ALIAS_LANE_MASK,
+
   // llvm.clear_cache intrinsic
   // Operands: Input Chain, Start Addres, End Address
   // Outputs: Output Chain
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index a5bd97ace169e..68e8db38fa5ec 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -56,6 +56,9 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
     N->dump(&DAG); dbgs() << "\n";
 #endif
     report_fatal_error("Do not know how to promote this operator!");
+  case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
+    Res = PromoteIntRes_EXPERIMENTAL_ALIAS_LANE_MASK(N);
+    break;
   case ISD::MERGE_VALUES:Res = PromoteIntRes_MERGE_VALUES(N, ResNo); break;
   case ISD::AssertSext:  Res = PromoteIntRes_AssertSext(N); break;
   case ISD::AssertZext:  Res = PromoteIntRes_AssertZext(N); break;
@@ -374,6 +377,14 @@ SDValue DAGTypeLegalizer::PromoteIntRes_MERGE_VALUES(SDNode *N,
   return GetPromotedInteger(Op);
 }
 
+SDValue
+DAGTypeLegalizer::PromoteIntRes_EXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N) {
+  EVT VT = N->getValueType(0);
+  EVT NewVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
+  return DAG.getNode(ISD::EXPERIMENTAL_ALIAS_LANE_MASK, SDLoc(N), NewVT,
+                     N->ops());
+}
+
 SDValue DAGTypeLegalizer::PromoteIntRes_AssertSext(SDNode *N) {
   // Sign-extend the new bits, and continue the assertion.
   SDValue Op = SExtPromotedInteger(N->getOperand(0));
@@ -2104,6 +2115,9 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
   case ISD::PARTIAL_REDUCE_SUMLA:
     Res = PromoteIntOp_PARTIAL_REDUCE_MLA(N);
     break;
+  case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
+    Res = DAGTypeLegalizer::PromoteIntOp_EXPERIMENTAL_ALIAS_LANE_MASK(N, OpNo);
+    break;
   }
 
   // If the result is null, the sub-method took care of registering results etc.
@@ -2937,6 +2951,14 @@ SDValue DAGTypeLegalizer::PromoteIntOp_PARTIAL_REDUCE_MLA(SDNode *N) {
   return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
 }
 
+SDValue
+DAGTypeLegalizer::PromoteIntOp_EXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N,
+                                                            unsigned OpNo) {
+  SmallVector<SDValue, 4> NewOps(N->ops());
+  NewOps[OpNo] = GetPromotedInteger(N->getOperand(OpNo));
+  return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
+}
+
 //===----------------------------------------------------------------------===//
 //  Integer Result Expansion
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index 2e13b1854bf29..da473a435b461 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -382,6 +382,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
   SDValue PromoteIntRes_VECTOR_FIND_LAST_ACTIVE(SDNode *N);
   SDValue PromoteIntRes_GET_ACTIVE_LANE_MASK(SDNode *N);
   SDValue PromoteIntRes_PARTIAL_REDUCE_MLA(SDNode *N);
+  SDValue PromoteIntRes_EXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N);
 
   // Integer Operand Promotion.
   bool PromoteIntegerOperand(SDNode *N, unsigned OpNo);
@@ -436,6 +437,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
   SDValue PromoteIntOp_VECTOR_FIND_LAST_ACTIVE(SDNode *N, unsigned OpNo);
   SDValue PromoteIntOp_GET_ACTIVE_LANE_MASK(SDNode *N);
   SDValue PromoteIntOp_PARTIAL_REDUCE_MLA(SDNode *N);
+  SDValue PromoteIntOp_EXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N, unsigned OpNo);
 
   void SExtOrZExtPromotedOperands(SDValue &LHS, SDValue &RHS);
   void PromoteSetCCOperands(SDValue &LHS,SDValue &RHS, ISD::CondCode Code);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index d2ecc13331e02..c9e5c7f8bbdb0 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -138,6 +138,7 @@ class VectorLegalizer {
   SDValue ExpandVP_FNEG(SDNode *Node);
   SDValue ExpandVP_FABS(SDNode *Node);
   SDValue ExpandVP_FCOPYSIGN(SDNode *Node);
+  SDValue ExpandEXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N);
   SDValue ExpandSELECT(SDNode *Node);
   std::pair<SDValue, SDValue> ExpandLoad(SDNode *N);
   SDValue ExpandStore(SDNode *N);
@@ -475,6 +476,7 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
   case ISD::VECTOR_COMPRESS:
   case ISD::SCMP:
   case ISD::UCMP:
+  case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
     Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
     break;
   case ISD::SMULFIX:
@@ -1291,6 +1293,9 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
   case ISD::UCMP:
     Results.push_back(TLI.expandCMP(Node, DAG));
     return;
+  case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
+    Results.push_back(ExpandEXPERIMENTAL_ALIAS_LANE_MASK(Node));
+    return;
 
   case ISD::FADD:
   case ISD::FMUL:
@@ -1796,6 +1801,42 @@ SDValue VectorLegalizer::ExpandVP_FCOPYSIGN(SDNode *Node) {
   return DAG.getNode(ISD::BITCAST, DL, VT, CopiedSign);
 }
 
+SDValue VectorLegalizer::ExpandEXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N) {
+  SDLoc DL(N);
+  SDValue SourceValue = N->getOperand(0);
+  SDValue SinkValue = N->getOperand(1);
+  SDValue EltSize = N->getOperand(2);
+
+  bool IsWriteAfterRead =
+      cast<ConstantSDNode>(N->getOperand(3))->getZExtValue() != 0;
+  auto VT = N->getValueType(0);
+  auto PtrVT = SourceValue->getValueType(0);
+
+  SDValue Diff = DAG.getNode(ISD::SUB, DL, PtrVT, SinkValue, SourceValue);
+  if (!IsWriteAfterRead)
+    Diff = DAG.getNode(ISD::ABS, DL, PtrVT, Diff);
+
+  Diff = DAG.getNode(ISD::SDIV, DL, PtrVT, Diff, EltSize);
+  SDValue Zero = DAG.getTargetConstant(0, DL, PtrVT);
+
+  // A non-positive difference means no lanes can alias, so the mask is all-true
+  auto CmpVT = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
+                                      Diff.getValueType());
+  SDValue Cmp = DAG.getSetCC(DL, CmpVT, Diff, Zero,
+                             IsWriteAfterRead ? ISD::SETLE : ISD::SETEQ);
+
+  EVT SplatTY =
+      EVT::getVectorVT(*DAG.getContext(), PtrVT, VT.getVectorElementCount());
+  SDValue DiffSplat = DAG.getSplat(SplatTY, DL, Diff);
+  SDValue VectorStep = DAG.getStepVector(DL, SplatTY);
+  SDValue DiffMask =
+      DAG.getSetCC(DL, VT, VectorStep, DiffSplat, ISD::CondCode::SETULT);
+
+  // Splat the compare result then OR it with a lane mask
+  SDValue Splat = DAG.getSplat(VT, DL, Cmp);
+  return DAG.getNode(ISD::OR, DL, VT, DiffMask, Splat);
+}
+
 void VectorLegalizer::ExpandFP_TO_UINT(SDNode *Node,
                                        SmallVectorImpl<SDValue> &Results) {
   // Attempt to expand using TargetLowering.
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index fad88905610dd..005bc9da3d62b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -8272,54 +8272,13 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
     return;
   }
   case Intrinsic::experimental_get_alias_lane_mask: {
-    SDValue SourceValue = getValue(I.getOperand(0));
-    SDValue SinkValue = getValue(I.getOperand(1));
-    SDValue EltSize = getValue(I.getOperand(2));
-    bool IsWriteAfterRead =
-        cast<ConstantSDNode>(getValue(I.getOperand(3)))->getZExtValue() != 0;
     auto IntrinsicVT = EVT::getEVT(I.getType());
-    auto PtrVT = SourceValue->getValueType(0);
-
-    if (!TLI.shouldExpandGetAliasLaneMask(
-            IntrinsicVT, PtrVT,
-            cast<ConstantSDNode>(EltSize)->getSExtValue())) {
-      visitTargetIntrinsic(I, Intrinsic);
-      return;
-    }
-
-    SDValue Diff = DAG.getNode(ISD::SUB, sdl, PtrVT, SinkValue, SourceValue);
-    if (!IsWriteAfterRead)
-      Diff = DAG.getNode(ISD::ABS, sdl, PtrVT, Diff);
-
-    Diff = DAG.getNode(ISD::SDIV, sdl, PtrVT, Diff, EltSize);
-    SDValue Zero = DAG.getTargetConstant(0, sdl, PtrVT);
-
-    // If the difference is positive then some elements may alias
-    auto CmpVT =
-        TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), PtrVT);
-    SDValue Cmp = DAG.getSetCC(sdl, CmpVT, Diff, Zero,
-                               IsWriteAfterRead ? ISD::SETLE : ISD::SETEQ);
-
-    // Splat the compare result then OR it with a lane mask
-    SDValue Splat = DAG.getSplat(IntrinsicVT, sdl, Cmp);
-
-    SDValue DiffMask;
-    // Don't emit an active lane mask if the target doesn't support it
-    if (TLI.shouldExpandGetActiveLaneMask(IntrinsicVT, PtrVT)) {
-      EVT VecTy = EVT::getVectorVT(*DAG.getContext(), PtrVT,
-                                   IntrinsicVT.getVectorElementCount());
-      SDValue DiffSplat = DAG.getSplat(VecTy, sdl, Diff);
-      SDValue VectorStep = DAG.getStepVector(sdl, VecTy);
-      DiffMask = DAG.getSetCC(sdl, IntrinsicVT, VectorStep, DiffSplat,
-                              ISD::CondCode::SETULT);
-    } else {
-      DiffMask = DAG.getNode(
-          ISD::INTRINSIC_WO_CHAIN, sdl, IntrinsicVT,
-          DAG.getTargetConstant(Intrinsic::get_active_lane_mask, sdl, MVT::i64),
-          Zero, Diff);
-    }
-    SDValue Or = DAG.getNode(ISD::OR, sdl, IntrinsicVT, DiffMask, Splat);
-    setValue(&I, Or);
+    SmallVector<SDValue, 4> Ops;
+    for (auto &Op : I.operands())
+      Ops.push_back(getValue(Op));
+    SDValue Mask =
+        DAG.getNode(ISD::EXPERIMENTAL_ALIAS_LANE_MASK, sdl, IntrinsicVT, Ops);
+    setValue(&I, Mask);
   }
   }
 }
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index 900da7645504f..41aa8c9ebfa8f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -587,6 +587,8 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
     return "partial_reduce_smla";
   case ISD::PARTIAL_REDUCE_SUMLA:
     return "partial_reduce_sumla";
+  case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
+    return "alias_mask";
 
     // Vector Predication
 #define BEGIN_REGISTER_VP_SDNODE(SDID, LEGALARG, NAME, ...)                    \
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 9f525ea4abc3c..0e1326798678d 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -900,6 +900,9 @@ void TargetLoweringBase::initActions() {
     // Masked vector extracts default to expand.
     setOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE, VT, Expand);
 
+    // Alias lane masks default to expand.
+    setOperationAction(ISD::EXPERIMENTAL_ALIAS_LANE_MASK, VT, Expand);
+
     // FP environment operations default to expand.
     setOperationAction(ISD::GET_FPENV, VT, Expand);
     setOperationAction(ISD::SET_FPENV, VT, Expand);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index a0b1b1959ebc9..1766c7d58e1de 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1915,6 +1915,15 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
     }
   }
 
+  // Handle non-aliasing lane masks
+  if (Subtarget->hasSVE2() ||
+      (Subtarget->hasSME() && Subtarget->isStreaming())) {
+    for (auto VT : {MVT::v2i32, MVT::v4i16, MVT::v8i8, MVT::v16i8, MVT::nxv2i1,
+                    MVT::nxv4i1, MVT::nxv8i1, MVT::nxv16i1}) {
+      setOperationAction(ISD::EXPERIMENTAL_ALIAS_LANE_MASK, VT, Custom);
+    }
+  }
+
   // Handle operations that are only available in non-streaming SVE mode.
   if (Subtarget->isSVEAvailable()) {
     for (auto VT : {MVT::nxv16i8,  MVT::nxv8i16, MVT::nxv4i32,  MVT::nxv2i64,
@@ -5220,6 +5229,65 @@ SDValue AArch64TargetLowering::LowerFSINCOS(SDValue Op,
 
 static MVT getSVEContainerType(EVT ContentTy);
 
+SDValue AArch64TargetLowering::LowerALIAS_LANE_MASK(SDValue Op,
+                                                    SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  unsigned IntrinsicID = 0;
+  uint64_t EltSize = Op.getOperand(2)->getAsZExtVal();
+  bool IsWriteAfterRead = Op.getOperand(3)->getAsZExtVal() == 1;
+  EVT VT = Op.getValueType();
+  MVT SimpleVT = VT.getSimpleVT();
+  // Make sure that the promoted mask size and element size match
+  switch (EltSize) {
+  case 1:
+    assert((SimpleVT == MVT::v16i8 || SimpleVT == MVT::nxv16i1) &&
+           "Unexpected mask or element size");
+    IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_b
+                                   : Intrinsic::aarch64_sve_whilerw_b;
+    break;
+  case 2:
+    assert((SimpleVT == MVT::v8i8 || SimpleVT == MVT::nxv8i1) &&
+           "Unexpected mask or element size");
+    IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_h
+                                   : Intrinsic::aarch64_sve_whilerw_h;
+    break;
+  case 4:
+    assert((SimpleVT == MVT::v4i16 || SimpleVT == MVT::nxv4i1) &&
+           "Unexpected mask or element size");
+    IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_s
+                                   : Intrinsic::aarch64_sve_whilerw_s;
+    break;
+  case 8:
+    assert((SimpleVT == MVT::v2i32 || SimpleVT == MVT::nxv2i1) &&
+           "Unexpected mask or element size");
+    IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_d
+                                   : Intrinsic::aarch64_sve_whilerw_d;
+    break;
+  default:
+    llvm_unreachable("Unexpected element size for get.alias.lane.mask");
+    break;
+  }
+  SDValue ID = DAG.getTargetConstant(IntrinsicID, DL, MVT::i64);
+
+  if (VT.isScalableVector())
+    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, ID, Op.getOperand(0),
+                       Op.getOperand(1));
+
+  // We can use the SVE whilewr/whilerw instructions to lower this
+  // intrinsic by creating the appropriate sequence of scalable vector
+  // operations and then extracting a fixed-width subvector from the scalable
+  // vector.
+
+  EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
+  EVT WhileVT = ContainerVT.changeElementType(MVT::i1);
+
+  SDValue Mask = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, WhileVT, ID,
+                             Op.getOperand(0), Op.getOperand(1));
+  SDValue MaskAsInt = DAG.getNode(ISD::SIGN_EXTEND, DL, ContainerVT, Mask);
+  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, MaskAsInt,
+                     DAG.getVectorIdxConstant(0, DL));
+}
+
 SDValue AArch64TargetLowering::LowerBITCAST(SDValue Op,
                                             SelectionDAG &DAG) const {
   EVT OpVT = Op.getValueType();
@@ -7395,6 +7463,8 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
   default:
     llvm_unreachable("unimplemented operand");
     return SDValue();
+  case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
+    return LowerALIAS_LANE_MASK(Op, DAG);
   case ISD::BITCAST:
     return LowerBITCAST(Op, DAG);
   case ISD::GlobalAddress:
@@ -19991,9 +20061,10 @@ static SDValue getPTest(SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op,
                         AArch64CC::CondCode Cond);
 
 static bool isPredicateCCSettingOp(SDValue N) {
-  if ((N.getOpcode() == ISD::SETCC) ||
+  if ((N.getOpcode() == ISD::SETCC ||
       // get_active_lane_mask is lowered to a whilelo instruction.
-      (N.getOpcode() == ISD::GET_ACTIVE_LANE_MASK) ||
+       N.getOpcode() == ISD::GET_ACTIVE_LANE_MASK ||
+       N.getOpcode() == ISD::EXPERIMENTAL_ALIAS_LANE_MASK) ||
       (N.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        (N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilege ||
         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilegt ||
@@ -20002,10 +20073,7 @@ static bool isPredicateCCSettingOp(SDValue N) {
         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilele ||
         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelo ||
         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilels ||
-        N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelt ||
-        // get_alias_lane_mask is lowered to a whilewr/rw instruction.
-        N.getConstantOperandVal(0) ==
-            Intrinsic::experimental_get_alias_lane_mask)))
+        N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelt)))
     return true;
 
   return false;
@@ -28197,6 +28265,22 @@ void AArch64TargetLowering::ReplaceNodeResults(
   case ISD::GET_ACTIVE_LANE_MASK:
     ReplaceGetActiveLaneMaskResults(N, Results, DAG);
     return;
+  case ISD::EXPERIMENTAL_ALIAS_LANE_MASK: {
+    EVT VT = N->getValueType(0);
+    if (!VT.isFixedLengthVector() || VT.getVectorElementType() != MVT::i1)
+      return;
+
+    // NOTE: Only trivial type promotion is supported.
+    EVT NewVT = getTypeToTransformTo(*DAG.getContext(), VT);
+    if (NewVT.getVectorNumElements() != VT.getVectorNumElements())
+      return;
+
+    SDLoc DL(N);
+    auto V =
+        DAG.getNode(ISD::EXPERIMENTAL_ALIAS_LANE_MASK, DL, NewVT, N->ops());
+    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
+    return;
+  }
   case ISD::INTRINSIC_WO_CHAIN: {
     EVT VT = N->getValueType(0);
 
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 4f59b4a9f497c..3c429cc611c3c 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -729,6 +729,7 @@ class AArch64TargetLowering : public TargetLowering {
   SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerALIAS_LANE_MASK(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerVSCALE(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
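The fixed-width leg of LowerALIAS_LANE_MASK above leans on the scalable predicate: the whilewr/whilerw result is sign-extended into an integer container and the low lanes are extracted. A hedged scalar model of that shuffle (names are illustrative, and the scalable predicate is assumed to have at least NumElts lanes):

    #include <cstdint>
    #include <vector>

    // Model of sign_extend (e.g. nxv16i1 -> nxv16i8) followed by
    // extract_subvector at index 0 (nxv16i8 -> v16i8).
    std::vector<int8_t> fixedWidthMask(const std::vector<bool> &ScalablePred,
                                       unsigned NumElts) {
      std::vector<int8_t> FixedMask(NumElts);
      for (unsigned I = 0; I < NumElts; ++I)
        FixedMask[I] = ScalablePred[I] ? -1 : 0; // i1 -> 0x00 / 0xff per lane
      return FixedMask; // keep only the first NumElts lanes
    }
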
diff --git a/llvm/test/CodeGen/AArch64/alias_mask.ll b/llvm/test/CodeGen/AArch64/alias_mask.ll
index 84a22822f1702..9b344f03da077 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask.ll
@@ -48,10 +48,12 @@ define <16 x i1> @whilewr_8(i64 %a, i64 %b) {
 ; CHECK-NOSVE-NEXT:    uzp1 v1.8h, v2.8h, v3.8h
 ; CHECK-NOSVE-NEXT:    uzp1 v0.16b, v1.16b, v0.16b
 ; CHECK-NOSVE-NEXT:    dup v1.16b, w8
+; CHECK-NOSVE-NEXT:    shl v0.16b, v0.16b, #7
+; CHECK-NOSVE-NEXT:    cmlt v0.16b, v0.16b, #0
 ; CHECK-NOSVE-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 1, i1 1)
+  %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1.i64.i64(i64 %a, i64 %b, i64 1, i1 1)
   ret <16 x i1> %0
 }
 
@@ -88,6 +90,8 @@ define <8 x i1> @whilewr_16(i64 %a, i64 %b) {
 ; CHECK-NOSVE-NEXT:    uzp1 v0.8h, v0.8h, v1.8h
 ; CHECK-NOSVE-NEXT:    dup v1.8b, w8
 ; CHECK-NOSVE-NEXT:    xtn v0.8b, v0.8h
+; CHECK-NOSVE-NEXT:    shl v0.8b, v0.8b, #7
+; CHECK-NOSVE-NEXT:    cmlt v0.8b, v0.8b, #0
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
@@ -125,7 +129,7 @@ define <4 x i1> @whilewr_32(i64 %a, i64 %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 4, i1 1)
+  %0 = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i64(i64 %a, i64 %b, i64 4, i1 1)
   ret <4 x i1> %0
 }
 
@@ -155,7 +159,7 @@ define <2 x i1> @whilewr_64(i64 %a, i64 %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 8, i1 1)
+  %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1.i64.i64(i64 %a, i64 %b, i64 8, i1 1)
   ret <2 x i1> %0
 }
 
@@ -206,10 +210,12 @@ define <16 x i1> @whilerw_8(i64 %a, i64 %b) {
 ; CHECK-NOSVE-NEXT:    uzp1 v1.8h, v3.8h, v2.8h
 ; CHECK-NOSVE-NEXT:    uzp1 v0.16b, v1.16b, v0.16b
 ; CHECK-NOSVE-NEXT:    dup v1.16b, w8
+; CHECK-NOSVE-NEXT:    shl v0.16b, v0.16b, #7
+; CHECK-NOSVE-NEXT:    cmlt v0.16b, v0.16b, #0
 ; CHECK-NOSVE-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 1, i1 0)
+  %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1.i64.i64(i64 %a, i64 %b, i64 1, i1 0)
   ret <16 x i1> %0
 }
 
@@ -247,6 +253,8 @@ define <8 x i1> @whilerw_16(i64 %a, i64 %b) {
 ; CHECK-NOSVE-NEXT:    uzp1 v0.8h, v0.8h, v1.8h
 ; CHECK-NOSVE-NEXT:    dup v1.8b, w8
 ; CHECK-NOSVE-NEXT:    xtn v0.8b, v0.8h
+; CHECK-NOSVE-NEXT:    shl v0.8b, v0.8b, #7
+; CHECK-NOSVE-NEXT:    cmlt v0.8b, v0.8b, #0
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
@@ -285,7 +293,7 @@ define <4 x i1> @whilerw_32(i64 %a, i64 %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 4, i1 0)
+  %0 = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i64(i64 %a, i64 %b, i64 4, i1 0)
   ret <4 x i1> %0
 }
 
@@ -316,106 +324,6 @@ define <2 x i1> @whilerw_64(i64 %a, i64 %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 8, i1 0)
-  ret <2 x i1> %0
-}
-
-define <16 x i1> @not_whilewr_wrong_eltsize(i64 %a, i64 %b) {
-; CHECK-SVE-LABEL: not_whilewr_wrong_eltsize:
-; CHECK-SVE:       // %bb.0: // %entry
-; CHECK-SVE-NEXT:    sub x8, x1, x0
-; CHECK-SVE-NEXT:    add x8, x8, x8, lsr #63
-; CHECK-SVE-NEXT:    asr x8, x8, #1
-; CHECK-SVE-NEXT:    cmp x8, #1
-; CHECK-SVE-NEXT:    cset w9, lt
-; CHECK-SVE-NEXT:    whilelo p0.b, #0, x8
-; CHECK-SVE-NEXT:    dup v0.16b, w9
-; CHECK-SVE-NEXT:    mov z1.b, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-SVE-NEXT:    orr v0.16b, v1.16b, v0.16b
-; CHECK-SVE-NEXT:    ret
-;
-; CHECK-NOSVE-LABEL: not_whilewr_wrong_eltsize:
-; CHECK-NOSVE:       // %bb.0: // %entry
-; CHECK-NOSVE-NEXT:    sub x8, x1, x0
-; CHECK-NOSVE-NEXT:    adrp x9, .LCPI8_0
-; CHECK-NOSVE-NEXT:    adrp x10, .LCPI8_1
-; CHECK-NOSVE-NEXT:    add x8, x8, x8, lsr #63
-; CHECK-NOSVE-NEXT:    ldr q0, [x9, :lo12:.LCPI8_0]
-; CHECK-NOSVE-NEXT:    adrp x9, .LCPI8_2
-; CHECK-NOSVE-NEXT:    ldr q2, [x9, :lo12:.LCPI8_2]
-; CHECK-NOSVE-NEXT:    adrp x9, .LCPI8_4
-; CHECK-NOSVE-NEXT:    ldr q1, [x10, :lo12:.LCPI8_1]
-; CHECK-NOSVE-NEXT:    asr x8, x8, #1
-; CHECK-NOSVE-NEXT:    adrp x10, .LCPI8_3
-; CHECK-NOSVE-NEXT:    ldr q5, [x9, :lo12:.LCPI8_4]
-; CHECK-NOSVE-NEXT:    adrp x9, .LCPI8_6
-; CHECK-NOSVE-NEXT:    ldr q3, [x10, :lo12:.LCPI8_3]
-; CHECK-NOSVE-NEXT:    adrp x10, .LCPI8_5
-; CHECK-NOSVE-NEXT:    dup v4.2d, x8
-; CHECK-NOSVE-NEXT:    ldr q7, [x9, :lo12:.LCPI8_6]
-; CHECK-NOSVE-NEXT:    adrp x9, .LCPI8_7
-; CHECK-NOSVE-NEXT:    ldr q6, [x10, :lo12:.LCPI8_5]
-; CHECK-NOSVE-NEXT:    ldr q16, [x9, :lo12:.LCPI8_7]
-; CHECK-NOSVE-NEXT:    cmp x8, #1
-; CHECK-NOSVE-NEXT:    cset w8, lt
-; CHECK-NOSVE-NEXT:    cmhi v0.2d, v4.2d, v0.2d
-; CHECK-NOSVE-NEXT:    cmhi v1.2d, v4.2d, v1.2d
-; CHECK-NOSVE-NEXT:    cmhi v2.2d, v4.2d, v2.2d
-; CHECK-NOSVE-NEXT:    cmhi v3.2d, v4.2d, v3.2d
-; CHECK-NOSVE-NEXT:    cmhi v5.2d, v4.2d, v5.2d
-; CHECK-NOSVE-NEXT:    cmhi v6.2d, v4.2d, v6.2d
-; CHECK-NOSVE-NEXT:    cmhi v7.2d, v4.2d, v7.2d
-; CHECK-NOSVE-NEXT:    cmhi v4.2d, v4.2d, v16.2d
-; CHECK-NOSVE-NEXT:    uzp1 v0.4s, v1.4s, v0.4s
-; CHECK-NOSVE-NEXT:    uzp1 v1.4s, v3.4s, v2.4s
-; CHECK-NOSVE-NEXT:    uzp1 v2.4s, v6.4s, v5.4s
-; CHECK-NOSVE-NEXT:    uzp1 v3.4s, v4.4s, v7.4s
-; CHECK-NOSVE-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
-; CHECK-NOSVE-NEXT:    uzp1 v1.8h, v3.8h, v2.8h
-; CHECK-NOSVE-NEXT:    uzp1 v0.16b, v1.16b, v0.16b
-; CHECK-NOSVE-NEXT:    dup v1.16b, w8
-; CHECK-NOSVE-NEXT:    orr v0.16b, v0.16b, v1.16b
-; CHECK-NOSVE-NEXT:    ret
-entry:
-  %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 2, i1 1)
-  ret <16 x i1> %0
-}
-
-define <2 x i1> @not_whilerw_ptr32(i32 %a, i32 %b) {
-; CHECK-SVE-LABEL: not_whilerw_ptr32:
-; CHECK-SVE:       // %bb.0: // %entry
-; CHECK-SVE-NEXT:    subs w8, w1, w0
-; CHECK-SVE-NEXT:    cneg w8, w8, mi
-; CHECK-SVE-NEXT:    add w9, w8, #7
-; CHECK-SVE-NEXT:    cmp w8, #0
-; CHECK-SVE-NEXT:    csel w8, w9, w8, lt
-; CHECK-SVE-NEXT:    asr w8, w8, #3
-; CHECK-SVE-NEXT:    cmp w8, #0
-; CHECK-SVE-NEXT:    cset w9, eq
-; CHECK-SVE-NEXT:    whilelo p0.s, #0, w8
-; CHECK-SVE-NEXT:    dup v0.2s, w9
-; CHECK-SVE-NEXT:    mov z1.s, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-SVE-NEXT:    orr v0.8b, v1.8b, v0.8b
-; CHECK-SVE-NEXT:    ret
-;
-; CHECK-NOSVE-LABEL: not_whilerw_ptr32:
-; CHECK-NOSVE:       // %bb.0: // %entry
-; CHECK-NOSVE-NEXT:    subs w9, w1, w0
-; CHECK-NOSVE-NEXT:    adrp x8, .LCPI9_0
-; CHECK-NOSVE-NEXT:    cneg w9, w9, mi
-; CHECK-NOSVE-NEXT:    ldr d1, [x8, :lo12:.LCPI9_0]
-; CHECK-NOSVE-NEXT:    add w10, w9, #7
-; CHECK-NOSVE-NEXT:    cmp w9, #0
-; CHECK-NOSVE-NEXT:    csel w9, w10, w9, lt
-; CHECK-NOSVE-NEXT:    asr w9, w9, #3
-; CHECK-NOSVE-NEXT:    dup v0.2s, w9
-; CHECK-NOSVE-NEXT:    cmp w9, #0
-; CHECK-NOSVE-NEXT:    cset w8, eq
-; CHECK-NOSVE-NEXT:    dup v2.2s, w8
-; CHECK-NOSVE-NEXT:    cmhi v0.2s, v0.2s, v1.2s
-; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v2.8b
-; CHECK-NOSVE-NEXT:    ret
-entry:
-  %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i32.i32(i32 %a, i32 %b, i32 8, i1 0)
+  %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1.i64.i64(i64 %a, i64 %b, i64 8, i1 0)
   ret <2 x i1> %0
 }
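The shl #7 / cmlt #0 pairs in the updated CHECK lines are the usual AArch64 materialization of a mask that reaches this point with only bit 0 of each lane defined: shift bit 0 up to the sign bit, then compare-less-than-zero to smear it across the lane. A scalar model of one byte lane (hedged, for illustration only):

    #include <cstdint>

    // shl v.16b, #7 then cmlt v.16b, #0, modelled on a single byte lane:
    // keep only bit 0 and sign-extend it across the byte.
    uint8_t smearBit0(uint8_t Lane) {
      int8_t Shifted = static_cast<int8_t>(Lane << 7); // 0x00 or 0x80
      return Shifted < 0 ? 0xFF : 0x00;                // all ones or all zeros
    }
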
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
index be5ec8b2a82bf..a7c9c5e3cdd33 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
@@ -10,16 +10,57 @@ define <vscale x 16 x i1> @whilewr_8(i64 %a, i64 %b) {
 ;
 ; CHECK-SVE-LABEL: whilewr_8:
 ; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-SVE-NEXT:    addvl sp, sp, #-1
+; CHECK-SVE-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK-SVE-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK-SVE-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-SVE-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-SVE-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-SVE-NEXT:    .cfi_offset w29, -16
+; CHECK-SVE-NEXT:    index z0.d, #0, #1
 ; CHECK-SVE-NEXT:    sub x8, x1, x0
+; CHECK-SVE-NEXT:    ptrue p0.d
+; CHECK-SVE-NEXT:    mov z2.d, x8
+; CHECK-SVE-NEXT:    mov z1.d, z0.d
+; CHECK-SVE-NEXT:    mov z3.d, z0.d
+; CHECK-SVE-NEXT:    cmphi p1.d, p0/z, z2.d, z0.d
+; CHECK-SVE-NEXT:    incd z0.d, all, mul #4
+; CHECK-SVE-NEXT:    incd z1.d
+; CHECK-SVE-NEXT:    incd z3.d, all, mul #2
+; CHECK-SVE-NEXT:    cmphi p5.d, p0/z, z2.d, z0.d
+; CHECK-SVE-NEXT:    mov z4.d, z1.d
+; CHECK-SVE-NEXT:    cmphi p2.d, p0/z, z2.d, z1.d
+; CHECK-SVE-NEXT:    incd z1.d, all, mul #4
+; CHECK-SVE-NEXT:    cmphi p3.d, p0/z, z2.d, z3.d
+; CHECK-SVE-NEXT:    incd z3.d, all, mul #4
+; CHECK-SVE-NEXT:    incd z4.d, all, mul #2
+; CHECK-SVE-NEXT:    cmphi p6.d, p0/z, z2.d, z1.d
+; CHECK-SVE-NEXT:    cmphi p7.d, p0/z, z2.d, z3.d
+; CHECK-SVE-NEXT:    uzp1 p1.s, p1.s, p2.s
+; CHECK-SVE-NEXT:    cmphi p4.d, p0/z, z2.d, z4.d
+; CHECK-SVE-NEXT:    incd z4.d, all, mul #4
+; CHECK-SVE-NEXT:    uzp1 p2.s, p5.s, p6.s
+; CHECK-SVE-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-SVE-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-SVE-NEXT:    cmphi p0.d, p0/z, z2.d, z4.d
+; CHECK-SVE-NEXT:    uzp1 p3.s, p3.s, p4.s
 ; CHECK-SVE-NEXT:    cmp x8, #1
-; CHECK-SVE-NEXT:    cset w9, lt
-; CHECK-SVE-NEXT:    whilelo p0.b, #0, x8
-; CHECK-SVE-NEXT:    sbfx x8, x9, #0, #1
+; CHECK-SVE-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-SVE-NEXT:    cset w8, lt
+; CHECK-SVE-NEXT:    uzp1 p1.h, p1.h, p3.h
+; CHECK-SVE-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-SVE-NEXT:    uzp1 p0.s, p7.s, p0.s
+; CHECK-SVE-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-SVE-NEXT:    uzp1 p0.h, p2.h, p0.h
+; CHECK-SVE-NEXT:    uzp1 p0.b, p1.b, p0.b
 ; CHECK-SVE-NEXT:    whilelo p1.b, xzr, x8
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
+; CHECK-SVE-NEXT:    addvl sp, sp, #1
+; CHECK-SVE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 1, i1 1)
+  %0 = call <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1.i64.i64(i64 %a, i64 %b, i64 1, i1 1)
   ret <vscale x 16 x i1> %0
 }
 
@@ -31,13 +72,28 @@ define <vscale x 8 x i1> @whilewr_16(i64 %a, i64 %b) {
 ;
 ; CHECK-SVE-LABEL: whilewr_16:
 ; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    index z0.d, #0, #1
 ; CHECK-SVE-NEXT:    sub x8, x1, x0
+; CHECK-SVE-NEXT:    ptrue p0.d
 ; CHECK-SVE-NEXT:    add x8, x8, x8, lsr #63
 ; CHECK-SVE-NEXT:    asr x8, x8, #1
+; CHECK-SVE-NEXT:    mov z1.d, z0.d
+; CHECK-SVE-NEXT:    mov z2.d, z0.d
+; CHECK-SVE-NEXT:    mov z3.d, x8
+; CHECK-SVE-NEXT:    incd z1.d
+; CHECK-SVE-NEXT:    incd z2.d, all, mul #2
+; CHECK-SVE-NEXT:    cmphi p1.d, p0/z, z3.d, z0.d
+; CHECK-SVE-NEXT:    mov z4.d, z1.d
+; CHECK-SVE-NEXT:    cmphi p2.d, p0/z, z3.d, z1.d
+; CHECK-SVE-NEXT:    cmphi p3.d, p0/z, z3.d, z2.d
+; CHECK-SVE-NEXT:    incd z4.d, all, mul #2
+; CHECK-SVE-NEXT:    uzp1 p1.s, p1.s, p2.s
+; CHECK-SVE-NEXT:    cmphi p0.d, p0/z, z3.d, z4.d
 ; CHECK-SVE-NEXT:    cmp x8, #1
-; CHECK-SVE-NEXT:    cset w9, lt
-; CHECK-SVE-NEXT:    whilelo p0.h, #0, x8
-; CHECK-SVE-NEXT:    sbfx x8, x9, #0, #1
+; CHECK-SVE-NEXT:    cset w8, lt
+; CHECK-SVE-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-SVE-NEXT:    uzp1 p0.s, p3.s, p0.s
+; CHECK-SVE-NEXT:    uzp1 p0.h, p1.h, p0.h
 ; CHECK-SVE-NEXT:    whilelo p1.h, xzr, x8
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
@@ -54,20 +110,27 @@ define <vscale x 4 x i1> @whilewr_32(i64 %a, i64 %b) {
 ;
 ; CHECK-SVE-LABEL: whilewr_32:
 ; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    index z0.d, #0, #1
 ; CHECK-SVE-NEXT:    sub x8, x1, x0
+; CHECK-SVE-NEXT:    ptrue p0.d
 ; CHECK-SVE-NEXT:    add x9, x8, #3
 ; CHECK-SVE-NEXT:    cmp x8, #0
 ; CHECK-SVE-NEXT:    csel x8, x9, x8, lt
 ; CHECK-SVE-NEXT:    asr x8, x8, #2
+; CHECK-SVE-NEXT:    mov z1.d, z0.d
+; CHECK-SVE-NEXT:    mov z2.d, x8
+; CHECK-SVE-NEXT:    incd z1.d
+; CHECK-SVE-NEXT:    cmphi p1.d, p0/z, z2.d, z0.d
+; CHECK-SVE-NEXT:    cmphi p0.d, p0/z, z2.d, z1.d
 ; CHECK-SVE-NEXT:    cmp x8, #1
-; CHECK-SVE-NEXT:    cset w9, lt
-; CHECK-SVE-NEXT:    whilelo p1.s, #0, x8
-; CHECK-SVE-NEXT:    sbfx x9, x9, #0, #1
-; CHECK-SVE-NEXT:    whilelo p0.s, xzr, x9
-; CHECK-SVE-NEXT:    mov p0.b, p1/m, p1.b
+; CHECK-SVE-NEXT:    cset w8, lt
+; CHECK-SVE-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-SVE-NEXT:    uzp1 p0.s, p1.s, p0.s
+; CHECK-SVE-NEXT:    whilelo p1.s, xzr, x8
+; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 4 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 4, i1 1)
+  %0 = call <vscale x 4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i64(i64 %a, i64 %b, i64 4, i1 1)
   ret <vscale x 4 x i1> %0
 }
 
@@ -80,19 +143,22 @@ define <vscale x 2 x i1> @whilewr_64(i64 %a, i64 %b) {
 ; CHECK-SVE-LABEL: whilewr_64:
 ; CHECK-SVE:       // %bb.0: // %entry
 ; CHECK-SVE-NEXT:    sub x8, x1, x0
+; CHECK-SVE-NEXT:    index z0.d, #0, #1
+; CHECK-SVE-NEXT:    ptrue p0.d
 ; CHECK-SVE-NEXT:    add x9, x8, #7
 ; CHECK-SVE-NEXT:    cmp x8, #0
 ; CHECK-SVE-NEXT:    csel x8, x9, x8, lt
 ; CHECK-SVE-NEXT:    asr x8, x8, #3
+; CHECK-SVE-NEXT:    mov z1.d, x8
+; CHECK-SVE-NEXT:    cmphi p0.d, p0/z, z1.d, z0.d
 ; CHECK-SVE-NEXT:    cmp x8, #1
-; CHECK-SVE-NEXT:    cset w9, lt
-; CHECK-SVE-NEXT:    whilelo p1.d, #0, x8
-; CHECK-SVE-NEXT:    sbfx x9, x9, #0, #1
-; CHECK-SVE-NEXT:    whilelo p0.d, xzr, x9
-; CHECK-SVE-NEXT:    mov p0.b, p1/m, p1.b
+; CHECK-SVE-NEXT:    cset w8, lt
+; CHECK-SVE-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-SVE-NEXT:    whilelo p1.d, xzr, x8
+; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 2 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 8, i1 1)
+  %0 = call <vscale x 2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1.i64.i64(i64 %a, i64 %b, i64 8, i1 1)
   ret <vscale x 2 x i1> %0
 }
 
@@ -104,17 +170,60 @@ define <vscale x 16 x i1> @whilerw_8(i64 %a, i64 %b) {
 ;
 ; CHECK-SVE-LABEL: whilerw_8:
 ; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-SVE-NEXT:    addvl sp, sp, #-1
+; CHECK-SVE-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK-SVE-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK-SVE-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-SVE-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-SVE-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-SVE-NEXT:    .cfi_offset w29, -16
+; CHECK-SVE-NEXT:    index z0.d, #0, #1
 ; CHECK-SVE-NEXT:    subs x8, x1, x0
+; CHECK-SVE-NEXT:    ptrue p0.d
 ; CHECK-SVE-NEXT:    cneg x8, x8, mi
+; CHECK-SVE-NEXT:    mov z1.d, x8
+; CHECK-SVE-NEXT:    mov z2.d, z0.d
+; CHECK-SVE-NEXT:    mov z4.d, z0.d
+; CHECK-SVE-NEXT:    mov z5.d, z0.d
+; CHECK-SVE-NEXT:    cmphi p2.d, p0/z, z1.d, z0.d
+; CHECK-SVE-NEXT:    incd z2.d
+; CHECK-SVE-NEXT:    incd z4.d, all, mul #2
+; CHECK-SVE-NEXT:    incd z5.d, all, mul #4
+; CHECK-SVE-NEXT:    mov z3.d, z2.d
+; CHECK-SVE-NEXT:    cmphi p1.d, p0/z, z1.d, z2.d
+; CHECK-SVE-NEXT:    incd z2.d, all, mul #4
+; CHECK-SVE-NEXT:    cmphi p3.d, p0/z, z1.d, z4.d
+; CHECK-SVE-NEXT:    incd z4.d, all, mul #4
+; CHECK-SVE-NEXT:    cmphi p4.d, p0/z, z1.d, z5.d
+; CHECK-SVE-NEXT:    incd z3.d, all, mul #2
+; CHECK-SVE-NEXT:    cmphi p5.d, p0/z, z1.d, z2.d
+; CHECK-SVE-NEXT:    cmphi p7.d, p0/z, z1.d, z4.d
+; CHECK-SVE-NEXT:    uzp1 p1.s, p2.s, p1.s
+; CHECK-SVE-NEXT:    mov z0.d, z3.d
+; CHECK-SVE-NEXT:    cmphi p6.d, p0/z, z1.d, z3.d
+; CHECK-SVE-NEXT:    uzp1 p2.s, p4.s, p5.s
+; CHECK-SVE-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-SVE-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-SVE-NEXT:    incd z0.d, all, mul #4
+; CHECK-SVE-NEXT:    uzp1 p3.s, p3.s, p6.s
+; CHECK-SVE-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-SVE-NEXT:    cmphi p0.d, p0/z, z1.d, z0.d
+; CHECK-SVE-NEXT:    uzp1 p1.h, p1.h, p3.h
 ; CHECK-SVE-NEXT:    cmp x8, #0
-; CHECK-SVE-NEXT:    cset w9, eq
-; CHECK-SVE-NEXT:    whilelo p0.b, #0, x8
-; CHECK-SVE-NEXT:    sbfx x8, x9, #0, #1
+; CHECK-SVE-NEXT:    cset w8, eq
+; CHECK-SVE-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-SVE-NEXT:    uzp1 p0.s, p7.s, p0.s
+; CHECK-SVE-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-SVE-NEXT:    uzp1 p0.h, p2.h, p0.h
+; CHECK-SVE-NEXT:    uzp1 p0.b, p1.b, p0.b
 ; CHECK-SVE-NEXT:    whilelo p1.b, xzr, x8
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
+; CHECK-SVE-NEXT:    addvl sp, sp, #1
+; CHECK-SVE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 1, i1 0)
+  %0 = call <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1.i64.i64(i64 %a, i64 %b, i64 1, i1 0)
   ret <vscale x 16 x i1> %0
 }
 
@@ -126,14 +235,29 @@ define <vscale x 8 x i1> @whilerw_16(i64 %a, i64 %b) {
 ;
 ; CHECK-SVE-LABEL: whilerw_16:
 ; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    index z0.d, #0, #1
 ; CHECK-SVE-NEXT:    subs x8, x1, x0
+; CHECK-SVE-NEXT:    ptrue p0.d
 ; CHECK-SVE-NEXT:    cneg x8, x8, mi
 ; CHECK-SVE-NEXT:    add x8, x8, x8, lsr #63
+; CHECK-SVE-NEXT:    mov z1.d, z0.d
+; CHECK-SVE-NEXT:    mov z2.d, z0.d
 ; CHECK-SVE-NEXT:    asr x8, x8, #1
+; CHECK-SVE-NEXT:    mov z3.d, x8
+; CHECK-SVE-NEXT:    incd z1.d
+; CHECK-SVE-NEXT:    incd z2.d, all, mul #2
+; CHECK-SVE-NEXT:    cmphi p1.d, p0/z, z3.d, z0.d
+; CHECK-SVE-NEXT:    mov z4.d, z1.d
+; CHECK-SVE-NEXT:    cmphi p2.d, p0/z, z3.d, z1.d
+; CHECK-SVE-NEXT:    cmphi p3.d, p0/z, z3.d, z2.d
+; CHECK-SVE-NEXT:    incd z4.d, all, mul #2
+; CHECK-SVE-NEXT:    uzp1 p1.s, p1.s, p2.s
+; CHECK-SVE-NEXT:    cmphi p0.d, p0/z, z3.d, z4.d
 ; CHECK-SVE-NEXT:    cmp x8, #0
-; CHECK-SVE-NEXT:    cset w9, eq
-; CHECK-SVE-NEXT:    whilelo p0.h, #0, x8
-; CHECK-SVE-NEXT:    sbfx x8, x9, #0, #1
+; CHECK-SVE-NEXT:    cset w8, eq
+; CHECK-SVE-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-SVE-NEXT:    uzp1 p0.s, p3.s, p0.s
+; CHECK-SVE-NEXT:    uzp1 p0.h, p1.h, p0.h
 ; CHECK-SVE-NEXT:    whilelo p1.h, xzr, x8
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
@@ -150,21 +274,28 @@ define <vscale x 4 x i1> @whilerw_32(i64 %a, i64 %b) {
 ;
 ; CHECK-SVE-LABEL: whilerw_32:
 ; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    index z0.d, #0, #1
 ; CHECK-SVE-NEXT:    subs x8, x1, x0
+; CHECK-SVE-NEXT:    ptrue p0.d
 ; CHECK-SVE-NEXT:    cneg x8, x8, mi
 ; CHECK-SVE-NEXT:    add x9, x8, #3
 ; CHECK-SVE-NEXT:    cmp x8, #0
 ; CHECK-SVE-NEXT:    csel x8, x9, x8, lt
+; CHECK-SVE-NEXT:    mov z1.d, z0.d
 ; CHECK-SVE-NEXT:    asr x8, x8, #2
+; CHECK-SVE-NEXT:    mov z2.d, x8
+; CHECK-SVE-NEXT:    incd z1.d
+; CHECK-SVE-NEXT:    cmphi p1.d, p0/z, z2.d, z0.d
+; CHECK-SVE-NEXT:    cmphi p0.d, p0/z, z2.d, z1.d
 ; CHECK-SVE-NEXT:    cmp x8, #0
-; CHECK-SVE-NEXT:    cset w9, eq
-; CHECK-SVE-NEXT:    whilelo p1.s, #0, x8
-; CHECK-SVE-NEXT:    sbfx x9, x9, #0, #1
-; CHECK-SVE-NEXT:    whilelo p0.s, xzr, x9
-; CHECK-SVE-NEXT:    mov p0.b, p1/m, p1.b
+; CHECK-SVE-NEXT:    cset w8, eq
+; CHECK-SVE-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-SVE-NEXT:    uzp1 p0.s, p1.s, p0.s
+; CHECK-SVE-NEXT:    whilelo p1.s, xzr, x8
+; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 4 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 4, i1 0)
+  %0 = call <vscale x 4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i64(i64 %a, i64 %b, i64 4, i1 0)
   ret <vscale x 4 x i1> %0
 }
 
@@ -177,19 +308,22 @@ define <vscale x 2 x i1> @whilerw_64(i64 %a, i64 %b) {
 ; CHECK-SVE-LABEL: whilerw_64:
 ; CHECK-SVE:       // %bb.0: // %entry
 ; CHECK-SVE-NEXT:    subs x8, x1, x0
+; CHECK-SVE-NEXT:    index z0.d, #0, #1
+; CHECK-SVE-NEXT:    ptrue p0.d
 ; CHECK-SVE-NEXT:    cneg x8, x8, mi
 ; CHECK-SVE-NEXT:    add x9, x8, #7
 ; CHECK-SVE-NEXT:    cmp x8, #0
 ; CHECK-SVE-NEXT:    csel x8, x9, x8, lt
 ; CHECK-SVE-NEXT:    asr x8, x8, #3
+; CHECK-SVE-NEXT:    mov z1.d, x8
+; CHECK-SVE-NEXT:    cmphi p0.d, p0/z, z1.d, z0.d
 ; CHECK-SVE-NEXT:    cmp x8, #0
-; CHECK-SVE-NEXT:    cset w9, eq
-; CHECK-SVE-NEXT:    whilelo p1.d, #0, x8
-; CHECK-SVE-NEXT:    sbfx x9, x9, #0, #1
-; CHECK-SVE-NEXT:    whilelo p0.d, xzr, x9
-; CHECK-SVE-NEXT:    mov p0.b, p1/m, p1.b
+; CHECK-SVE-NEXT:    cset w8, eq
+; CHECK-SVE-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-SVE-NEXT:    whilelo p1.d, xzr, x8
+; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 2 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 8, i1 0)
+  %0 = call <vscale x 2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1.i64.i64(i64 %a, i64 %b, i64 8, i1 0)
   ret <vscale x 2 x i1> %0
 }

>From 1f8b4beada7748136f19111e5186feb0232fc678 Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Wed, 15 Jan 2025 16:16:31 +0000
Subject: [PATCH 03/29] Fix ISD node name string and remove shouldExpand
 function

---
 .../SelectionDAG/SelectionDAGDumper.cpp       |  2 +-
 .../Target/AArch64/AArch64ISelLowering.cpp    | 19 -------------------
 llvm/lib/Target/AArch64/AArch64ISelLowering.h |  3 ---
 3 files changed, 1 insertion(+), 23 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index 41aa8c9ebfa8f..2693e86c36ae8 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -588,7 +588,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
   case ISD::PARTIAL_REDUCE_SUMLA:
     return "partial_reduce_sumla";
   case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
-    return "alias_mask";
+    return "alias_lane_mask";
 
     // Vector Predication
 #define BEGIN_REGISTER_VP_SDNODE(SDID, LEGALARG, NAME, ...)                    \
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 1766c7d58e1de..37f035beb66f5 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2169,25 +2169,6 @@ bool AArch64TargetLowering::shouldExpandGetActiveLaneMask(EVT ResVT,
   return false;
 }
 
-bool AArch64TargetLowering::shouldExpandGetAliasLaneMask(
-    EVT VT, EVT PtrVT, unsigned EltSize) const {
-  if (!Subtarget->hasSVE2())
-    return true;
-
-  if (PtrVT != MVT::i64)
-    return true;
-
-  if (VT == MVT::v2i1 || VT == MVT::nxv2i1)
-    return EltSize != 8;
-  if (VT == MVT::v4i1 || VT == MVT::nxv4i1)
-    return EltSize != 4;
-  if (VT == MVT::v8i1 || VT == MVT::nxv8i1)
-    return EltSize != 2;
-  if (VT == MVT::v16i1 || VT == MVT::nxv16i1)
-    return EltSize != 1;
-  return true;
-}
-
 bool AArch64TargetLowering::shouldExpandPartialReductionIntrinsic(
     const IntrinsicInst *I) const {
   assert(I->getIntrinsicID() ==
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 3c429cc611c3c..c8ee5721c3627 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -511,9 +511,6 @@ class AArch64TargetLowering : public TargetLowering {
 
   bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const override;
 
-  bool shouldExpandGetAliasLaneMask(EVT VT, EVT PtrVT,
-                                    unsigned EltSize) const override;
-
   bool
   shouldExpandPartialReductionIntrinsic(const IntrinsicInst *I) const override;
 

>From 5298aa025b880cfd95fff248694bebaec8a2330c Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Thu, 16 Jan 2025 10:24:59 +0000
Subject: [PATCH 04/29] Format

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 37f035beb66f5..b0ddbed2fb580 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5221,24 +5221,32 @@ SDValue AArch64TargetLowering::LowerALIAS_LANE_MASK(SDValue Op,
   // Make sure that the promoted mask size and element size match
   switch (EltSize) {
   case 1:
+    IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_b
+                                   : Intrinsic::aarch64_sve_whilerw_b;
     assert((SimpleVT == MVT::v16i8 || SimpleVT == MVT::nxv16i1) &&
            "Unexpected mask or element size");
     IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_b
                                    : Intrinsic::aarch64_sve_whilerw_b;
     break;
   case 2:
+    IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_h
+                                   : Intrinsic::aarch64_sve_whilerw_h;
     assert((SimpleVT == MVT::v8i8 || SimpleVT == MVT::nxv8i1) &&
            "Unexpected mask or element size");
     IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_h
                                    : Intrinsic::aarch64_sve_whilerw_h;
     break;
   case 4:
+    IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_s
+                                   : Intrinsic::aarch64_sve_whilerw_s;
     assert((SimpleVT == MVT::v4i16 || SimpleVT == MVT::nxv4i1) &&
            "Unexpected mask or element size");
     IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_s
                                    : Intrinsic::aarch64_sve_whilerw_s;
     break;
   case 8:
+    IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_d
+                                   : Intrinsic::aarch64_sve_whilerw_d;
     assert((SimpleVT == MVT::v2i32 || SimpleVT == MVT::nxv2i1) &&
            "Unexpected mask or element size");
     IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_d

>From 13a64e536f0170ef6f3c68a3bfeba90c92f6acbd Mon Sep 17 00:00:00 2001
From: Sam Tebbs <samuel.tebbs at arm.com>
Date: Mon, 27 Jan 2025 14:17:16 +0000
Subject: [PATCH 05/29] Move promote case

---
 llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 68e8db38fa5ec..ac5ec3787824a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -56,9 +56,6 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
     N->dump(&DAG); dbgs() << "\n";
 #endif
     report_fatal_error("Do not know how to promote this operator!");
-  case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
-    Res = PromoteIntRes_EXPERIMENTAL_ALIAS_LANE_MASK(N);
-    break;
   case ISD::MERGE_VALUES:Res = PromoteIntRes_MERGE_VALUES(N, ResNo); break;
   case ISD::AssertSext:  Res = PromoteIntRes_AssertSext(N); break;
   case ISD::AssertZext:  Res = PromoteIntRes_AssertZext(N); break;
@@ -327,6 +324,10 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
     Res = PromoteIntRes_VP_REDUCE(N);
     break;
 
+  case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
+    Res = PromoteIntRes_EXPERIMENTAL_ALIAS_LANE_MASK(N);
+    break;
+
   case ISD::FREEZE:
     Res = PromoteIntRes_FREEZE(N);
     break;

>From 48af641992fd8b711e324d79d0628eea44b1ee31 Mon Sep 17 00:00:00 2001
From: Sam Tebbs <samuel.tebbs at arm.com>
Date: Mon, 27 Jan 2025 14:17:30 +0000
Subject: [PATCH 06/29] Fix tablegen comment

---
 llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index ad7c234daa951..e8c69fa3cadf9 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -4130,7 +4130,7 @@ let Predicates = [HasSVE2_or_SME] in {
   // SVE2 pointer conflict compare
   defm WHILEWR_PXX : sve2_int_while_rr<0b0, "whilewr", AArch64whilewr>;
   defm WHILERW_PXX : sve2_int_while_rr<0b1, "whilerw", AArch64whilerw>;
-} // End HasSVE2orSME
+} // End HasSVE2_or_SME
 
 let Predicates = [HasSVEAES, HasNonStreamingSVE_or_SSVE_AES] in {
   // SVE2 crypto destructive binary operations

>From e398a01bba838f8a4c37522a3e1303e45aa1da57 Mon Sep 17 00:00:00 2001
From: Sam Tebbs <samuel.tebbs at arm.com>
Date: Mon, 27 Jan 2025 14:20:31 +0000
Subject: [PATCH 07/29] Remove DAGTypeLegalizer::

---
 llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index ac5ec3787824a..12983f53f988f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -2117,7 +2117,7 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
     Res = PromoteIntOp_PARTIAL_REDUCE_MLA(N);
     break;
   case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
-    Res = DAGTypeLegalizer::PromoteIntOp_EXPERIMENTAL_ALIAS_LANE_MASK(N, OpNo);
+    Res = PromoteIntOp_EXPERIMENTAL_ALIAS_LANE_MASK(N, OpNo);
     break;
   }
 

>From 8e27911a40ee34b5f3fcf9586f385922c3c51cf7 Mon Sep 17 00:00:00 2001
From: Sam Tebbs <samuel.tebbs at arm.com>
Date: Mon, 27 Jan 2025 14:20:39 +0000
Subject: [PATCH 08/29] Use getConstantOperandVal

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index b0ddbed2fb580..4e7348251e749 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5214,8 +5214,8 @@ SDValue AArch64TargetLowering::LowerALIAS_LANE_MASK(SDValue Op,
                                                     SelectionDAG &DAG) const {
   SDLoc DL(Op);
   unsigned IntrinsicID = 0;
-  uint64_t EltSize = Op.getOperand(2)->getAsZExtVal();
-  bool IsWriteAfterRead = Op.getOperand(3)->getAsZExtVal() == 1;
+  uint64_t EltSize = Op.getConstantOperandVal(2);
+  bool IsWriteAfterRead = Op.getConstantOperandVal(3) == 1;
   EVT VT = Op.getValueType();
   MVT SimpleVT = VT.getSimpleVT();
   // Make sure that the promoted mask size and element size match

>From 8e2a8a7befcae55f15c688e13a3b223c9527d056 Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Wed, 29 Jan 2025 11:40:32 +0000
Subject: [PATCH 09/29] Remove isPredicateCCSettingOp case

---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 51 +++++++++----------
 1 file changed, 25 insertions(+), 26 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 4e7348251e749..9564f1972514a 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -6523,29 +6523,29 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
   }
   case Intrinsic::experimental_get_alias_lane_mask: {
     unsigned IntrinsicID = 0;
-      uint64_t EltSize = Op.getOperand(3)->getAsZExtVal();
-      bool IsWriteAfterRead = Op.getOperand(4)->getAsZExtVal() == 1;
-      switch (EltSize) {
-      case 1:
-        IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_b
-                                       : Intrinsic::aarch64_sve_whilerw_b;
-        break;
-      case 2:
-        IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_h
-                                       : Intrinsic::aarch64_sve_whilerw_h;
-        break;
-      case 4:
-        IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_s
-                                       : Intrinsic::aarch64_sve_whilerw_s;
-        break;
-      case 8:
-        IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_d
-                                       : Intrinsic::aarch64_sve_whilerw_d;
-        break;
-      default:
-        llvm_unreachable("Unexpected element size for get.alias.lane.mask");
-        break;
-      }
+    uint64_t EltSize = Op.getOperand(3)->getAsZExtVal();
+    bool IsWriteAfterRead = Op.getOperand(4)->getAsZExtVal() == 1;
+    switch (EltSize) {
+    case 1:
+      IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_b
+                                     : Intrinsic::aarch64_sve_whilerw_b;
+      break;
+    case 2:
+      IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_h
+                                     : Intrinsic::aarch64_sve_whilerw_h;
+      break;
+    case 4:
+      IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_s
+                                     : Intrinsic::aarch64_sve_whilerw_s;
+      break;
+    case 8:
+      IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_d
+                                     : Intrinsic::aarch64_sve_whilerw_d;
+      break;
+    default:
+      llvm_unreachable("Unexpected element size for get.alias.lane.mask");
+      break;
+    }
     SDValue ID = DAG.getTargetConstant(IntrinsicID, dl, MVT::i64);
 
     EVT VT = Op.getValueType();
@@ -20051,9 +20051,8 @@ static SDValue getPTest(SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op,
 
 static bool isPredicateCCSettingOp(SDValue N) {
   if ((N.getOpcode() == ISD::SETCC ||
-      // get_active_lane_mask is lowered to a whilelo instruction.
-       N.getOpcode() == ISD::GET_ACTIVE_LANE_MASK ||
-       N.getOpcode() == ISD::EXPERIMENTAL_ALIAS_LANE_MASK) ||
+       // get_active_lane_mask is lowered to a whilelo instruction.
+       N.getOpcode() == ISD::GET_ACTIVE_LANE_MASK) ||
       (N.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        (N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilege ||
         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilegt ||

>From a3f0ca2fe83c9d3db5c7f2a8dbb97847f40d68a6 Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Thu, 30 Jan 2025 14:40:12 +0000
Subject: [PATCH 10/29] Remove overloads for pointer and element size
 parameters

---
 llvm/docs/LangRef.rst                         | 12 +++----
 llvm/include/llvm/IR/Intrinsics.td            |  2 +-
 .../SelectionDAG/LegalizeVectorOps.cpp        | 11 ++++---
 llvm/test/CodeGen/AArch64/alias_mask.ll       | 32 +++++++++----------
 .../CodeGen/AArch64/alias_mask_scalable.ll    | 32 +++++++++----------
 5 files changed, 46 insertions(+), 43 deletions(-)

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index f41e50d36a984..26fe98067e7b4 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -24056,10 +24056,10 @@ This is an overloaded intrinsic.
 
 ::
 
-      declare <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i64(i64 %ptrA, i64 %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
-      declare <8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %ptrA, i64 %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
-      declare <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1.i64.i32(i64 %ptrA, i64 %ptrB, i32 immarg %elementSize, i1 immarg %writeAfterRead)
-      declare <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.nxv16i1.i64.i32(i64 %ptrA, i64 %ptrB, i32 immarg %elementSize, i1 immarg %writeAfterRead)
+      declare <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+      declare <8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+      declare <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+      declare <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
 
 
 Overview:
@@ -24099,7 +24099,7 @@ equivalent to:
       %m[i] = (icmp ult i, %diff) || (%diff == 0)
 
 where ``%m`` is a vector (mask) of active/inactive lanes with its elements
-indexed by ``i``,  and ``%ptrA``, ``%ptrB`` are the two i64 arguments to
+indexed by ``i``,  and ``%ptrA``, ``%ptrB`` are the two ptr arguments to
 ``llvm.experimental.get.alias.lane.mask.*`` and ``%elementSize`` is the first
 immediate argument. The ``%writeAfterRead`` argument is expected to be true if
 ``%ptrB`` is stored to after ``%ptrA`` is read from.
@@ -24125,7 +24125,7 @@ Examples:
 
 .. code-block:: llvm
 
-      %alias.lane.mask = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i32(i64 %ptrA, i64 %ptrB, i32 4, i1 1)
+      %alias.lane.mask = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4, i1 1)
       %vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptrA, i32 4, <4 x i1> %alias.lane.mask, <4 x i32> poison)
       [...]
       call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, <4 x i32>* %ptrB, i32 4, <4 x i1> %alias.lane.mask)
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index e3fb9cf1b7a49..804327acd483a 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -2420,7 +2420,7 @@ let IntrProperties = [IntrNoMem, ImmArg<ArgIndex<1>>] in {
 
 def int_experimental_get_alias_lane_mask:
   DefaultAttrsIntrinsic<[llvm_anyvector_ty],
-            [llvm_anyint_ty, LLVMMatchType<1>, llvm_anyint_ty, llvm_i1_ty],
+            [llvm_anyptr_ty, LLVMMatchType<1>, llvm_i64_ty, llvm_i1_ty],
             [IntrNoMem, IntrNoSync, IntrWillReturn, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
 
 def int_get_active_lane_mask:
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index c9e5c7f8bbdb0..3df412db95e26 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -1807,8 +1807,7 @@ SDValue VectorLegalizer::ExpandEXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N) {
   SDValue SinkValue = N->getOperand(1);
   SDValue EltSize = N->getOperand(2);
 
-  bool IsWriteAfterRead =
-      cast<ConstantSDNode>(N->getOperand(3))->getZExtValue() != 0;
+  bool IsWriteAfterRead = N->getConstantOperandVal(3) != 0;
   auto VT = N->getValueType(0);
   auto PtrVT = SourceValue->getValueType(0);
 
@@ -1817,14 +1816,15 @@ SDValue VectorLegalizer::ExpandEXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N) {
     Diff = DAG.getNode(ISD::ABS, DL, PtrVT, Diff);
 
   Diff = DAG.getNode(ISD::SDIV, DL, PtrVT, Diff, EltSize);
-  SDValue Zero = DAG.getTargetConstant(0, DL, PtrVT);
 
   // If the difference is positive then some elements may alias
   auto CmpVT = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                                       Diff.getValueType());
+  SDValue Zero = DAG.getTargetConstant(0, DL, PtrVT);
   SDValue Cmp = DAG.getSetCC(DL, CmpVT, Diff, Zero,
                              IsWriteAfterRead ? ISD::SETLE : ISD::SETEQ);
 
+  // Create the lane mask
   EVT SplatTY =
       EVT::getVectorVT(*DAG.getContext(), PtrVT, VT.getVectorElementCount());
   SDValue DiffSplat = DAG.getSplat(SplatTY, DL, Diff);
@@ -1832,7 +1832,10 @@ SDValue VectorLegalizer::ExpandEXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N) {
   SDValue DiffMask =
       DAG.getSetCC(DL, VT, VectorStep, DiffSplat, ISD::CondCode::SETULT);
 
-  // Splat the compare result then OR it with a lane mask
+  // Splat the compare result then OR it with the lane mask
+  auto VTElementTy = VT.getVectorElementType();
+  if (CmpVT.getScalarSizeInBits() < VTElementTy.getScalarSizeInBits())
+    Cmp = DAG.getNode(ISD::ZERO_EXTEND, DL, VTElementTy, Cmp);
   SDValue Splat = DAG.getSplat(VT, DL, Cmp);
   return DAG.getNode(ISD::OR, DL, VT, DiffMask, Splat);
 }
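As a worked example of the expansion above (values illustrative): two pointers 8 bytes apart with 4-byte elements and writeAfterRead set give an element difference of 2, so only the first two lanes of a <4 x i1> mask are enabled:

    #include <cassert>
    #include <cstdint>

    int main() {
      int64_t PtrA = 0x1000, PtrB = 0x1008, EltSize = 4;
      int64_t Diff = (PtrB - PtrA) / EltSize; // 2 elements apart
      bool Mask[4];
      for (uint64_t L = 0; L < 4; ++L)
        // writeAfterRead: all lanes pass only when Diff <= 0; otherwise a
        // lane passes when its index is below the element difference.
        Mask[L] = (Diff <= 0) || (L < static_cast<uint64_t>(Diff));
      assert(Mask[0] && Mask[1] && !Mask[2] && !Mask[3]);
      return 0;
    }
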
diff --git a/llvm/test/CodeGen/AArch64/alias_mask.ll b/llvm/test/CodeGen/AArch64/alias_mask.ll
index 9b344f03da077..f88baeece0356 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=aarch64 -mattr=+sve2 %s -o - | FileCheck %s --check-prefix=CHECK-SVE
 ; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s --check-prefix=CHECK-NOSVE
 
-define <16 x i1> @whilewr_8(i64 %a, i64 %b) {
+define <16 x i1> @whilewr_8(ptr %a, ptr %b) {
 ; CHECK-SVE-LABEL: whilewr_8:
 ; CHECK-SVE:       // %bb.0: // %entry
 ; CHECK-SVE-NEXT:    whilewr p0.b, x0, x1
@@ -53,11 +53,11 @@ define <16 x i1> @whilewr_8(i64 %a, i64 %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1.i64.i64(i64 %a, i64 %b, i64 1, i1 1)
+  %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 1)
   ret <16 x i1> %0
 }
 
-define <8 x i1> @whilewr_16(i64 %a, i64 %b) {
+define <8 x i1> @whilewr_16(ptr %a, ptr %b) {
 ; CHECK-SVE-LABEL: whilewr_16:
 ; CHECK-SVE:       // %bb.0: // %entry
 ; CHECK-SVE-NEXT:    whilewr p0.b, x0, x1
@@ -95,11 +95,11 @@ define <8 x i1> @whilewr_16(i64 %a, i64 %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 2, i1 1)
+  %0 = call <8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 1)
   ret <8 x i1> %0
 }
 
-define <4 x i1> @whilewr_32(i64 %a, i64 %b) {
+define <4 x i1> @whilewr_32(ptr %a, ptr %b) {
 ; CHECK-SVE-LABEL: whilewr_32:
 ; CHECK-SVE:       // %bb.0: // %entry
 ; CHECK-SVE-NEXT:    whilewr p0.h, x0, x1
@@ -129,11 +129,11 @@ define <4 x i1> @whilewr_32(i64 %a, i64 %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i64(i64 %a, i64 %b, i64 4, i1 1)
+  %0 = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 1)
   ret <4 x i1> %0
 }
 
-define <2 x i1> @whilewr_64(i64 %a, i64 %b) {
+define <2 x i1> @whilewr_64(ptr %a, ptr %b) {
 ; CHECK-SVE-LABEL: whilewr_64:
 ; CHECK-SVE:       // %bb.0: // %entry
 ; CHECK-SVE-NEXT:    whilewr p0.s, x0, x1
@@ -159,11 +159,11 @@ define <2 x i1> @whilewr_64(i64 %a, i64 %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1.i64.i64(i64 %a, i64 %b, i64 8, i1 1)
+  %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 1)
   ret <2 x i1> %0
 }
 
-define <16 x i1> @whilerw_8(i64 %a, i64 %b) {
+define <16 x i1> @whilerw_8(ptr %a, ptr %b) {
 ; CHECK-SVE-LABEL: whilerw_8:
 ; CHECK-SVE:       // %bb.0: // %entry
 ; CHECK-SVE-NEXT:    whilerw p0.b, x0, x1
@@ -215,11 +215,11 @@ define <16 x i1> @whilerw_8(i64 %a, i64 %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1.i64.i64(i64 %a, i64 %b, i64 1, i1 0)
+  %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 0)
   ret <16 x i1> %0
 }
 
-define <8 x i1> @whilerw_16(i64 %a, i64 %b) {
+define <8 x i1> @whilerw_16(ptr %a, ptr %b) {
 ; CHECK-SVE-LABEL: whilerw_16:
 ; CHECK-SVE:       // %bb.0: // %entry
 ; CHECK-SVE-NEXT:    whilerw p0.b, x0, x1
@@ -258,11 +258,11 @@ define <8 x i1> @whilerw_16(i64 %a, i64 %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 2, i1 0)
+  %0 = call <8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 0)
   ret <8 x i1> %0
 }
 
-define <4 x i1> @whilerw_32(i64 %a, i64 %b) {
+define <4 x i1> @whilerw_32(ptr %a, ptr %b) {
 ; CHECK-SVE-LABEL: whilerw_32:
 ; CHECK-SVE:       // %bb.0: // %entry
 ; CHECK-SVE-NEXT:    whilerw p0.h, x0, x1
@@ -293,11 +293,11 @@ define <4 x i1> @whilerw_32(i64 %a, i64 %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i64(i64 %a, i64 %b, i64 4, i1 0)
+  %0 = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 0)
   ret <4 x i1> %0
 }
 
-define <2 x i1> @whilerw_64(i64 %a, i64 %b) {
+define <2 x i1> @whilerw_64(ptr %a, ptr %b) {
 ; CHECK-SVE-LABEL: whilerw_64:
 ; CHECK-SVE:       // %bb.0: // %entry
 ; CHECK-SVE-NEXT:    whilerw p0.s, x0, x1
@@ -324,6 +324,6 @@ define <2 x i1> @whilerw_64(i64 %a, i64 %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1.i64.i64(i64 %a, i64 %b, i64 8, i1 0)
+  %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 0)
   ret <2 x i1> %0
 }
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
index a7c9c5e3cdd33..3d0f293b4687a 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=aarch64 -mattr=+sve2 %s -o - | FileCheck %s --check-prefix=CHECK-SVE2
 ; RUN: llc -mtriple=aarch64 -mattr=+sve %s -o - | FileCheck %s --check-prefix=CHECK-SVE
 
-define <vscale x 16 x i1> @whilewr_8(i64 %a, i64 %b) {
+define <vscale x 16 x i1> @whilewr_8(ptr %a, ptr %b) {
 ; CHECK-SVE2-LABEL: whilewr_8:
 ; CHECK-SVE2:       // %bb.0: // %entry
 ; CHECK-SVE2-NEXT:    whilewr p0.b, x0, x1
@@ -60,11 +60,11 @@ define <vscale x 16 x i1> @whilewr_8(i64 %a, i64 %b) {
 ; CHECK-SVE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1.i64.i64(i64 %a, i64 %b, i64 1, i1 1)
+  %0 = call <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 1)
   ret <vscale x 16 x i1> %0
 }
 
-define <vscale x 8 x i1> @whilewr_16(i64 %a, i64 %b) {
+define <vscale x 8 x i1> @whilewr_16(ptr %a, ptr %b) {
 ; CHECK-SVE2-LABEL: whilewr_16:
 ; CHECK-SVE2:       // %bb.0: // %entry
 ; CHECK-SVE2-NEXT:    whilewr p0.h, x0, x1
@@ -98,11 +98,11 @@ define <vscale x 8 x i1> @whilewr_16(i64 %a, i64 %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 2, i1 1)
+  %0 = call <vscale x 8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 1)
   ret <vscale x 8 x i1> %0
 }
 
-define <vscale x 4 x i1> @whilewr_32(i64 %a, i64 %b) {
+define <vscale x 4 x i1> @whilewr_32(ptr %a, ptr %b) {
 ; CHECK-SVE2-LABEL: whilewr_32:
 ; CHECK-SVE2:       // %bb.0: // %entry
 ; CHECK-SVE2-NEXT:    whilewr p0.s, x0, x1
@@ -130,11 +130,11 @@ define <vscale x 4 x i1> @whilewr_32(i64 %a, i64 %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i64(i64 %a, i64 %b, i64 4, i1 1)
+  %0 = call <vscale x 4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 1)
   ret <vscale x 4 x i1> %0
 }
 
-define <vscale x 2 x i1> @whilewr_64(i64 %a, i64 %b) {
+define <vscale x 2 x i1> @whilewr_64(ptr %a, ptr %b) {
 ; CHECK-SVE2-LABEL: whilewr_64:
 ; CHECK-SVE2:       // %bb.0: // %entry
 ; CHECK-SVE2-NEXT:    whilewr p0.d, x0, x1
@@ -158,11 +158,11 @@ define <vscale x 2 x i1> @whilewr_64(i64 %a, i64 %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1.i64.i64(i64 %a, i64 %b, i64 8, i1 1)
+  %0 = call <vscale x 2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 1)
   ret <vscale x 2 x i1> %0
 }
 
-define <vscale x 16 x i1> @whilerw_8(i64 %a, i64 %b) {
+define <vscale x 16 x i1> @whilerw_8(ptr %a, ptr %b) {
 ; CHECK-SVE2-LABEL: whilerw_8:
 ; CHECK-SVE2:       // %bb.0: // %entry
 ; CHECK-SVE2-NEXT:    whilerw p0.b, x0, x1
@@ -223,11 +223,11 @@ define <vscale x 16 x i1> @whilerw_8(i64 %a, i64 %b) {
 ; CHECK-SVE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1.i64.i64(i64 %a, i64 %b, i64 1, i1 0)
+  %0 = call <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 0)
   ret <vscale x 16 x i1> %0
 }
 
-define <vscale x 8 x i1> @whilerw_16(i64 %a, i64 %b) {
+define <vscale x 8 x i1> @whilerw_16(ptr %a, ptr %b) {
 ; CHECK-SVE2-LABEL: whilerw_16:
 ; CHECK-SVE2:       // %bb.0: // %entry
 ; CHECK-SVE2-NEXT:    whilerw p0.h, x0, x1
@@ -262,11 +262,11 @@ define <vscale x 8 x i1> @whilerw_16(i64 %a, i64 %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 2, i1 0)
+  %0 = call <vscale x 8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 0)
   ret <vscale x 8 x i1> %0
 }
 
-define <vscale x 4 x i1> @whilerw_32(i64 %a, i64 %b) {
+define <vscale x 4 x i1> @whilerw_32(ptr %a, ptr %b) {
 ; CHECK-SVE2-LABEL: whilerw_32:
 ; CHECK-SVE2:       // %bb.0: // %entry
 ; CHECK-SVE2-NEXT:    whilerw p0.s, x0, x1
@@ -295,11 +295,11 @@ define <vscale x 4 x i1> @whilerw_32(i64 %a, i64 %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i64(i64 %a, i64 %b, i64 4, i1 0)
+  %0 = call <vscale x 4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 0)
   ret <vscale x 4 x i1> %0
 }
 
-define <vscale x 2 x i1> @whilerw_64(i64 %a, i64 %b) {
+define <vscale x 2 x i1> @whilerw_64(ptr %a, ptr %b) {
 ; CHECK-SVE2-LABEL: whilerw_64:
 ; CHECK-SVE2:       // %bb.0: // %entry
 ; CHECK-SVE2-NEXT:    whilerw p0.d, x0, x1
@@ -324,6 +324,6 @@ define <vscale x 2 x i1> @whilerw_64(i64 %a, i64 %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1.i64.i64(i64 %a, i64 %b, i64 8, i1 0)
+  %0 = call <vscale x 2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 0)
   ret <vscale x 2 x i1> %0
 }

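As a rough standalone model of what these whilewr tests ultimately enable (byte elements, write-after-read case; all names here are hypothetical, this is not code from the patch): a loop consuming such a mask handles only the leading hazard-free lanes each step and advances by however many were active, so overlapping lanes are never loaded or stored in the same step.

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    // Hypothetical consumer of a write-after-read alias mask for byte
    // elements: each step handles only the leading hazard-free lanes
    // (the whilewr-style prefix) and advances by the active lane count.
    void copyAddWithAliasMask(int8_t *a, int8_t *b, size_t n, unsigned VF) {
      int64_t diff = b - a; // element size is 1 byte here
      size_t base = 0;
      while (base < n) {
        unsigned active =
            diff <= 0 ? VF : (unsigned)std::min<int64_t>(diff, (int64_t)VF);
        for (unsigned i = 0; i < active && base + i < n; ++i)
          b[base + i] = (int8_t)(a[base + i] + 1);
        base += active;
      }
    }

Because the mask is a prefix mask, `active` is always at least 1 when the pointers overlap in the hazardous direction, so the loop degrades gracefully towards scalar execution rather than deadlocking.
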
>From 1d4ed61a7c3c5afca609532db1543c15a89f5a48 Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Thu, 30 Jan 2025 15:16:57 +0000
Subject: [PATCH 11/29] Clarify elementSize and the writeAfterRead = 0 case

---
 llvm/docs/LangRef.rst | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 26fe98067e7b4..36a3f5f35608e 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -24078,6 +24078,7 @@ The final two are immediates and the result is a vector with the i1 element type
 Semantics:
 """"""""""
 
+``%elementSize`` is the size of the accessed elements in bytes.
 The intrinsic will return poison if ``%ptrA`` and ``%ptrB`` are within
 VF * ``%elementSize`` of each other and ``%ptrA`` + VF * ``%elementSize`` wraps.
 In other cases when ``%writeAfterRead`` is true, the
@@ -24102,7 +24103,8 @@ where ``%m`` is a vector (mask) of active/inactive lanes with its elements
 indexed by ``i``,  and ``%ptrA``, ``%ptrB`` are the two ptr arguments to
 ``llvm.experimental.get.alias.lane.mask.*`` and ``%elementSize`` is the first
 immediate argument. The ``%writeAfterRead`` argument is expected to be true if
-``%ptrB`` is stored to after ``%ptrA`` is read from.
+``%ptrB`` is stored to after ``%ptrA`` is read from, otherwise it is false for
+a read after write.
 The above is equivalent to:
 
 ::

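For reference, here is a hedged scalar model of the two semantic cases this patch spells out, a sketch assuming the LangRef formulas; `laneMask` and its parameters are illustrative names, not an LLVM API:

    #include <cstdint>
    #include <cstdlib>
    #include <vector>

    // Scalar model of one mask computation. writeAfterRead == true uses
    // the signed pointer difference; false (read after write) uses the
    // absolute difference and only treats diff == 0 as fully safe. The
    // LangRef's poison case (pointer arithmetic that wraps) is ignored.
    std::vector<bool> laneMask(int64_t ptrA, int64_t ptrB,
                               int64_t elementSize, bool writeAfterRead,
                               unsigned VF) {
      int64_t diff = writeAfterRead
                         ? (ptrB - ptrA) / elementSize
                         : std::llabs(ptrB - ptrA) / elementSize;
      std::vector<bool> m(VF);
      for (unsigned i = 0; i < VF; ++i)
        m[i] = writeAfterRead ? (diff <= 0 || (int64_t)i < diff)
                              : (diff == 0 || (int64_t)i < diff);
      return m;
    }
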
>From 08afd28599d4adc901cb63055e36f0eedf4192fa Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Thu, 30 Jan 2025 15:23:46 +0000
Subject: [PATCH 12/29] Document the i = 0 to VF-1 index range

---
 llvm/docs/LangRef.rst | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 36a3f5f35608e..3ea338158859f 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -24100,11 +24100,11 @@ equivalent to:
       %m[i] = (icmp ult i, %diff) || (%diff == 0)
 
 where ``%m`` is a vector (mask) of active/inactive lanes with its elements
-indexed by ``i``,  and ``%ptrA``, ``%ptrB`` are the two ptr arguments to
-``llvm.experimental.get.alias.lane.mask.*`` and ``%elementSize`` is the first
-immediate argument. The ``%writeAfterRead`` argument is expected to be true if
-``%ptrB`` is stored to after ``%ptrA`` is read from, otherwise it is false for
-a read after write.
+indexed by ``i`` (i = 0 to VF - 1),  and ``%ptrA``, ``%ptrB`` are the two ptr
+arguments to ``llvm.experimental.get.alias.lane.mask.*`` and ``%elementSize``
+is the first immediate argument. The ``%writeAfterRead`` argument is expected
+to be true if ``%ptrB`` is stored to after ``%ptrA`` is read from, otherwise
+it is false for a read after write.
 The above is equivalent to:
 
 ::

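A quick worked instance of the i = 0 to VF - 1 indexing: with %ptrA = 0x100, %ptrB = 0x108 and %elementSize = 4, %diff = (0x108 - 0x100) / 4 = 2, so for VF = 4 lanes 0 and 1 are active and lanes 2 and 3 are masked off. Using the hypothetical `laneMask` sketch above:

    #include <cassert>

    int main() {
      // ptrB - ptrA = 8 bytes, elementSize = 4  =>  diff = 2.
      auto m = laneMask(/*ptrA=*/0x100, /*ptrB=*/0x108, /*elementSize=*/4,
                        /*writeAfterRead=*/true, /*VF=*/4);
      assert(m[0] && m[1] && !m[2] && !m[3]);
      return 0;
    }
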
>From dc22872db713389d39f84257056225c203ae5a0f Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Thu, 30 Jan 2025 16:08:47 +0000
Subject: [PATCH 13/29] Rename to get.nonalias.lane.mask

---
 llvm/docs/LangRef.rst                         | 28 +++++++++----------
 llvm/include/llvm/CodeGen/ISDOpcodes.h        |  2 +-
 llvm/include/llvm/IR/Intrinsics.td            |  4 +--
 .../SelectionDAG/LegalizeIntegerTypes.cpp     | 16 +++++------
 llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h |  5 ++--
 .../SelectionDAG/LegalizeVectorOps.cpp        | 10 +++----
 .../SelectionDAG/SelectionDAGBuilder.cpp      |  6 ++--
 .../SelectionDAG/SelectionDAGDumper.cpp       |  2 +-
 llvm/lib/CodeGen/TargetLoweringBase.cpp       |  4 +--
 .../Target/AArch64/AArch64ISelLowering.cpp    | 19 +++++++------
 llvm/lib/Target/AArch64/AArch64ISelLowering.h |  2 +-
 llvm/test/CodeGen/AArch64/alias_mask.ll       | 16 +++++------
 .../CodeGen/AArch64/alias_mask_scalable.ll    | 16 +++++------
 13 files changed, 66 insertions(+), 64 deletions(-)

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 3ea338158859f..00cfbecfc8eb3 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -24045,9 +24045,9 @@ Examples:
       %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 %elem0, i64 429)
       %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %3, i32 4, <4 x i1> %active.lane.mask, <4 x i32> poison)
 
-.. _int_experimental_get_alias_lane_mask:
+.. _int_experimental_get_nonalias_lane_mask:
 
-'``llvm.experimental.get.alias.lane.mask.*``' Intrinsics
+'``llvm.experimental.get.nonalias.lane.mask.*``' Intrinsics
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Syntax:
@@ -24056,16 +24056,16 @@ This is an overloaded intrinsic.
 
 ::
 
-      declare <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
-      declare <8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
-      declare <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
-      declare <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+      declare <4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+      declare <8 x i1> @llvm.experimental.get.nonalias.lane.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+      declare <16 x i1> @llvm.experimental.get.nonalias.lane.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+      declare <vscale x 16 x i1> @llvm.experimental.get.nonalias.lane.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
 
 
 Overview:
 """""""""
 
-Create a mask representing lanes that do or not overlap between two pointers
+Create a mask enabling lanes that do not overlap between two pointers
 across one vector loop iteration.
 
 
@@ -24082,7 +24082,7 @@ Semantics:
 The intrinsic will return poison if ``%ptrA`` and ``%ptrB`` are within
 VF * ``%elementSize`` of each other and ``%ptrA`` + VF * ``%elementSize`` wraps.
 In other cases when ``%writeAfterRead`` is true, the
-'``llvm.experimental.get.alias.lane.mask.*``' intrinsics are semantically
+'``llvm.experimental.get.nonalias.lane.mask.*``' intrinsics are semantically
 equivalent to:
 
 ::
@@ -24091,7 +24091,7 @@ equivalent to:
       %m[i] = (icmp ult i, %diff) || (%diff <= 0)
 
 When the return value is not poison and ``%writeAfterRead`` is false, the
-'``llvm.experimental.get.alias.lane.mask.*``' intrinsics are semantically
+'``llvm.experimental.get.nonalias.lane.mask.*``' intrinsics are semantically
 equivalent to:
 
 ::
@@ -24101,7 +24101,7 @@ equivalent to:
 
 where ``%m`` is a vector (mask) of active/inactive lanes with its elements
 indexed by ``i`` (i = 0 to VF - 1),  and ``%ptrA``, ``%ptrB`` are the two ptr
-arguments to ``llvm.experimental.get.alias.lane.mask.*`` and ``%elementSize``
+arguments to ``llvm.experimental.get.nonalias.lane.mask.*`` and ``%elementSize``
 is the first immediate argument. The ``%writeAfterRead`` argument is expected
 to be true if ``%ptrB`` is stored to after ``%ptrA`` is read from, otherwise
 it is false for a read after write.
@@ -24109,7 +24109,7 @@ The above is equivalent to:
 
 ::
 
-      %m = @llvm.experimental.get.alias.lane.mask(%ptrA, %ptrB, %elementSize, %writeAfterRead)
+      %m = @llvm.experimental.get.nonalias.lane.mask(%ptrA, %ptrB, %elementSize, %writeAfterRead)
 
 This can, for example, be emitted by the loop vectorizer in which case
 ``%ptrA`` is a pointer that is read from within the loop, and ``%ptrB`` is a
@@ -24127,10 +24127,10 @@ Examples:
 
 .. code-block:: llvm
 
-      %alias.lane.mask = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4, i1 1)
-      %vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptrA, i32 4, <4 x i1> %alias.lane.mask, <4 x i32> poison)
+      %nonalias.lane.mask = call <4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4, i1 1)
+      %vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptrA, i32 4, <4 x i1> %nonalias.lane.mask, <4 x i32> poison)
       [...]
-      call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, <4 x i32>* %ptrB, i32 4, <4 x i1> %alias.lane.mask)
+      call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, <4 x i32>* %ptrB, i32 4, <4 x i1> %nonalias.lane.mask)
 
 .. _int_experimental_vp_splice:
 
diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index 3acedb67fc7c0..e1e903d3063af 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -1561,7 +1561,7 @@ enum NodeType {
   // The `llvm.experimental.get.alias.lane.mask.*` intrinsics
   // Operands: Load pointer, Store pointer, Element size, Write after read
   // Output: Mask
-  EXPERIMENTAL_ALIAS_LANE_MASK,
+  EXPERIMENTAL_NONALIAS_LANE_MASK,
 
   // llvm.clear_cache intrinsic
   // Operands: Input Chain, Start Addres, End Address
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 804327acd483a..b5053bed20095 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -2418,9 +2418,9 @@ let IntrProperties = [IntrNoMem, ImmArg<ArgIndex<1>>] in {
                                     llvm_i32_ty]>;
 }
 
-def int_experimental_get_alias_lane_mask:
+def int_experimental_get_nonalias_lane_mask:
   DefaultAttrsIntrinsic<[llvm_anyvector_ty],
-            [llvm_anyptr_ty, LLVMMatchType<1>, llvm_i64_ty, llvm_i1_ty],
+            [llvm_ptr_ty, llvm_ptr_ty, llvm_i64_ty, llvm_i1_ty],
             [IntrNoMem, IntrNoSync, IntrWillReturn, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
 
 def int_get_active_lane_mask:
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 12983f53f988f..d57f60fbe4830 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -324,8 +324,8 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
     Res = PromoteIntRes_VP_REDUCE(N);
     break;
 
-  case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
-    Res = PromoteIntRes_EXPERIMENTAL_ALIAS_LANE_MASK(N);
+  case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK:
+    Res = PromoteIntRes_EXPERIMENTAL_NONALIAS_LANE_MASK(N);
     break;
 
   case ISD::FREEZE:
@@ -379,10 +379,10 @@ SDValue DAGTypeLegalizer::PromoteIntRes_MERGE_VALUES(SDNode *N,
 }
 
 SDValue
-DAGTypeLegalizer::PromoteIntRes_EXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N) {
+DAGTypeLegalizer::PromoteIntRes_EXPERIMENTAL_NONALIAS_LANE_MASK(SDNode *N) {
   EVT VT = N->getValueType(0);
   EVT NewVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
-  return DAG.getNode(ISD::EXPERIMENTAL_ALIAS_LANE_MASK, SDLoc(N), NewVT,
+  return DAG.getNode(ISD::EXPERIMENTAL_NONALIAS_LANE_MASK, SDLoc(N), NewVT,
                      N->ops());
 }
 
@@ -2116,8 +2116,8 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
   case ISD::PARTIAL_REDUCE_SUMLA:
     Res = PromoteIntOp_PARTIAL_REDUCE_MLA(N);
     break;
-  case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
-    Res = PromoteIntOp_EXPERIMENTAL_ALIAS_LANE_MASK(N, OpNo);
+  case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK:
+    Res = PromoteIntOp_EXPERIMENTAL_NONALIAS_LANE_MASK(N, OpNo);
     break;
   }
 
@@ -2953,8 +2953,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_PARTIAL_REDUCE_MLA(SDNode *N) {
 }
 
 SDValue
-DAGTypeLegalizer::PromoteIntOp_EXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N,
-                                                            unsigned OpNo) {
+DAGTypeLegalizer::PromoteIntOp_EXPERIMENTAL_NONALIAS_LANE_MASK(SDNode *N,
+                                                               unsigned OpNo) {
   SmallVector<SDValue, 4> NewOps(N->ops());
   NewOps[OpNo] = GetPromotedInteger(N->getOperand(OpNo));
   return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index da473a435b461..e7e2524725fd1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -382,7 +382,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
   SDValue PromoteIntRes_VECTOR_FIND_LAST_ACTIVE(SDNode *N);
   SDValue PromoteIntRes_GET_ACTIVE_LANE_MASK(SDNode *N);
   SDValue PromoteIntRes_PARTIAL_REDUCE_MLA(SDNode *N);
-  SDValue PromoteIntRes_EXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N);
+  SDValue PromoteIntRes_EXPERIMENTAL_NONALIAS_LANE_MASK(SDNode *N);
 
   // Integer Operand Promotion.
   bool PromoteIntegerOperand(SDNode *N, unsigned OpNo);
@@ -437,7 +437,8 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
   SDValue PromoteIntOp_VECTOR_FIND_LAST_ACTIVE(SDNode *N, unsigned OpNo);
   SDValue PromoteIntOp_GET_ACTIVE_LANE_MASK(SDNode *N);
   SDValue PromoteIntOp_PARTIAL_REDUCE_MLA(SDNode *N);
-  SDValue PromoteIntOp_EXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N, unsigned OpNo);
+  SDValue PromoteIntOp_EXPERIMENTAL_NONALIAS_LANE_MASK(SDNode *N,
+                                                       unsigned OpNo);
 
   void SExtOrZExtPromotedOperands(SDValue &LHS, SDValue &RHS);
   void PromoteSetCCOperands(SDValue &LHS,SDValue &RHS, ISD::CondCode Code);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 3df412db95e26..59539fa658c58 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -138,7 +138,7 @@ class VectorLegalizer {
   SDValue ExpandVP_FNEG(SDNode *Node);
   SDValue ExpandVP_FABS(SDNode *Node);
   SDValue ExpandVP_FCOPYSIGN(SDNode *Node);
-  SDValue ExpandEXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N);
+  SDValue ExpandEXPERIMENTAL_NONALIAS_LANE_MASK(SDNode *N);
   SDValue ExpandSELECT(SDNode *Node);
   std::pair<SDValue, SDValue> ExpandLoad(SDNode *N);
   SDValue ExpandStore(SDNode *N);
@@ -476,7 +476,7 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
   case ISD::VECTOR_COMPRESS:
   case ISD::SCMP:
   case ISD::UCMP:
-  case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
+  case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK:
     Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
     break;
   case ISD::SMULFIX:
@@ -1293,8 +1293,8 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
   case ISD::UCMP:
     Results.push_back(TLI.expandCMP(Node, DAG));
     return;
-  case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
-    Results.push_back(ExpandEXPERIMENTAL_ALIAS_LANE_MASK(Node));
+  case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK:
+    Results.push_back(ExpandEXPERIMENTAL_NONALIAS_LANE_MASK(Node));
     return;
 
   case ISD::FADD:
@@ -1801,7 +1801,7 @@ SDValue VectorLegalizer::ExpandVP_FCOPYSIGN(SDNode *Node) {
   return DAG.getNode(ISD::BITCAST, DL, VT, CopiedSign);
 }
 
-SDValue VectorLegalizer::ExpandEXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N) {
+SDValue VectorLegalizer::ExpandEXPERIMENTAL_NONALIAS_LANE_MASK(SDNode *N) {
   SDLoc DL(N);
   SDValue SourceValue = N->getOperand(0);
   SDValue SinkValue = N->getOperand(1);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 005bc9da3d62b..b55c665f1946a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -8271,13 +8271,13 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
     visitVectorExtractLastActive(I, Intrinsic);
     return;
   }
-  case Intrinsic::experimental_get_alias_lane_mask: {
+  case Intrinsic::experimental_get_nonalias_lane_mask: {
     auto IntrinsicVT = EVT::getEVT(I.getType());
     SmallVector<SDValue, 4> Ops;
     for (auto &Op : I.operands())
       Ops.push_back(getValue(Op));
-    SDValue Mask =
-        DAG.getNode(ISD::EXPERIMENTAL_ALIAS_LANE_MASK, sdl, IntrinsicVT, Ops);
+    SDValue Mask = DAG.getNode(ISD::EXPERIMENTAL_NONALIAS_LANE_MASK, sdl,
+                               IntrinsicVT, Ops);
     setValue(&I, Mask);
   }
   }
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index 2693e86c36ae8..4bfdae4a80e99 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -587,7 +587,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
     return "partial_reduce_smla";
   case ISD::PARTIAL_REDUCE_SUMLA:
     return "partial_reduce_sumla";
-  case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
+  case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK:
     return "alias_lane_mask";
 
     // Vector Predication
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 0e1326798678d..24060cf02ae28 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -900,8 +900,8 @@ void TargetLoweringBase::initActions() {
     // Masked vector extracts default to expand.
     setOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE, VT, Expand);
 
-    // Aliasing lanes mask default to expand
-    setOperationAction(ISD::EXPERIMENTAL_ALIAS_LANE_MASK, VT, Expand);
+    // Non-aliasing lanes mask default to expand
+    setOperationAction(ISD::EXPERIMENTAL_NONALIAS_LANE_MASK, VT, Expand);
 
     // FP environment operations default to expand.
     setOperationAction(ISD::GET_FPENV, VT, Expand);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 9564f1972514a..0937e9f06d555 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1920,7 +1920,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       (Subtarget->hasSME() && Subtarget->isStreaming())) {
     for (auto VT : {MVT::v2i32, MVT::v4i16, MVT::v8i8, MVT::v16i8, MVT::nxv2i1,
                     MVT::nxv4i1, MVT::nxv8i1, MVT::nxv16i1}) {
-      setOperationAction(ISD::EXPERIMENTAL_ALIAS_LANE_MASK, VT, Custom);
+      setOperationAction(ISD::EXPERIMENTAL_NONALIAS_LANE_MASK, VT, Custom);
     }
   }
 
@@ -5210,8 +5210,9 @@ SDValue AArch64TargetLowering::LowerFSINCOS(SDValue Op,
 
 static MVT getSVEContainerType(EVT ContentTy);
 
-SDValue AArch64TargetLowering::LowerALIAS_LANE_MASK(SDValue Op,
-                                                    SelectionDAG &DAG) const {
+SDValue
+AArch64TargetLowering::LowerNONALIAS_LANE_MASK(SDValue Op,
+                                               SelectionDAG &DAG) const {
   SDLoc DL(Op);
   unsigned IntrinsicID = 0;
   uint64_t EltSize = Op.getConstantOperandVal(2);
@@ -6521,7 +6522,7 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     return DAG.getNode(AArch64ISD::USDOT, DL, Op.getValueType(),
                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
   }
-  case Intrinsic::experimental_get_alias_lane_mask: {
+  case Intrinsic::experimental_get_nonalias_lane_mask: {
     unsigned IntrinsicID = 0;
     uint64_t EltSize = Op.getOperand(3)->getAsZExtVal();
     bool IsWriteAfterRead = Op.getOperand(4)->getAsZExtVal() == 1;
@@ -7452,8 +7453,8 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
   default:
     llvm_unreachable("unimplemented operand");
     return SDValue();
-  case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
-    return LowerALIAS_LANE_MASK(Op, DAG);
+  case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK:
+    return LowerNONALIAS_LANE_MASK(Op, DAG);
   case ISD::BITCAST:
     return LowerBITCAST(Op, DAG);
   case ISD::GlobalAddress:
@@ -28253,7 +28254,7 @@ void AArch64TargetLowering::ReplaceNodeResults(
   case ISD::GET_ACTIVE_LANE_MASK:
     ReplaceGetActiveLaneMaskResults(N, Results, DAG);
     return;
-  case ISD::EXPERIMENTAL_ALIAS_LANE_MASK: {
+  case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK: {
     EVT VT = N->getValueType(0);
     if (!VT.isFixedLengthVector() || VT.getVectorElementType() != MVT::i1)
       return;
@@ -28265,7 +28266,7 @@ void AArch64TargetLowering::ReplaceNodeResults(
 
     SDLoc DL(N);
     auto V =
-        DAG.getNode(ISD::EXPERIMENTAL_ALIAS_LANE_MASK, DL, NewVT, N->ops());
+        DAG.getNode(ISD::EXPERIMENTAL_NONALIAS_LANE_MASK, DL, NewVT, N->ops());
     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
     return;
   }
@@ -28325,7 +28326,7 @@ void AArch64TargetLowering::ReplaceNodeResults(
       return;
     }
     case Intrinsic::experimental_vector_match:
-    case Intrinsic::experimental_get_alias_lane_mask: {
+    case Intrinsic::experimental_get_nonalias_lane_mask: {
       if (!VT.isFixedLengthVector() || VT.getVectorElementType() != MVT::i1)
         return;
 
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index c8ee5721c3627..17f1d7af6f5ce 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -726,7 +726,7 @@ class AArch64TargetLowering : public TargetLowering {
   SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerALIAS_LANE_MASK(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerNONALIAS_LANE_MASK(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerVSCALE(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/test/CodeGen/AArch64/alias_mask.ll b/llvm/test/CodeGen/AArch64/alias_mask.ll
index f88baeece0356..5ef6b588fe767 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask.ll
@@ -53,7 +53,7 @@ define <16 x i1> @whilewr_8(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 1)
+  %0 = call <16 x i1> @llvm.experimental.get.nonalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 1)
   ret <16 x i1> %0
 }
 
@@ -95,7 +95,7 @@ define <8 x i1> @whilewr_16(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 1)
+  %0 = call <8 x i1> @llvm.experimental.get.nonalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 1)
   ret <8 x i1> %0
 }
 
@@ -129,7 +129,7 @@ define <4 x i1> @whilewr_32(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 1)
+  %0 = call <4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 1)
   ret <4 x i1> %0
 }
 
@@ -159,7 +159,7 @@ define <2 x i1> @whilewr_64(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 1)
+  %0 = call <2 x i1> @llvm.experimental.get.nonalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 1)
   ret <2 x i1> %0
 }
 
@@ -215,7 +215,7 @@ define <16 x i1> @whilerw_8(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 0)
+  %0 = call <16 x i1> @llvm.experimental.get.nonalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 0)
   ret <16 x i1> %0
 }
 
@@ -258,7 +258,7 @@ define <8 x i1> @whilerw_16(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 0)
+  %0 = call <8 x i1> @llvm.experimental.get.nonalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 0)
   ret <8 x i1> %0
 }
 
@@ -293,7 +293,7 @@ define <4 x i1> @whilerw_32(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 0)
+  %0 = call <4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 0)
   ret <4 x i1> %0
 }
 
@@ -324,6 +324,6 @@ define <2 x i1> @whilerw_64(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 0)
+  %0 = call <2 x i1> @llvm.experimental.get.nonalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 0)
   ret <2 x i1> %0
 }
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
index 3d0f293b4687a..6884f14d685b5 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
@@ -60,7 +60,7 @@ define <vscale x 16 x i1> @whilewr_8(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 1)
+  %0 = call <vscale x 16 x i1> @llvm.experimental.get.nonalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 1)
   ret <vscale x 16 x i1> %0
 }
 
@@ -98,7 +98,7 @@ define <vscale x 8 x i1> @whilewr_16(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 1)
+  %0 = call <vscale x 8 x i1> @llvm.experimental.get.nonalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 1)
   ret <vscale x 8 x i1> %0
 }
 
@@ -130,7 +130,7 @@ define <vscale x 4 x i1> @whilewr_32(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 1)
+  %0 = call <vscale x 4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 1)
   ret <vscale x 4 x i1> %0
 }
 
@@ -158,7 +158,7 @@ define <vscale x 2 x i1> @whilewr_64(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 1)
+  %0 = call <vscale x 2 x i1> @llvm.experimental.get.nonalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 1)
   ret <vscale x 2 x i1> %0
 }
 
@@ -223,7 +223,7 @@ define <vscale x 16 x i1> @whilerw_8(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 0)
+  %0 = call <vscale x 16 x i1> @llvm.experimental.get.nonalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 0)
   ret <vscale x 16 x i1> %0
 }
 
@@ -262,7 +262,7 @@ define <vscale x 8 x i1> @whilerw_16(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 0)
+  %0 = call <vscale x 8 x i1> @llvm.experimental.get.nonalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 0)
   ret <vscale x 8 x i1> %0
 }
 
@@ -295,7 +295,7 @@ define <vscale x 4 x i1> @whilerw_32(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 0)
+  %0 = call <vscale x 4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 0)
   ret <vscale x 4 x i1> %0
 }
 
@@ -324,6 +324,6 @@ define <vscale x 2 x i1> @whilerw_64(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 0)
+  %0 = call <vscale x 2 x i1> @llvm.experimental.get.nonalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 0)
   ret <vscale x 2 x i1> %0
 }

>From 46807c0cb541bb3687684449ef7469753c72a01e Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Thu, 30 Jan 2025 16:15:26 +0000
Subject: [PATCH 14/29] Fix pointer types in example

---
 llvm/docs/LangRef.rst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 00cfbecfc8eb3..50ec4e5575968 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -24128,9 +24128,9 @@ Examples:
 .. code-block:: llvm
 
       %nonalias.lane.mask = call <4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4, i1 1)
-      %vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptrA, i32 4, <4 x i1> %nonalias.lane.mask, <4 x i32> poison)
+      %vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(ptr %ptrA, i32 4, <4 x i1> %nonalias.lane.mask, <4 x i32> poison)
       [...]
-      call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, <4 x i32>* %ptrB, i32 4, <4 x i1> %nonalias.lane.mask)
+      call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, ptr %ptrB, i32 4, <4 x i1> %nonalias.lane.mask)
 
 .. _int_experimental_vp_splice:
 

>From 3c9e437076430f9656434315dbcf1f6347db1165 Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Thu, 30 Jan 2025 16:15:35 +0000
Subject: [PATCH 15/29] Remove shouldExpandGetAliasLaneMask

---
 llvm/include/llvm/CodeGen/TargetLowering.h | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 7d1b9f403ba1a..52729e9e7cee7 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -482,13 +482,6 @@ class LLVM_ABI TargetLoweringBase {
     return true;
   }
 
-  /// Return true if the @llvm.experimental.get.alias.lane.mask intrinsic should
-  /// be expanded using generic code in SelectionDAGBuilder.
-  virtual bool shouldExpandGetAliasLaneMask(EVT VT, EVT PtrVT,
-                                            unsigned EltSize) const {
-    return true;
-  }
-
   virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF,
                                            bool IsScalable) const {
     return true;

>From 83c5404bd466ecf27680956218423235755b7f88 Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Thu, 30 Jan 2025 16:25:35 +0000
Subject: [PATCH 16/29] Lower to ISD node rather than intrinsic

---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 19 +++++--------------
 1 file changed, 5 insertions(+), 14 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 0937e9f06d555..76721ac419d36 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5214,40 +5214,33 @@ SDValue
 AArch64TargetLowering::LowerNONALIAS_LANE_MASK(SDValue Op,
                                                SelectionDAG &DAG) const {
   SDLoc DL(Op);
-  unsigned IntrinsicID = 0;
   uint64_t EltSize = Op.getConstantOperandVal(2);
   bool IsWriteAfterRead = Op.getConstantOperandVal(3) == 1;
+  unsigned Opcode =
+      IsWriteAfterRead ? AArch64ISD::WHILEWR : AArch64ISD::WHILERW;
   EVT VT = Op.getValueType();
   MVT SimpleVT = VT.getSimpleVT();
   // Make sure that the promoted mask size and element size match
   switch (EltSize) {
   case 1:
-    IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_b
-                                   : Intrinsic::aarch64_sve_whilerw_b;
     assert((SimpleVT == MVT::v16i8 || SimpleVT == MVT::nxv16i1) &&
            "Unexpected mask or element size");
     IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_b
                                    : Intrinsic::aarch64_sve_whilerw_b;
     break;
   case 2:
-    IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_h
-                                   : Intrinsic::aarch64_sve_whilerw_h;
     assert((SimpleVT == MVT::v8i8 || SimpleVT == MVT::nxv8i1) &&
            "Unexpected mask or element size");
     IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_h
                                    : Intrinsic::aarch64_sve_whilerw_h;
     break;
   case 4:
-    IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_s
-                                   : Intrinsic::aarch64_sve_whilerw_s;
     assert((SimpleVT == MVT::v4i16 || SimpleVT == MVT::nxv4i1) &&
            "Unexpected mask or element size");
     IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_s
                                    : Intrinsic::aarch64_sve_whilerw_s;
     break;
   case 8:
-    IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_d
-                                   : Intrinsic::aarch64_sve_whilerw_d;
     assert((SimpleVT == MVT::v2i32 || SimpleVT == MVT::nxv2i1) &&
            "Unexpected mask or element size");
     IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_d
@@ -5257,11 +5250,9 @@ AArch64TargetLowering::LowerNONALIAS_LANE_MASK(SDValue Op,
     llvm_unreachable("Unexpected element size for get.alias.lane.mask");
     break;
   }
-  SDValue ID = DAG.getTargetConstant(IntrinsicID, DL, MVT::i64);
 
   if (VT.isScalableVector())
-    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, ID, Op.getOperand(0),
-                       Op.getOperand(1));
+    return DAG.getNode(Opcode, DL, VT, Op.getOperand(0), Op.getOperand(1));
 
   // We can use the SVE whilewr/whilerw instruction to lower this
   // intrinsic by creating the appropriate sequence of scalable vector
@@ -5271,8 +5262,8 @@ AArch64TargetLowering::LowerNONALIAS_LANE_MASK(SDValue Op,
   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
   EVT WhileVT = ContainerVT.changeElementType(MVT::i1);
 
-  SDValue Mask = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, WhileVT, ID,
-                             Op.getOperand(0), Op.getOperand(1));
+  SDValue Mask =
+      DAG.getNode(Opcode, DL, WhileVT, Op.getOperand(0), Op.getOperand(1));
   SDValue MaskAsInt = DAG.getNode(ISD::SIGN_EXTEND, DL, ContainerVT, Mask);
   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, MaskAsInt,
                      DAG.getVectorIdxConstant(0, DL));

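The interesting part of this patch is the fixed-length path: the predicate is generated at the scalable container width, widened by sign-extension, and the fixed-width prefix is then extracted. A standalone scalar sketch of those three steps follows (byte elements, write-after-read, illustrative names only; not LLVM code):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Scalar sketch of the fixed-length lowering: (1) compute the i1
    // predicate lane-by-lane as WHILEWR would, (2) sign-extend each i1
    // to the container element type (0 or -1), (3) keep the low FixedVF
    // elements, mirroring the EXTRACT_SUBVECTOR at index 0.
    std::vector<int8_t> lowerFixedMask(int64_t ptrA, int64_t ptrB,
                                       unsigned FixedVF) {
      int64_t diff = ptrB - ptrA; // byte elements, write-after-read
      std::vector<int8_t> out(FixedVF);
      for (unsigned i = 0; i < FixedVF; ++i) {
        bool p = diff <= 0 || (int64_t)i < diff; // WHILEWR lane predicate
        out[i] = p ? int8_t(-1) : int8_t(0);     // SIGN_EXTEND of the i1
      }
      return out;
    }

    int main() {
      auto m = lowerFixedMask(0x100, 0x104, /*FixedVF=*/8);
      for (unsigned i = 0; i < 8; ++i)
        assert(m[i] == (i < 4 ? -1 : 0)); // diff = 4: low 4 lanes active
      return 0;
    }

Taking the low lanes is sound because the lane predicate depends only on the lane index and the pointer difference, never on the vector length, so the prefix of the container-width mask is exactly the fixed-width mask.
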
>From f0976197d640ad56b3a22ed94f1c92633d7f112d Mon Sep 17 00:00:00 2001
From: Sam Tebbs <samuel.tebbs at arm.com>
Date: Fri, 31 Jan 2025 14:24:11 +0000
Subject: [PATCH 17/29] Rename to noalias

---
 llvm/docs/LangRef.rst                         | 28 +++++++++----------
 llvm/include/llvm/CodeGen/ISDOpcodes.h        |  2 +-
 llvm/include/llvm/IR/Intrinsics.td            |  2 +-
 .../SelectionDAG/LegalizeIntegerTypes.cpp     | 16 +++++------
 llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h |  5 ++--
 .../SelectionDAG/LegalizeVectorOps.cpp        | 10 +++----
 .../SelectionDAG/SelectionDAGBuilder.cpp      |  6 ++--
 .../SelectionDAG/SelectionDAGDumper.cpp       |  2 +-
 llvm/lib/CodeGen/TargetLoweringBase.cpp       |  2 +-
 .../Target/AArch64/AArch64ISelLowering.cpp    | 25 ++++++-----------
 llvm/lib/Target/AArch64/AArch64ISelLowering.h |  2 +-
 llvm/test/CodeGen/AArch64/alias_mask.ll       | 16 +++++------
 .../CodeGen/AArch64/alias_mask_scalable.ll    | 16 +++++------
 13 files changed, 61 insertions(+), 71 deletions(-)

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 50ec4e5575968..a20cad0c25065 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -24045,10 +24045,10 @@ Examples:
       %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 %elem0, i64 429)
       %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %3, i32 4, <4 x i1> %active.lane.mask, <4 x i32> poison)
 
-.. _int_experimental_get_nonalias_lane_mask:
+.. _int_experimental_get_noalias_lane_mask:
 
-'``llvm.experimental.get.nonalias.lane.mask.*``' Intrinsics
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+'``llvm.experimental.get.noalias.lane.mask.*``' Intrinsics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Syntax:
 """""""
@@ -24056,10 +24056,10 @@ This is an overloaded intrinsic.
 
 ::
 
-      declare <4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
-      declare <8 x i1> @llvm.experimental.get.nonalias.lane.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
-      declare <16 x i1> @llvm.experimental.get.nonalias.lane.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
-      declare <vscale x 16 x i1> @llvm.experimental.get.nonalias.lane.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+      declare <4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+      declare <8 x i1> @llvm.experimental.get.noalias.lane.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+      declare <16 x i1> @llvm.experimental.get.noalias.lane.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+      declare <vscale x 16 x i1> @llvm.experimental.get.noalias.lane.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
 
 
 Overview:
@@ -24082,7 +24082,7 @@ Semantics:
 The intrinsic will return poison if ``%ptrA`` and ``%ptrB`` are within
 VF * ``%elementSize`` of each other and ``%ptrA`` + VF * ``%elementSize`` wraps.
 In other cases when ``%writeAfterRead`` is true, the
-'``llvm.experimental.get.nonalias.lane.mask.*``' intrinsics are semantically
+'``llvm.experimental.get.noalias.lane.mask.*``' intrinsics are semantically
 equivalent to:
 
 ::
@@ -24091,7 +24091,7 @@ equivalent to:
       %m[i] = (icmp ult i, %diff) || (%diff <= 0)
 
 When the return value is not poison and ``%writeAfterRead`` is false, the
-'``llvm.experimental.get.nonalias.lane.mask.*``' intrinsics are semantically
+'``llvm.experimental.get.noalias.lane.mask.*``' intrinsics are semantically
 equivalent to:
 
 ::
@@ -24101,7 +24101,7 @@ equivalent to:
 
 where ``%m`` is a vector (mask) of active/inactive lanes with its elements
 indexed by ``i`` (i = 0 to VF - 1),  and ``%ptrA``, ``%ptrB`` are the two ptr
-arguments to ``llvm.experimental.get.nonalias.lane.mask.*`` and ``%elementSize``
+arguments to ``llvm.experimental.get.noalias.lane.mask.*`` and ``%elementSize``
 is the first immediate argument. The ``%writeAfterRead`` argument is expected
 to be true if ``%ptrB`` is stored to after ``%ptrA`` is read from, otherwise
 it is false for a read after write.
@@ -24109,7 +24109,7 @@ The above is equivalent to:
 
 ::
 
-      %m = @llvm.experimental.get.nonalias.lane.mask(%ptrA, %ptrB, %elementSize, %writeAfterRead)
+      %m = @llvm.experimental.get.noalias.lane.mask(%ptrA, %ptrB, %elementSize, %writeAfterRead)
 
 This can, for example, be emitted by the loop vectorizer in which case
 ``%ptrA`` is a pointer that is read from within the loop, and ``%ptrB`` is a
@@ -24127,10 +24127,10 @@ Examples:
 
 .. code-block:: llvm
 
-      %nonalias.lane.mask = call <4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4, i1 1)
-      %vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(ptr %ptrA, i32 4, <4 x i1> %nonalias.lane.mask, <4 x i32> poison)
+      %noalias.lane.mask = call <4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4, i1 1)
+      %vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(ptr %ptrA, i32 4, <4 x i1> %noalias.lane.mask, <4 x i32> poison)
       [...]
-      call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, ptr %ptrB, i32 4, <4 x i1> %nonalias.lane.mask)
+      call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, ptr %ptrB, i32 4, <4 x i1> %noalias.lane.mask)
 
 .. _int_experimental_vp_splice:
 
diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index e1e903d3063af..c4f8e13814c81 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -1561,7 +1561,7 @@ enum NodeType {
   // The `llvm.experimental.get.alias.lane.mask.*` intrinsics
   // Operands: Load pointer, Store pointer, Element size, Write after read
   // Output: Mask
-  EXPERIMENTAL_NONALIAS_LANE_MASK,
+  EXPERIMENTAL_NOALIAS_LANE_MASK,
 
   // llvm.clear_cache intrinsic
   // Operands: Input Chain, Start Addres, End Address
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index b5053bed20095..4ede49afeaf2c 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -2418,7 +2418,7 @@ let IntrProperties = [IntrNoMem, ImmArg<ArgIndex<1>>] in {
                                     llvm_i32_ty]>;
 }
 
-def int_experimental_get_nonalias_lane_mask:
+def int_experimental_get_noalias_lane_mask:
   DefaultAttrsIntrinsic<[llvm_anyvector_ty],
             [llvm_ptr_ty, llvm_ptr_ty, llvm_i64_ty, llvm_i1_ty],
             [IntrNoMem, IntrNoSync, IntrWillReturn, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index d57f60fbe4830..e4580a17eef1a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -324,8 +324,8 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
     Res = PromoteIntRes_VP_REDUCE(N);
     break;
 
-  case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK:
-    Res = PromoteIntRes_EXPERIMENTAL_NONALIAS_LANE_MASK(N);
+  case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK:
+    Res = PromoteIntRes_EXPERIMENTAL_NOALIAS_LANE_MASK(N);
     break;
 
   case ISD::FREEZE:
@@ -379,10 +379,10 @@ SDValue DAGTypeLegalizer::PromoteIntRes_MERGE_VALUES(SDNode *N,
 }
 
 SDValue
-DAGTypeLegalizer::PromoteIntRes_EXPERIMENTAL_NONALIAS_LANE_MASK(SDNode *N) {
+DAGTypeLegalizer::PromoteIntRes_EXPERIMENTAL_NOALIAS_LANE_MASK(SDNode *N) {
   EVT VT = N->getValueType(0);
   EVT NewVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
-  return DAG.getNode(ISD::EXPERIMENTAL_NONALIAS_LANE_MASK, SDLoc(N), NewVT,
+  return DAG.getNode(ISD::EXPERIMENTAL_NOALIAS_LANE_MASK, SDLoc(N), NewVT,
                      N->ops());
 }
 
@@ -2116,8 +2116,8 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
   case ISD::PARTIAL_REDUCE_SUMLA:
     Res = PromoteIntOp_PARTIAL_REDUCE_MLA(N);
     break;
-  case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK:
-    Res = PromoteIntOp_EXPERIMENTAL_NONALIAS_LANE_MASK(N, OpNo);
+  case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK:
+    Res = PromoteIntOp_EXPERIMENTAL_NOALIAS_LANE_MASK(N, OpNo);
     break;
   }
 
@@ -2953,8 +2953,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_PARTIAL_REDUCE_MLA(SDNode *N) {
 }
 
 SDValue
-DAGTypeLegalizer::PromoteIntOp_EXPERIMENTAL_NONALIAS_LANE_MASK(SDNode *N,
-                                                               unsigned OpNo) {
+DAGTypeLegalizer::PromoteIntOp_EXPERIMENTAL_NOALIAS_LANE_MASK(SDNode *N,
+                                                              unsigned OpNo) {
   SmallVector<SDValue, 4> NewOps(N->ops());
   NewOps[OpNo] = GetPromotedInteger(N->getOperand(OpNo));
   return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index e7e2524725fd1..c83dff8e38d91 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -382,7 +382,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
   SDValue PromoteIntRes_VECTOR_FIND_LAST_ACTIVE(SDNode *N);
   SDValue PromoteIntRes_GET_ACTIVE_LANE_MASK(SDNode *N);
   SDValue PromoteIntRes_PARTIAL_REDUCE_MLA(SDNode *N);
-  SDValue PromoteIntRes_EXPERIMENTAL_NONALIAS_LANE_MASK(SDNode *N);
+  SDValue PromoteIntRes_EXPERIMENTAL_NOALIAS_LANE_MASK(SDNode *N);
 
   // Integer Operand Promotion.
   bool PromoteIntegerOperand(SDNode *N, unsigned OpNo);
@@ -437,8 +437,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
   SDValue PromoteIntOp_VECTOR_FIND_LAST_ACTIVE(SDNode *N, unsigned OpNo);
   SDValue PromoteIntOp_GET_ACTIVE_LANE_MASK(SDNode *N);
   SDValue PromoteIntOp_PARTIAL_REDUCE_MLA(SDNode *N);
-  SDValue PromoteIntOp_EXPERIMENTAL_NONALIAS_LANE_MASK(SDNode *N,
-                                                       unsigned OpNo);
+  SDValue PromoteIntOp_EXPERIMENTAL_NOALIAS_LANE_MASK(SDNode *N, unsigned OpNo);
 
   void SExtOrZExtPromotedOperands(SDValue &LHS, SDValue &RHS);
   void PromoteSetCCOperands(SDValue &LHS,SDValue &RHS, ISD::CondCode Code);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 59539fa658c58..7f45bfdfe6758 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -138,7 +138,7 @@ class VectorLegalizer {
   SDValue ExpandVP_FNEG(SDNode *Node);
   SDValue ExpandVP_FABS(SDNode *Node);
   SDValue ExpandVP_FCOPYSIGN(SDNode *Node);
-  SDValue ExpandEXPERIMENTAL_NONALIAS_LANE_MASK(SDNode *N);
+  SDValue ExpandEXPERIMENTAL_NOALIAS_LANE_MASK(SDNode *N);
   SDValue ExpandSELECT(SDNode *Node);
   std::pair<SDValue, SDValue> ExpandLoad(SDNode *N);
   SDValue ExpandStore(SDNode *N);
@@ -476,7 +476,7 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
   case ISD::VECTOR_COMPRESS:
   case ISD::SCMP:
   case ISD::UCMP:
-  case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK:
+  case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK:
     Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
     break;
   case ISD::SMULFIX:
@@ -1293,8 +1293,8 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
   case ISD::UCMP:
     Results.push_back(TLI.expandCMP(Node, DAG));
     return;
-  case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK:
-    Results.push_back(ExpandEXPERIMENTAL_NONALIAS_LANE_MASK(Node));
+  case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK:
+    Results.push_back(ExpandEXPERIMENTAL_NOALIAS_LANE_MASK(Node));
     return;
 
   case ISD::FADD:
@@ -1801,7 +1801,7 @@ SDValue VectorLegalizer::ExpandVP_FCOPYSIGN(SDNode *Node) {
   return DAG.getNode(ISD::BITCAST, DL, VT, CopiedSign);
 }
 
-SDValue VectorLegalizer::ExpandEXPERIMENTAL_NONALIAS_LANE_MASK(SDNode *N) {
+SDValue VectorLegalizer::ExpandEXPERIMENTAL_NOALIAS_LANE_MASK(SDNode *N) {
   SDLoc DL(N);
   SDValue SourceValue = N->getOperand(0);
   SDValue SinkValue = N->getOperand(1);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index b55c665f1946a..1531f8254f265 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -8271,13 +8271,13 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
     visitVectorExtractLastActive(I, Intrinsic);
     return;
   }
-  case Intrinsic::experimental_get_nonalias_lane_mask: {
+  case Intrinsic::experimental_get_noalias_lane_mask: {
     auto IntrinsicVT = EVT::getEVT(I.getType());
     SmallVector<SDValue, 4> Ops;
     for (auto &Op : I.operands())
       Ops.push_back(getValue(Op));
-    SDValue Mask = DAG.getNode(ISD::EXPERIMENTAL_NONALIAS_LANE_MASK, sdl,
-                               IntrinsicVT, Ops);
+    SDValue Mask =
+        DAG.getNode(ISD::EXPERIMENTAL_NOALIAS_LANE_MASK, sdl, IntrinsicVT, Ops);
     setValue(&I, Mask);
   }
   }
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index 4bfdae4a80e99..7a751fc09faaa 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -587,7 +587,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
     return "partial_reduce_smla";
   case ISD::PARTIAL_REDUCE_SUMLA:
     return "partial_reduce_sumla";
-  case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK:
+  case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK:
     return "alias_lane_mask";
 
     // Vector Predication
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 24060cf02ae28..4811d4fd07e98 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -901,7 +901,7 @@ void TargetLoweringBase::initActions() {
     setOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE, VT, Expand);
 
     // Non-aliasing lanes mask default to expand
-    setOperationAction(ISD::EXPERIMENTAL_NONALIAS_LANE_MASK, VT, Expand);
+    setOperationAction(ISD::EXPERIMENTAL_NOALIAS_LANE_MASK, VT, Expand);
 
     // FP environment operations default to expand.
     setOperationAction(ISD::GET_FPENV, VT, Expand);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 76721ac419d36..602d5c7147287 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1920,7 +1920,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       (Subtarget->hasSME() && Subtarget->isStreaming())) {
     for (auto VT : {MVT::v2i32, MVT::v4i16, MVT::v8i8, MVT::v16i8, MVT::nxv2i1,
                     MVT::nxv4i1, MVT::nxv8i1, MVT::nxv16i1}) {
-      setOperationAction(ISD::EXPERIMENTAL_NONALIAS_LANE_MASK, VT, Custom);
+      setOperationAction(ISD::EXPERIMENTAL_NOALIAS_LANE_MASK, VT, Custom);
     }
   }
 
@@ -5210,9 +5210,8 @@ SDValue AArch64TargetLowering::LowerFSINCOS(SDValue Op,
 
 static MVT getSVEContainerType(EVT ContentTy);
 
-SDValue
-AArch64TargetLowering::LowerNONALIAS_LANE_MASK(SDValue Op,
-                                               SelectionDAG &DAG) const {
+SDValue AArch64TargetLowering::LowerNOALIAS_LANE_MASK(SDValue Op,
+                                                      SelectionDAG &DAG) const {
   SDLoc DL(Op);
   uint64_t EltSize = Op.getConstantOperandVal(2);
   bool IsWriteAfterRead = Op.getConstantOperandVal(3) == 1;
@@ -5225,26 +5224,18 @@ AArch64TargetLowering::LowerNONALIAS_LANE_MASK(SDValue Op,
   case 1:
     assert((SimpleVT == MVT::v16i8 || SimpleVT == MVT::nxv16i1) &&
            "Unexpected mask or element size");
-    IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_b
-                                   : Intrinsic::aarch64_sve_whilerw_b;
     break;
   case 2:
     assert((SimpleVT == MVT::v8i8 || SimpleVT == MVT::nxv8i1) &&
            "Unexpected mask or element size");
-    IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_h
-                                   : Intrinsic::aarch64_sve_whilerw_h;
     break;
   case 4:
     assert((SimpleVT == MVT::v4i16 || SimpleVT == MVT::nxv4i1) &&
            "Unexpected mask or element size");
-    IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_s
-                                   : Intrinsic::aarch64_sve_whilerw_s;
     break;
   case 8:
     assert((SimpleVT == MVT::v2i32 || SimpleVT == MVT::nxv2i1) &&
            "Unexpected mask or element size");
-    IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_d
-                                   : Intrinsic::aarch64_sve_whilerw_d;
     break;
   default:
     llvm_unreachable("Unexpected element size for get.alias.lane.mask");
@@ -7444,8 +7435,8 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
   default:
     llvm_unreachable("unimplemented operand");
     return SDValue();
-  case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK:
-    return LowerNONALIAS_LANE_MASK(Op, DAG);
+  case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK:
+    return LowerNOALIAS_LANE_MASK(Op, DAG);
   case ISD::BITCAST:
     return LowerBITCAST(Op, DAG);
   case ISD::GlobalAddress:
@@ -28245,7 +28236,7 @@ void AArch64TargetLowering::ReplaceNodeResults(
   case ISD::GET_ACTIVE_LANE_MASK:
     ReplaceGetActiveLaneMaskResults(N, Results, DAG);
     return;
-  case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK: {
+  case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK: {
     EVT VT = N->getValueType(0);
     if (!VT.isFixedLengthVector() || VT.getVectorElementType() != MVT::i1)
       return;
@@ -28257,7 +28248,7 @@ void AArch64TargetLowering::ReplaceNodeResults(
 
     SDLoc DL(N);
     auto V =
-        DAG.getNode(ISD::EXPERIMENTAL_NONALIAS_LANE_MASK, DL, NewVT, N->ops());
+        DAG.getNode(ISD::EXPERIMENTAL_NOALIAS_LANE_MASK, DL, NewVT, N->ops());
     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
     return;
   }
@@ -28317,7 +28308,7 @@ void AArch64TargetLowering::ReplaceNodeResults(
       return;
     }
     case Intrinsic::experimental_vector_match:
-    case Intrinsic::experimental_get_nonalias_lane_mask: {
+    case Intrinsic::experimental_get_noalias_lane_mask: {
       if (!VT.isFixedLengthVector() || VT.getVectorElementType() != MVT::i1)
         return;
 
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 17f1d7af6f5ce..3a545c6eebf8b 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -726,7 +726,7 @@ class AArch64TargetLowering : public TargetLowering {
   SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerNONALIAS_LANE_MASK(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerNOALIAS_LANE_MASK(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerVSCALE(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/test/CodeGen/AArch64/alias_mask.ll b/llvm/test/CodeGen/AArch64/alias_mask.ll
index 5ef6b588fe767..21eff3b11c001 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask.ll
@@ -53,7 +53,7 @@ define <16 x i1> @whilewr_8(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <16 x i1> @llvm.experimental.get.nonalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 1)
+  %0 = call <16 x i1> @llvm.experimental.get.noalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 1)
   ret <16 x i1> %0
 }
 
@@ -95,7 +95,7 @@ define <8 x i1> @whilewr_16(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <8 x i1> @llvm.experimental.get.nonalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 1)
+  %0 = call <8 x i1> @llvm.experimental.get.noalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 1)
   ret <8 x i1> %0
 }
 
@@ -129,7 +129,7 @@ define <4 x i1> @whilewr_32(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 1)
+  %0 = call <4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 1)
   ret <4 x i1> %0
 }
 
@@ -159,7 +159,7 @@ define <2 x i1> @whilewr_64(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <2 x i1> @llvm.experimental.get.nonalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 1)
+  %0 = call <2 x i1> @llvm.experimental.get.noalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 1)
   ret <2 x i1> %0
 }
 
@@ -215,7 +215,7 @@ define <16 x i1> @whilerw_8(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <16 x i1> @llvm.experimental.get.nonalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 0)
+  %0 = call <16 x i1> @llvm.experimental.get.noalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 0)
   ret <16 x i1> %0
 }
 
@@ -258,7 +258,7 @@ define <8 x i1> @whilerw_16(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <8 x i1> @llvm.experimental.get.nonalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 0)
+  %0 = call <8 x i1> @llvm.experimental.get.noalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 0)
   ret <8 x i1> %0
 }
 
@@ -293,7 +293,7 @@ define <4 x i1> @whilerw_32(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 0)
+  %0 = call <4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 0)
   ret <4 x i1> %0
 }
 
@@ -324,6 +324,6 @@ define <2 x i1> @whilerw_64(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <2 x i1> @llvm.experimental.get.nonalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 0)
+  %0 = call <2 x i1> @llvm.experimental.get.noalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 0)
   ret <2 x i1> %0
 }
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
index 6884f14d685b5..b29619c7f397d 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
@@ -60,7 +60,7 @@ define <vscale x 16 x i1> @whilewr_8(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 16 x i1> @llvm.experimental.get.nonalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 1)
+  %0 = call <vscale x 16 x i1> @llvm.experimental.get.noalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 1)
   ret <vscale x 16 x i1> %0
 }
 
@@ -98,7 +98,7 @@ define <vscale x 8 x i1> @whilewr_16(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 8 x i1> @llvm.experimental.get.nonalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 1)
+  %0 = call <vscale x 8 x i1> @llvm.experimental.get.noalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 1)
   ret <vscale x 8 x i1> %0
 }
 
@@ -130,7 +130,7 @@ define <vscale x 4 x i1> @whilewr_32(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 1)
+  %0 = call <vscale x 4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 1)
   ret <vscale x 4 x i1> %0
 }
 
@@ -158,7 +158,7 @@ define <vscale x 2 x i1> @whilewr_64(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 2 x i1> @llvm.experimental.get.nonalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 1)
+  %0 = call <vscale x 2 x i1> @llvm.experimental.get.noalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 1)
   ret <vscale x 2 x i1> %0
 }
 
@@ -223,7 +223,7 @@ define <vscale x 16 x i1> @whilerw_8(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 16 x i1> @llvm.experimental.get.nonalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 0)
+  %0 = call <vscale x 16 x i1> @llvm.experimental.get.noalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 0)
   ret <vscale x 16 x i1> %0
 }
 
@@ -262,7 +262,7 @@ define <vscale x 8 x i1> @whilerw_16(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 8 x i1> @llvm.experimental.get.nonalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 0)
+  %0 = call <vscale x 8 x i1> @llvm.experimental.get.noalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 0)
   ret <vscale x 8 x i1> %0
 }
 
@@ -295,7 +295,7 @@ define <vscale x 4 x i1> @whilerw_32(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 0)
+  %0 = call <vscale x 4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 0)
   ret <vscale x 4 x i1> %0
 }
 
@@ -324,6 +324,6 @@ define <vscale x 2 x i1> @whilerw_64(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 2 x i1> @llvm.experimental.get.nonalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 0)
+  %0 = call <vscale x 2 x i1> @llvm.experimental.get.noalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 0)
   ret <vscale x 2 x i1> %0
 }

>From e17b06f4fb3fb1d762d806aea49fabf9d0891ab5 Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Wed, 26 Feb 2025 23:39:53 +0000
Subject: [PATCH 18/29] Rename to loop.dependence.raw/war.mask

---
 llvm/include/llvm/CodeGen/ISDOpcodes.h        |  7 ++---
 llvm/include/llvm/IR/Intrinsics.td            | 11 +++++---
 .../SelectionDAG/LegalizeIntegerTypes.cpp     | 20 +++++++-------
 llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h |  5 ++--
 .../SelectionDAG/LegalizeVectorOps.cpp        | 15 ++++++-----
 .../SelectionDAG/SelectionDAGBuilder.cpp      |  9 ++++---
 .../SelectionDAG/SelectionDAGDumper.cpp       |  6 +++--
 llvm/lib/CodeGen/TargetLoweringBase.cpp       |  5 ++--
 .../Target/AArch64/AArch64ISelLowering.cpp    | 27 ++++++++++++-------
 llvm/lib/Target/AArch64/AArch64ISelLowering.h |  2 +-
 llvm/test/CodeGen/AArch64/alias_mask.ll       | 16 +++++------
 .../CodeGen/AArch64/alias_mask_scalable.ll    | 16 +++++------
 12 files changed, 81 insertions(+), 58 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index c4f8e13814c81..57c3873e1451d 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -1558,10 +1558,11 @@ enum NodeType {
   // bits conform to getBooleanContents similar to the SETCC operator.
   GET_ACTIVE_LANE_MASK,
 
-  // The `llvm.experimental.get.alias.lane.mask.*` intrinsics
-  // Operands: Load pointer, Store pointer, Element size, Write after read
+  // The `llvm.experimental.loop.dependence.{war, raw}.mask` intrinsics
+  // Operands: Load pointer, Store pointer, Element size
   // Output: Mask
-  EXPERIMENTAL_NOALIAS_LANE_MASK,
+  EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK,
+  EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK,
 
   // llvm.clear_cache intrinsic
  // Operands: Input Chain, Start Address, End Address
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 4ede49afeaf2c..1a9eb15112268 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -2418,10 +2418,15 @@ let IntrProperties = [IntrNoMem, ImmArg<ArgIndex<1>>] in {
                                     llvm_i32_ty]>;
 }
 
-def int_experimental_get_noalias_lane_mask:
+def int_experimental_loop_dependence_raw_mask:
   DefaultAttrsIntrinsic<[llvm_anyvector_ty],
-            [llvm_ptr_ty, llvm_ptr_ty, llvm_i64_ty, llvm_i1_ty],
-            [IntrNoMem, IntrNoSync, IntrWillReturn, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
+            [llvm_ptr_ty, llvm_ptr_ty, llvm_i64_ty],
+            [IntrNoMem, IntrNoSync, IntrWillReturn, ImmArg<ArgIndex<2>>]>;
+
+def int_experimental_loop_dependence_war_mask:
+  DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+            [llvm_ptr_ty, llvm_ptr_ty, llvm_i64_ty],
+            [IntrNoMem, IntrNoSync, IntrWillReturn, ImmArg<ArgIndex<2>>]>;
 
 def int_get_active_lane_mask:
   DefaultAttrsIntrinsic<[llvm_anyvector_ty],
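
For clarity, the net effect of this rename at the IR level is that the
trailing i1 hazard-kind flag is folded into the intrinsic name, so every old
call site maps to exactly one of the two new intrinsics. A minimal sketch,
based on the test updates below::

      ; before: an i1 immediate selected the hazard kind
      %war = call <4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 1)
      %raw = call <4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 0)

      ; after: one intrinsic per hazard kind, no flag operand
      %war = call <4 x i1> @llvm.experimental.loop.dependence.war.mask.v4i1(ptr %a, ptr %b, i64 4)
      %raw = call <4 x i1> @llvm.experimental.loop.dependence.raw.mask.v4i1(ptr %a, ptr %b, i64 4)
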
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index e4580a17eef1a..3f8caeb1d4f42 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -324,8 +324,9 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
     Res = PromoteIntRes_VP_REDUCE(N);
     break;
 
-  case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK:
-    Res = PromoteIntRes_EXPERIMENTAL_NOALIAS_LANE_MASK(N);
+  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
+  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK:
+    Res = PromoteIntRes_EXPERIMENTAL_LOOP_DEPENDENCE_MASK(N);
     break;
 
   case ISD::FREEZE:
@@ -379,11 +380,10 @@ SDValue DAGTypeLegalizer::PromoteIntRes_MERGE_VALUES(SDNode *N,
 }
 
 SDValue
-DAGTypeLegalizer::PromoteIntRes_EXPERIMENTAL_NOALIAS_LANE_MASK(SDNode *N) {
+DAGTypeLegalizer::PromoteIntRes_EXPERIMENTAL_LOOP_DEPENDENCE_MASK(SDNode *N) {
   EVT VT = N->getValueType(0);
   EVT NewVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
-  return DAG.getNode(ISD::EXPERIMENTAL_NOALIAS_LANE_MASK, SDLoc(N), NewVT,
-                     N->ops());
+  return DAG.getNode(N->getOpcode(), SDLoc(N), NewVT, N->ops());
 }
 
 SDValue DAGTypeLegalizer::PromoteIntRes_AssertSext(SDNode *N) {
@@ -2116,8 +2116,9 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
   case ISD::PARTIAL_REDUCE_SUMLA:
     Res = PromoteIntOp_PARTIAL_REDUCE_MLA(N);
     break;
-  case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK:
-    Res = PromoteIntOp_EXPERIMENTAL_NOALIAS_LANE_MASK(N, OpNo);
+  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK:
+  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
+    Res = PromoteIntOp_EXPERIMENTAL_LOOP_DEPENDENCE_MASK(N, OpNo);
     break;
   }
 
@@ -2952,9 +2953,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_PARTIAL_REDUCE_MLA(SDNode *N) {
   return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
 }
 
-SDValue
-DAGTypeLegalizer::PromoteIntOp_EXPERIMENTAL_NOALIAS_LANE_MASK(SDNode *N,
-                                                              unsigned OpNo) {
+SDValue DAGTypeLegalizer::PromoteIntOp_EXPERIMENTAL_LOOP_DEPENDENCE_MASK(
+    SDNode *N, unsigned OpNo) {
   SmallVector<SDValue, 4> NewOps(N->ops());
   NewOps[OpNo] = GetPromotedInteger(N->getOperand(OpNo));
   return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index c83dff8e38d91..44f3c6790edca 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -382,7 +382,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
   SDValue PromoteIntRes_VECTOR_FIND_LAST_ACTIVE(SDNode *N);
   SDValue PromoteIntRes_GET_ACTIVE_LANE_MASK(SDNode *N);
   SDValue PromoteIntRes_PARTIAL_REDUCE_MLA(SDNode *N);
-  SDValue PromoteIntRes_EXPERIMENTAL_NOALIAS_LANE_MASK(SDNode *N);
+  SDValue PromoteIntRes_EXPERIMENTAL_LOOP_DEPENDENCE_MASK(SDNode *N);
 
   // Integer Operand Promotion.
   bool PromoteIntegerOperand(SDNode *N, unsigned OpNo);
@@ -437,7 +437,8 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
   SDValue PromoteIntOp_VECTOR_FIND_LAST_ACTIVE(SDNode *N, unsigned OpNo);
   SDValue PromoteIntOp_GET_ACTIVE_LANE_MASK(SDNode *N);
   SDValue PromoteIntOp_PARTIAL_REDUCE_MLA(SDNode *N);
-  SDValue PromoteIntOp_EXPERIMENTAL_NOALIAS_LANE_MASK(SDNode *N, unsigned OpNo);
+  SDValue PromoteIntOp_EXPERIMENTAL_LOOP_DEPENDENCE_MASK(SDNode *N,
+                                                         unsigned OpNo);
 
   void SExtOrZExtPromotedOperands(SDValue &LHS, SDValue &RHS);
   void PromoteSetCCOperands(SDValue &LHS,SDValue &RHS, ISD::CondCode Code);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 7f45bfdfe6758..86720471e9584 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -138,7 +138,7 @@ class VectorLegalizer {
   SDValue ExpandVP_FNEG(SDNode *Node);
   SDValue ExpandVP_FABS(SDNode *Node);
   SDValue ExpandVP_FCOPYSIGN(SDNode *Node);
-  SDValue ExpandEXPERIMENTAL_NOALIAS_LANE_MASK(SDNode *N);
+  SDValue ExpandLOOP_DEPENDENCE_MASK(SDNode *N);
   SDValue ExpandSELECT(SDNode *Node);
   std::pair<SDValue, SDValue> ExpandLoad(SDNode *N);
   SDValue ExpandStore(SDNode *N);
@@ -476,7 +476,8 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
   case ISD::VECTOR_COMPRESS:
   case ISD::SCMP:
   case ISD::UCMP:
-  case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK:
+  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
+  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK:
     Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
     break;
   case ISD::SMULFIX:
@@ -1293,8 +1294,9 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
   case ISD::UCMP:
     Results.push_back(TLI.expandCMP(Node, DAG));
     return;
-  case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK:
-    Results.push_back(ExpandEXPERIMENTAL_NOALIAS_LANE_MASK(Node));
+  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
+  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK:
+    Results.push_back(ExpandLOOP_DEPENDENCE_MASK(Node));
     return;
 
   case ISD::FADD:
@@ -1801,13 +1803,14 @@ SDValue VectorLegalizer::ExpandVP_FCOPYSIGN(SDNode *Node) {
   return DAG.getNode(ISD::BITCAST, DL, VT, CopiedSign);
 }
 
-SDValue VectorLegalizer::ExpandEXPERIMENTAL_NOALIAS_LANE_MASK(SDNode *N) {
+SDValue VectorLegalizer::ExpandLOOP_DEPENDENCE_MASK(SDNode *N) {
   SDLoc DL(N);
   SDValue SourceValue = N->getOperand(0);
   SDValue SinkValue = N->getOperand(1);
   SDValue EltSize = N->getOperand(2);
 
-  bool IsWriteAfterRead = N->getConstantOperandVal(3) != 0;
+  bool IsWriteAfterRead =
+      N->getOpcode() == ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK;
   auto VT = N->getValueType(0);
   auto PtrVT = SourceValue->getValueType(0);
 
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 1531f8254f265..c4aabe748c7c5 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -8271,13 +8271,16 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
     visitVectorExtractLastActive(I, Intrinsic);
     return;
   }
-  case Intrinsic::experimental_get_noalias_lane_mask: {
+  case Intrinsic::experimental_loop_dependence_war_mask:
+  case Intrinsic::experimental_loop_dependence_raw_mask: {
     auto IntrinsicVT = EVT::getEVT(I.getType());
     SmallVector<SDValue, 4> Ops;
     for (auto &Op : I.operands())
       Ops.push_back(getValue(Op));
-    SDValue Mask =
-        DAG.getNode(ISD::EXPERIMENTAL_NOALIAS_LANE_MASK, sdl, IntrinsicVT, Ops);
+    unsigned ID = Intrinsic == Intrinsic::experimental_loop_dependence_war_mask
+                      ? ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK
+                      : ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK;
+    SDValue Mask = DAG.getNode(ID, sdl, IntrinsicVT, Ops);
     setValue(&I, Mask);
   }
   }
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index 7a751fc09faaa..d5c8431370243 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -587,8 +587,10 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
     return "partial_reduce_smla";
   case ISD::PARTIAL_REDUCE_SUMLA:
     return "partial_reduce_sumla";
-  case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK:
-    return "alias_lane_mask";
+  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
+    return "loop_dep_war";
+  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK:
+    return "loop_dep_raw";
 
     // Vector Predication
 #define BEGIN_REGISTER_VP_SDNODE(SDID, LEGALARG, NAME, ...)                    \
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 4811d4fd07e98..c99f141f15154 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -900,8 +900,9 @@ void TargetLoweringBase::initActions() {
     // Masked vector extracts default to expand.
     setOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE, VT, Expand);
 
-    // Non-aliasing lanes mask default to expand
-    setOperationAction(ISD::EXPERIMENTAL_NOALIAS_LANE_MASK, VT, Expand);
+    // Lane masks with non-aliasing lanes enabled default to expand
+    setOperationAction(ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK, VT, Expand);
+    setOperationAction(ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK, VT, Expand);
 
     // FP environment operations default to expand.
     setOperationAction(ISD::GET_FPENV, VT, Expand);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 602d5c7147287..d31b5e038e1d2 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1920,7 +1920,10 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       (Subtarget->hasSME() && Subtarget->isStreaming())) {
     for (auto VT : {MVT::v2i32, MVT::v4i16, MVT::v8i8, MVT::v16i8, MVT::nxv2i1,
                     MVT::nxv4i1, MVT::nxv8i1, MVT::nxv16i1}) {
-      setOperationAction(ISD::EXPERIMENTAL_NOALIAS_LANE_MASK, VT, Custom);
+      setOperationAction(ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK, VT,
+                         Custom);
+      setOperationAction(ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK, VT,
+                         Custom);
     }
   }
 
@@ -5210,11 +5213,13 @@ SDValue AArch64TargetLowering::LowerFSINCOS(SDValue Op,
 
 static MVT getSVEContainerType(EVT ContentTy);
 
-SDValue AArch64TargetLowering::LowerNOALIAS_LANE_MASK(SDValue Op,
-                                                      SelectionDAG &DAG) const {
+SDValue
+AArch64TargetLowering::LowerLOOP_DEPENDENCE_MASK(SDValue Op,
+                                                 SelectionDAG &DAG) const {
   SDLoc DL(Op);
   uint64_t EltSize = Op.getConstantOperandVal(2);
-  bool IsWriteAfterRead = Op.getConstantOperandVal(3) == 1;
+  bool IsWriteAfterRead =
+      Op.getOpcode() == ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK;
   unsigned Opcode =
       IsWriteAfterRead ? AArch64ISD::WHILEWR : AArch64ISD::WHILERW;
   EVT VT = Op.getValueType();
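
On SVE2 targets this maps straight to the WHILEWR/WHILERW predicate
instructions. A hedged sketch of the intended mapping for byte elements
(assuming an SVE2-enabled subtarget; illustrative only, not FileCheck output)::

      %m = call <vscale x 16 x i1> @llvm.experimental.loop.dependence.war.mask.nxv16i1(ptr %a, ptr %b, i64 1)
      ; expected to select to:
      ;   whilewr p0.b, x0, x1
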
@@ -7435,8 +7440,9 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
   default:
     llvm_unreachable("unimplemented operand");
     return SDValue();
-  case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK:
-    return LowerNOALIAS_LANE_MASK(Op, DAG);
+  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK:
+  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
+    return LowerLOOP_DEPENDENCE_MASK(Op, DAG);
   case ISD::BITCAST:
     return LowerBITCAST(Op, DAG);
   case ISD::GlobalAddress:
@@ -28236,7 +28242,8 @@ void AArch64TargetLowering::ReplaceNodeResults(
   case ISD::GET_ACTIVE_LANE_MASK:
     ReplaceGetActiveLaneMaskResults(N, Results, DAG);
     return;
-  case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK: {
+  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
+  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK: {
     EVT VT = N->getValueType(0);
     if (!VT.isFixedLengthVector() || VT.getVectorElementType() != MVT::i1)
       return;
@@ -28247,8 +28254,7 @@ void AArch64TargetLowering::ReplaceNodeResults(
       return;
 
     SDLoc DL(N);
-    auto V =
-        DAG.getNode(ISD::EXPERIMENTAL_NOALIAS_LANE_MASK, DL, NewVT, N->ops());
+    auto V = DAG.getNode(N->getOpcode(), DL, NewVT, N->ops());
     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
     return;
   }
@@ -28308,7 +28314,8 @@ void AArch64TargetLowering::ReplaceNodeResults(
       return;
     }
     case Intrinsic::experimental_vector_match:
-    case Intrinsic::experimental_get_noalias_lane_mask: {
+    case Intrinsic::experimental_loop_dependence_raw_mask:
+    case Intrinsic::experimental_loop_dependence_war_mask: {
       if (!VT.isFixedLengthVector() || VT.getVectorElementType() != MVT::i1)
         return;
 
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 3a545c6eebf8b..7f5d4eaef0638 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -726,7 +726,7 @@ class AArch64TargetLowering : public TargetLowering {
   SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerNOALIAS_LANE_MASK(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerLOOP_DEPENDENCE_MASK(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerVSCALE(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/test/CodeGen/AArch64/alias_mask.ll b/llvm/test/CodeGen/AArch64/alias_mask.ll
index 21eff3b11c001..3248cb2de2644 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask.ll
@@ -53,7 +53,7 @@ define <16 x i1> @whilewr_8(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <16 x i1> @llvm.experimental.get.noalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 1)
+  %0 = call <16 x i1> @llvm.experimental.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 1)
   ret <16 x i1> %0
 }
 
@@ -95,7 +95,7 @@ define <8 x i1> @whilewr_16(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <8 x i1> @llvm.experimental.get.noalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 1)
+  %0 = call <8 x i1> @llvm.experimental.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 2)
   ret <8 x i1> %0
 }
 
@@ -129,7 +129,7 @@ define <4 x i1> @whilewr_32(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 1)
+  %0 = call <4 x i1> @llvm.experimental.loop.dependence.war.mask.v4i1(ptr %a, ptr %b, i64 4)
   ret <4 x i1> %0
 }
 
@@ -159,7 +159,7 @@ define <2 x i1> @whilewr_64(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <2 x i1> @llvm.experimental.get.noalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 1)
+  %0 = call <2 x i1> @llvm.experimental.loop.dependence.war.mask.v2i1(ptr %a, ptr %b, i64 8)
   ret <2 x i1> %0
 }
 
@@ -215,7 +215,7 @@ define <16 x i1> @whilerw_8(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <16 x i1> @llvm.experimental.get.noalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 0)
+  %0 = call <16 x i1> @llvm.experimental.loop.dependence.raw.mask.v16i1(ptr %a, ptr %b, i64 1)
   ret <16 x i1> %0
 }
 
@@ -258,7 +258,7 @@ define <8 x i1> @whilerw_16(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <8 x i1> @llvm.experimental.get.noalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 0)
+  %0 = call <8 x i1> @llvm.experimental.loop.dependence.raw.mask.v8i1(ptr %a, ptr %b, i64 2)
   ret <8 x i1> %0
 }
 
@@ -293,7 +293,7 @@ define <4 x i1> @whilerw_32(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 0)
+  %0 = call <4 x i1> @llvm.experimental.loop.dependence.raw.mask.v4i1(ptr %a, ptr %b, i64 4)
   ret <4 x i1> %0
 }
 
@@ -324,6 +324,6 @@ define <2 x i1> @whilerw_64(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <2 x i1> @llvm.experimental.get.noalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 0)
+  %0 = call <2 x i1> @llvm.experimental.loop.dependence.raw.mask.v2i1(ptr %a, ptr %b, i64 8)
   ret <2 x i1> %0
 }
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
index b29619c7f397d..5a7c3180e2807 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
@@ -60,7 +60,7 @@ define <vscale x 16 x i1> @whilewr_8(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 16 x i1> @llvm.experimental.get.noalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 1)
+  %0 = call <vscale x 16 x i1> @llvm.experimental.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 1)
   ret <vscale x 16 x i1> %0
 }
 
@@ -98,7 +98,7 @@ define <vscale x 8 x i1> @whilewr_16(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 8 x i1> @llvm.experimental.get.noalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 1)
+  %0 = call <vscale x 8 x i1> @llvm.experimental.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 2)
   ret <vscale x 8 x i1> %0
 }
 
@@ -130,7 +130,7 @@ define <vscale x 4 x i1> @whilewr_32(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 1)
+  %0 = call <vscale x 4 x i1> @llvm.experimental.loop.dependence.war.mask.v4i1(ptr %a, ptr %b, i64 4)
   ret <vscale x 4 x i1> %0
 }
 
@@ -158,7 +158,7 @@ define <vscale x 2 x i1> @whilewr_64(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 2 x i1> @llvm.experimental.get.noalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 1)
+  %0 = call <vscale x 2 x i1> @llvm.experimental.loop.dependence.war.mask.v2i1(ptr %a, ptr %b, i64 8)
   ret <vscale x 2 x i1> %0
 }
 
@@ -223,7 +223,7 @@ define <vscale x 16 x i1> @whilerw_8(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 16 x i1> @llvm.experimental.get.noalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 0)
+  %0 = call <vscale x 16 x i1> @llvm.experimental.loop.dependence.raw.mask.v16i1(ptr %a, ptr %b, i64 1)
   ret <vscale x 16 x i1> %0
 }
 
@@ -262,7 +262,7 @@ define <vscale x 8 x i1> @whilerw_16(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 8 x i1> @llvm.experimental.get.noalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 0)
+  %0 = call <vscale x 8 x i1> @llvm.experimental.loop.dependence.raw.mask.v8i1(ptr %a, ptr %b, i64 2)
   ret <vscale x 8 x i1> %0
 }
 
@@ -295,7 +295,7 @@ define <vscale x 4 x i1> @whilerw_32(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 0)
+  %0 = call <vscale x 4 x i1> @llvm.experimental.loop.dependence.raw.mask.v4i1(ptr %a, ptr %b, i64 4)
   ret <vscale x 4 x i1> %0
 }
 
@@ -324,6 +324,6 @@ define <vscale x 2 x i1> @whilerw_64(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 2 x i1> @llvm.experimental.get.noalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 0)
+  %0 = call <vscale x 2 x i1> @llvm.experimental.loop.dependence.raw.mask.v2i1(ptr %a, ptr %b, i64 8)
   ret <vscale x 2 x i1> %0
 }

>From 68816d7bf6aa7bdacd239a84a9bb402e83c7e980 Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Mon, 10 Mar 2025 13:29:48 +0000
Subject: [PATCH 19/29] Rename in langref

---
 llvm/docs/LangRef.rst | 47 +++++++++++++++++++------------------------
 1 file changed, 21 insertions(+), 26 deletions(-)

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index a20cad0c25065..14713c669a1c9 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -24045,10 +24045,12 @@ Examples:
       %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 %elem0, i64 429)
       %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %3, i32 4, <4 x i1> %active.lane.mask, <4 x i32> poison)
 
-.. _int_experimental_get_noalias_lane_mask:
 
-'``llvm.experimental.get.noalias.lane.mask.*``' Intrinsics
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+.. _int_experimental_loop_dependence_war_mask:
+.. _int_experimental_loop_dependence_raw_mask:
+
+'``llvm.experimental.loop.dependence.raw.mask.*``' and '``llvm.experimental.loop.dependence.war.mask.*``' Intrinsics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Syntax:
 """""""
@@ -24056,10 +24058,10 @@ This is an overloaded intrinsic.
 
 ::
 
-      declare <4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
-      declare <8 x i1> @llvm.experimental.get.noalias.lane.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
-      declare <16 x i1> @llvm.experimental.get.noalias.lane.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
-      declare <vscale x 16 x i1> @llvm.experimental.get.noalias.lane.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+      declare <4 x i1> @llvm.experimental.loop.dependence.raw.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+      declare <8 x i1> @llvm.experimental.loop.dependence.war.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+      declare <16 x i1> @llvm.experimental.loop.dependence.raw.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+      declare <vscale x 16 x i1> @llvm.experimental.loop.dependence.war.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
 
 
 Overview:
@@ -24072,8 +24074,8 @@ across one vector loop iteration.
 Arguments:
 """"""""""
 
-The first two arguments have the same scalar integer type.
-The final two are immediates and the result is a vector with the i1 element type.
+The first two arguments have the same pointer type.
+The final one is an immediate and the result is a vector with the i1 element type.
 
 Semantics:
 """"""""""
@@ -24081,8 +24083,7 @@ Semantics:
 ``%elementSize`` is the size of the accessed elements in bytes.
 The intrinsic will return poison if ``%ptrA`` and ``%ptrB`` are within
 VF * ``%elementSize`` of each other and ``%ptrA`` + VF * ``%elementSize`` wraps.
-In other cases when ``%writeAfterRead`` is true, the
-'``llvm.experimental.get.noalias.lane.mask.*``' intrinsics are semantically
+The '``llvm.experimental.loop.dependence.war.mask*``' intrinsics are semantically
 equivalent to:
 
 ::
@@ -24090,9 +24091,8 @@ equivalent to:
       %diff = (%ptrB - %ptrA) / %elementSize
       %m[i] = (icmp ult i, %diff) || (%diff <= 0)
 
-When the return value is not poison and ``%writeAfterRead`` is false, the
-'``llvm.experimental.get.noalias.lane.mask.*``' intrinsics are semantically
-equivalent to:
+When the return value is not poison the '``llvm.experimental.loop.dependence.raw.mask.*``'
+intrinsics are semantically equivalent to:
 
 ::
 
@@ -24101,15 +24101,10 @@ equivalent to:
 
 where ``%m`` is a vector (mask) of active/inactive lanes with its elements
 indexed by ``i`` (i = 0 to VF - 1),  and ``%ptrA``, ``%ptrB`` are the two ptr
-arguments to ``llvm.experimental.get.noalias.lane.mask.*`` and ``%elementSize``
-is the first immediate argument. The ``%writeAfterRead`` argument is expected
-to be true if ``%ptrB`` is stored to after ``%ptrA`` is read from, otherwise
-it is false for a read after write.
-The above is equivalent to:
-
-::
-
-      %m = @llvm.experimental.get.noalias.lane.mask(%ptrA, %ptrB, %elementSize, %writeAfterRead)
+arguments to ``llvm.experimental.loop.dependence.{raw,war}.mask.*`` and ``%elementSize``
+is the first immediate argument. The ``war`` variant is expected to be used when
+``%ptrB`` is stored to after ``%ptrA`` is read from, otherwise the ``raw`` variant is
+expected to be used.
 
 This can, for example, be emitted by the loop vectorizer in which case
 ``%ptrA`` is a pointer that is read from within the loop, and ``%ptrB`` is a
@@ -24127,10 +24122,10 @@ Examples:
 
 .. code-block:: llvm
 
-      %noalias.lane.mask = call <4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4, i1 1)
-      %vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(ptr %ptrA, i32 4, <4 x i1> %noalias.lane.mask, <4 x i32> poison)
+      %loop.dependence.mask = call <4 x i1> @llvm.experimental.loop.dependence.war.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4)
+      %vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(ptr %ptrA, i32 4, <4 x i1> %loop.dependence.mask, <4 x i32> poison)
       [...]
-      call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, ptr %ptrB, i32 4, <4 x i1> %noalias.lane.mask)
+      call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, ptr %ptrB, i32 4, <4 x i1> %loop.dependence.mask)
 
 .. _int_experimental_vp_splice:
 

>From b43f31dc638564de4038df8b7345de4d94e2af49 Mon Sep 17 00:00:00 2001
From: Sam Tebbs <samuel.tebbs at arm.com>
Date: Fri, 21 Mar 2025 17:47:00 +0000
Subject: [PATCH 20/29] Reword argument description

---
 llvm/docs/LangRef.rst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 14713c669a1c9..e08c34de26056 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -24074,8 +24074,8 @@ across one vector loop iteration.
 Arguments:
 """"""""""
 
-The first two arguments have the same pointer type.
-The final one is an immediate and the result is a vector with the i1 element type.
+The first two arguments are pointers and the last argument is an immediate.
+The result is a vector with the i1 element type.
 
 Semantics:
 """"""""""

>From 7dbdf5218e53f84e0d0ea8eaa2922c7221adf0ce Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Tue, 20 May 2025 15:47:45 +0100
Subject: [PATCH 21/29] Fixup langref

---
 llvm/docs/LangRef.rst | 112 ++++++++++++++++++++++++++++--------------
 1 file changed, 76 insertions(+), 36 deletions(-)

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index e08c34de26056..06d131194b96e 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -24047,10 +24047,9 @@ Examples:
 
 
 .. _int_experimental_loop_dependence_war_mask:
-.. _int_experimental_loop_dependence_raw_mask:
 
-'``llvm.experimental.loop.dependence.raw.mask.*``' and '``llvm.experimental.loop.dependence.war.mask.*``' Intrinsics
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+'``llvm.experimental.loop.dependence.war.mask.*``' Intrinsics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Syntax:
 """""""
@@ -24058,18 +24057,22 @@ This is an overloaded intrinsic.
 
 ::
 
-      declare <4 x i1> @llvm.experimental.loop.dependence.raw.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+      declare <4 x i1> @llvm.experimental.loop.dependence.war.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
       declare <8 x i1> @llvm.experimental.loop.dependence.war.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
-      declare <16 x i1> @llvm.experimental.loop.dependence.raw.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+      declare <16 x i1> @llvm.experimental.loop.dependence.war.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
       declare <vscale x 16 x i1> @llvm.experimental.loop.dependence.war.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
 
 
 Overview:
 """""""""
 
-Create a mask enabling lanes that do not overlap between two pointers
-across one vector loop iteration.
+Given a scalar load from ``%ptrA``, followed by a scalar store to ``%ptrB``,
+this intrinsic generates a mask where an active lane indicates that there is no
+write-after-read hazard for this lane.
 
+A write-after-read hazard occurs when a write-after-read sequence for a given
+lane in a vector ends up being executed as a read-after-write sequence due to
+the aliasing of pointers.
 
 Arguments:
 """"""""""
@@ -24081,51 +24084,88 @@ Semantics:
 """"""""""
 
 ``%elementSize`` is the size of the accessed elements in bytes.
-The intrinsic will return poison if ``%ptrA`` and ``%ptrB`` are within
-VF * ``%elementSize`` of each other and ``%ptrA`` + VF * ``%elementSize`` wraps.
-The '``llvm.experimental.loop.dependence.war.mask*``' intrinsics are semantically
-equivalent to:
+The intrinsic returns ``poison`` if the distance between ``%ptrA`` and ``%ptrB``
+is smaller than ``VF * %elementSize`` and either ``%ptrA + VF * %elementSize``
+or ``%ptrB + VF * %elementSize`` wraps.
+An element of the result mask is active when no write-after-read hazard occurs,
+meaning that:
 
-::
+* (ptrB - ptrA) <= 0 (guarantees that all lanes are loaded before any stores are
+  committed), or
+* (ptrB - ptrA) >= elementSize * lane (guarantees that this lane is loaded
+  before the store to the same address is committed)
 
-      %diff = (%ptrB - %ptrA) / %elementSize
-      %m[i] = (icmp ult i, %diff) || (%diff <= 0)
+Examples:
+"""""""""
 
-When the return value is not poison the '``llvm.experimental.loop.dependence.raw.mask.*``'
-intrinsics are semantically equivalent to:
+.. code-block:: llvm
+
+      %loop.dependence.mask = call <4 x i1> @llvm.experimental.loop.dependence.war.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4)
+      %vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(ptr %ptrA, i32 4, <4 x i1> %loop.dependence.mask, <4 x i32> poison)
+      [...]
+      call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, ptr %ptrB, i32 4, <4 x i1> %loop.dependence.mask)
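
As an editorial worked example of the two conditions above: assume
``%ptrB - %ptrA`` is 8 bytes, ``%elementSize`` is 4 and VF is 4, so lane ``n``
is active when ``8 >= 4 * n``::

      %m = call <4 x i1> @llvm.experimental.loop.dependence.war.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4)
      ; lane 0: 8 >= 4 * 0 -> active
      ; lane 1: 8 >= 4 * 1 -> active
      ; lane 2: 8 >= 4 * 2 -> active
      ; lane 3: 8 <  4 * 3 -> inactive
      ; %m is <i1 1, i1 1, i1 1, i1 0>
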
+
+.. _int_experimental_loop_dependence_raw_mask:
+
+'``llvm.experimental.loop.dependence.raw.mask.*``' Intrinsics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+This is an overloaded intrinsic.
 
 ::
 
-      %diff = abs(%ptrB - %ptrA) / %elementSize
-      %m[i] = (icmp ult i, %diff) || (%diff == 0)
+      declare <4 x i1> @llvm.experimental.loop.dependence.raw.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+      declare <8 x i1> @llvm.experimental.loop.dependence.raw.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+      declare <16 x i1> @llvm.experimental.loop.dependence.raw.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+      declare <vscale x 16 x i1> @llvm.experimental.loop.dependence.raw.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+
 
-where ``%m`` is a vector (mask) of active/inactive lanes with its elements
-indexed by ``i`` (i = 0 to VF - 1),  and ``%ptrA``, ``%ptrB`` are the two ptr
-arguments to ``llvm.experimental.loop.dependence.{raw,war}.mask.*`` and ``%elementSize``
-is the first immediate argument. The ``war`` variant is expected to be used when
-``%ptrB`` is stored to after ``%ptrA`` is read from, otherwise the ``raw`` variant is
-expected to be used.
+Overview:
+"""""""""
 
-This can, for example, be emitted by the loop vectorizer in which case
-``%ptrA`` is a pointer that is read from within the loop, and ``%ptrB`` is a
-pointer that is stored to within the loop.
-If the difference between these pointers is less than the vector factor, then
-they overlap (alias) within a loop iteration.
-An example is if ``%ptrA`` is 20 and ``%ptrB`` is 23 with a vector factor of 8,
-then lanes 3, 4, 5, 6 and 7 of the vector loaded from ``%ptrA``
-share addresses with lanes 0, 1, 2, 3, 4 and 5 from the vector stored to at
-``%ptrB``.
+Given a scalar store to ``%ptrA``, followed by a scalar load from ``%ptrB``,
+this intrinsic generates a mask where an active lane indicates that there is no
+read-after-write hazard for this lane and that this lane does not introduce any
+new store-to-load forwarding hazard.
 
+A read-after-write hazard occurs when a read-after-write sequence for a given
+lane in a vector ends up being executed as a write-after-read sequence due to
+the aliasing of pointers.
+
+
+Arguments:
+""""""""""
+
+The first two arguments are pointers and the last argument is an immediate.
+The result is a vector with the i1 element type.
+
+Semantics:
+""""""""""
+
+``%elementSize`` is the size of the accessed elements in bytes.
+The intrinsic returns ``poison`` if the distance between ``%ptrA`` and ``%ptrB``
+is smaller than ``VF * %elementSize`` and either ``%ptrA + VF * %elementSize``
+or ``%ptrB + VF * %elementSize`` wraps.
+An element of the result mask is active when no read-after-write hazard occurs, meaning that:
+
+  abs(ptrB - ptrA) >= elementSize * lane (guarantees that the store of this lane
+  is committed before loading from this address)
+
+Note that the case where (ptrB - ptrA) < 0 does not result in any
+read-after-write hazards, but may introduce new store-to-load forwarding stalls
+where both the store and load partially access the same addresses.
 
 Examples:
 """""""""
 
 .. code-block:: llvm
 
-      %loop.dependence.mask = call <4 x i1> @llvm.experimental.loop.dependence.war.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4)
-      %vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(ptr %ptrA, i32 4, <4 x i1> %loop.dependence.mask, <4 x i32> poison)
+      %loop.dependence.mask = call <4 x i1> @llvm.experimental.loop.dependence.raw.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4)
+      call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, ptr %ptrA, i32 4, <4 x i1> %loop.dependence.mask)
       [...]
-      call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, ptr %ptrB, i32 4, <4 x i1> %loop.dependence.mask)
+      %vecB = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(ptr %ptrB, i32 4, <4 x i1> %loop.dependence.mask, <4 x i32> poison)
 
 .. _int_experimental_vp_splice:
 

>From bb79200125299e4c003e71402851329c79a807b2 Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Tue, 20 May 2025 15:50:40 +0100
Subject: [PATCH 22/29] IsWriteAfterRead -> IsReadAfterWrite and avoid using
 ops vector

---
 llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp   | 10 +++++-----
 llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp |  7 +++----
 2 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 86720471e9584..e8f0fedd78c97 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -1809,13 +1809,13 @@ SDValue VectorLegalizer::ExpandLOOP_DEPENDENCE_MASK(SDNode *N) {
   SDValue SinkValue = N->getOperand(1);
   SDValue EltSize = N->getOperand(2);
 
-  bool IsWriteAfterRead =
-      N->getOpcode() == ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK;
+  bool IsReadAfterWrite =
+      N->getOpcode() == ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK;
   auto VT = N->getValueType(0);
   auto PtrVT = SourceValue->getValueType(0);
 
   SDValue Diff = DAG.getNode(ISD::SUB, DL, PtrVT, SinkValue, SourceValue);
-  if (!IsWriteAfterRead)
+  if (IsReadAfterWrite)
     Diff = DAG.getNode(ISD::ABS, DL, PtrVT, Diff);
 
   Diff = DAG.getNode(ISD::SDIV, DL, PtrVT, Diff, EltSize);
@@ -1825,7 +1825,7 @@ SDValue VectorLegalizer::ExpandLOOP_DEPENDENCE_MASK(SDNode *N) {
                                       Diff.getValueType());
   SDValue Zero = DAG.getTargetConstant(0, DL, PtrVT);
   SDValue Cmp = DAG.getSetCC(DL, CmpVT, Diff, Zero,
-                             IsWriteAfterRead ? ISD::SETLE : ISD::SETEQ);
+                             IsReadAfterWrite ? ISD::SETEQ : ISD::SETLE);
 
   // Create the lane mask
   EVT SplatTY =
@@ -1836,7 +1836,7 @@ SDValue VectorLegalizer::ExpandLOOP_DEPENDENCE_MASK(SDNode *N) {
       DAG.getSetCC(DL, VT, VectorStep, DiffSplat, ISD::CondCode::SETULT);
 
   // Splat the compare result then OR it with the lane mask
-  auto VTElementTy = VT.getVectorElementType();
+  EVT VTElementTy = VT.getVectorElementType();
   if (CmpVT.getScalarSizeInBits() < VTElementTy.getScalarSizeInBits())
     Cmp = DAG.getNode(ISD::ZERO_EXTEND, DL, VTElementTy, Cmp);
   SDValue Splat = DAG.getSplat(VT, DL, Cmp);
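
Taken together, the generic expansion above corresponds roughly to the
following IR for the WAR case (an editorial sketch for <4 x i1> with a 4-byte
element size; the RAW case instead takes abs(%ptrB - %ptrA) and compares the
scaled difference for equality with zero)::

      %diff.bytes = sub i64 %ptrB, %ptrA      ; SinkValue - SourceValue
      %diff = sdiv i64 %diff.bytes, 4         ; scale by the element size
      %all.safe = icmp sle i64 %diff, 0       ; WAR: all lanes safe if B <= A
      %d.ins = insertelement <4 x i64> poison, i64 %diff, i64 0
      %d.splat = shufflevector <4 x i64> %d.ins, <4 x i64> poison, <4 x i32> zeroinitializer
      %lane.mask = icmp ult <4 x i64> <i64 0, i64 1, i64 2, i64 3>, %d.splat
      %s.ins = insertelement <4 x i1> poison, i1 %all.safe, i64 0
      %s.splat = shufflevector <4 x i1> %s.ins, <4 x i1> poison, <4 x i32> zeroinitializer
      %m = or <4 x i1> %lane.mask, %s.splat
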
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index c4aabe748c7c5..484c47a178fce 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -8275,13 +8275,12 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
   case Intrinsic::experimental_loop_dependence_raw_mask: {
     auto IntrinsicVT = EVT::getEVT(I.getType());
     SmallVector<SDValue, 4> Ops;
-    for (auto &Op : I.operands())
-      Ops.push_back(getValue(Op));
     unsigned ID = Intrinsic == Intrinsic::experimental_loop_dependence_war_mask
                       ? ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK
                       : ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK;
-    SDValue Mask = DAG.getNode(ID, sdl, IntrinsicVT, Ops);
-    setValue(&I, Mask);
+    setValue(&I,
+             DAG.getNode(ID, sdl, IntrinsicVT, getValue(I.getOperand(0)),
+                         getValue(I.getOperand(1)), getValue(I.getOperand(2))));
   }
   }
 }

>From c164ff0bbd4dd3d43d4721dd827bf5d5e7528d8f Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Tue, 20 May 2025 16:41:07 +0100
Subject: [PATCH 23/29] Extend vXi1 setcc to account for intrinsic VT promotion

---
 .../CodeGen/SelectionDAG/LegalizeVectorOps.cpp    | 15 ++++++++++++---
 llvm/test/CodeGen/AArch64/alias_mask.ll           |  8 --------
 2 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index e8f0fedd78c97..d72827759f549 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -1829,14 +1829,23 @@ SDValue VectorLegalizer::ExpandLOOP_DEPENDENCE_MASK(SDNode *N) {
 
   // Create the lane mask
   EVT SplatTY =
-      EVT::getVectorVT(*DAG.getContext(), PtrVT, VT.getVectorElementCount());
+      EVT::getVectorVT(*DAG.getContext(), PtrVT, VT.getVectorMinNumElements(),
+                       VT.isScalableVector());
   SDValue DiffSplat = DAG.getSplat(SplatTY, DL, Diff);
   SDValue VectorStep = DAG.getStepVector(DL, SplatTY);
+  EVT MaskVT =
+      EVT::getVectorVT(*DAG.getContext(), MVT::i1, VT.getVectorMinNumElements(),
+                       VT.isScalableVector());
   SDValue DiffMask =
-      DAG.getSetCC(DL, VT, VectorStep, DiffSplat, ISD::CondCode::SETULT);
+      DAG.getSetCC(DL, MaskVT, VectorStep, DiffSplat, ISD::CondCode::SETULT);
 
-  // Splat the compare result then OR it with the lane mask
   EVT VTElementTy = VT.getVectorElementType();
+  // Extend the diff setcc in case the intrinsic has been promoted to a vector
+  // type with elements larger than i1
+  if (VTElementTy.getScalarSizeInBits() > MaskVT.getScalarSizeInBits())
+    DiffMask = DAG.getNode(ISD::ANY_EXTEND, DL, VT, DiffMask);
+
+  // Splat the compare result then OR it with the lane mask
   if (CmpVT.getScalarSizeInBits() < VTElementTy.getScalarSizeInBits())
     Cmp = DAG.getNode(ISD::ZERO_EXTEND, DL, VTElementTy, Cmp);
   SDValue Splat = DAG.getSplat(VT, DL, Cmp);
diff --git a/llvm/test/CodeGen/AArch64/alias_mask.ll b/llvm/test/CodeGen/AArch64/alias_mask.ll
index 3248cb2de2644..75ab6f62095b6 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask.ll
@@ -48,8 +48,6 @@ define <16 x i1> @whilewr_8(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    uzp1 v1.8h, v2.8h, v3.8h
 ; CHECK-NOSVE-NEXT:    uzp1 v0.16b, v1.16b, v0.16b
 ; CHECK-NOSVE-NEXT:    dup v1.16b, w8
-; CHECK-NOSVE-NEXT:    shl v0.16b, v0.16b, #7
-; CHECK-NOSVE-NEXT:    cmlt v0.16b, v0.16b, #0
 ; CHECK-NOSVE-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
@@ -90,8 +88,6 @@ define <8 x i1> @whilewr_16(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    uzp1 v0.8h, v0.8h, v1.8h
 ; CHECK-NOSVE-NEXT:    dup v1.8b, w8
 ; CHECK-NOSVE-NEXT:    xtn v0.8b, v0.8h
-; CHECK-NOSVE-NEXT:    shl v0.8b, v0.8b, #7
-; CHECK-NOSVE-NEXT:    cmlt v0.8b, v0.8b, #0
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
@@ -210,8 +206,6 @@ define <16 x i1> @whilerw_8(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    uzp1 v1.8h, v3.8h, v2.8h
 ; CHECK-NOSVE-NEXT:    uzp1 v0.16b, v1.16b, v0.16b
 ; CHECK-NOSVE-NEXT:    dup v1.16b, w8
-; CHECK-NOSVE-NEXT:    shl v0.16b, v0.16b, #7
-; CHECK-NOSVE-NEXT:    cmlt v0.16b, v0.16b, #0
 ; CHECK-NOSVE-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
@@ -253,8 +247,6 @@ define <8 x i1> @whilerw_16(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    uzp1 v0.8h, v0.8h, v1.8h
 ; CHECK-NOSVE-NEXT:    dup v1.8b, w8
 ; CHECK-NOSVE-NEXT:    xtn v0.8b, v0.8h
-; CHECK-NOSVE-NEXT:    shl v0.8b, v0.8b, #7
-; CHECK-NOSVE-NEXT:    cmlt v0.8b, v0.8b, #0
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:

>From 328f0154f2d7c41123b30cb7ec2a306a8755d78a Mon Sep 17 00:00:00 2001
From: Sam Tebbs <samuel.tebbs at arm.com>
Date: Wed, 21 May 2025 14:35:29 +0100
Subject: [PATCH 24/29] Remove experimental from intrinsic name

---
 llvm/docs/LangRef.rst                         | 28 +++++++++----------
 llvm/include/llvm/CodeGen/ISDOpcodes.h        |  4 +--
 llvm/include/llvm/IR/Intrinsics.td            |  4 +--
 .../SelectionDAG/LegalizeIntegerTypes.cpp     | 19 ++++++-------
 llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h |  5 ++--
 .../SelectionDAG/LegalizeVectorOps.cpp        | 11 ++++----
 .../SelectionDAG/SelectionDAGBuilder.cpp      | 10 +++----
 .../SelectionDAG/SelectionDAGDumper.cpp       |  4 +--
 llvm/lib/CodeGen/TargetLoweringBase.cpp       |  4 +--
 .../Target/AArch64/AArch64ISelLowering.cpp    | 21 ++++++--------
 llvm/test/CodeGen/AArch64/alias_mask.ll       | 16 +++++------
 .../CodeGen/AArch64/alias_mask_scalable.ll    | 16 +++++------
 12 files changed, 68 insertions(+), 74 deletions(-)

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 06d131194b96e..2ce5f362ad503 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -24046,9 +24046,9 @@ Examples:
       %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %3, i32 4, <4 x i1> %active.lane.mask, <4 x i32> poison)
 
 
-.. _int_experimental_loop_dependence_war_mask:
+.. _int_loop_dependence_war_mask:
 
-'``llvm.experimental.loop.dependence.war.mask.*``' Intrinsics
+'``llvm.loop.dependence.war.mask.*``' Intrinsics
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Syntax:
@@ -24057,10 +24057,10 @@ This is an overloaded intrinsic.
 
 ::
 
-      declare <4 x i1> @llvm.experimental.loop.dependence.war.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
-      declare <8 x i1> @llvm.experimental.loop.dependence.war.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
-      declare <16 x i1> @llvm.experimental.loop.dependence.war.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
-      declare <vscale x 16 x i1> @llvm.experimental.loop.dependence.war.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+      declare <4 x i1> @llvm.loop.dependence.war.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+      declare <8 x i1> @llvm.loop.dependence.war.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+      declare <16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+      declare <vscale x 16 x i1> @llvm.loop.dependence.war.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
 
 
 Overview:
@@ -24100,14 +24100,14 @@ Examples:
 
 .. code-block:: llvm
 
-      %loop.dependence.mask = call <4 x i1> @llvm.experimental.loop.dependence.war.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4)
+      %loop.dependence.mask = call <4 x i1> @llvm.loop.dependence.war.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4)
       %vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(ptr %ptrA, i32 4, <4 x i1> %loop.dependence.mask, <4 x i32> poison)
       [...]
       call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, ptr %ptrB, i32 4, <4 x i1> %loop.dependence.mask)
 
-.. _int_experimental_loop_dependence_raw_mask:
+.. _int_loop_dependence_raw_mask:
 
-'``llvm.experimental.loop.dependence.raw.mask.*``' Intrinsics
+'``llvm.loop.dependence.raw.mask.*``' Intrinsics
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Syntax:
@@ -24116,10 +24116,10 @@ This is an overloaded intrinsic.
 
 ::
 
-      declare <4 x i1> @llvm.experimental.loop.dependence.raw.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
-      declare <8 x i1> @llvm.experimental.loop.dependence.raw.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
-      declare <16 x i1> @llvm.experimental.loop.dependence.raw.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
-      declare <vscale x 16 x i1> @llvm.experimental.loop.dependence.raw.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+      declare <4 x i1> @llvm.loop.dependence.raw.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+      declare <8 x i1> @llvm.loop.dependence.raw.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+      declare <16 x i1> @llvm.loop.dependence.raw.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+      declare <vscale x 16 x i1> @llvm.loop.dependence.raw.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
 
 
 Overview:
@@ -24162,7 +24162,7 @@ Examples:
 
 .. code-block:: llvm
 
-      %loop.dependence.mask = call <4 x i1> @llvm.experimental.loop.dependence.raw.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4)
+      %loop.dependence.mask = call <4 x i1> @llvm.loop.dependence.raw.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4)
       call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, ptr %ptrA, i32 4, <4 x i1> %loop.dependence.mask)
       [...]
       %vecB = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(ptr %ptrB, i32 4, <4 x i1> %loop.dependence.mask, <4 x i32> poison)
diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index 57c3873e1451d..492f83ae34f61 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -1561,8 +1561,8 @@ enum NodeType {
   // The `llvm.experimental.loop.dependence.{war, raw}.mask` intrinsics
   // Operands: Load pointer, Store pointer, Element size
   // Output: Mask
-  EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK,
-  EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK,
+  LOOP_DEPENDENCE_WAR_MASK,
+  LOOP_DEPENDENCE_RAW_MASK,
 
   // llvm.clear_cache intrinsic
  // Operands: Input Chain, Start Address, End Address
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 1a9eb15112268..880764b62232f 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -2418,12 +2418,12 @@ let IntrProperties = [IntrNoMem, ImmArg<ArgIndex<1>>] in {
                                     llvm_i32_ty]>;
 }
 
-def int_experimental_loop_dependence_raw_mask:
+def int_loop_dependence_raw_mask:
   DefaultAttrsIntrinsic<[llvm_anyvector_ty],
             [llvm_ptr_ty, llvm_ptr_ty, llvm_i64_ty],
             [IntrNoMem, IntrNoSync, IntrWillReturn, ImmArg<ArgIndex<2>>]>;
 
-def int_experimental_loop_dependence_war_mask:
+def int_loop_dependence_war_mask:
   DefaultAttrsIntrinsic<[llvm_anyvector_ty],
             [llvm_ptr_ty, llvm_ptr_ty, llvm_i64_ty],
             [IntrNoMem, IntrNoSync, IntrWillReturn, ImmArg<ArgIndex<2>>]>;
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 3f8caeb1d4f42..f3c593ae02dd8 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -324,9 +324,9 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
     Res = PromoteIntRes_VP_REDUCE(N);
     break;
 
-  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
-  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK:
-    Res = PromoteIntRes_EXPERIMENTAL_LOOP_DEPENDENCE_MASK(N);
+  case ISD::LOOP_DEPENDENCE_WAR_MASK:
+  case ISD::LOOP_DEPENDENCE_RAW_MASK:
+    Res = PromoteIntRes_LOOP_DEPENDENCE_MASK(N);
     break;
 
   case ISD::FREEZE:
@@ -379,8 +379,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_MERGE_VALUES(SDNode *N,
   return GetPromotedInteger(Op);
 }
 
-SDValue
-DAGTypeLegalizer::PromoteIntRes_EXPERIMENTAL_LOOP_DEPENDENCE_MASK(SDNode *N) {
+SDValue DAGTypeLegalizer::PromoteIntRes_LOOP_DEPENDENCE_MASK(SDNode *N) {
   EVT VT = N->getValueType(0);
   EVT NewVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
   return DAG.getNode(N->getOpcode(), SDLoc(N), NewVT, N->ops());
@@ -2116,9 +2115,9 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
   case ISD::PARTIAL_REDUCE_SUMLA:
     Res = PromoteIntOp_PARTIAL_REDUCE_MLA(N);
     break;
-  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK:
-  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
-    Res = PromoteIntOp_EXPERIMENTAL_LOOP_DEPENDENCE_MASK(N, OpNo);
+  case ISD::LOOP_DEPENDENCE_RAW_MASK:
+  case ISD::LOOP_DEPENDENCE_WAR_MASK:
+    Res = PromoteIntOp_LOOP_DEPENDENCE_MASK(N, OpNo);
     break;
   }
 
@@ -2953,8 +2952,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_PARTIAL_REDUCE_MLA(SDNode *N) {
   return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
 }
 
-SDValue DAGTypeLegalizer::PromoteIntOp_EXPERIMENTAL_LOOP_DEPENDENCE_MASK(
-    SDNode *N, unsigned OpNo) {
+SDValue DAGTypeLegalizer::PromoteIntOp_LOOP_DEPENDENCE_MASK(SDNode *N,
+                                                            unsigned OpNo) {
   SmallVector<SDValue, 4> NewOps(N->ops());
   NewOps[OpNo] = GetPromotedInteger(N->getOperand(OpNo));
   return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index 44f3c6790edca..fa3b26177e2cf 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -382,7 +382,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
   SDValue PromoteIntRes_VECTOR_FIND_LAST_ACTIVE(SDNode *N);
   SDValue PromoteIntRes_GET_ACTIVE_LANE_MASK(SDNode *N);
   SDValue PromoteIntRes_PARTIAL_REDUCE_MLA(SDNode *N);
-  SDValue PromoteIntRes_EXPERIMENTAL_LOOP_DEPENDENCE_MASK(SDNode *N);
+  SDValue PromoteIntRes_LOOP_DEPENDENCE_MASK(SDNode *N);
 
   // Integer Operand Promotion.
   bool PromoteIntegerOperand(SDNode *N, unsigned OpNo);
@@ -437,8 +437,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
   SDValue PromoteIntOp_VECTOR_FIND_LAST_ACTIVE(SDNode *N, unsigned OpNo);
   SDValue PromoteIntOp_GET_ACTIVE_LANE_MASK(SDNode *N);
   SDValue PromoteIntOp_PARTIAL_REDUCE_MLA(SDNode *N);
-  SDValue PromoteIntOp_EXPERIMENTAL_LOOP_DEPENDENCE_MASK(SDNode *N,
-                                                         unsigned OpNo);
+  SDValue PromoteIntOp_LOOP_DEPENDENCE_MASK(SDNode *N, unsigned OpNo);
 
   void SExtOrZExtPromotedOperands(SDValue &LHS, SDValue &RHS);
   void PromoteSetCCOperands(SDValue &LHS,SDValue &RHS, ISD::CondCode Code);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index d72827759f549..74074d57c0291 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -476,8 +476,8 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
   case ISD::VECTOR_COMPRESS:
   case ISD::SCMP:
   case ISD::UCMP:
-  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
-  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK:
+  case ISD::LOOP_DEPENDENCE_WAR_MASK:
+  case ISD::LOOP_DEPENDENCE_RAW_MASK:
     Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
     break;
   case ISD::SMULFIX:
@@ -1294,8 +1294,8 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
   case ISD::UCMP:
     Results.push_back(TLI.expandCMP(Node, DAG));
     return;
-  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
-  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK:
+  case ISD::LOOP_DEPENDENCE_WAR_MASK:
+  case ISD::LOOP_DEPENDENCE_RAW_MASK:
     Results.push_back(ExpandLOOP_DEPENDENCE_MASK(Node));
     return;
 
@@ -1809,8 +1809,7 @@ SDValue VectorLegalizer::ExpandLOOP_DEPENDENCE_MASK(SDNode *N) {
   SDValue SinkValue = N->getOperand(1);
   SDValue EltSize = N->getOperand(2);
 
-  bool IsReadAfterWrite =
-      N->getOpcode() == ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK;
+  bool IsReadAfterWrite = N->getOpcode() == ISD::LOOP_DEPENDENCE_RAW_MASK;
   auto VT = N->getValueType(0);
   auto PtrVT = SourceValue->getValueType(0);
 
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 484c47a178fce..23fd94ea588ce 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -8271,13 +8271,13 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
     visitVectorExtractLastActive(I, Intrinsic);
     return;
   }
-  case Intrinsic::experimental_loop_dependence_war_mask:
-  case Intrinsic::experimental_loop_dependence_raw_mask: {
+  case Intrinsic::loop_dependence_war_mask:
+  case Intrinsic::loop_dependence_raw_mask: {
     auto IntrinsicVT = EVT::getEVT(I.getType());
     SmallVector<SDValue, 4> Ops;
-    unsigned ID = Intrinsic == Intrinsic::experimental_loop_dependence_war_mask
-                      ? ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK
-                      : ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK;
+    unsigned ID = Intrinsic == Intrinsic::loop_dependence_war_mask
+                      ? ISD::LOOP_DEPENDENCE_WAR_MASK
+                      : ISD::LOOP_DEPENDENCE_RAW_MASK;
     setValue(&I,
              DAG.getNode(ID, sdl, IntrinsicVT, getValue(I.getOperand(0)),
                          getValue(I.getOperand(1)), getValue(I.getOperand(2))));
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index d5c8431370243..4b2a00c2e2cfa 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -587,9 +587,9 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
     return "partial_reduce_smla";
   case ISD::PARTIAL_REDUCE_SUMLA:
     return "partial_reduce_sumla";
-  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
+  case ISD::LOOP_DEPENDENCE_WAR_MASK:
     return "loop_dep_war";
-  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK:
+  case ISD::LOOP_DEPENDENCE_RAW_MASK:
     return "loop_dep_raw";
 
     // Vector Predication
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index c99f141f15154..d05b25f7272ae 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -901,8 +901,8 @@ void TargetLoweringBase::initActions() {
     setOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE, VT, Expand);
 
     // Lane masks with non-aliasing lanes enabled default to expand
-    setOperationAction(ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK, VT, Expand);
-    setOperationAction(ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK, VT, Expand);
+    setOperationAction(ISD::LOOP_DEPENDENCE_RAW_MASK, VT, Expand);
+    setOperationAction(ISD::LOOP_DEPENDENCE_WAR_MASK, VT, Expand);
 
     // FP environment operations default to expand.
     setOperationAction(ISD::GET_FPENV, VT, Expand);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index d31b5e038e1d2..89c64bb040ef7 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1920,10 +1920,8 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       (Subtarget->hasSME() && Subtarget->isStreaming())) {
     for (auto VT : {MVT::v2i32, MVT::v4i16, MVT::v8i8, MVT::v16i8, MVT::nxv2i1,
                     MVT::nxv4i1, MVT::nxv8i1, MVT::nxv16i1}) {
-      setOperationAction(ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK, VT,
-                         Custom);
-      setOperationAction(ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK, VT,
-                         Custom);
+      setOperationAction(ISD::LOOP_DEPENDENCE_RAW_MASK, VT, Custom);
+      setOperationAction(ISD::LOOP_DEPENDENCE_WAR_MASK, VT, Custom);
     }
   }
 
@@ -5218,8 +5216,7 @@ AArch64TargetLowering::LowerLOOP_DEPENDENCE_MASK(SDValue Op,
                                                  SelectionDAG &DAG) const {
   SDLoc DL(Op);
   uint64_t EltSize = Op.getConstantOperandVal(2);
-  bool IsWriteAfterRead =
-      Op.getOpcode() == ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK;
+  bool IsWriteAfterRead = Op.getOpcode() == ISD::LOOP_DEPENDENCE_WAR_MASK;
   unsigned Opcode =
       IsWriteAfterRead ? AArch64ISD::WHILEWR : AArch64ISD::WHILERW;
   EVT VT = Op.getValueType();
@@ -7440,8 +7437,8 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
   default:
     llvm_unreachable("unimplemented operand");
     return SDValue();
-  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK:
-  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
+  case ISD::LOOP_DEPENDENCE_RAW_MASK:
+  case ISD::LOOP_DEPENDENCE_WAR_MASK:
     return LowerLOOP_DEPENDENCE_MASK(Op, DAG);
   case ISD::BITCAST:
     return LowerBITCAST(Op, DAG);
@@ -28242,8 +28239,8 @@ void AArch64TargetLowering::ReplaceNodeResults(
   case ISD::GET_ACTIVE_LANE_MASK:
     ReplaceGetActiveLaneMaskResults(N, Results, DAG);
     return;
-  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
-  case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK: {
+  case ISD::LOOP_DEPENDENCE_WAR_MASK:
+  case ISD::LOOP_DEPENDENCE_RAW_MASK: {
     EVT VT = N->getValueType(0);
     if (!VT.isFixedLengthVector() || VT.getVectorElementType() != MVT::i1)
       return;
@@ -28314,8 +28311,8 @@ void AArch64TargetLowering::ReplaceNodeResults(
       return;
     }
     case Intrinsic::experimental_vector_match:
-    case Intrinsic::experimental_loop_dependence_raw_mask:
-    case Intrinsic::experimental_loop_dependence_war_mask: {
+    case Intrinsic::loop_dependence_raw_mask:
+    case Intrinsic::loop_dependence_war_mask: {
       if (!VT.isFixedLengthVector() || VT.getVectorElementType() != MVT::i1)
         return;
 
diff --git a/llvm/test/CodeGen/AArch64/alias_mask.ll b/llvm/test/CodeGen/AArch64/alias_mask.ll
index 75ab6f62095b6..8b74b7101a740 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask.ll
@@ -51,7 +51,7 @@ define <16 x i1> @whilewr_8(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <16 x i1> @llvm.experimental.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 1)
+  %0 = call <16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 1)
   ret <16 x i1> %0
 }
 
@@ -91,7 +91,7 @@ define <8 x i1> @whilewr_16(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <8 x i1> @llvm.experimental.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 2)
+  %0 = call <8 x i1> @llvm.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 2)
   ret <8 x i1> %0
 }
 
@@ -125,7 +125,7 @@ define <4 x i1> @whilewr_32(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <4 x i1> @llvm.experimental.loop.dependence.war.mask.v4i1(ptr %a, ptr %b, i64 4)
+  %0 = call <4 x i1> @llvm.loop.dependence.war.mask.v4i1(ptr %a, ptr %b, i64 4)
   ret <4 x i1> %0
 }
 
@@ -155,7 +155,7 @@ define <2 x i1> @whilewr_64(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <2 x i1> @llvm.experimental.loop.dependence.war.mask.v2i1(ptr %a, ptr %b, i64 8)
+  %0 = call <2 x i1> @llvm.loop.dependence.war.mask.v2i1(ptr %a, ptr %b, i64 8)
   ret <2 x i1> %0
 }
 
@@ -209,7 +209,7 @@ define <16 x i1> @whilerw_8(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <16 x i1> @llvm.experimental.loop.dependence.raw.mask.v16i1(ptr %a, ptr %b, i64 1)
+  %0 = call <16 x i1> @llvm.loop.dependence.raw.mask.v16i1(ptr %a, ptr %b, i64 1)
   ret <16 x i1> %0
 }
 
@@ -250,7 +250,7 @@ define <8 x i1> @whilerw_16(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <8 x i1> @llvm.experimental.loop.dependence.raw.mask.v8i1(ptr %a, ptr %b, i64 2)
+  %0 = call <8 x i1> @llvm.loop.dependence.raw.mask.v8i1(ptr %a, ptr %b, i64 2)
   ret <8 x i1> %0
 }
 
@@ -285,7 +285,7 @@ define <4 x i1> @whilerw_32(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <4 x i1> @llvm.experimental.loop.dependence.raw.mask.v4i1(ptr %a, ptr %b, i64 4)
+  %0 = call <4 x i1> @llvm.loop.dependence.raw.mask.v4i1(ptr %a, ptr %b, i64 4)
   ret <4 x i1> %0
 }
 
@@ -316,6 +316,6 @@ define <2 x i1> @whilerw_64(ptr %a, ptr %b) {
 ; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NOSVE-NEXT:    ret
 entry:
-  %0 = call <2 x i1> @llvm.experimental.loop.dependence.raw.mask.v2i1(ptr %a, ptr %b, i64 8)
+  %0 = call <2 x i1> @llvm.loop.dependence.raw.mask.v2i1(ptr %a, ptr %b, i64 8)
   ret <2 x i1> %0
 }
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
index 5a7c3180e2807..47b8e31d8b5be 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
@@ -60,7 +60,7 @@ define <vscale x 16 x i1> @whilewr_8(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 16 x i1> @llvm.experimental.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 1)
+  %0 = call <vscale x 16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 1)
   ret <vscale x 16 x i1> %0
 }
 
@@ -98,7 +98,7 @@ define <vscale x 8 x i1> @whilewr_16(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 8 x i1> @llvm.experimental.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 2)
+  %0 = call <vscale x 8 x i1> @llvm.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 2)
   ret <vscale x 8 x i1> %0
 }
 
@@ -130,7 +130,7 @@ define <vscale x 4 x i1> @whilewr_32(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 4 x i1> @llvm.experimental.loop.dependence.war.mask.v4i1(ptr %a, ptr %b, i64 4)
+  %0 = call <vscale x 4 x i1> @llvm.loop.dependence.war.mask.v4i1(ptr %a, ptr %b, i64 4)
   ret <vscale x 4 x i1> %0
 }
 
@@ -158,7 +158,7 @@ define <vscale x 2 x i1> @whilewr_64(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 2 x i1> @llvm.experimental.loop.dependence.war.mask.v2i1(ptr %a, ptr %b, i64 8)
+  %0 = call <vscale x 2 x i1> @llvm.loop.dependence.war.mask.v2i1(ptr %a, ptr %b, i64 8)
   ret <vscale x 2 x i1> %0
 }
 
@@ -223,7 +223,7 @@ define <vscale x 16 x i1> @whilerw_8(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 16 x i1> @llvm.experimental.loop.dependence.raw.mask.v16i1(ptr %a, ptr %b, i64 1)
+  %0 = call <vscale x 16 x i1> @llvm.loop.dependence.raw.mask.v16i1(ptr %a, ptr %b, i64 1)
   ret <vscale x 16 x i1> %0
 }
 
@@ -262,7 +262,7 @@ define <vscale x 8 x i1> @whilerw_16(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 8 x i1> @llvm.experimental.loop.dependence.raw.mask.v8i1(ptr %a, ptr %b, i64 2)
+  %0 = call <vscale x 8 x i1> @llvm.loop.dependence.raw.mask.v8i1(ptr %a, ptr %b, i64 2)
   ret <vscale x 8 x i1> %0
 }
 
@@ -295,7 +295,7 @@ define <vscale x 4 x i1> @whilerw_32(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 4 x i1> @llvm.experimental.loop.dependence.raw.mask.v4i1(ptr %a, ptr %b, i64 4)
+  %0 = call <vscale x 4 x i1> @llvm.loop.dependence.raw.mask.v4i1(ptr %a, ptr %b, i64 4)
   ret <vscale x 4 x i1> %0
 }
 
@@ -324,6 +324,6 @@ define <vscale x 2 x i1> @whilerw_64(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 2 x i1> @llvm.experimental.loop.dependence.raw.mask.v2i1(ptr %a, ptr %b, i64 8)
+  %0 = call <vscale x 2 x i1> @llvm.loop.dependence.raw.mask.v2i1(ptr %a, ptr %b, i64 8)
   ret <vscale x 2 x i1> %0
 }
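
A note on the generic path: targets without whilewr/whilerw fall back to
ExpandLOOP_DEPENDENCE_MASK, which builds the mask from the pointer distance.
A minimal LLVM IR sketch of the write-after-read case, assuming <4 x i1>
lanes and a 4-byte element size (the function and value names here are
illustrative, not part of the patch):

    define <4 x i1> @war_mask_expansion_sketch(i64 %ptrA, i64 %ptrB) {
      ; Distance between the pointers, in elements.
      %diff.bytes = sub i64 %ptrB, %ptrA
      %diff = sdiv i64 %diff.bytes, 4
      ; A non-positive distance means no lane can write over an unread lane.
      %none.alias = icmp sle i64 %diff, 0
      ; A lane is safe when its index is below the element distance.
      %d.ins = insertelement <4 x i64> poison, i64 %diff, i64 0
      %d.splat = shufflevector <4 x i64> %d.ins, <4 x i64> poison, <4 x i32> zeroinitializer
      %lane.safe = icmp ult <4 x i64> <i64 0, i64 1, i64 2, i64 3>, %d.splat
      ; OR the all-safe condition into every lane.
      %n.ins = insertelement <4 x i1> poison, i1 %none.alias, i64 0
      %n.splat = shufflevector <4 x i1> %n.ins, <4 x i1> poison, <4 x i32> zeroinitializer
      %mask = or <4 x i1> %lane.safe, %n.splat
      ret <4 x i1> %mask
    }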

>From 0ca7dd6da3d109bbc8d286a3287451afade78473 Mon Sep 17 00:00:00 2001
From: Sam Tebbs <samuel.tebbs at arm.com>
Date: Wed, 21 May 2025 15:16:06 +0100
Subject: [PATCH 25/29] Clean up vector type creation

---
 llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp  | 12 ++++--------
 .../lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp |  1 -
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp      |  9 +++++----
 3 files changed, 9 insertions(+), 13 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 74074d57c0291..c7e63e813371f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -1820,21 +1820,17 @@ SDValue VectorLegalizer::ExpandLOOP_DEPENDENCE_MASK(SDNode *N) {
   Diff = DAG.getNode(ISD::SDIV, DL, PtrVT, Diff, EltSize);
 
   // If the difference is positive then some elements may alias
-  auto CmpVT = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
-                                      Diff.getValueType());
+  EVT CmpVT = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
+                                     Diff.getValueType());
   SDValue Zero = DAG.getTargetConstant(0, DL, PtrVT);
   SDValue Cmp = DAG.getSetCC(DL, CmpVT, Diff, Zero,
                              IsReadAfterWrite ? ISD::SETEQ : ISD::SETLE);
 
   // Create the lane mask
-  EVT SplatTY =
-      EVT::getVectorVT(*DAG.getContext(), PtrVT, VT.getVectorMinNumElements(),
-                       VT.isScalableVector());
+  EVT SplatTY = VT.changeElementType(PtrVT);
   SDValue DiffSplat = DAG.getSplat(SplatTY, DL, Diff);
   SDValue VectorStep = DAG.getStepVector(DL, SplatTY);
-  EVT MaskVT =
-      EVT::getVectorVT(*DAG.getContext(), MVT::i1, VT.getVectorMinNumElements(),
-                       VT.isScalableVector());
+  EVT MaskVT = VT.changeElementType(MVT::i1);
   SDValue DiffMask =
       DAG.getSetCC(DL, MaskVT, VectorStep, DiffSplat, ISD::CondCode::SETULT);
 
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 23fd94ea588ce..9a5e2ef1b447a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -8274,7 +8274,6 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
   case Intrinsic::loop_dependence_war_mask:
   case Intrinsic::loop_dependence_raw_mask: {
     auto IntrinsicVT = EVT::getEVT(I.getType());
-    SmallVector<SDValue, 4> Ops;
     unsigned ID = Intrinsic == Intrinsic::loop_dependence_war_mask
                       ? ISD::LOOP_DEPENDENCE_WAR_MASK
                       : ISD::LOOP_DEPENDENCE_RAW_MASK;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 89c64bb040ef7..645623e865eff 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -6506,7 +6506,8 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     return DAG.getNode(AArch64ISD::USDOT, DL, Op.getValueType(),
                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
   }
-  case Intrinsic::experimental_get_nonalias_lane_mask: {
+  case Intrinsic::loop_dependence_war_mask:
+  case Intrinsic::loop_dependence_raw_mask: {
     unsigned IntrinsicID = 0;
     uint64_t EltSize = Op.getOperand(3)->getAsZExtVal();
     bool IsWriteAfterRead = Op.getOperand(4)->getAsZExtVal() == 1;
@@ -20036,9 +20037,9 @@ static SDValue getPTest(SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op,
                         AArch64CC::CondCode Cond);
 
 static bool isPredicateCCSettingOp(SDValue N) {
-  if ((N.getOpcode() == ISD::SETCC ||
-       // get_active_lane_mask is lowered to a whilelo instruction.
-       N.getOpcode() == ISD::GET_ACTIVE_LANE_MASK) ||
+  if ((N.getOpcode() == ISD::SETCC) ||
+      // get_active_lane_mask is lowered to a whilelo instruction.
+      (N.getOpcode() == ISD::GET_ACTIVE_LANE_MASK) ||
       (N.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        (N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilege ||
         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilegt ||
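
For the fixed-length types handled here, the lowering keeps the SVE
whilewr/whilerw result in a scalable container and extracts the fixed part.
A hedged IR-level picture of that DAG sequence for <8 x i1> with 2-byte
elements (the real lowering operates on SDNodes; names are illustrative):

    declare <vscale x 8 x i1> @llvm.loop.dependence.war.mask.nxv8i1(ptr, ptr, i64)
    declare <8 x i8> @llvm.vector.extract.v8i8.nxv8i8(<vscale x 8 x i8>, i64)

    define <8 x i1> @fixed_length_sketch(ptr %a, ptr %b) {
      ; Build the scalable predicate (selects whilewr p0.h)...
      %p = call <vscale x 8 x i1> @llvm.loop.dependence.war.mask.nxv8i1(ptr %a, ptr %b, i64 2)
      ; ...sign-extend it into an integer container...
      %ext = sext <vscale x 8 x i1> %p to <vscale x 8 x i8>
      ; ...then take the low fixed-width subvector and narrow back to i1.
      %sub = call <8 x i8> @llvm.vector.extract.v8i8.nxv8i8(<vscale x 8 x i8> %ext, i64 0)
      %mask = trunc <8 x i8> %sub to <8 x i1>
      ret <8 x i1> %mask
    }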

>From 3b40d862b945bab62e6952839881be0c1f1866d2 Mon Sep 17 00:00:00 2001
From: Sam Tebbs <samuel.tebbs at arm.com>
Date: Tue, 5 Aug 2025 13:48:14 +0100
Subject: [PATCH 26/29] Address review

---
 llvm/docs/LangRef.rst                         |   7 +-
 .../include/llvm/Target/TargetSelectionDAG.td |   8 ++
 .../SelectionDAG/LegalizeIntegerTypes.cpp     |  11 --
 .../SelectionDAG/LegalizeVectorOps.cpp        |  18 +--
 .../SelectionDAG/SelectionDAGBuilder.cpp      |  16 +--
 llvm/lib/CodeGen/TargetLoweringBase.cpp       |   1 -
 .../Target/AArch64/AArch64ISelLowering.cpp    | 109 +++++++-----------
 .../lib/Target/AArch64/AArch64SVEInstrInfo.td |   9 +-
 llvm/lib/Target/AArch64/SVEInstrFormats.td    |  12 +-
 llvm/test/CodeGen/AArch64/alias_mask.ll       |  36 +++---
 .../CodeGen/AArch64/alias_mask_scalable.ll    |  16 +--
 11 files changed, 105 insertions(+), 138 deletions(-)

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 2ce5f362ad503..33384a534b454 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -24134,6 +24134,9 @@ A read-after-write hazard occurs when a read-after-write sequence for a given
 lane in a vector ends up being executed as a write-after-read sequence due to
 the aliasing of pointers.
 
+Note that the case where (ptrB - ptrA) < 0 does not result in any
+read-after-write hazards, but may introduce new store-to-load-forwarding stalls
+where both the store and load partially access the same addresses.
 
 Arguments:
 """"""""""
@@ -24153,10 +24156,6 @@ The element of the result mask is active when no read-after-write hazard occurs,
   abs(ptrB - ptrA) >= elementSize * lane (guarantees that the store of this lane
   is committed before loading from this address)
 
-Note that the case where (ptrB - ptrA) < 0 does not result in any
-read-after-write hazards, but may introduce new store-to-load-forwarding stalls
-where both the store and load partially access the same addresses.
-
 Examples:
 """""""""
 
diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index a4ed62bb5715c..b4dd88a809d4e 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -833,6 +833,14 @@ def step_vector : SDNode<"ISD::STEP_VECTOR", SDTypeProfile<1, 1,
 def scalar_to_vector : SDNode<"ISD::SCALAR_TO_VECTOR", SDTypeProfile<1, 1, []>,
                               []>;
 
+def SDTLoopDepMask : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<1>,
+                                   SDTCisSameAs<2, 1>, SDTCisInt<3>,
+                                   SDTCVecEltisVT<0,i1>]>;
+def loop_dependence_war_mask : SDNode<"ISD::LOOP_DEPENDENCE_WAR_MASK",
+                                      SDTLoopDepMask, []>;
+def loop_dependence_raw_mask : SDNode<"ISD::LOOP_DEPENDENCE_RAW_MASK",
+                                      SDTLoopDepMask, []>;
+
 // vector_extract/vector_insert are similar to extractelt/insertelt but allow
 // types that require promotion (a 16i8 extract where i8 is not a legal type so
 // uses i32 for example). extractelt/insertelt are preferred where the element
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index f3c593ae02dd8..922a1e1064f6c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -2115,10 +2115,6 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
   case ISD::PARTIAL_REDUCE_SUMLA:
     Res = PromoteIntOp_PARTIAL_REDUCE_MLA(N);
     break;
-  case ISD::LOOP_DEPENDENCE_RAW_MASK:
-  case ISD::LOOP_DEPENDENCE_WAR_MASK:
-    Res = PromoteIntOp_LOOP_DEPENDENCE_MASK(N, OpNo);
-    break;
   }
 
   // If the result is null, the sub-method took care of registering results etc.
@@ -2952,13 +2948,6 @@ SDValue DAGTypeLegalizer::PromoteIntOp_PARTIAL_REDUCE_MLA(SDNode *N) {
   return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
 }
 
-SDValue DAGTypeLegalizer::PromoteIntOp_LOOP_DEPENDENCE_MASK(SDNode *N,
-                                                            unsigned OpNo) {
-  SmallVector<SDValue, 4> NewOps(N->ops());
-  NewOps[OpNo] = GetPromotedInteger(N->getOperand(OpNo));
-  return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
-}
-
 //===----------------------------------------------------------------------===//
 //  Integer Result Expansion
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index c7e63e813371f..5420de97bd82d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -1810,8 +1810,8 @@ SDValue VectorLegalizer::ExpandLOOP_DEPENDENCE_MASK(SDNode *N) {
   SDValue EltSize = N->getOperand(2);
 
   bool IsReadAfterWrite = N->getOpcode() == ISD::LOOP_DEPENDENCE_RAW_MASK;
-  auto VT = N->getValueType(0);
-  auto PtrVT = SourceValue->getValueType(0);
+  EVT VT = N->getValueType(0);
+  EVT PtrVT = SourceValue->getValueType(0);
 
   SDValue Diff = DAG.getNode(ISD::SUB, DL, PtrVT, SinkValue, SourceValue);
   if (IsReadAfterWrite)
@@ -1827,22 +1827,22 @@ SDValue VectorLegalizer::ExpandLOOP_DEPENDENCE_MASK(SDNode *N) {
                              IsReadAfterWrite ? ISD::SETEQ : ISD::SETLE);
 
   // Create the lane mask
-  EVT SplatTY = VT.changeElementType(PtrVT);
-  SDValue DiffSplat = DAG.getSplat(SplatTY, DL, Diff);
-  SDValue VectorStep = DAG.getStepVector(DL, SplatTY);
+  EVT SplatVT = VT.changeElementType(PtrVT);
+  SDValue DiffSplat = DAG.getSplat(SplatVT, DL, Diff);
+  SDValue VectorStep = DAG.getStepVector(DL, SplatVT);
   EVT MaskVT = VT.changeElementType(MVT::i1);
   SDValue DiffMask =
       DAG.getSetCC(DL, MaskVT, VectorStep, DiffSplat, ISD::CondCode::SETULT);
 
-  EVT VTElementTy = VT.getVectorElementType();
+  EVT EltVT = VT.getVectorElementType();
   // Extend the diff setcc in case the intrinsic has been promoted to a vector
   // type with elements larger than i1
-  if (VTElementTy.getScalarSizeInBits() > MaskVT.getScalarSizeInBits())
+  if (EltVT.getScalarSizeInBits() > MaskVT.getScalarSizeInBits())
     DiffMask = DAG.getNode(ISD::ANY_EXTEND, DL, VT, DiffMask);
 
   // Splat the compare result then OR it with the lane mask
-  if (CmpVT.getScalarSizeInBits() < VTElementTy.getScalarSizeInBits())
-    Cmp = DAG.getNode(ISD::ZERO_EXTEND, DL, VTElementTy, Cmp);
+  if (CmpVT.getScalarSizeInBits() < EltVT.getScalarSizeInBits())
+    Cmp = DAG.getNode(ISD::ZERO_EXTEND, DL, EltVT, Cmp);
   SDValue Splat = DAG.getSplat(VT, DL, Cmp);
   return DAG.getNode(ISD::OR, DL, VT, DiffMask, Splat);
 }
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 9a5e2ef1b447a..84aa0006af693 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -8272,15 +8272,17 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
     return;
   }
   case Intrinsic::loop_dependence_war_mask:
-  case Intrinsic::loop_dependence_raw_mask: {
-    auto IntrinsicVT = EVT::getEVT(I.getType());
-    unsigned ID = Intrinsic == Intrinsic::loop_dependence_war_mask
-                      ? ISD::LOOP_DEPENDENCE_WAR_MASK
-                      : ISD::LOOP_DEPENDENCE_RAW_MASK;
     setValue(&I,
-             DAG.getNode(ID, sdl, IntrinsicVT, getValue(I.getOperand(0)),
+             DAG.getNode(ISD::LOOP_DEPENDENCE_WAR_MASK, sdl,
+                         EVT::getEVT(I.getType()), getValue(I.getOperand(0)),
                          getValue(I.getOperand(1)), getValue(I.getOperand(2))));
-  }
+    return;
+  case Intrinsic::loop_dependence_raw_mask:
+    setValue(&I,
+             DAG.getNode(ISD::LOOP_DEPENDENCE_RAW_MASK, sdl,
+                         EVT::getEVT(I.getType()), getValue(I.getOperand(0)),
+                         getValue(I.getOperand(1)), getValue(I.getOperand(2))));
+    return;
   }
 }
 
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index d05b25f7272ae..1c93179d6255f 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -900,7 +900,6 @@ void TargetLoweringBase::initActions() {
     // Masked vector extracts default to expand.
     setOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE, VT, Expand);
 
-    // Lane masks with non-aliasing lanes enabled default to expand
     setOperationAction(ISD::LOOP_DEPENDENCE_RAW_MASK, VT, Expand);
     setOperationAction(ISD::LOOP_DEPENDENCE_WAR_MASK, VT, Expand);
 
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 645623e865eff..d71aa19136ab1 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1918,11 +1918,15 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
   // Handle non-aliasing elements mask
   if (Subtarget->hasSVE2() ||
       (Subtarget->hasSME() && Subtarget->isStreaming())) {
-    for (auto VT : {MVT::v2i32, MVT::v4i16, MVT::v8i8, MVT::v16i8, MVT::nxv2i1,
-                    MVT::nxv4i1, MVT::nxv8i1, MVT::nxv16i1}) {
+    // FIXME: Support wider fixed-length types when msve-vector-bits is used.
+    for (auto VT : {MVT::v2i32, MVT::v4i16, MVT::v8i8, MVT::v16i8}) {
       setOperationAction(ISD::LOOP_DEPENDENCE_RAW_MASK, VT, Custom);
       setOperationAction(ISD::LOOP_DEPENDENCE_WAR_MASK, VT, Custom);
     }
+    for (auto VT : {MVT::nxv2i1, MVT::nxv4i1, MVT::nxv8i1, MVT::nxv16i1}) {
+      setOperationAction(ISD::LOOP_DEPENDENCE_RAW_MASK, VT, Legal);
+      setOperationAction(ISD::LOOP_DEPENDENCE_WAR_MASK, VT, Legal);
+    }
   }
 
   // Handle operations that are only available in non-streaming SVE mode.
@@ -5216,27 +5220,23 @@ AArch64TargetLowering::LowerLOOP_DEPENDENCE_MASK(SDValue Op,
                                                  SelectionDAG &DAG) const {
   SDLoc DL(Op);
   uint64_t EltSize = Op.getConstantOperandVal(2);
-  bool IsWriteAfterRead = Op.getOpcode() == ISD::LOOP_DEPENDENCE_WAR_MASK;
-  unsigned Opcode =
-      IsWriteAfterRead ? AArch64ISD::WHILEWR : AArch64ISD::WHILERW;
   EVT VT = Op.getValueType();
-  MVT SimpleVT = VT.getSimpleVT();
   // Make sure that the promoted mask size and element size match
   switch (EltSize) {
   case 1:
-    assert((SimpleVT == MVT::v16i8 || SimpleVT == MVT::nxv16i1) &&
+    assert((VT == MVT::v16i8 || VT == MVT::nxv16i1) &&
            "Unexpected mask or element size");
     break;
   case 2:
-    assert((SimpleVT == MVT::v8i8 || SimpleVT == MVT::nxv8i1) &&
+    assert((VT == MVT::v8i8 || VT == MVT::nxv8i1) &&
            "Unexpected mask or element size");
     break;
   case 4:
-    assert((SimpleVT == MVT::v4i16 || SimpleVT == MVT::nxv4i1) &&
+    assert((VT == MVT::v4i16 || VT == MVT::nxv4i1) &&
            "Unexpected mask or element size");
     break;
   case 8:
-    assert((SimpleVT == MVT::v2i32 || SimpleVT == MVT::nxv2i1) &&
+    assert((VT == MVT::v2i32 || VT == MVT::nxv2i1) &&
            "Unexpected mask or element size");
     break;
   default:
@@ -5244,19 +5244,17 @@ AArch64TargetLowering::LowerLOOP_DEPENDENCE_MASK(SDValue Op,
     break;
   }
 
-  if (VT.isScalableVector())
-    return DAG.getNode(Opcode, DL, VT, Op.getOperand(0), Op.getOperand(1));
-
   // We can use the SVE whilewr/whilerw instruction to lower this
   // intrinsic by creating the appropriate sequence of scalable vector
   // operations and then extracting a fixed-width subvector from the scalable
-  // vector.
-
-  EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
+  // vector. Scalable vector variants are already legal.
+  EVT ContainerVT =
+      EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
+                       VT.getVectorNumElements(), true);
   EVT WhileVT = ContainerVT.changeElementType(MVT::i1);
 
-  SDValue Mask =
-      DAG.getNode(Opcode, DL, WhileVT, Op.getOperand(0), Op.getOperand(1));
+  SDValue Mask = DAG.getNode(Op.getOpcode(), DL, WhileVT, Op.getOperand(0),
+                             Op.getOperand(1), Op.getOperand(2));
   SDValue MaskAsInt = DAG.getNode(ISD::SIGN_EXTEND, DL, ContainerVT, Mask);
   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, MaskAsInt,
                      DAG.getVectorIdxConstant(0, DL));
@@ -6021,17 +6019,37 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     return DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT);
   }
   case Intrinsic::aarch64_sve_whilewr_b:
+    return DAG.getNode(ISD::LOOP_DEPENDENCE_WAR_MASK, DL, Op.getValueType(),
+                       Op.getOperand(1), Op.getOperand(2),
+                       DAG.getConstant(1, DL, MVT::i64));
   case Intrinsic::aarch64_sve_whilewr_h:
+    return DAG.getNode(ISD::LOOP_DEPENDENCE_WAR_MASK, DL, Op.getValueType(),
+                       Op.getOperand(1), Op.getOperand(2),
+                       DAG.getConstant(2, DL, MVT::i64));
   case Intrinsic::aarch64_sve_whilewr_s:
+    return DAG.getNode(ISD::LOOP_DEPENDENCE_WAR_MASK, DL, Op.getValueType(),
+                       Op.getOperand(1), Op.getOperand(2),
+                       DAG.getConstant(4, DL, MVT::i64));
   case Intrinsic::aarch64_sve_whilewr_d:
-    return DAG.getNode(AArch64ISD::WHILEWR, dl, Op.getValueType(),
-                       Op.getOperand(1), Op.getOperand(2));
+    return DAG.getNode(ISD::LOOP_DEPENDENCE_WAR_MASK, DL, Op.getValueType(),
+                       Op.getOperand(1), Op.getOperand(2),
+                       DAG.getConstant(8, DL, MVT::i64));
   case Intrinsic::aarch64_sve_whilerw_b:
+    return DAG.getNode(ISD::LOOP_DEPENDENCE_RAW_MASK, DL, Op.getValueType(),
+                       Op.getOperand(1), Op.getOperand(2),
+                       DAG.getConstant(1, DL, MVT::i64));
   case Intrinsic::aarch64_sve_whilerw_h:
+    return DAG.getNode(ISD::LOOP_DEPENDENCE_RAW_MASK, DL, Op.getValueType(),
+                       Op.getOperand(1), Op.getOperand(2),
+                       DAG.getConstant(2, DL, MVT::i64));
   case Intrinsic::aarch64_sve_whilerw_s:
+    return DAG.getNode(ISD::LOOP_DEPENDENCE_RAW_MASK, DL, Op.getValueType(),
+                       Op.getOperand(1), Op.getOperand(2),
+                       DAG.getConstant(4, DL, MVT::i64));
   case Intrinsic::aarch64_sve_whilerw_d:
-    return DAG.getNode(AArch64ISD::WHILERW, dl, Op.getValueType(),
-                       Op.getOperand(1), Op.getOperand(2));
+    return DAG.getNode(ISD::LOOP_DEPENDENCE_RAW_MASK, DL, Op.getValueType(),
+                       Op.getOperand(1), Op.getOperand(2),
+                       DAG.getConstant(8, DL, MVT::i64));
   case Intrinsic::aarch64_neon_abs: {
     EVT Ty = Op.getValueType();
     if (Ty == MVT::i64) {
@@ -6506,53 +6524,6 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     return DAG.getNode(AArch64ISD::USDOT, DL, Op.getValueType(),
                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
   }
-  case Intrinsic::loop_dependence_war_mask:
-  case Intrinsic::loop_dependence_raw_mask: {
-    unsigned IntrinsicID = 0;
-    uint64_t EltSize = Op.getOperand(3)->getAsZExtVal();
-    bool IsWriteAfterRead = Op.getOperand(4)->getAsZExtVal() == 1;
-    switch (EltSize) {
-    case 1:
-      IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_b
-                                     : Intrinsic::aarch64_sve_whilerw_b;
-      break;
-    case 2:
-      IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_h
-                                     : Intrinsic::aarch64_sve_whilerw_h;
-      break;
-    case 4:
-      IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_s
-                                     : Intrinsic::aarch64_sve_whilerw_s;
-      break;
-    case 8:
-      IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_d
-                                     : Intrinsic::aarch64_sve_whilerw_d;
-      break;
-    default:
-      llvm_unreachable("Unexpected element size for get.alias.lane.mask");
-      break;
-    }
-    SDValue ID = DAG.getTargetConstant(IntrinsicID, dl, MVT::i64);
-
-    EVT VT = Op.getValueType();
-    if (VT.isScalableVector())
-      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, ID, Op.getOperand(1),
-                         Op.getOperand(2));
-
-    // We can use the SVE whilewr/whilerw instruction to lower this
-    // intrinsic by creating the appropriate sequence of scalable vector
-    // operations and then extracting a fixed-width subvector from the scalable
-    // vector.
-
-    EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
-    EVT WhileVT = ContainerVT.changeElementType(MVT::i1);
-
-    SDValue Mask = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, WhileVT, ID,
-                               Op.getOperand(1), Op.getOperand(2));
-    SDValue MaskAsInt = DAG.getNode(ISD::SIGN_EXTEND, dl, ContainerVT, Mask);
-    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, MaskAsInt,
-                       DAG.getVectorIdxConstant(0, dl));
-  }
   case Intrinsic::aarch64_neon_saddlv:
   case Intrinsic::aarch64_neon_uaddlv: {
     EVT OpVT = Op.getOperand(1).getValueType();
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index e8c69fa3cadf9..4877edd535225 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -167,11 +167,6 @@ def AArch64st1q_scatter : SDNode<"AArch64ISD::SST1Q_PRED", SDT_AArch64_SCATTER_V
 // AArch64 SVE/SVE2 - the remaining node definitions
 //
 
-// Alias masks
-def SDT_AArch64Mask : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisInt<1>, SDTCisSameAs<2, 1>, SDTCVecEltisVT<0,i1>]>;
-def AArch64whilewr : SDNode<"AArch64ISD::WHILEWR", SDT_AArch64Mask>;
-def AArch64whilerw : SDNode<"AArch64ISD::WHILERW", SDT_AArch64Mask>;
-
 // SVE CNT/INC/RDVL
 def sve_rdvl_imm : ComplexPattern<i64, 1, "SelectRDVLImm<-32, 31, 16>">;
 def sve_cnth_imm : ComplexPattern<i64, 1, "SelectRDVLImm<1, 16, 8>">;
@@ -4128,8 +4123,8 @@ let Predicates = [HasSVE2_or_SME] in {
   defm WHILEHI_PXX : sve_int_while8_rr<0b101, "whilehi", int_aarch64_sve_whilehi, get_active_lane_mask>;
 
   // SVE2 pointer conflict compare
-  defm WHILEWR_PXX : sve2_int_while_rr<0b0, "whilewr", AArch64whilewr>;
-  defm WHILERW_PXX : sve2_int_while_rr<0b1, "whilerw", AArch64whilerw>;
+  defm WHILEWR_PXX : sve2_int_while_rr<0b0, "whilewr", loop_dependence_war_mask>;
+  defm WHILERW_PXX : sve2_int_while_rr<0b1, "whilerw", loop_dependence_raw_mask>;
 } // End HasSVE2_or_SME
 
 let Predicates = [HasSVEAES, HasNonStreamingSVE_or_SSVE_AES] in {
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index e88758bf5133a..377017ac04217 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -5946,10 +5946,14 @@ multiclass sve2_int_while_rr<bits<1> rw, string asm, SDPatternOperator op> {
   def _S : sve2_int_while_rr<0b10, rw, asm, PPR32>;
   def _D : sve2_int_while_rr<0b11, rw, asm, PPR64>;
 
-  def : SVE_2_Op_Pat<nxv16i1, op, i64, i64, !cast<Instruction>(NAME # _B)>;
-  def : SVE_2_Op_Pat<nxv8i1,  op, i64, i64, !cast<Instruction>(NAME # _H)>;
-  def : SVE_2_Op_Pat<nxv4i1,  op, i64, i64, !cast<Instruction>(NAME # _S)>;
-  def : SVE_2_Op_Pat<nxv2i1,  op, i64, i64, !cast<Instruction>(NAME # _D)>;
+  def : Pat<(nxv16i1 (op i64:$Op1, i64:$Op2, (i64 1))),
+            (!cast<Instruction>(NAME # _B) $Op1, $Op2)>;
+  def : Pat<(nxv8i1 (op i64:$Op1, i64:$Op2, (i64 2))),
+            (!cast<Instruction>(NAME # _H) $Op1, $Op2)>;
+  def : Pat<(nxv4i1 (op i64:$Op1, i64:$Op2, (i64 4))),
+            (!cast<Instruction>(NAME # _S) $Op1, $Op2)>;
+  def : Pat<(nxv2i1 (op i64:$Op1, i64:$Op2, (i64 8))),
+            (!cast<Instruction>(NAME # _D) $Op1, $Op2)>;
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/AArch64/alias_mask.ll b/llvm/test/CodeGen/AArch64/alias_mask.ll
index 8b74b7101a740..94fc39054200f 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask.ll
@@ -58,9 +58,9 @@ entry:
 define <8 x i1> @whilewr_16(ptr %a, ptr %b) {
 ; CHECK-SVE-LABEL: whilewr_16:
 ; CHECK-SVE:       // %bb.0: // %entry
-; CHECK-SVE-NEXT:    whilewr p0.b, x0, x1
-; CHECK-SVE-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-SVE-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-SVE-NEXT:    whilewr p0.h, x0, x1
+; CHECK-SVE-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    xtn v0.8b, v0.8h
 ; CHECK-SVE-NEXT:    ret
 ;
 ; CHECK-NOSVE-LABEL: whilewr_16:
@@ -98,9 +98,9 @@ entry:
 define <4 x i1> @whilewr_32(ptr %a, ptr %b) {
 ; CHECK-SVE-LABEL: whilewr_32:
 ; CHECK-SVE:       // %bb.0: // %entry
-; CHECK-SVE-NEXT:    whilewr p0.h, x0, x1
-; CHECK-SVE-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-SVE-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-SVE-NEXT:    whilewr p0.s, x0, x1
+; CHECK-SVE-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    xtn v0.4h, v0.4s
 ; CHECK-SVE-NEXT:    ret
 ;
 ; CHECK-NOSVE-LABEL: whilewr_32:
@@ -132,9 +132,9 @@ entry:
 define <2 x i1> @whilewr_64(ptr %a, ptr %b) {
 ; CHECK-SVE-LABEL: whilewr_64:
 ; CHECK-SVE:       // %bb.0: // %entry
-; CHECK-SVE-NEXT:    whilewr p0.s, x0, x1
-; CHECK-SVE-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-SVE-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-SVE-NEXT:    whilewr p0.d, x0, x1
+; CHECK-SVE-NEXT:    mov z0.d, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    xtn v0.2s, v0.2d
 ; CHECK-SVE-NEXT:    ret
 ;
 ; CHECK-NOSVE-LABEL: whilewr_64:
@@ -216,9 +216,9 @@ entry:
 define <8 x i1> @whilerw_16(ptr %a, ptr %b) {
 ; CHECK-SVE-LABEL: whilerw_16:
 ; CHECK-SVE:       // %bb.0: // %entry
-; CHECK-SVE-NEXT:    whilerw p0.b, x0, x1
-; CHECK-SVE-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-SVE-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-SVE-NEXT:    whilerw p0.h, x0, x1
+; CHECK-SVE-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    xtn v0.8b, v0.8h
 ; CHECK-SVE-NEXT:    ret
 ;
 ; CHECK-NOSVE-LABEL: whilerw_16:
@@ -257,9 +257,9 @@ entry:
 define <4 x i1> @whilerw_32(ptr %a, ptr %b) {
 ; CHECK-SVE-LABEL: whilerw_32:
 ; CHECK-SVE:       // %bb.0: // %entry
-; CHECK-SVE-NEXT:    whilerw p0.h, x0, x1
-; CHECK-SVE-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-SVE-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-SVE-NEXT:    whilerw p0.s, x0, x1
+; CHECK-SVE-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    xtn v0.4h, v0.4s
 ; CHECK-SVE-NEXT:    ret
 ;
 ; CHECK-NOSVE-LABEL: whilerw_32:
@@ -292,9 +292,9 @@ entry:
 define <2 x i1> @whilerw_64(ptr %a, ptr %b) {
 ; CHECK-SVE-LABEL: whilerw_64:
 ; CHECK-SVE:       // %bb.0: // %entry
-; CHECK-SVE-NEXT:    whilerw p0.s, x0, x1
-; CHECK-SVE-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-SVE-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-SVE-NEXT:    whilerw p0.d, x0, x1
+; CHECK-SVE-NEXT:    mov z0.d, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    xtn v0.2s, v0.2d
 ; CHECK-SVE-NEXT:    ret
 ;
 ; CHECK-NOSVE-LABEL: whilerw_64:
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
index 47b8e31d8b5be..484e35fbbb0e4 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
@@ -60,7 +60,7 @@ define <vscale x 16 x i1> @whilewr_8(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 1)
+  %0 = call <vscale x 16 x i1> @llvm.loop.dependence.war.mask.nxv16i1(ptr %a, ptr %b, i64 1)
   ret <vscale x 16 x i1> %0
 }
 
@@ -98,7 +98,7 @@ define <vscale x 8 x i1> @whilewr_16(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 8 x i1> @llvm.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 2)
+  %0 = call <vscale x 8 x i1> @llvm.loop.dependence.war.mask.nxv8i1(ptr %a, ptr %b, i64 2)
   ret <vscale x 8 x i1> %0
 }
 
@@ -130,7 +130,7 @@ define <vscale x 4 x i1> @whilewr_32(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 4 x i1> @llvm.loop.dependence.war.mask.v4i1(ptr %a, ptr %b, i64 4)
+  %0 = call <vscale x 4 x i1> @llvm.loop.dependence.war.mask.nxv4i1(ptr %a, ptr %b, i64 4)
   ret <vscale x 4 x i1> %0
 }
 
@@ -158,7 +158,7 @@ define <vscale x 2 x i1> @whilewr_64(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 2 x i1> @llvm.loop.dependence.war.mask.v2i1(ptr %a, ptr %b, i64 8)
+  %0 = call <vscale x 2 x i1> @llvm.loop.dependence.war.mask.nxv2i1(ptr %a, ptr %b, i64 8)
   ret <vscale x 2 x i1> %0
 }
 
@@ -223,7 +223,7 @@ define <vscale x 16 x i1> @whilerw_8(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 16 x i1> @llvm.loop.dependence.raw.mask.v16i1(ptr %a, ptr %b, i64 1)
+  %0 = call <vscale x 16 x i1> @llvm.loop.dependence.raw.mask.nxv16i1(ptr %a, ptr %b, i64 1)
   ret <vscale x 16 x i1> %0
 }
 
@@ -262,7 +262,7 @@ define <vscale x 8 x i1> @whilerw_16(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 8 x i1> @llvm.loop.dependence.raw.mask.v8i1(ptr %a, ptr %b, i64 2)
+  %0 = call <vscale x 8 x i1> @llvm.loop.dependence.raw.mask.nxv8i1(ptr %a, ptr %b, i64 2)
   ret <vscale x 8 x i1> %0
 }
 
@@ -295,7 +295,7 @@ define <vscale x 4 x i1> @whilerw_32(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 4 x i1> @llvm.loop.dependence.raw.mask.v4i1(ptr %a, ptr %b, i64 4)
+  %0 = call <vscale x 4 x i1> @llvm.loop.dependence.raw.mask.nxv4i1(ptr %a, ptr %b, i64 4)
   ret <vscale x 4 x i1> %0
 }
 
@@ -324,6 +324,6 @@ define <vscale x 2 x i1> @whilerw_64(ptr %a, ptr %b) {
 ; CHECK-SVE-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-SVE-NEXT:    ret
 entry:
-  %0 = call <vscale x 2 x i1> @llvm.loop.dependence.raw.mask.v2i1(ptr %a, ptr %b, i64 8)
+  %0 = call <vscale x 2 x i1> @llvm.loop.dependence.raw.mask.nxv2i1(ptr %a, ptr %b, i64 8)
   ret <vscale x 2 x i1> %0
 }
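
With this change the aarch64_sve_whilewr/_whilerw intrinsics are routed
through the generic LOOP_DEPENDENCE_*_MASK nodes, so the target-specific and
generic forms should select the same instruction. A small sketch, assuming
the usual (ptr, ptr) signature of the target intrinsic:

    declare <vscale x 8 x i1> @llvm.aarch64.sve.whilewr.h(ptr, ptr)
    declare <vscale x 8 x i1> @llvm.loop.dependence.war.mask.nxv8i1(ptr, ptr, i64)

    ; Both functions should select "whilewr p0.h, x0, x1".
    define <vscale x 8 x i1> @via_target_intrinsic(ptr %a, ptr %b) {
      %m = call <vscale x 8 x i1> @llvm.aarch64.sve.whilewr.h(ptr %a, ptr %b)
      ret <vscale x 8 x i1> %m
    }

    define <vscale x 8 x i1> @via_generic_intrinsic(ptr %a, ptr %b) {
      %m = call <vscale x 8 x i1> @llvm.loop.dependence.war.mask.nxv8i1(ptr %a, ptr %b, i64 2)
      ret <vscale x 8 x i1> %m
    }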

>From b48626cb6e94844ace60b599e3ad692c52b1f21e Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Thu, 7 Aug 2025 15:51:45 +0100
Subject: [PATCH 27/29] Remove experimental from comment

---
 llvm/docs/LangRef.rst                  | 3 ++-
 llvm/include/llvm/CodeGen/ISDOpcodes.h | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 33384a534b454..3fcc96c1a39e2 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -24151,7 +24151,8 @@ Semantics:
 The intrinsic returns ``poison`` if the distance between ``%ptrA`` and ``%ptrB``
 is smaller than ``VF * %elementSize`` and either ``%ptrA + VF * %elementSize``
 or ``%ptrB + VF * %elementSize`` wrap.
-The element of the result mask is active when no read-after-write hazard occurs, meaning that:
+The element of the result mask is active when no read-after-write hazard occurs,
+meaning that:
 
   abs(ptrB - ptrA) >= elementSize * lane (guarantees that the store of this lane
   is committed before loading from this address)
diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index 492f83ae34f61..c76c83d84b3c7 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -1558,7 +1558,7 @@ enum NodeType {
   // bits conform to getBooleanContents similar to the SETCC operator.
   GET_ACTIVE_LANE_MASK,
 
-  // The `llvm.experimental.loop.dependence.{war, raw}.mask` intrinsics
+  // The `llvm.loop.dependence.{war, raw}.mask` intrinsics
   // Operands: Load pointer, Store pointer, Element size
   // Output: Mask
   LOOP_DEPENDENCE_WAR_MASK,

>From 8d13f4d656944d1350d782f1135d876874682965 Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Thu, 7 Aug 2025 15:52:03 +0100
Subject: [PATCH 28/29] Add splitting

---
 llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h   |  1 +
 .../SelectionDAG/LegalizeVectorTypes.cpp        | 17 +++++++++++++++++
 2 files changed, 18 insertions(+)

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index fa3b26177e2cf..ac869a54d599c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -964,6 +964,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
   void SplitVecRes_FIX(SDNode *N, SDValue &Lo, SDValue &Hi);
 
   void SplitVecRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi);
+  void SplitVecRes_LOOP_DEPENDENCE_MASK(SDNode *N, SDValue &Lo, SDValue &Hi);
   void SplitVecRes_BUILD_VECTOR(SDNode *N, SDValue &Lo, SDValue &Hi);
   void SplitVecRes_CONCAT_VECTORS(SDNode *N, SDValue &Lo, SDValue &Hi);
   void SplitVecRes_EXTRACT_SUBVECTOR(SDNode *N, SDValue &Lo, SDValue &Hi);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 1661814d5a897..10ea3c9e038df 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1118,6 +1118,8 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
     report_fatal_error("Do not know how to split the result of this "
                        "operator!\n");
 
+  case ISD::LOOP_DEPENDENCE_RAW_MASK:
+  case ISD::LOOP_DEPENDENCE_WAR_MASK: SplitVecRes_LOOP_DEPENDENCE_MASK(N, Lo, Hi); break;
   case ISD::MERGE_VALUES: SplitRes_MERGE_VALUES(N, ResNo, Lo, Hi); break;
   case ISD::AssertZext:   SplitVecRes_AssertZext(N, Lo, Hi); break;
   case ISD::VSELECT:
@@ -1608,6 +1610,21 @@ void DAGTypeLegalizer::SplitVecRes_BITCAST(SDNode *N, SDValue &Lo,
   Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
 }
 
+void DAGTypeLegalizer::SplitVecRes_LOOP_DEPENDENCE_MASK(SDNode *N, SDValue &Lo, SDValue &Hi) {
+    SDLoc DL(N);
+    EVT LoVT, HiVT;
+    std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
+    SDValue PtrA = N->getOperand(0);
+    SDValue PtrB = N->getOperand(1);
+    Lo = DAG.getNode(N->getOpcode(), DL, LoVT, PtrA, PtrB, N->getOperand(2));
+    Lo.dumpr();
+
+    PtrA = DAG.getNode(ISD::ADD, DL, MVT::i64, PtrA, DAG.getConstant(HiVT.getStoreSizeInBits(), DL, MVT::i64));
+    PtrB = DAG.getNode(ISD::ADD, DL, MVT::i64, PtrB, DAG.getConstant(HiVT.getStoreSizeInBits(), DL, MVT::i64));
+    Hi = DAG.getNode(N->getOpcode(), DL, HiVT, PtrA, PtrB, N->getOperand(2));
+    Hi.dumpr();
+}
+
 void DAGTypeLegalizer::SplitVecRes_BUILD_VECTOR(SDNode *N, SDValue &Lo,
                                                 SDValue &Hi) {
   EVT LoVT, HiVT;
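
Whatever DAG sequence the split produces, it must preserve the value of the
unsplit mask: the low half is lanes 0..VF/2-1 and the high half is lanes
VF/2..VF-1 of the full-width result. Stated as a hedged IR-level reference
for <8 x i1> with 4-byte elements (the function name is illustrative):

    declare <8 x i1> @llvm.loop.dependence.war.mask.v8i1(ptr, ptr, i64)

    define <4 x i1> @hi_half_reference(ptr %a, ptr %b) {
      ; The split's high half must equal lanes 4..7 of the full mask.
      %full = call <8 x i1> @llvm.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 4)
      %hi = shufflevector <8 x i1> %full, <8 x i1> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
      ret <4 x i1> %hi
    }

Note that offsetting both pointers by the same amount leaves the
ptrB - ptrA difference, and hence a purely difference-based mask, unchanged,
so the high-half query needs to be checked against this reference.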

>From 5f84f1a85591dc2f2d3dfb4d02e23f2bf616e60a Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Thu, 7 Aug 2025 15:52:30 +0100
Subject: [PATCH 29/29] Add widening

---
 llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h |   1 +
 .../SelectionDAG/LegalizeVectorTypes.cpp      |  62 +-
 .../Target/AArch64/AArch64ISelLowering.cpp    | 101 ++-
 llvm/test/CodeGen/AArch64/alias_mask.ll       | 781 ++++++++++++++++++
 4 files changed, 910 insertions(+), 35 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index ac869a54d599c..1e6799343f25a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -1070,6 +1070,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
   SDValue WidenVecRes_ADDRSPACECAST(SDNode *N);
   SDValue WidenVecRes_AssertZext(SDNode* N);
   SDValue WidenVecRes_BITCAST(SDNode* N);
+  SDValue WidenVecRes_LOOP_DEPENDENCE_MASK(SDNode *N);
   SDValue WidenVecRes_BUILD_VECTOR(SDNode* N);
   SDValue WidenVecRes_CONCAT_VECTORS(SDNode* N);
   SDValue WidenVecRes_EXTEND_VECTOR_INREG(SDNode* N);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 10ea3c9e038df..f634096612a9d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1119,7 +1119,9 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
                        "operator!\n");
 
   case ISD::LOOP_DEPENDENCE_RAW_MASK:
-  case ISD::LOOP_DEPENDENCE_WAR_MASK: SplitVecRes_LOOP_DEPENDENCE_MASK(N, Lo, Hi); break;
+  case ISD::LOOP_DEPENDENCE_WAR_MASK:
+    SplitVecRes_LOOP_DEPENDENCE_MASK(N, Lo, Hi);
+    break;
   case ISD::MERGE_VALUES: SplitRes_MERGE_VALUES(N, ResNo, Lo, Hi); break;
   case ISD::AssertZext:   SplitVecRes_AssertZext(N, Lo, Hi); break;
   case ISD::VSELECT:
@@ -1610,19 +1612,40 @@ void DAGTypeLegalizer::SplitVecRes_BITCAST(SDNode *N, SDValue &Lo,
   Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
 }
 
-void DAGTypeLegalizer::SplitVecRes_LOOP_DEPENDENCE_MASK(SDNode *N, SDValue &Lo, SDValue &Hi) {
-    SDLoc DL(N);
-    EVT LoVT, HiVT;
-    std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
-    SDValue PtrA = N->getOperand(0);
-    SDValue PtrB = N->getOperand(1);
-    Lo = DAG.getNode(N->getOpcode(), DL, LoVT, PtrA, PtrB, N->getOperand(2));
-    Lo.dumpr();
-
-    PtrA = DAG.getNode(ISD::ADD, DL, MVT::i64, PtrA, DAG.getConstant(HiVT.getStoreSizeInBits(), DL, MVT::i64));
-    PtrB = DAG.getNode(ISD::ADD, DL, MVT::i64, PtrB, DAG.getConstant(HiVT.getStoreSizeInBits(), DL, MVT::i64));
-    Hi = DAG.getNode(N->getOpcode(), DL, HiVT, PtrA, PtrB, N->getOperand(2));
-    Hi.dumpr();
+void DAGTypeLegalizer::SplitVecRes_LOOP_DEPENDENCE_MASK(SDNode *N, SDValue &Lo,
+                                                        SDValue &Hi) {
+  EVT EltVT;
+  switch (N->getConstantOperandVal(2)) {
+  case 1:
+    EltVT = MVT::i8;
+    break;
+  case 2:
+    EltVT = MVT::i16;
+    break;
+  case 4:
+    EltVT = MVT::i32;
+    break;
+  case 8:
+    EltVT = MVT::i64;
+    break;
+  }
+  SDLoc DL(N);
+  EVT LoVT, HiVT;
+  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
+  SDValue PtrA = N->getOperand(0);
+  SDValue PtrB = N->getOperand(1);
+  Lo = DAG.getNode(N->getOpcode(), DL, LoVT, PtrA, PtrB, N->getOperand(2));
+
+  EVT StoreVT =
+      EVT::getVectorVT(*DAG.getContext(), EltVT, HiVT.getVectorMinNumElements(),
+                       HiVT.isScalableVT());
+  PtrA = DAG.getNode(
+      ISD::ADD, DL, MVT::i64, PtrA,
+      DAG.getConstant(StoreVT.getStoreSizeInBits() / 8, DL, MVT::i64));
+  PtrB = DAG.getNode(
+      ISD::ADD, DL, MVT::i64, PtrB,
+      DAG.getConstant(StoreVT.getStoreSizeInBits() / 8, DL, MVT::i64));
+  Hi = DAG.getNode(N->getOpcode(), DL, HiVT, PtrA, PtrB, N->getOperand(2));
 }
 
 void DAGTypeLegalizer::SplitVecRes_BUILD_VECTOR(SDNode *N, SDValue &Lo,
@@ -4686,6 +4709,10 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
 #endif
     report_fatal_error("Do not know how to widen the result of this operator!");
 
+  case ISD::LOOP_DEPENDENCE_RAW_MASK:
+  case ISD::LOOP_DEPENDENCE_WAR_MASK:
+    Res = WidenVecRes_LOOP_DEPENDENCE_MASK(N);
+    break;
   case ISD::MERGE_VALUES:      Res = WidenVecRes_MERGE_VALUES(N, ResNo); break;
   case ISD::ADDRSPACECAST:
     Res = WidenVecRes_ADDRSPACECAST(N);
@@ -5885,6 +5912,13 @@ SDValue DAGTypeLegalizer::WidenVecRes_BITCAST(SDNode *N) {
   return CreateStackStoreLoad(InOp, WidenVT);
 }
 
+SDValue DAGTypeLegalizer::WidenVecRes_LOOP_DEPENDENCE_MASK(SDNode *N) {
+  return DAG.getNode(
+      N->getOpcode(), SDLoc(N),
+      TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)),
+      N->getOperand(0), N->getOperand(1), N->getOperand(2));
+}
+
 SDValue DAGTypeLegalizer::WidenVecRes_BUILD_VECTOR(SDNode *N) {
   SDLoc dl(N);
   // Build a vector with undefined for the new nodes.
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index d71aa19136ab1..c72acebea9835 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5220,44 +5220,103 @@ AArch64TargetLowering::LowerLOOP_DEPENDENCE_MASK(SDValue Op,
                                                  SelectionDAG &DAG) const {
   SDLoc DL(Op);
   uint64_t EltSize = Op.getConstantOperandVal(2);
-  EVT VT = Op.getValueType();
+  EVT FullVT = Op.getValueType();
+  unsigned NumElements = FullVT.getVectorMinNumElements();
+  unsigned NumSplits = 0;
+  EVT EltVT;
   // Make sure that the promoted mask size and element size match
   switch (EltSize) {
   case 1:
-    assert((VT == MVT::v16i8 || VT == MVT::nxv16i1) &&
+    assert((FullVT == MVT::v16i8 || FullVT == MVT::nxv16i1) &&
            "Unexpected mask or element size");
+    EltVT = MVT::i8;
     break;
   case 2:
-    assert((VT == MVT::v8i8 || VT == MVT::nxv8i1) &&
-           "Unexpected mask or element size");
+    if (FullVT == MVT::v16i8)
+      NumSplits = 1;
+    else
+      assert((FullVT == MVT::v8i8 || FullVT == MVT::nxv8i1) &&
+             "Unexpected mask or element size");
+    EltVT = MVT::i16;
     break;
   case 4:
-    assert((VT == MVT::v4i16 || VT == MVT::nxv4i1) &&
-           "Unexpected mask or element size");
+    if (NumElements >= 8)
+      NumSplits = NumElements / 8;
+    else
+      assert((FullVT == MVT::v4i16 || FullVT == MVT::nxv4i1) &&
+             "Unexpected mask or element size");
+    EltVT = MVT::i32;
     break;
   case 8:
-    assert((VT == MVT::v2i32 || VT == MVT::nxv2i1) &&
-           "Unexpected mask or element size");
+    if (NumElements >= 4)
+      NumSplits = NumElements / 4;
+    else
+      assert((FullVT == MVT::v2i32 || FullVT == MVT::nxv2i1) &&
+             "Unexpected mask or element size");
+    EltVT = MVT::i64;
     break;
   default:
     llvm_unreachable("Unexpected element size for get.alias.lane.mask");
     break;
   }
 
-  // We can use the SVE whilewr/whilerw instruction to lower this
-  // intrinsic by creating the appropriate sequence of scalable vector
-  // operations and then extracting a fixed-width subvector from the scalable
-  // vector. Scalable vector variants are already legal.
-  EVT ContainerVT =
-      EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
-                       VT.getVectorNumElements(), true);
-  EVT WhileVT = ContainerVT.changeElementType(MVT::i1);
+  auto LowerToWhile = [&](EVT VT, unsigned AddrScale) {
+    SDValue PtrA = Op.getOperand(0);
+    SDValue PtrB = Op.getOperand(1);
+
+    EVT StoreVT =
+        EVT::getVectorVT(*DAG.getContext(), EltVT, VT.getVectorMinNumElements(),
+                         VT.isScalableVT());
+    PtrA = DAG.getNode(
+        ISD::ADD, DL, MVT::i64, PtrA,
+        DAG.getConstant(StoreVT.getStoreSizeInBits() / 8 * AddrScale, DL,
+                        MVT::i64));
+    PtrB = DAG.getNode(
+        ISD::ADD, DL, MVT::i64, PtrB,
+        DAG.getConstant(StoreVT.getStoreSizeInBits() / 8 * AddrScale, DL,
+                        MVT::i64));
+
+    if (VT.isScalableVT())
+      return DAG.getNode(Op.getOpcode(), DL, VT, PtrA, PtrB, Op.getOperand(2));
+
+    // We can use the SVE whilewr/whilerw instructions to lower this
+    // intrinsic by creating the appropriate sequence of scalable vector
+    // operations and then extracting a fixed-width subvector from the scalable
+    // vector. Scalable vector variants are already legal.
+    EVT ContainerVT =
+        EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
+                         VT.getVectorNumElements(), true);
+    EVT WhileVT = ContainerVT.changeElementType(MVT::i1);
+
+    SDValue Mask =
+        DAG.getNode(Op.getOpcode(), DL, WhileVT, PtrA, PtrB, Op.getOperand(2));
+    SDValue MaskAsInt = DAG.getNode(ISD::SIGN_EXTEND, DL, ContainerVT, Mask);
+    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, MaskAsInt,
+                       DAG.getVectorIdxConstant(0, DL));
+  };
 
-  SDValue Mask = DAG.getNode(Op.getOpcode(), DL, WhileVT, Op.getOperand(0),
-                             Op.getOperand(1), Op.getOperand(2));
-  SDValue MaskAsInt = DAG.getNode(ISD::SIGN_EXTEND, DL, ContainerVT, Mask);
-  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, MaskAsInt,
-                     DAG.getVectorIdxConstant(0, DL));
+  if (NumSplits == 0)
+    return LowerToWhile(FullVT, 0);
+
+  SDValue FullVec = DAG.getUNDEF(FullVT);
+
+  unsigned NumElementsPerSplit = NumElements / (2 * NumSplits);
+  for (unsigned Split = 0, InsertIdx = 0; Split < NumSplits;
+       Split++, InsertIdx += 2) {
+    EVT PartVT =
+        EVT::getVectorVT(*DAG.getContext(), FullVT.getVectorElementType(),
+                         NumElementsPerSplit, FullVT.isScalableVT());
+    SDValue Low = LowerToWhile(PartVT, InsertIdx);
+    SDValue High = LowerToWhile(PartVT, InsertIdx + 1);
+    unsigned InsertIdxLow = InsertIdx * NumElementsPerSplit;
+    unsigned InsertIdxHigh = (InsertIdx + 1) * NumElementsPerSplit;
+    SDValue Insert =
+        DAG.getNode(ISD::INSERT_SUBVECTOR, DL, FullVT, FullVec, Low,
+                    DAG.getVectorIdxConstant(InsertIdxLow, DL));
+    FullVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, FullVT, Insert, High,
+                          DAG.getVectorIdxConstant(InsertIdxHigh, DL));
+  }
+  return FullVec;
 }
 
 SDValue AArch64TargetLowering::LowerBITCAST(SDValue Op,
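When a promoted fixed-width mask covers more lanes than one whilewr/whilerw
predicate can produce, LowerToWhile runs once per part, with AddrScale
selecting the part's byte offset (one part's store size times AddrScale).
A hedged IR-level sketch for a <16 x i1> mask with 2-byte elements, where
each part covers 8 lanes (16 bytes):

    %a.hi = getelementptr i8, ptr %a, i64 16
    %b.hi = getelementptr i8, ptr %b, i64 16
    %lo = call <8 x i1> @llvm.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 2)
    %hi = call <8 x i1> @llvm.loop.dependence.war.mask.v8i1(ptr %a.hi, ptr %b.hi, i64 2)

Each part maps onto one whilewr p<n>.h, as in the whilewr_16_split test
below.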
diff --git a/llvm/test/CodeGen/AArch64/alias_mask.ll b/llvm/test/CodeGen/AArch64/alias_mask.ll
index 94fc39054200f..0ee2f6f3a9b10 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask.ll
@@ -319,3 +319,784 @@ entry:
   %0 = call <2 x i1> @llvm.loop.dependence.raw.mask.v2i1(ptr %a, ptr %b, i64 8)
   ret <2 x i1> %0
 }
+
+define <32 x i1> @whilewr_8_split(ptr %a, ptr %b) {
+; CHECK-SVE-LABEL: whilewr_8_split:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    add x9, x1, #16
+; CHECK-SVE-NEXT:    add x10, x0, #16
+; CHECK-SVE-NEXT:    whilewr p0.b, x0, x1
+; CHECK-SVE-NEXT:    whilewr p1.b, x10, x9
+; CHECK-SVE-NEXT:    adrp x9, .LCPI8_0
+; CHECK-SVE-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    ldr q2, [x9, :lo12:.LCPI8_0]
+; CHECK-SVE-NEXT:    mov z1.b, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    shl v0.16b, v0.16b, #7
+; CHECK-SVE-NEXT:    shl v1.16b, v1.16b, #7
+; CHECK-SVE-NEXT:    cmlt v0.16b, v0.16b, #0
+; CHECK-SVE-NEXT:    cmlt v1.16b, v1.16b, #0
+; CHECK-SVE-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-SVE-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-SVE-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
+; CHECK-SVE-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
+; CHECK-SVE-NEXT:    zip1 v0.16b, v0.16b, v2.16b
+; CHECK-SVE-NEXT:    zip1 v1.16b, v1.16b, v3.16b
+; CHECK-SVE-NEXT:    addv h0, v0.8h
+; CHECK-SVE-NEXT:    addv h1, v1.8h
+; CHECK-SVE-NEXT:    str h0, [x8]
+; CHECK-SVE-NEXT:    str h1, [x8, #2]
+; CHECK-SVE-NEXT:    ret
+;
+; CHECK-NOSVE-LABEL: whilewr_8_split:
+; CHECK-NOSVE:       // %bb.0: // %entry
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI8_0
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI8_1
+; CHECK-NOSVE-NEXT:    sub x11, x1, x0
+; CHECK-NOSVE-NEXT:    ldr q0, [x9, :lo12:.LCPI8_0]
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI8_2
+; CHECK-NOSVE-NEXT:    ldr q1, [x10, :lo12:.LCPI8_1]
+; CHECK-NOSVE-NEXT:    ldr q3, [x9, :lo12:.LCPI8_2]
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI8_4
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI8_3
+; CHECK-NOSVE-NEXT:    ldr q5, [x9, :lo12:.LCPI8_4]
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI8_5
+; CHECK-NOSVE-NEXT:    dup v2.2d, x11
+; CHECK-NOSVE-NEXT:    ldr q4, [x10, :lo12:.LCPI8_3]
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI8_6
+; CHECK-NOSVE-NEXT:    ldr q6, [x9, :lo12:.LCPI8_5]
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI8_7
+; CHECK-NOSVE-NEXT:    ldr q7, [x10, :lo12:.LCPI8_6]
+; CHECK-NOSVE-NEXT:    cmp x11, #1
+; CHECK-NOSVE-NEXT:    ldr q16, [x9, :lo12:.LCPI8_7]
+; CHECK-NOSVE-NEXT:    cmhi v0.2d, v2.2d, v0.2d
+; CHECK-NOSVE-NEXT:    cmhi v1.2d, v2.2d, v1.2d
+; CHECK-NOSVE-NEXT:    cmhi v3.2d, v2.2d, v3.2d
+; CHECK-NOSVE-NEXT:    cmhi v4.2d, v2.2d, v4.2d
+; CHECK-NOSVE-NEXT:    cmhi v5.2d, v2.2d, v5.2d
+; CHECK-NOSVE-NEXT:    cmhi v6.2d, v2.2d, v6.2d
+; CHECK-NOSVE-NEXT:    cmhi v7.2d, v2.2d, v7.2d
+; CHECK-NOSVE-NEXT:    cmhi v2.2d, v2.2d, v16.2d
+; CHECK-NOSVE-NEXT:    uzp1 v0.4s, v1.4s, v0.4s
+; CHECK-NOSVE-NEXT:    cset w9, lt
+; CHECK-NOSVE-NEXT:    uzp1 v1.4s, v4.4s, v3.4s
+; CHECK-NOSVE-NEXT:    uzp1 v3.4s, v6.4s, v5.4s
+; CHECK-NOSVE-NEXT:    uzp1 v2.4s, v2.4s, v7.4s
+; CHECK-NOSVE-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NOSVE-NEXT:    uzp1 v1.8h, v2.8h, v3.8h
+; CHECK-NOSVE-NEXT:    uzp1 v0.16b, v1.16b, v0.16b
+; CHECK-NOSVE-NEXT:    dup v1.16b, w9
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI8_8
+; CHECK-NOSVE-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NOSVE-NEXT:    ldr q1, [x9, :lo12:.LCPI8_8]
+; CHECK-NOSVE-NEXT:    shl v0.16b, v0.16b, #7
+; CHECK-NOSVE-NEXT:    cmlt v0.16b, v0.16b, #0
+; CHECK-NOSVE-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NOSVE-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NOSVE-NEXT:    zip1 v0.16b, v0.16b, v1.16b
+; CHECK-NOSVE-NEXT:    addv h0, v0.8h
+; CHECK-NOSVE-NEXT:    str h0, [x8, #2]
+; CHECK-NOSVE-NEXT:    str h0, [x8]
+; CHECK-NOSVE-NEXT:    ret
+entry:
+  %0 = call <32 x i1> @llvm.loop.dependence.war.mask.v32i1(ptr %a, ptr %b, i64 1)
+  ret <32 x i1> %0
+}
+
+define <64 x i1> @whilewr_8_split2(ptr %a, ptr %b) {
+; CHECK-SVE-LABEL: whilewr_8_split2:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    add x9, x1, #48
+; CHECK-SVE-NEXT:    add x10, x0, #48
+; CHECK-SVE-NEXT:    whilewr p0.b, x0, x1
+; CHECK-SVE-NEXT:    whilewr p1.b, x10, x9
+; CHECK-SVE-NEXT:    add x9, x1, #16
+; CHECK-SVE-NEXT:    add x10, x1, #32
+; CHECK-SVE-NEXT:    add x11, x0, #32
+; CHECK-SVE-NEXT:    add x12, x0, #16
+; CHECK-SVE-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    whilewr p0.b, x11, x10
+; CHECK-SVE-NEXT:    mov z1.b, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    whilewr p1.b, x12, x9
+; CHECK-SVE-NEXT:    adrp x9, .LCPI9_0
+; CHECK-SVE-NEXT:    mov z2.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    shl v0.16b, v0.16b, #7
+; CHECK-SVE-NEXT:    ldr q4, [x9, :lo12:.LCPI9_0]
+; CHECK-SVE-NEXT:    mov z3.b, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    shl v1.16b, v1.16b, #7
+; CHECK-SVE-NEXT:    shl v2.16b, v2.16b, #7
+; CHECK-SVE-NEXT:    cmlt v0.16b, v0.16b, #0
+; CHECK-SVE-NEXT:    shl v3.16b, v3.16b, #7
+; CHECK-SVE-NEXT:    cmlt v1.16b, v1.16b, #0
+; CHECK-SVE-NEXT:    cmlt v2.16b, v2.16b, #0
+; CHECK-SVE-NEXT:    and v0.16b, v0.16b, v4.16b
+; CHECK-SVE-NEXT:    cmlt v3.16b, v3.16b, #0
+; CHECK-SVE-NEXT:    and v1.16b, v1.16b, v4.16b
+; CHECK-SVE-NEXT:    and v2.16b, v2.16b, v4.16b
+; CHECK-SVE-NEXT:    and v3.16b, v3.16b, v4.16b
+; CHECK-SVE-NEXT:    ext v4.16b, v0.16b, v0.16b, #8
+; CHECK-SVE-NEXT:    ext v5.16b, v1.16b, v1.16b, #8
+; CHECK-SVE-NEXT:    ext v6.16b, v2.16b, v2.16b, #8
+; CHECK-SVE-NEXT:    ext v7.16b, v3.16b, v3.16b, #8
+; CHECK-SVE-NEXT:    zip1 v0.16b, v0.16b, v4.16b
+; CHECK-SVE-NEXT:    zip1 v1.16b, v1.16b, v5.16b
+; CHECK-SVE-NEXT:    zip1 v2.16b, v2.16b, v6.16b
+; CHECK-SVE-NEXT:    zip1 v3.16b, v3.16b, v7.16b
+; CHECK-SVE-NEXT:    addv h0, v0.8h
+; CHECK-SVE-NEXT:    addv h1, v1.8h
+; CHECK-SVE-NEXT:    addv h2, v2.8h
+; CHECK-SVE-NEXT:    addv h3, v3.8h
+; CHECK-SVE-NEXT:    str h0, [x8]
+; CHECK-SVE-NEXT:    str h1, [x8, #6]
+; CHECK-SVE-NEXT:    str h2, [x8, #4]
+; CHECK-SVE-NEXT:    str h3, [x8, #2]
+; CHECK-SVE-NEXT:    ret
+;
+; CHECK-NOSVE-LABEL: whilewr_8_split2:
+; CHECK-NOSVE:       // %bb.0: // %entry
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI9_0
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI9_1
+; CHECK-NOSVE-NEXT:    sub x11, x1, x0
+; CHECK-NOSVE-NEXT:    ldr q0, [x9, :lo12:.LCPI9_0]
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI9_2
+; CHECK-NOSVE-NEXT:    ldr q1, [x10, :lo12:.LCPI9_1]
+; CHECK-NOSVE-NEXT:    ldr q3, [x9, :lo12:.LCPI9_2]
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI9_4
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI9_3
+; CHECK-NOSVE-NEXT:    ldr q5, [x9, :lo12:.LCPI9_4]
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI9_5
+; CHECK-NOSVE-NEXT:    dup v2.2d, x11
+; CHECK-NOSVE-NEXT:    ldr q4, [x10, :lo12:.LCPI9_3]
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI9_6
+; CHECK-NOSVE-NEXT:    ldr q6, [x9, :lo12:.LCPI9_5]
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI9_7
+; CHECK-NOSVE-NEXT:    ldr q7, [x10, :lo12:.LCPI9_6]
+; CHECK-NOSVE-NEXT:    cmp x11, #1
+; CHECK-NOSVE-NEXT:    ldr q16, [x9, :lo12:.LCPI9_7]
+; CHECK-NOSVE-NEXT:    cmhi v0.2d, v2.2d, v0.2d
+; CHECK-NOSVE-NEXT:    cmhi v1.2d, v2.2d, v1.2d
+; CHECK-NOSVE-NEXT:    cmhi v3.2d, v2.2d, v3.2d
+; CHECK-NOSVE-NEXT:    cmhi v4.2d, v2.2d, v4.2d
+; CHECK-NOSVE-NEXT:    cmhi v5.2d, v2.2d, v5.2d
+; CHECK-NOSVE-NEXT:    cmhi v6.2d, v2.2d, v6.2d
+; CHECK-NOSVE-NEXT:    cmhi v7.2d, v2.2d, v7.2d
+; CHECK-NOSVE-NEXT:    cmhi v2.2d, v2.2d, v16.2d
+; CHECK-NOSVE-NEXT:    uzp1 v0.4s, v1.4s, v0.4s
+; CHECK-NOSVE-NEXT:    cset w9, lt
+; CHECK-NOSVE-NEXT:    uzp1 v1.4s, v4.4s, v3.4s
+; CHECK-NOSVE-NEXT:    uzp1 v3.4s, v6.4s, v5.4s
+; CHECK-NOSVE-NEXT:    uzp1 v2.4s, v2.4s, v7.4s
+; CHECK-NOSVE-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NOSVE-NEXT:    uzp1 v1.8h, v2.8h, v3.8h
+; CHECK-NOSVE-NEXT:    uzp1 v0.16b, v1.16b, v0.16b
+; CHECK-NOSVE-NEXT:    dup v1.16b, w9
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI9_8
+; CHECK-NOSVE-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NOSVE-NEXT:    ldr q1, [x9, :lo12:.LCPI9_8]
+; CHECK-NOSVE-NEXT:    shl v0.16b, v0.16b, #7
+; CHECK-NOSVE-NEXT:    cmlt v0.16b, v0.16b, #0
+; CHECK-NOSVE-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NOSVE-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NOSVE-NEXT:    zip1 v0.16b, v0.16b, v1.16b
+; CHECK-NOSVE-NEXT:    addv h0, v0.8h
+; CHECK-NOSVE-NEXT:    str h0, [x8, #6]
+; CHECK-NOSVE-NEXT:    str h0, [x8, #4]
+; CHECK-NOSVE-NEXT:    str h0, [x8, #2]
+; CHECK-NOSVE-NEXT:    str h0, [x8]
+; CHECK-NOSVE-NEXT:    ret
+entry:
+  %0 = call <64 x i1> @llvm.loop.dependence.war.mask.v64i1(ptr %a, ptr %b, i64 1)
+  ret <64 x i1> %0
+}
+
+define <16 x i1> @whilewr_16_split(ptr %a, ptr %b) {
+; CHECK-SVE-LABEL: whilewr_16_split:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    add x8, x1, #16
+; CHECK-SVE-NEXT:    add x9, x0, #16
+; CHECK-SVE-NEXT:    whilewr p0.h, x0, x1
+; CHECK-SVE-NEXT:    whilewr p1.h, x9, x8
+; CHECK-SVE-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    mov z1.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
+; CHECK-SVE-NEXT:    ret
+;
+; CHECK-NOSVE-LABEL: whilewr_16_split:
+; CHECK-NOSVE:       // %bb.0: // %entry
+; CHECK-NOSVE-NEXT:    sub x8, x1, x0
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI10_0
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI10_1
+; CHECK-NOSVE-NEXT:    add x8, x8, x8, lsr #63
+; CHECK-NOSVE-NEXT:    ldr q0, [x9, :lo12:.LCPI10_0]
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI10_2
+; CHECK-NOSVE-NEXT:    ldr q2, [x9, :lo12:.LCPI10_2]
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI10_4
+; CHECK-NOSVE-NEXT:    ldr q1, [x10, :lo12:.LCPI10_1]
+; CHECK-NOSVE-NEXT:    asr x8, x8, #1
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI10_3
+; CHECK-NOSVE-NEXT:    ldr q5, [x9, :lo12:.LCPI10_4]
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI10_6
+; CHECK-NOSVE-NEXT:    ldr q3, [x10, :lo12:.LCPI10_3]
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI10_5
+; CHECK-NOSVE-NEXT:    dup v4.2d, x8
+; CHECK-NOSVE-NEXT:    ldr q7, [x9, :lo12:.LCPI10_6]
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI10_7
+; CHECK-NOSVE-NEXT:    ldr q6, [x10, :lo12:.LCPI10_5]
+; CHECK-NOSVE-NEXT:    ldr q16, [x9, :lo12:.LCPI10_7]
+; CHECK-NOSVE-NEXT:    cmp x8, #1
+; CHECK-NOSVE-NEXT:    cset w8, lt
+; CHECK-NOSVE-NEXT:    cmhi v0.2d, v4.2d, v0.2d
+; CHECK-NOSVE-NEXT:    cmhi v1.2d, v4.2d, v1.2d
+; CHECK-NOSVE-NEXT:    cmhi v2.2d, v4.2d, v2.2d
+; CHECK-NOSVE-NEXT:    cmhi v3.2d, v4.2d, v3.2d
+; CHECK-NOSVE-NEXT:    cmhi v5.2d, v4.2d, v5.2d
+; CHECK-NOSVE-NEXT:    cmhi v6.2d, v4.2d, v6.2d
+; CHECK-NOSVE-NEXT:    cmhi v7.2d, v4.2d, v7.2d
+; CHECK-NOSVE-NEXT:    cmhi v4.2d, v4.2d, v16.2d
+; CHECK-NOSVE-NEXT:    uzp1 v0.4s, v1.4s, v0.4s
+; CHECK-NOSVE-NEXT:    uzp1 v1.4s, v3.4s, v2.4s
+; CHECK-NOSVE-NEXT:    uzp1 v2.4s, v6.4s, v5.4s
+; CHECK-NOSVE-NEXT:    uzp1 v3.4s, v4.4s, v7.4s
+; CHECK-NOSVE-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NOSVE-NEXT:    uzp1 v1.8h, v3.8h, v2.8h
+; CHECK-NOSVE-NEXT:    uzp1 v0.16b, v1.16b, v0.16b
+; CHECK-NOSVE-NEXT:    dup v1.16b, w8
+; CHECK-NOSVE-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NOSVE-NEXT:    ret
+entry:
+  %0 = call <16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 2)
+  ret <16 x i1> %0
+}
+
+define <32 x i1> @whilewr_16_split2(ptr %a, ptr %b) {
+; CHECK-SVE-LABEL: whilewr_16_split2:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    add x9, x1, #48
+; CHECK-SVE-NEXT:    add x10, x0, #48
+; CHECK-SVE-NEXT:    add x11, x1, #16
+; CHECK-SVE-NEXT:    whilewr p1.h, x10, x9
+; CHECK-SVE-NEXT:    add x9, x1, #32
+; CHECK-SVE-NEXT:    add x10, x0, #32
+; CHECK-SVE-NEXT:    add x12, x0, #16
+; CHECK-SVE-NEXT:    whilewr p0.h, x0, x1
+; CHECK-SVE-NEXT:    whilewr p2.h, x10, x9
+; CHECK-SVE-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    adrp x9, .LCPI11_0
+; CHECK-SVE-NEXT:    whilewr p3.h, x12, x11
+; CHECK-SVE-NEXT:    mov z2.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    mov z1.h, p2/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    mov z3.h, p3/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    uzp1 v0.16b, v1.16b, v0.16b
+; CHECK-SVE-NEXT:    uzp1 v1.16b, v2.16b, v3.16b
+; CHECK-SVE-NEXT:    ldr q2, [x9, :lo12:.LCPI11_0]
+; CHECK-SVE-NEXT:    shl v0.16b, v0.16b, #7
+; CHECK-SVE-NEXT:    shl v1.16b, v1.16b, #7
+; CHECK-SVE-NEXT:    cmlt v0.16b, v0.16b, #0
+; CHECK-SVE-NEXT:    cmlt v1.16b, v1.16b, #0
+; CHECK-SVE-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-SVE-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-SVE-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
+; CHECK-SVE-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
+; CHECK-SVE-NEXT:    zip1 v0.16b, v0.16b, v2.16b
+; CHECK-SVE-NEXT:    zip1 v1.16b, v1.16b, v3.16b
+; CHECK-SVE-NEXT:    addv h0, v0.8h
+; CHECK-SVE-NEXT:    addv h1, v1.8h
+; CHECK-SVE-NEXT:    str h0, [x8, #2]
+; CHECK-SVE-NEXT:    str h1, [x8]
+; CHECK-SVE-NEXT:    ret
+;
+; CHECK-NOSVE-LABEL: whilewr_16_split2:
+; CHECK-NOSVE:       // %bb.0: // %entry
+; CHECK-NOSVE-NEXT:    sub x9, x1, x0
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI11_0
+; CHECK-NOSVE-NEXT:    adrp x11, .LCPI11_1
+; CHECK-NOSVE-NEXT:    add x9, x9, x9, lsr #63
+; CHECK-NOSVE-NEXT:    ldr q0, [x10, :lo12:.LCPI11_0]
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI11_2
+; CHECK-NOSVE-NEXT:    ldr q2, [x10, :lo12:.LCPI11_2]
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI11_4
+; CHECK-NOSVE-NEXT:    ldr q1, [x11, :lo12:.LCPI11_1]
+; CHECK-NOSVE-NEXT:    asr x9, x9, #1
+; CHECK-NOSVE-NEXT:    adrp x11, .LCPI11_3
+; CHECK-NOSVE-NEXT:    ldr q5, [x10, :lo12:.LCPI11_4]
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI11_6
+; CHECK-NOSVE-NEXT:    ldr q3, [x11, :lo12:.LCPI11_3]
+; CHECK-NOSVE-NEXT:    adrp x11, .LCPI11_5
+; CHECK-NOSVE-NEXT:    dup v4.2d, x9
+; CHECK-NOSVE-NEXT:    ldr q7, [x10, :lo12:.LCPI11_6]
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI11_7
+; CHECK-NOSVE-NEXT:    ldr q6, [x11, :lo12:.LCPI11_5]
+; CHECK-NOSVE-NEXT:    ldr q16, [x10, :lo12:.LCPI11_7]
+; CHECK-NOSVE-NEXT:    cmp x9, #1
+; CHECK-NOSVE-NEXT:    cset w9, lt
+; CHECK-NOSVE-NEXT:    cmhi v0.2d, v4.2d, v0.2d
+; CHECK-NOSVE-NEXT:    cmhi v1.2d, v4.2d, v1.2d
+; CHECK-NOSVE-NEXT:    cmhi v2.2d, v4.2d, v2.2d
+; CHECK-NOSVE-NEXT:    cmhi v3.2d, v4.2d, v3.2d
+; CHECK-NOSVE-NEXT:    cmhi v5.2d, v4.2d, v5.2d
+; CHECK-NOSVE-NEXT:    cmhi v6.2d, v4.2d, v6.2d
+; CHECK-NOSVE-NEXT:    cmhi v7.2d, v4.2d, v7.2d
+; CHECK-NOSVE-NEXT:    cmhi v4.2d, v4.2d, v16.2d
+; CHECK-NOSVE-NEXT:    uzp1 v0.4s, v1.4s, v0.4s
+; CHECK-NOSVE-NEXT:    uzp1 v1.4s, v3.4s, v2.4s
+; CHECK-NOSVE-NEXT:    uzp1 v2.4s, v6.4s, v5.4s
+; CHECK-NOSVE-NEXT:    uzp1 v3.4s, v4.4s, v7.4s
+; CHECK-NOSVE-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NOSVE-NEXT:    uzp1 v1.8h, v3.8h, v2.8h
+; CHECK-NOSVE-NEXT:    uzp1 v0.16b, v1.16b, v0.16b
+; CHECK-NOSVE-NEXT:    dup v1.16b, w9
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI11_8
+; CHECK-NOSVE-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NOSVE-NEXT:    ldr q1, [x9, :lo12:.LCPI11_8]
+; CHECK-NOSVE-NEXT:    shl v0.16b, v0.16b, #7
+; CHECK-NOSVE-NEXT:    cmlt v0.16b, v0.16b, #0
+; CHECK-NOSVE-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NOSVE-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NOSVE-NEXT:    zip1 v0.16b, v0.16b, v1.16b
+; CHECK-NOSVE-NEXT:    addv h0, v0.8h
+; CHECK-NOSVE-NEXT:    str h0, [x8, #2]
+; CHECK-NOSVE-NEXT:    str h0, [x8]
+; CHECK-NOSVE-NEXT:    ret
+entry:
+  %0 = call <32 x i1> @llvm.loop.dependence.war.mask.v32i1(ptr %a, ptr %b, i64 2)
+  ret <32 x i1> %0
+}
+
+define <8 x i1> @whilewr_32_split(ptr %a, ptr %b) {
+; CHECK-SVE-LABEL: whilewr_32_split:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    whilewr p0.s, x0, x1
+; CHECK-SVE-NEXT:    add x10, x0, #16
+; CHECK-SVE-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    mov w8, v0.s[1]
+; CHECK-SVE-NEXT:    mov v1.16b, v0.16b
+; CHECK-SVE-NEXT:    mov w9, v0.s[2]
+; CHECK-SVE-NEXT:    mov v1.h[1], w8
+; CHECK-SVE-NEXT:    mov w8, v0.s[3]
+; CHECK-SVE-NEXT:    mov v1.h[2], w9
+; CHECK-SVE-NEXT:    add x9, x1, #16
+; CHECK-SVE-NEXT:    whilewr p0.s, x10, x9
+; CHECK-SVE-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    mov v1.h[3], w8
+; CHECK-SVE-NEXT:    fmov w8, s0
+; CHECK-SVE-NEXT:    mov w9, v0.s[1]
+; CHECK-SVE-NEXT:    mov v1.h[4], w8
+; CHECK-SVE-NEXT:    mov w8, v0.s[2]
+; CHECK-SVE-NEXT:    mov v1.h[5], w9
+; CHECK-SVE-NEXT:    mov w9, v0.s[3]
+; CHECK-SVE-NEXT:    mov v1.h[6], w8
+; CHECK-SVE-NEXT:    mov v1.h[7], w9
+; CHECK-SVE-NEXT:    xtn v0.8b, v1.8h
+; CHECK-SVE-NEXT:    ret
+;
+; CHECK-NOSVE-LABEL: whilewr_32_split:
+; CHECK-NOSVE:       // %bb.0: // %entry
+; CHECK-NOSVE-NEXT:    sub x8, x1, x0
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI12_1
+; CHECK-NOSVE-NEXT:    adrp x11, .LCPI12_2
+; CHECK-NOSVE-NEXT:    add x9, x8, #3
+; CHECK-NOSVE-NEXT:    cmp x8, #0
+; CHECK-NOSVE-NEXT:    ldr q2, [x10, :lo12:.LCPI12_1]
+; CHECK-NOSVE-NEXT:    csel x8, x9, x8, lt
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI12_0
+; CHECK-NOSVE-NEXT:    ldr q3, [x11, :lo12:.LCPI12_2]
+; CHECK-NOSVE-NEXT:    asr x8, x8, #2
+; CHECK-NOSVE-NEXT:    ldr q1, [x9, :lo12:.LCPI12_0]
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI12_3
+; CHECK-NOSVE-NEXT:    ldr q4, [x9, :lo12:.LCPI12_3]
+; CHECK-NOSVE-NEXT:    dup v0.2d, x8
+; CHECK-NOSVE-NEXT:    cmp x8, #1
+; CHECK-NOSVE-NEXT:    cset w8, lt
+; CHECK-NOSVE-NEXT:    cmhi v1.2d, v0.2d, v1.2d
+; CHECK-NOSVE-NEXT:    cmhi v2.2d, v0.2d, v2.2d
+; CHECK-NOSVE-NEXT:    cmhi v3.2d, v0.2d, v3.2d
+; CHECK-NOSVE-NEXT:    cmhi v0.2d, v0.2d, v4.2d
+; CHECK-NOSVE-NEXT:    uzp1 v1.4s, v2.4s, v1.4s
+; CHECK-NOSVE-NEXT:    uzp1 v0.4s, v0.4s, v3.4s
+; CHECK-NOSVE-NEXT:    uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-NOSVE-NEXT:    dup v1.8b, w8
+; CHECK-NOSVE-NEXT:    xtn v0.8b, v0.8h
+; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NOSVE-NEXT:    ret
+entry:
+  %0 = call <8 x i1> @llvm.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 4)
+  ret <8 x i1> %0
+}
+
+define <16 x i1> @whilewr_32_split2(ptr %a, ptr %b) {
+; CHECK-SVE-LABEL: whilewr_32_split2:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    add x8, x1, #32
+; CHECK-SVE-NEXT:    add x9, x0, #32
+; CHECK-SVE-NEXT:    whilewr p0.s, x0, x1
+; CHECK-SVE-NEXT:    whilewr p1.s, x9, x8
+; CHECK-SVE-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    mov z1.s, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    mov w8, v0.s[1]
+; CHECK-SVE-NEXT:    mov v2.16b, v0.16b
+; CHECK-SVE-NEXT:    mov w10, v0.s[2]
+; CHECK-SVE-NEXT:    mov w9, v1.s[1]
+; CHECK-SVE-NEXT:    mov v3.16b, v1.16b
+; CHECK-SVE-NEXT:    mov w11, v1.s[3]
+; CHECK-SVE-NEXT:    mov v2.h[1], w8
+; CHECK-SVE-NEXT:    mov w8, v1.s[2]
+; CHECK-SVE-NEXT:    mov v3.h[1], w9
+; CHECK-SVE-NEXT:    mov w9, v0.s[3]
+; CHECK-SVE-NEXT:    mov v2.h[2], w10
+; CHECK-SVE-NEXT:    add x10, x1, #16
+; CHECK-SVE-NEXT:    mov v3.h[2], w8
+; CHECK-SVE-NEXT:    add x8, x0, #16
+; CHECK-SVE-NEXT:    whilewr p0.s, x8, x10
+; CHECK-SVE-NEXT:    add x8, x1, #48
+; CHECK-SVE-NEXT:    add x10, x0, #48
+; CHECK-SVE-NEXT:    whilewr p1.s, x10, x8
+; CHECK-SVE-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    mov v2.h[3], w9
+; CHECK-SVE-NEXT:    mov z1.s, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    mov v3.h[3], w11
+; CHECK-SVE-NEXT:    fmov w9, s0
+; CHECK-SVE-NEXT:    mov w8, v0.s[1]
+; CHECK-SVE-NEXT:    fmov w10, s1
+; CHECK-SVE-NEXT:    mov w11, v1.s[1]
+; CHECK-SVE-NEXT:    mov v2.h[4], w9
+; CHECK-SVE-NEXT:    mov w9, v0.s[2]
+; CHECK-SVE-NEXT:    mov v3.h[4], w10
+; CHECK-SVE-NEXT:    mov w10, v1.s[2]
+; CHECK-SVE-NEXT:    mov v2.h[5], w8
+; CHECK-SVE-NEXT:    mov w8, v0.s[3]
+; CHECK-SVE-NEXT:    mov v3.h[5], w11
+; CHECK-SVE-NEXT:    mov w11, v1.s[3]
+; CHECK-SVE-NEXT:    mov v2.h[6], w9
+; CHECK-SVE-NEXT:    mov v3.h[6], w10
+; CHECK-SVE-NEXT:    mov v2.h[7], w8
+; CHECK-SVE-NEXT:    mov v3.h[7], w11
+; CHECK-SVE-NEXT:    uzp1 v0.16b, v2.16b, v3.16b
+; CHECK-SVE-NEXT:    ret
+;
+; CHECK-NOSVE-LABEL: whilewr_32_split2:
+; CHECK-NOSVE:       // %bb.0: // %entry
+; CHECK-NOSVE-NEXT:    sub x9, x1, x0
+; CHECK-NOSVE-NEXT:    adrp x8, .LCPI13_0
+; CHECK-NOSVE-NEXT:    add x10, x9, #3
+; CHECK-NOSVE-NEXT:    cmp x9, #0
+; CHECK-NOSVE-NEXT:    ldr q0, [x8, :lo12:.LCPI13_0]
+; CHECK-NOSVE-NEXT:    csel x9, x10, x9, lt
+; CHECK-NOSVE-NEXT:    adrp x8, .LCPI13_2
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI13_1
+; CHECK-NOSVE-NEXT:    asr x9, x9, #2
+; CHECK-NOSVE-NEXT:    ldr q2, [x8, :lo12:.LCPI13_2]
+; CHECK-NOSVE-NEXT:    adrp x8, .LCPI13_4
+; CHECK-NOSVE-NEXT:    ldr q1, [x10, :lo12:.LCPI13_1]
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI13_3
+; CHECK-NOSVE-NEXT:    ldr q5, [x8, :lo12:.LCPI13_4]
+; CHECK-NOSVE-NEXT:    adrp x8, .LCPI13_6
+; CHECK-NOSVE-NEXT:    ldr q3, [x10, :lo12:.LCPI13_3]
+; CHECK-NOSVE-NEXT:    dup v4.2d, x9
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI13_5
+; CHECK-NOSVE-NEXT:    ldr q7, [x8, :lo12:.LCPI13_6]
+; CHECK-NOSVE-NEXT:    adrp x8, .LCPI13_7
+; CHECK-NOSVE-NEXT:    ldr q6, [x10, :lo12:.LCPI13_5]
+; CHECK-NOSVE-NEXT:    ldr q16, [x8, :lo12:.LCPI13_7]
+; CHECK-NOSVE-NEXT:    cmp x9, #1
+; CHECK-NOSVE-NEXT:    cmhi v0.2d, v4.2d, v0.2d
+; CHECK-NOSVE-NEXT:    cmhi v1.2d, v4.2d, v1.2d
+; CHECK-NOSVE-NEXT:    cmhi v2.2d, v4.2d, v2.2d
+; CHECK-NOSVE-NEXT:    cmhi v3.2d, v4.2d, v3.2d
+; CHECK-NOSVE-NEXT:    cmhi v5.2d, v4.2d, v5.2d
+; CHECK-NOSVE-NEXT:    cmhi v6.2d, v4.2d, v6.2d
+; CHECK-NOSVE-NEXT:    cmhi v7.2d, v4.2d, v7.2d
+; CHECK-NOSVE-NEXT:    cmhi v4.2d, v4.2d, v16.2d
+; CHECK-NOSVE-NEXT:    cset w8, lt
+; CHECK-NOSVE-NEXT:    uzp1 v0.4s, v1.4s, v0.4s
+; CHECK-NOSVE-NEXT:    uzp1 v1.4s, v3.4s, v2.4s
+; CHECK-NOSVE-NEXT:    uzp1 v2.4s, v6.4s, v5.4s
+; CHECK-NOSVE-NEXT:    uzp1 v3.4s, v4.4s, v7.4s
+; CHECK-NOSVE-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NOSVE-NEXT:    uzp1 v1.8h, v3.8h, v2.8h
+; CHECK-NOSVE-NEXT:    uzp1 v0.16b, v1.16b, v0.16b
+; CHECK-NOSVE-NEXT:    dup v1.16b, w8
+; CHECK-NOSVE-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NOSVE-NEXT:    ret
+entry:
+  %0 = call <16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 4)
+  ret <16 x i1> %0
+}
+
+define <4 x i1> @whilewr_64_split(ptr %a, ptr %b) {
+; CHECK-SVE-LABEL: whilewr_64_split:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    whilewr p0.d, x0, x1
+; CHECK-SVE-NEXT:    add x8, x1, #16
+; CHECK-SVE-NEXT:    add x9, x0, #16
+; CHECK-SVE-NEXT:    mov z0.d, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    whilewr p0.d, x9, x8
+; CHECK-SVE-NEXT:    mov z1.d, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    mov v0.s[1], v0.s[2]
+; CHECK-SVE-NEXT:    mov v0.s[2], v1.s[0]
+; CHECK-SVE-NEXT:    mov v0.s[3], v1.s[2]
+; CHECK-SVE-NEXT:    xtn v0.4h, v0.4s
+; CHECK-SVE-NEXT:    ret
+;
+; CHECK-NOSVE-LABEL: whilewr_64_split:
+; CHECK-NOSVE:       // %bb.0: // %entry
+; CHECK-NOSVE-NEXT:    sub x9, x1, x0
+; CHECK-NOSVE-NEXT:    adrp x8, .LCPI14_0
+; CHECK-NOSVE-NEXT:    add x10, x9, #7
+; CHECK-NOSVE-NEXT:    cmp x9, #0
+; CHECK-NOSVE-NEXT:    ldr q1, [x8, :lo12:.LCPI14_0]
+; CHECK-NOSVE-NEXT:    csel x9, x10, x9, lt
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI14_1
+; CHECK-NOSVE-NEXT:    asr x9, x9, #3
+; CHECK-NOSVE-NEXT:    ldr q2, [x10, :lo12:.LCPI14_1]
+; CHECK-NOSVE-NEXT:    dup v0.2d, x9
+; CHECK-NOSVE-NEXT:    cmp x9, #1
+; CHECK-NOSVE-NEXT:    cset w8, lt
+; CHECK-NOSVE-NEXT:    cmhi v1.2d, v0.2d, v1.2d
+; CHECK-NOSVE-NEXT:    cmhi v0.2d, v0.2d, v2.2d
+; CHECK-NOSVE-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-NOSVE-NEXT:    dup v1.4h, w8
+; CHECK-NOSVE-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NOSVE-NEXT:    ret
+entry:
+  %0 = call <4 x i1> @llvm.loop.dependence.war.mask.v4i1(ptr %a, ptr %b, i64 8)
+  ret <4 x i1> %0
+}
+
+define <8 x i1> @whilewr_64_split2(ptr %a, ptr %b) {
+; CHECK-SVE-LABEL: whilewr_64_split2:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    add x8, x1, #32
+; CHECK-SVE-NEXT:    add x9, x0, #32
+; CHECK-SVE-NEXT:    whilewr p0.d, x0, x1
+; CHECK-SVE-NEXT:    whilewr p1.d, x9, x8
+; CHECK-SVE-NEXT:    add x8, x1, #16
+; CHECK-SVE-NEXT:    add x9, x0, #16
+; CHECK-SVE-NEXT:    mov z0.d, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    whilewr p0.d, x9, x8
+; CHECK-SVE-NEXT:    add x8, x1, #48
+; CHECK-SVE-NEXT:    mov z1.d, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    add x9, x0, #48
+; CHECK-SVE-NEXT:    whilewr p1.d, x9, x8
+; CHECK-SVE-NEXT:    mov z2.d, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    mov v0.s[1], v0.s[2]
+; CHECK-SVE-NEXT:    mov v1.s[1], v1.s[2]
+; CHECK-SVE-NEXT:    mov z3.d, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    mov v0.s[2], v2.s[0]
+; CHECK-SVE-NEXT:    mov v1.s[2], v3.s[0]
+; CHECK-SVE-NEXT:    mov v0.s[3], v2.s[2]
+; CHECK-SVE-NEXT:    mov v1.s[3], v3.s[2]
+; CHECK-SVE-NEXT:    uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-SVE-NEXT:    xtn v0.8b, v0.8h
+; CHECK-SVE-NEXT:    ret
+;
+; CHECK-NOSVE-LABEL: whilewr_64_split2:
+; CHECK-NOSVE:       // %bb.0: // %entry
+; CHECK-NOSVE-NEXT:    sub x8, x1, x0
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI15_1
+; CHECK-NOSVE-NEXT:    adrp x11, .LCPI15_2
+; CHECK-NOSVE-NEXT:    add x9, x8, #7
+; CHECK-NOSVE-NEXT:    cmp x8, #0
+; CHECK-NOSVE-NEXT:    ldr q2, [x10, :lo12:.LCPI15_1]
+; CHECK-NOSVE-NEXT:    csel x8, x9, x8, lt
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI15_0
+; CHECK-NOSVE-NEXT:    ldr q3, [x11, :lo12:.LCPI15_2]
+; CHECK-NOSVE-NEXT:    asr x8, x8, #3
+; CHECK-NOSVE-NEXT:    ldr q1, [x9, :lo12:.LCPI15_0]
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI15_3
+; CHECK-NOSVE-NEXT:    ldr q4, [x9, :lo12:.LCPI15_3]
+; CHECK-NOSVE-NEXT:    dup v0.2d, x8
+; CHECK-NOSVE-NEXT:    cmp x8, #1
+; CHECK-NOSVE-NEXT:    cset w8, lt
+; CHECK-NOSVE-NEXT:    cmhi v1.2d, v0.2d, v1.2d
+; CHECK-NOSVE-NEXT:    cmhi v2.2d, v0.2d, v2.2d
+; CHECK-NOSVE-NEXT:    cmhi v3.2d, v0.2d, v3.2d
+; CHECK-NOSVE-NEXT:    cmhi v0.2d, v0.2d, v4.2d
+; CHECK-NOSVE-NEXT:    uzp1 v1.4s, v2.4s, v1.4s
+; CHECK-NOSVE-NEXT:    uzp1 v0.4s, v0.4s, v3.4s
+; CHECK-NOSVE-NEXT:    uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-NOSVE-NEXT:    dup v1.8b, w8
+; CHECK-NOSVE-NEXT:    xtn v0.8b, v0.8h
+; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NOSVE-NEXT:    ret
+entry:
+  %0 = call <8 x i1> @llvm.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 8)
+  ret <8 x i1> %0
+}
+
+define <9 x i1> @whilewr_8_widen(ptr %a, ptr %b) {
+; CHECK-SVE-LABEL: whilewr_8_widen:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    whilewr p0.b, x0, x1
+; CHECK-SVE-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    umov w9, v0.b[0]
+; CHECK-SVE-NEXT:    umov w10, v0.b[1]
+; CHECK-SVE-NEXT:    umov w11, v0.b[2]
+; CHECK-SVE-NEXT:    umov w12, v0.b[7]
+; CHECK-SVE-NEXT:    and w9, w9, #0x1
+; CHECK-SVE-NEXT:    bfi w9, w10, #1, #1
+; CHECK-SVE-NEXT:    umov w10, v0.b[3]
+; CHECK-SVE-NEXT:    bfi w9, w11, #2, #1
+; CHECK-SVE-NEXT:    umov w11, v0.b[4]
+; CHECK-SVE-NEXT:    bfi w9, w10, #3, #1
+; CHECK-SVE-NEXT:    umov w10, v0.b[5]
+; CHECK-SVE-NEXT:    bfi w9, w11, #4, #1
+; CHECK-SVE-NEXT:    umov w11, v0.b[6]
+; CHECK-SVE-NEXT:    bfi w9, w10, #5, #1
+; CHECK-SVE-NEXT:    umov w10, v0.b[8]
+; CHECK-SVE-NEXT:    bfi w9, w11, #6, #1
+; CHECK-SVE-NEXT:    ubfiz w11, w12, #7, #1
+; CHECK-SVE-NEXT:    orr w9, w9, w11
+; CHECK-SVE-NEXT:    orr w9, w9, w10, lsl #8
+; CHECK-SVE-NEXT:    and w9, w9, #0x1ff
+; CHECK-SVE-NEXT:    strh w9, [x8]
+; CHECK-SVE-NEXT:    ret
+;
+; CHECK-NOSVE-LABEL: whilewr_8_widen:
+; CHECK-NOSVE:       // %bb.0: // %entry
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI16_0
+; CHECK-NOSVE-NEXT:    sub x11, x1, x0
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI16_1
+; CHECK-NOSVE-NEXT:    adrp x12, .LCPI16_2
+; CHECK-NOSVE-NEXT:    ldr q0, [x9, :lo12:.LCPI16_0]
+; CHECK-NOSVE-NEXT:    dup v1.2d, x11
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI16_3
+; CHECK-NOSVE-NEXT:    ldr q2, [x10, :lo12:.LCPI16_1]
+; CHECK-NOSVE-NEXT:    ldr q3, [x12, :lo12:.LCPI16_2]
+; CHECK-NOSVE-NEXT:    ldr q4, [x9, :lo12:.LCPI16_3]
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI16_4
+; CHECK-NOSVE-NEXT:    cmp x11, #1
+; CHECK-NOSVE-NEXT:    cmhi v0.2d, v1.2d, v0.2d
+; CHECK-NOSVE-NEXT:    cmhi v2.2d, v1.2d, v2.2d
+; CHECK-NOSVE-NEXT:    cmhi v3.2d, v1.2d, v3.2d
+; CHECK-NOSVE-NEXT:    cmhi v4.2d, v1.2d, v4.2d
+; CHECK-NOSVE-NEXT:    ldr q5, [x10, :lo12:.LCPI16_4]
+; CHECK-NOSVE-NEXT:    cset w9, lt
+; CHECK-NOSVE-NEXT:    uzp1 v0.4s, v2.4s, v0.4s
+; CHECK-NOSVE-NEXT:    cmhi v1.2d, v1.2d, v5.2d
+; CHECK-NOSVE-NEXT:    uzp1 v2.4s, v4.4s, v3.4s
+; CHECK-NOSVE-NEXT:    xtn v1.2s, v1.2d
+; CHECK-NOSVE-NEXT:    uzp1 v0.8h, v2.8h, v0.8h
+; CHECK-NOSVE-NEXT:    uzp1 v1.4h, v1.4h, v0.4h
+; CHECK-NOSVE-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
+; CHECK-NOSVE-NEXT:    dup v1.16b, w9
+; CHECK-NOSVE-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NOSVE-NEXT:    umov w9, v0.b[0]
+; CHECK-NOSVE-NEXT:    umov w10, v0.b[1]
+; CHECK-NOSVE-NEXT:    umov w11, v0.b[2]
+; CHECK-NOSVE-NEXT:    umov w12, v0.b[7]
+; CHECK-NOSVE-NEXT:    and w9, w9, #0x1
+; CHECK-NOSVE-NEXT:    bfi w9, w10, #1, #1
+; CHECK-NOSVE-NEXT:    umov w10, v0.b[3]
+; CHECK-NOSVE-NEXT:    bfi w9, w11, #2, #1
+; CHECK-NOSVE-NEXT:    umov w11, v0.b[4]
+; CHECK-NOSVE-NEXT:    bfi w9, w10, #3, #1
+; CHECK-NOSVE-NEXT:    umov w10, v0.b[5]
+; CHECK-NOSVE-NEXT:    bfi w9, w11, #4, #1
+; CHECK-NOSVE-NEXT:    umov w11, v0.b[6]
+; CHECK-NOSVE-NEXT:    bfi w9, w10, #5, #1
+; CHECK-NOSVE-NEXT:    umov w10, v0.b[8]
+; CHECK-NOSVE-NEXT:    bfi w9, w11, #6, #1
+; CHECK-NOSVE-NEXT:    ubfiz w11, w12, #7, #1
+; CHECK-NOSVE-NEXT:    orr w9, w9, w11
+; CHECK-NOSVE-NEXT:    orr w9, w9, w10, lsl #8
+; CHECK-NOSVE-NEXT:    and w9, w9, #0x1ff
+; CHECK-NOSVE-NEXT:    strh w9, [x8]
+; CHECK-NOSVE-NEXT:    ret
+entry:
+  %0 = call <9 x i1> @llvm.loop.dependence.war.mask.v9i1(ptr %a, ptr %b, i64 1)
+  ret <9 x i1> %0
+}
+
+define <7 x i1> @whilewr_16_widen(ptr %a, ptr %b) {
+; CHECK-SVE-LABEL: whilewr_16_widen:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    whilewr p0.h, x0, x1
+; CHECK-SVE-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    xtn v0.8b, v0.8h
+; CHECK-SVE-NEXT:    umov w0, v0.b[0]
+; CHECK-SVE-NEXT:    umov w1, v0.b[1]
+; CHECK-SVE-NEXT:    umov w2, v0.b[2]
+; CHECK-SVE-NEXT:    umov w3, v0.b[3]
+; CHECK-SVE-NEXT:    umov w4, v0.b[4]
+; CHECK-SVE-NEXT:    umov w5, v0.b[5]
+; CHECK-SVE-NEXT:    umov w6, v0.b[6]
+; CHECK-SVE-NEXT:    ret
+;
+; CHECK-NOSVE-LABEL: whilewr_16_widen:
+; CHECK-NOSVE:       // %bb.0: // %entry
+; CHECK-NOSVE-NEXT:    sub x8, x1, x0
+; CHECK-NOSVE-NEXT:    adrp x9, .LCPI17_0
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI17_1
+; CHECK-NOSVE-NEXT:    add x8, x8, x8, lsr #63
+; CHECK-NOSVE-NEXT:    adrp x11, .LCPI17_2
+; CHECK-NOSVE-NEXT:    adrp x12, .LCPI17_3
+; CHECK-NOSVE-NEXT:    ldr q1, [x9, :lo12:.LCPI17_0]
+; CHECK-NOSVE-NEXT:    ldr q2, [x10, :lo12:.LCPI17_1]
+; CHECK-NOSVE-NEXT:    ldr q3, [x11, :lo12:.LCPI17_2]
+; CHECK-NOSVE-NEXT:    asr x8, x8, #1
+; CHECK-NOSVE-NEXT:    ldr q4, [x12, :lo12:.LCPI17_3]
+; CHECK-NOSVE-NEXT:    dup v0.2d, x8
+; CHECK-NOSVE-NEXT:    cmp x8, #1
+; CHECK-NOSVE-NEXT:    cset w8, lt
+; CHECK-NOSVE-NEXT:    cmhi v1.2d, v0.2d, v1.2d
+; CHECK-NOSVE-NEXT:    cmhi v2.2d, v0.2d, v2.2d
+; CHECK-NOSVE-NEXT:    cmhi v3.2d, v0.2d, v3.2d
+; CHECK-NOSVE-NEXT:    cmhi v0.2d, v0.2d, v4.2d
+; CHECK-NOSVE-NEXT:    uzp1 v1.4s, v2.4s, v1.4s
+; CHECK-NOSVE-NEXT:    uzp1 v0.4s, v0.4s, v3.4s
+; CHECK-NOSVE-NEXT:    uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-NOSVE-NEXT:    dup v1.8b, w8
+; CHECK-NOSVE-NEXT:    xtn v0.8b, v0.8h
+; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NOSVE-NEXT:    umov w0, v0.b[0]
+; CHECK-NOSVE-NEXT:    umov w1, v0.b[1]
+; CHECK-NOSVE-NEXT:    umov w2, v0.b[2]
+; CHECK-NOSVE-NEXT:    umov w3, v0.b[3]
+; CHECK-NOSVE-NEXT:    umov w4, v0.b[4]
+; CHECK-NOSVE-NEXT:    umov w5, v0.b[5]
+; CHECK-NOSVE-NEXT:    umov w6, v0.b[6]
+; CHECK-NOSVE-NEXT:    ret
+entry:
+  %0 = call <7 x i1> @llvm.loop.dependence.war.mask.v7i1(ptr %a, ptr %b, i64 2)
+  ret <7 x i1> %0
+}
+
+define <3 x i1> @whilewr_32_widen(ptr %a, ptr %b) {
+; CHECK-SVE-LABEL: whilewr_32_widen:
+; CHECK-SVE:       // %bb.0: // %entry
+; CHECK-SVE-NEXT:    whilewr p0.s, x0, x1
+; CHECK-SVE-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT:    xtn v0.4h, v0.4s
+; CHECK-SVE-NEXT:    umov w0, v0.h[0]
+; CHECK-SVE-NEXT:    umov w1, v0.h[1]
+; CHECK-SVE-NEXT:    umov w2, v0.h[2]
+; CHECK-SVE-NEXT:    ret
+;
+; CHECK-NOSVE-LABEL: whilewr_32_widen:
+; CHECK-NOSVE:       // %bb.0: // %entry
+; CHECK-NOSVE-NEXT:    sub x9, x1, x0
+; CHECK-NOSVE-NEXT:    adrp x8, .LCPI18_0
+; CHECK-NOSVE-NEXT:    add x10, x9, #3
+; CHECK-NOSVE-NEXT:    cmp x9, #0
+; CHECK-NOSVE-NEXT:    ldr q1, [x8, :lo12:.LCPI18_0]
+; CHECK-NOSVE-NEXT:    csel x9, x10, x9, lt
+; CHECK-NOSVE-NEXT:    adrp x10, .LCPI18_1
+; CHECK-NOSVE-NEXT:    asr x9, x9, #2
+; CHECK-NOSVE-NEXT:    ldr q2, [x10, :lo12:.LCPI18_1]
+; CHECK-NOSVE-NEXT:    dup v0.2d, x9
+; CHECK-NOSVE-NEXT:    cmp x9, #1
+; CHECK-NOSVE-NEXT:    cset w8, lt
+; CHECK-NOSVE-NEXT:    cmhi v1.2d, v0.2d, v1.2d
+; CHECK-NOSVE-NEXT:    cmhi v0.2d, v0.2d, v2.2d
+; CHECK-NOSVE-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-NOSVE-NEXT:    dup v1.4h, w8
+; CHECK-NOSVE-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NOSVE-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NOSVE-NEXT:    umov w0, v0.h[0]
+; CHECK-NOSVE-NEXT:    umov w1, v0.h[1]
+; CHECK-NOSVE-NEXT:    umov w2, v0.h[2]
+; CHECK-NOSVE-NEXT:    ret
+entry:
+  %0 = call <3 x i1> @llvm.loop.dependence.war.mask.v3i1(ptr %a, ptr %b, i64 4)
+  ret <3 x i1> %0
+}

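For reference, a minimal, hypothetical consumer of the mask (the names
%src and %dst are illustrative; this assumes a write-after-read hazard
between them with 1-byte elements). Lanes that would alias within one
vector iteration are disabled, so the masked load and store only touch
safe lanes:

    %safe = call <16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %src, ptr %dst, i64 1)
    %v = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %src, i32 1, <16 x i1> %safe, <16 x i8> poison)
    call void @llvm.masked.store.v16i8.p0(<16 x i8> %v, ptr %dst, i32 1, <16 x i1> %safe)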