[llvm] [Intrinsics][AArch64] Add intrinsic to mask off aliasing vector lanes (PR #117007)
Sam Tebbs via llvm-commits
llvm-commits at lists.llvm.org
Tue Jun 10 06:53:35 PDT 2025
https://github.com/SamTebbs33 updated https://github.com/llvm/llvm-project/pull/117007
>From 129b7b5083d571da3f816fc89b1b89306141ed62 Mon Sep 17 00:00:00 2001
From: Sam Tebbs <samuel.tebbs at arm.com>
Date: Fri, 15 Nov 2024 10:24:46 +0000
Subject: [PATCH 01/25] [Intrinsics][AArch64] Add intrinsic to mask off
aliasing vector lanes
It can be unsafe to load a vector from an address and write a vector to
an address if those two addresses have overlapping lanes within a
vectorised loop iteration.
This PR adds an intrinsic designed to create a mask in which a lane is
disabled if its addresses overlap between the two pointer arguments, so that
only safe lanes are loaded, operated on and stored.
Along with the two pointer parameters, the intrinsic also takes an
immediate that represents the size in bytes of the vector element
types, as well as an immediate i1 that is true if there is a
write-after-read hazard or false if there is a read-after-write hazard.
This will be used by #100579 and replaces the existing lowering for
whilewr, since that is no longer needed now that we have the intrinsic.
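
For illustration, a minimal sketch of the intended use inside a
vectorised loop body, assuming 4 x i32 vectors (the surrounding values
here are hypothetical):

    %a.int = ptrtoint ptr %a to i64
    %b.int = ptrtoint ptr %b to i64
    %mask = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i64(i64 %a.int, i64 %b.int, i64 4, i1 1)
    %vecA = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %a, i32 4, <4 x i1> %mask, <4 x i32> poison)
    ; ... operate on the enabled lanes only ...
    call void @llvm.masked.store.v4i32.p0(<4 x i32> %vecA, ptr %b, i32 4, <4 x i1> %mask)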
---
llvm/docs/LangRef.rst | 84 ++++
llvm/include/llvm/CodeGen/TargetLowering.h | 7 +
llvm/include/llvm/IR/Intrinsics.td | 5 +
.../SelectionDAG/SelectionDAGBuilder.cpp | 50 +++
.../Target/AArch64/AArch64ISelLowering.cpp | 85 +++-
llvm/lib/Target/AArch64/AArch64ISelLowering.h | 3 +
.../lib/Target/AArch64/AArch64SVEInstrInfo.td | 11 +-
llvm/lib/Target/AArch64/SVEInstrFormats.td | 10 +-
llvm/test/CodeGen/AArch64/alias_mask.ll | 421 ++++++++++++++++++
.../CodeGen/AArch64/alias_mask_scalable.ll | 195 ++++++++
10 files changed, 861 insertions(+), 10 deletions(-)
create mode 100644 llvm/test/CodeGen/AArch64/alias_mask.ll
create mode 100644 llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 8c0a046d3a7e9..cf3816dcc1666 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -23969,6 +23969,90 @@ Examples:
%active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 %elem0, i64 429)
%wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %3, i32 4, <4 x i1> %active.lane.mask, <4 x i32> poison)
+.. _int_experimental_get_alias_lane_mask:
+
+'``llvm.experimental.get.alias.lane.mask.*``' Intrinsics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+This is an overloaded intrinsic.
+
+::
+
+ declare <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i64(i64 %ptrA, i64 %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+ declare <8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %ptrA, i64 %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+ declare <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1.i64.i32(i64 %ptrA, i64 %ptrB, i32 immarg %elementSize, i1 immarg %writeAfterRead)
+ declare <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.nxv16i1.i64.i32(i64 %ptrA, i64 %ptrB, i32 immarg %elementSize, i1 immarg %writeAfterRead)
+
+
+Overview:
+"""""""""
+
+Create a mask representing lanes that do or do not overlap between two
+pointers across one vector loop iteration.
+
+
+Arguments:
+""""""""""
+
+The first two arguments have the same scalar integer type.
+The final two arguments are immediates, and the result is a vector with the
+i1 element type.
+
+Semantics:
+""""""""""
+
+The intrinsic will return poison if ``%ptrA`` and ``%ptrB`` are within
+VF * ``%elementSize`` of each other and ``%ptrA`` + VF * ``%elementSize`` wraps.
+In other cases when ``%writeAfterRead`` is true, the
+'``llvm.experimental.get.alias.lane.mask.*``' intrinsics are semantically
+equivalent to:
+
+::
+
+ %diff = (%ptrB - %ptrA) / %elementSize
+ %m[i] = (icmp ult i, %diff) || (%diff <= 0)
+
+When the return value is not poison and ``%writeAfterRead`` is false, the
+'``llvm.experimental.get.alias.lane.mask.*``' intrinsics are semantically
+equivalent to:
+
+::
+
+ %diff = abs(%ptrB - %ptrA) / %elementSize
+ %m[i] = (icmp ult i, %diff) || (%diff == 0)
+
+where ``%m`` is a vector (mask) of active/inactive lanes with its elements
+indexed by ``i``, ``%ptrA`` and ``%ptrB`` are the two integer pointer
+arguments to ``llvm.experimental.get.alias.lane.mask.*``, and
+``%elementSize`` is the first immediate argument. The ``%writeAfterRead``
+argument is expected to be true if ``%ptrB`` is stored to after ``%ptrA`` is
+read from.
+The above is equivalent to:
+
+::
+
+ %m = @llvm.experimental.get.alias.lane.mask(%ptrA, %ptrB, %elementSize, %writeAfterRead)
+
+This can, for example, be emitted by the loop vectorizer, in which case
+``%ptrA`` is a pointer that is read from within the loop and ``%ptrB`` is a
+pointer that is stored to within the loop.
+If the difference between these pointers (in units of ``%elementSize``) is
+less than the vector factor, then they overlap (alias) within a loop
+iteration.
+For example, if ``%ptrA`` is 20 and ``%ptrB`` is 23 with a vector factor of 8,
+then lanes 3, 4, 5, 6 and 7 of the vector loaded from ``%ptrA``
+share addresses with lanes 0, 1, 2, 3 and 4 of the vector stored to at
+``%ptrB``.
+
+
+Examples:
+"""""""""
+
+.. code-block:: llvm
+
+ %alias.lane.mask = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i32(i64 %ptrA, i64 %ptrB, i32 4, i1 1)
+ %vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptrA, i32 4, <4 x i1> %alias.lane.mask, <4 x i32> poison)
+ [...]
+ call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, <4 x i32>* %ptrB, i32 4, <4 x i1> %alias.lane.mask)
.. _int_experimental_vp_splice:
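
To make the new semantics concrete: for the worked example in the text above,
with %ptrA = 20, %ptrB = 23, an element size of 1 and a write-after-read
hazard, the element distance is (23 - 20) / 1 = 3, so only lanes i < 3 are
enabled:

    %mask = call <8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 20, i64 23, i64 1, i1 1)
    ; %mask == <i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 0>

With %writeAfterRead false, the distance is abs(23 - 20) / 1 = 3 and only a
distance of 0 would force all lanes on, so the same mask results here.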
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 9c453f51e129d..622876c81505f 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -482,6 +482,13 @@ class LLVM_ABI TargetLoweringBase {
return true;
}
+ /// Return true if the @llvm.experimental.get.alias.lane.mask intrinsic should
+ /// be expanded using generic code in SelectionDAGBuilder.
+ virtual bool shouldExpandGetAliasLaneMask(EVT VT, EVT PtrVT,
+ unsigned EltSize) const {
+ return true;
+ }
+
virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF,
bool IsScalable) const {
return true;
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index d3899056bc240..3d45375f590ac 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -2399,6 +2399,11 @@ let IntrProperties = [IntrNoMem, ImmArg<ArgIndex<1>>] in {
llvm_i32_ty]>;
}
+def int_experimental_get_alias_lane_mask:
+ DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+ [llvm_anyint_ty, LLVMMatchType<1>, llvm_anyint_ty, llvm_i1_ty],
+ [IntrNoMem, IntrNoSync, IntrWillReturn, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
+
def int_get_active_lane_mask:
DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[llvm_anyint_ty, LLVMMatchType<1>],
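
For reference, these overloads mangle on the result vector type, the pointer
integer type and the element-size type, instantiating to declarations like
the ones added to LangRef above:

    declare <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i64(i64, i64, i64 immarg, i1 immarg)
    declare <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.nxv16i1.i64.i32(i64, i64, i32 immarg, i1 immarg)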
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 77771ee56e828..a62c6834e8fed 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -8244,6 +8244,56 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
visitVectorExtractLastActive(I, Intrinsic);
return;
}
+ case Intrinsic::experimental_get_alias_lane_mask: {
+ SDValue SourceValue = getValue(I.getOperand(0));
+ SDValue SinkValue = getValue(I.getOperand(1));
+ SDValue EltSize = getValue(I.getOperand(2));
+ bool IsWriteAfterRead =
+ cast<ConstantSDNode>(getValue(I.getOperand(3)))->getZExtValue() != 0;
+ auto IntrinsicVT = EVT::getEVT(I.getType());
+ auto PtrVT = SourceValue->getValueType(0);
+
+ if (!TLI.shouldExpandGetAliasLaneMask(
+ IntrinsicVT, PtrVT,
+ cast<ConstantSDNode>(EltSize)->getSExtValue())) {
+ visitTargetIntrinsic(I, Intrinsic);
+ return;
+ }
+
+ SDValue Diff = DAG.getNode(ISD::SUB, sdl, PtrVT, SinkValue, SourceValue);
+ if (!IsWriteAfterRead)
+ Diff = DAG.getNode(ISD::ABS, sdl, PtrVT, Diff);
+
+ Diff = DAG.getNode(ISD::SDIV, sdl, PtrVT, Diff, EltSize);
+ SDValue Zero = DAG.getTargetConstant(0, sdl, PtrVT);
+
+ // If the difference is positive then some elements may alias
+ auto CmpVT =
+ TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), PtrVT);
+ SDValue Cmp = DAG.getSetCC(sdl, CmpVT, Diff, Zero,
+ IsWriteAfterRead ? ISD::SETLE : ISD::SETEQ);
+
+ // Splat the compare result then OR it with a lane mask
+ SDValue Splat = DAG.getSplat(IntrinsicVT, sdl, Cmp);
+
+ SDValue DiffMask;
+ // Don't emit an active lane mask if the target doesn't support it
+ if (TLI.shouldExpandGetActiveLaneMask(IntrinsicVT, PtrVT)) {
+ EVT VecTy = EVT::getVectorVT(*DAG.getContext(), PtrVT,
+ IntrinsicVT.getVectorElementCount());
+ SDValue DiffSplat = DAG.getSplat(VecTy, sdl, Diff);
+ SDValue VectorStep = DAG.getStepVector(sdl, VecTy);
+ DiffMask = DAG.getSetCC(sdl, IntrinsicVT, VectorStep, DiffSplat,
+ ISD::CondCode::SETULT);
+ } else {
+ DiffMask = DAG.getNode(
+ ISD::INTRINSIC_WO_CHAIN, sdl, IntrinsicVT,
+ DAG.getTargetConstant(Intrinsic::get_active_lane_mask, sdl, MVT::i64),
+ Zero, Diff);
+ }
+ SDValue Or = DAG.getNode(ISD::OR, sdl, IntrinsicVT, DiffMask, Splat);
+ setValue(&I, Or);
+ }
}
}
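
For a <4 x i1> write-after-read mask with a 4-byte element size, the
expansion built above corresponds roughly to the following IR (a sketch: the
get.active.lane.mask fallback path is elided and the value names are
illustrative):

    %diff.bytes = sub i64 %ptrB, %ptrA
    %diff = sdiv i64 %diff.bytes, 4
    ; A non-positive element distance means there is no write-after-read
    ; hazard, so every lane is enabled.
    %nohazard = icmp sle i64 %diff, 0
    %nohazard.ins = insertelement <4 x i1> poison, i1 %nohazard, i64 0
    %nohazard.splat = shufflevector <4 x i1> %nohazard.ins, <4 x i1> poison, <4 x i32> zeroinitializer
    %diff.ins = insertelement <4 x i64> poison, i64 %diff, i64 0
    %diff.splat = shufflevector <4 x i64> %diff.ins, <4 x i64> poison, <4 x i32> zeroinitializer
    ; Lane i is safe when i < %diff.
    %lane.safe = icmp ult <4 x i64> <i64 0, i64 1, i64 2, i64 3>, %diff.splat
    %mask = or <4 x i1> %lane.safe, %nohazard.splat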
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index ae34e6b7dcc3c..f2e4c87a73079 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2150,6 +2150,25 @@ bool AArch64TargetLowering::shouldExpandGetActiveLaneMask(EVT ResVT,
return false;
}
+bool AArch64TargetLowering::shouldExpandGetAliasLaneMask(
+ EVT VT, EVT PtrVT, unsigned EltSize) const {
+ if (!Subtarget->hasSVE2())
+ return true;
+
+ if (PtrVT != MVT::i64)
+ return true;
+
+ if (VT == MVT::v2i1 || VT == MVT::nxv2i1)
+ return EltSize != 8;
+ if (VT == MVT::v4i1 || VT == MVT::nxv4i1)
+ return EltSize != 4;
+ if (VT == MVT::v8i1 || VT == MVT::nxv8i1)
+ return EltSize != 2;
+ if (VT == MVT::v16i1 || VT == MVT::nxv16i1)
+ return EltSize != 1;
+ return true;
+}
+
bool AArch64TargetLowering::shouldExpandPartialReductionIntrinsic(
const IntrinsicInst *I) const {
assert(I->getIntrinsicID() ==
@@ -5841,6 +5860,18 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
EVT PtrVT = getPointerTy(DAG.getDataLayout());
return DAG.getNode(AArch64ISD::THREAD_POINTER, dl, PtrVT);
}
+ case Intrinsic::aarch64_sve_whilewr_b:
+ case Intrinsic::aarch64_sve_whilewr_h:
+ case Intrinsic::aarch64_sve_whilewr_s:
+ case Intrinsic::aarch64_sve_whilewr_d:
+ return DAG.getNode(AArch64ISD::WHILEWR, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2));
+ case Intrinsic::aarch64_sve_whilerw_b:
+ case Intrinsic::aarch64_sve_whilerw_h:
+ case Intrinsic::aarch64_sve_whilerw_s:
+ case Intrinsic::aarch64_sve_whilerw_d:
+ return DAG.getNode(AArch64ISD::WHILERW, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2));
case Intrinsic::aarch64_neon_abs: {
EVT Ty = Op.getValueType();
if (Ty == MVT::i64) {
@@ -6317,6 +6348,52 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
return DAG.getNode(AArch64ISD::USDOT, dl, Op.getValueType(),
Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
}
+ case Intrinsic::experimental_get_alias_lane_mask: {
+ unsigned IntrinsicID = 0;
+ uint64_t EltSize = Op.getOperand(3)->getAsZExtVal();
+ bool IsWriteAfterRead = Op.getOperand(4)->getAsZExtVal() == 1;
+ switch (EltSize) {
+ case 1:
+ IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_b
+ : Intrinsic::aarch64_sve_whilerw_b;
+ break;
+ case 2:
+ IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_h
+ : Intrinsic::aarch64_sve_whilerw_h;
+ break;
+ case 4:
+ IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_s
+ : Intrinsic::aarch64_sve_whilerw_s;
+ break;
+ case 8:
+ IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_d
+ : Intrinsic::aarch64_sve_whilerw_d;
+ break;
+ default:
+ llvm_unreachable("Unexpected element size for get.alias.lane.mask");
+ break;
+ }
+ SDValue ID = DAG.getTargetConstant(IntrinsicID, dl, MVT::i64);
+
+ EVT VT = Op.getValueType();
+ if (VT.isScalableVector())
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, ID, Op.getOperand(1),
+ Op.getOperand(2));
+
+ // We can use the SVE whilewr/whilerw instruction to lower this
+ // intrinsic by creating the appropriate sequence of scalable vector
+ // operations and then extracting a fixed-width subvector from the scalable
+ // vector.
+
+ EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
+ EVT WhileVT = ContainerVT.changeElementType(MVT::i1);
+
+ SDValue Mask = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, WhileVT, ID,
+ Op.getOperand(1), Op.getOperand(2));
+ SDValue MaskAsInt = DAG.getNode(ISD::SIGN_EXTEND, dl, ContainerVT, Mask);
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, MaskAsInt,
+ DAG.getVectorIdxConstant(0, dl));
+ }
case Intrinsic::aarch64_neon_saddlv:
case Intrinsic::aarch64_neon_uaddlv: {
EVT OpVT = Op.getOperand(1).getValueType();
@@ -19532,7 +19609,10 @@ static bool isPredicateCCSettingOp(SDValue N) {
N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilele ||
N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelo ||
N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilels ||
- N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelt)))
+ N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelt ||
+ // get_alias_lane_mask is lowered to a whilewr/rw instruction.
+ N.getConstantOperandVal(0) ==
+ Intrinsic::experimental_get_alias_lane_mask)))
return true;
return false;
@@ -27536,7 +27616,8 @@ void AArch64TargetLowering::ReplaceNodeResults(
DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, RuntimePStateSM));
return;
}
- case Intrinsic::experimental_vector_match: {
+ case Intrinsic::experimental_vector_match:
+ case Intrinsic::experimental_get_alias_lane_mask: {
if (!VT.isFixedLengthVector() || VT.getVectorElementType() != MVT::i1)
return;
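
With this lowering in place, a fixed-width query such as

    %mask = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1.i64.i64(i64 %a, i64 %b, i64 1, i1 1)

selects to a single whilewr p0.b, x0, x1 plus a predicate-to-vector move, as
the whilewr_8 test below checks.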
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 450e2efd7d430..a66ae33f7ea89 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -498,6 +498,9 @@ class AArch64TargetLowering : public TargetLowering {
bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const override;
+ bool shouldExpandGetAliasLaneMask(EVT VT, EVT PtrVT,
+ unsigned EltSize) const override;
+
bool
shouldExpandPartialReductionIntrinsic(const IntrinsicInst *I) const override;
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 91db6b6fc7984..78904997adbdc 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -167,6 +167,11 @@ def AArch64st1q_scatter : SDNode<"AArch64ISD::SST1Q_PRED", SDT_AArch64_SCATTER_V
// AArch64 SVE/SVE2 - the remaining node definitions
//
+// Alias masks
+def SDT_AArch64Mask : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisInt<1>, SDTCisSameAs<2, 1>, SDTCVecEltisVT<0,i1>]>;
+def AArch64whilewr : SDNode<"AArch64ISD::WHILEWR", SDT_AArch64Mask>;
+def AArch64whilerw : SDNode<"AArch64ISD::WHILERW", SDT_AArch64Mask>;
+
// SVE CNT/INC/RDVL
def sve_rdvl_imm : ComplexPattern<i64, 1, "SelectRDVLImm<-32, 31, 16>">;
def sve_cnth_imm : ComplexPattern<i64, 1, "SelectRDVLImm<1, 16, 8>">;
@@ -4058,9 +4063,9 @@ let Predicates = [HasSVE2_or_SME] in {
defm WHILEHI_PXX : sve_int_while8_rr<0b101, "whilehi", int_aarch64_sve_whilehi, get_active_lane_mask>;
// SVE2 pointer conflict compare
- defm WHILEWR_PXX : sve2_int_while_rr<0b0, "whilewr", "int_aarch64_sve_whilewr">;
- defm WHILERW_PXX : sve2_int_while_rr<0b1, "whilerw", "int_aarch64_sve_whilerw">;
-} // End HasSVE2_or_SME
+ defm WHILEWR_PXX : sve2_int_while_rr<0b0, "whilewr", AArch64whilewr>;
+ defm WHILERW_PXX : sve2_int_while_rr<0b1, "whilerw", AArch64whilerw>;
+} // End HasSVE2_or_SME
let Predicates = [HasSVEAES, HasNonStreamingSVE2_or_SSVE_AES] in {
// SVE2 crypto destructive binary operations
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index c56713783289e..bb840d4850816 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -5919,16 +5919,16 @@ class sve2_int_while_rr<bits<2> sz8_64, bits<1> rw, string asm,
let isWhile = 1;
}
-multiclass sve2_int_while_rr<bits<1> rw, string asm, string op> {
+multiclass sve2_int_while_rr<bits<1> rw, string asm, SDPatternOperator op> {
def _B : sve2_int_while_rr<0b00, rw, asm, PPR8>;
def _H : sve2_int_while_rr<0b01, rw, asm, PPR16>;
def _S : sve2_int_while_rr<0b10, rw, asm, PPR32>;
def _D : sve2_int_while_rr<0b11, rw, asm, PPR64>;
- def : SVE_2_Op_Pat<nxv16i1, !cast<SDPatternOperator>(op # _b), i64, i64, !cast<Instruction>(NAME # _B)>;
- def : SVE_2_Op_Pat<nxv8i1, !cast<SDPatternOperator>(op # _h), i64, i64, !cast<Instruction>(NAME # _H)>;
- def : SVE_2_Op_Pat<nxv4i1, !cast<SDPatternOperator>(op # _s), i64, i64, !cast<Instruction>(NAME # _S)>;
- def : SVE_2_Op_Pat<nxv2i1, !cast<SDPatternOperator>(op # _d), i64, i64, !cast<Instruction>(NAME # _D)>;
+ def : SVE_2_Op_Pat<nxv16i1, op, i64, i64, !cast<Instruction>(NAME # _B)>;
+ def : SVE_2_Op_Pat<nxv8i1, op, i64, i64, !cast<Instruction>(NAME # _H)>;
+ def : SVE_2_Op_Pat<nxv4i1, op, i64, i64, !cast<Instruction>(NAME # _S)>;
+ def : SVE_2_Op_Pat<nxv2i1, op, i64, i64, !cast<Instruction>(NAME # _D)>;
}
//===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/AArch64/alias_mask.ll b/llvm/test/CodeGen/AArch64/alias_mask.ll
new file mode 100644
index 0000000000000..84a22822f1702
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/alias_mask.ll
@@ -0,0 +1,421 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=aarch64 -mattr=+sve2 %s -o - | FileCheck %s --check-prefix=CHECK-SVE
+; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s --check-prefix=CHECK-NOSVE
+
+define <16 x i1> @whilewr_8(i64 %a, i64 %b) {
+; CHECK-SVE-LABEL: whilewr_8:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: whilewr p0.b, x0, x1
+; CHECK-SVE-NEXT: mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-SVE-NEXT: ret
+;
+; CHECK-NOSVE-LABEL: whilewr_8:
+; CHECK-NOSVE: // %bb.0: // %entry
+; CHECK-NOSVE-NEXT: adrp x8, .LCPI0_0
+; CHECK-NOSVE-NEXT: adrp x10, .LCPI0_1
+; CHECK-NOSVE-NEXT: sub x9, x1, x0
+; CHECK-NOSVE-NEXT: ldr q0, [x8, :lo12:.LCPI0_0]
+; CHECK-NOSVE-NEXT: adrp x8, .LCPI0_2
+; CHECK-NOSVE-NEXT: ldr q1, [x10, :lo12:.LCPI0_1]
+; CHECK-NOSVE-NEXT: ldr q3, [x8, :lo12:.LCPI0_2]
+; CHECK-NOSVE-NEXT: adrp x8, .LCPI0_4
+; CHECK-NOSVE-NEXT: adrp x10, .LCPI0_3
+; CHECK-NOSVE-NEXT: ldr q5, [x8, :lo12:.LCPI0_4]
+; CHECK-NOSVE-NEXT: adrp x8, .LCPI0_5
+; CHECK-NOSVE-NEXT: dup v2.2d, x9
+; CHECK-NOSVE-NEXT: ldr q4, [x10, :lo12:.LCPI0_3]
+; CHECK-NOSVE-NEXT: adrp x10, .LCPI0_6
+; CHECK-NOSVE-NEXT: ldr q6, [x8, :lo12:.LCPI0_5]
+; CHECK-NOSVE-NEXT: adrp x8, .LCPI0_7
+; CHECK-NOSVE-NEXT: ldr q7, [x10, :lo12:.LCPI0_6]
+; CHECK-NOSVE-NEXT: cmp x9, #1
+; CHECK-NOSVE-NEXT: ldr q16, [x8, :lo12:.LCPI0_7]
+; CHECK-NOSVE-NEXT: cmhi v0.2d, v2.2d, v0.2d
+; CHECK-NOSVE-NEXT: cmhi v1.2d, v2.2d, v1.2d
+; CHECK-NOSVE-NEXT: cmhi v3.2d, v2.2d, v3.2d
+; CHECK-NOSVE-NEXT: cmhi v4.2d, v2.2d, v4.2d
+; CHECK-NOSVE-NEXT: cmhi v5.2d, v2.2d, v5.2d
+; CHECK-NOSVE-NEXT: cmhi v6.2d, v2.2d, v6.2d
+; CHECK-NOSVE-NEXT: cmhi v7.2d, v2.2d, v7.2d
+; CHECK-NOSVE-NEXT: cmhi v2.2d, v2.2d, v16.2d
+; CHECK-NOSVE-NEXT: uzp1 v0.4s, v1.4s, v0.4s
+; CHECK-NOSVE-NEXT: cset w8, lt
+; CHECK-NOSVE-NEXT: uzp1 v1.4s, v4.4s, v3.4s
+; CHECK-NOSVE-NEXT: uzp1 v3.4s, v6.4s, v5.4s
+; CHECK-NOSVE-NEXT: uzp1 v2.4s, v2.4s, v7.4s
+; CHECK-NOSVE-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NOSVE-NEXT: uzp1 v1.8h, v2.8h, v3.8h
+; CHECK-NOSVE-NEXT: uzp1 v0.16b, v1.16b, v0.16b
+; CHECK-NOSVE-NEXT: dup v1.16b, w8
+; CHECK-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b
+; CHECK-NOSVE-NEXT: ret
+entry:
+ %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 1, i1 1)
+ ret <16 x i1> %0
+}
+
+define <8 x i1> @whilewr_16(i64 %a, i64 %b) {
+; CHECK-SVE-LABEL: whilewr_16:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: whilewr p0.b, x0, x1
+; CHECK-SVE-NEXT: mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT: // kill: def $d0 killed $d0 killed $z0
+; CHECK-SVE-NEXT: ret
+;
+; CHECK-NOSVE-LABEL: whilewr_16:
+; CHECK-NOSVE: // %bb.0: // %entry
+; CHECK-NOSVE-NEXT: sub x8, x1, x0
+; CHECK-NOSVE-NEXT: adrp x9, .LCPI1_0
+; CHECK-NOSVE-NEXT: adrp x10, .LCPI1_1
+; CHECK-NOSVE-NEXT: add x8, x8, x8, lsr #63
+; CHECK-NOSVE-NEXT: adrp x11, .LCPI1_2
+; CHECK-NOSVE-NEXT: ldr q1, [x9, :lo12:.LCPI1_0]
+; CHECK-NOSVE-NEXT: adrp x9, .LCPI1_3
+; CHECK-NOSVE-NEXT: ldr q2, [x10, :lo12:.LCPI1_1]
+; CHECK-NOSVE-NEXT: ldr q3, [x11, :lo12:.LCPI1_2]
+; CHECK-NOSVE-NEXT: asr x8, x8, #1
+; CHECK-NOSVE-NEXT: ldr q4, [x9, :lo12:.LCPI1_3]
+; CHECK-NOSVE-NEXT: dup v0.2d, x8
+; CHECK-NOSVE-NEXT: cmp x8, #1
+; CHECK-NOSVE-NEXT: cset w8, lt
+; CHECK-NOSVE-NEXT: cmhi v1.2d, v0.2d, v1.2d
+; CHECK-NOSVE-NEXT: cmhi v2.2d, v0.2d, v2.2d
+; CHECK-NOSVE-NEXT: cmhi v3.2d, v0.2d, v3.2d
+; CHECK-NOSVE-NEXT: cmhi v0.2d, v0.2d, v4.2d
+; CHECK-NOSVE-NEXT: uzp1 v1.4s, v2.4s, v1.4s
+; CHECK-NOSVE-NEXT: uzp1 v0.4s, v0.4s, v3.4s
+; CHECK-NOSVE-NEXT: uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-NOSVE-NEXT: dup v1.8b, w8
+; CHECK-NOSVE-NEXT: xtn v0.8b, v0.8h
+; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
+; CHECK-NOSVE-NEXT: ret
+entry:
+ %0 = call <8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 2, i1 1)
+ ret <8 x i1> %0
+}
+
+define <4 x i1> @whilewr_32(i64 %a, i64 %b) {
+; CHECK-SVE-LABEL: whilewr_32:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: whilewr p0.h, x0, x1
+; CHECK-SVE-NEXT: mov z0.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT: // kill: def $d0 killed $d0 killed $z0
+; CHECK-SVE-NEXT: ret
+;
+; CHECK-NOSVE-LABEL: whilewr_32:
+; CHECK-NOSVE: // %bb.0: // %entry
+; CHECK-NOSVE-NEXT: sub x9, x1, x0
+; CHECK-NOSVE-NEXT: adrp x8, .LCPI2_0
+; CHECK-NOSVE-NEXT: add x10, x9, #3
+; CHECK-NOSVE-NEXT: cmp x9, #0
+; CHECK-NOSVE-NEXT: ldr q1, [x8, :lo12:.LCPI2_0]
+; CHECK-NOSVE-NEXT: csel x9, x10, x9, lt
+; CHECK-NOSVE-NEXT: adrp x10, .LCPI2_1
+; CHECK-NOSVE-NEXT: asr x9, x9, #2
+; CHECK-NOSVE-NEXT: ldr q2, [x10, :lo12:.LCPI2_1]
+; CHECK-NOSVE-NEXT: dup v0.2d, x9
+; CHECK-NOSVE-NEXT: cmp x9, #1
+; CHECK-NOSVE-NEXT: cset w8, lt
+; CHECK-NOSVE-NEXT: cmhi v1.2d, v0.2d, v1.2d
+; CHECK-NOSVE-NEXT: cmhi v0.2d, v0.2d, v2.2d
+; CHECK-NOSVE-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-NOSVE-NEXT: dup v1.4h, w8
+; CHECK-NOSVE-NEXT: xtn v0.4h, v0.4s
+; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
+; CHECK-NOSVE-NEXT: ret
+entry:
+ %0 = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 4, i1 1)
+ ret <4 x i1> %0
+}
+
+define <2 x i1> @whilewr_64(i64 %a, i64 %b) {
+; CHECK-SVE-LABEL: whilewr_64:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: whilewr p0.s, x0, x1
+; CHECK-SVE-NEXT: mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT: // kill: def $d0 killed $d0 killed $z0
+; CHECK-SVE-NEXT: ret
+;
+; CHECK-NOSVE-LABEL: whilewr_64:
+; CHECK-NOSVE: // %bb.0: // %entry
+; CHECK-NOSVE-NEXT: sub x9, x1, x0
+; CHECK-NOSVE-NEXT: adrp x8, .LCPI3_0
+; CHECK-NOSVE-NEXT: add x10, x9, #7
+; CHECK-NOSVE-NEXT: cmp x9, #0
+; CHECK-NOSVE-NEXT: ldr q1, [x8, :lo12:.LCPI3_0]
+; CHECK-NOSVE-NEXT: csel x9, x10, x9, lt
+; CHECK-NOSVE-NEXT: asr x9, x9, #3
+; CHECK-NOSVE-NEXT: dup v0.2d, x9
+; CHECK-NOSVE-NEXT: cmp x9, #1
+; CHECK-NOSVE-NEXT: cset w8, lt
+; CHECK-NOSVE-NEXT: cmhi v0.2d, v0.2d, v1.2d
+; CHECK-NOSVE-NEXT: dup v1.2s, w8
+; CHECK-NOSVE-NEXT: xtn v0.2s, v0.2d
+; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
+; CHECK-NOSVE-NEXT: ret
+entry:
+ %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 8, i1 1)
+ ret <2 x i1> %0
+}
+
+define <16 x i1> @whilerw_8(i64 %a, i64 %b) {
+; CHECK-SVE-LABEL: whilerw_8:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: whilerw p0.b, x0, x1
+; CHECK-SVE-NEXT: mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-SVE-NEXT: ret
+;
+; CHECK-NOSVE-LABEL: whilerw_8:
+; CHECK-NOSVE: // %bb.0: // %entry
+; CHECK-NOSVE-NEXT: adrp x8, .LCPI4_0
+; CHECK-NOSVE-NEXT: subs x9, x1, x0
+; CHECK-NOSVE-NEXT: adrp x10, .LCPI4_1
+; CHECK-NOSVE-NEXT: ldr q0, [x8, :lo12:.LCPI4_0]
+; CHECK-NOSVE-NEXT: adrp x8, .LCPI4_2
+; CHECK-NOSVE-NEXT: cneg x9, x9, mi
+; CHECK-NOSVE-NEXT: ldr q2, [x8, :lo12:.LCPI4_2]
+; CHECK-NOSVE-NEXT: adrp x8, .LCPI4_3
+; CHECK-NOSVE-NEXT: ldr q1, [x10, :lo12:.LCPI4_1]
+; CHECK-NOSVE-NEXT: ldr q4, [x8, :lo12:.LCPI4_3]
+; CHECK-NOSVE-NEXT: adrp x8, .LCPI4_4
+; CHECK-NOSVE-NEXT: dup v3.2d, x9
+; CHECK-NOSVE-NEXT: ldr q5, [x8, :lo12:.LCPI4_4]
+; CHECK-NOSVE-NEXT: adrp x8, .LCPI4_5
+; CHECK-NOSVE-NEXT: adrp x10, .LCPI4_6
+; CHECK-NOSVE-NEXT: ldr q6, [x8, :lo12:.LCPI4_5]
+; CHECK-NOSVE-NEXT: adrp x8, .LCPI4_7
+; CHECK-NOSVE-NEXT: ldr q7, [x10, :lo12:.LCPI4_6]
+; CHECK-NOSVE-NEXT: ldr q16, [x8, :lo12:.LCPI4_7]
+; CHECK-NOSVE-NEXT: cmhi v0.2d, v3.2d, v0.2d
+; CHECK-NOSVE-NEXT: cmhi v1.2d, v3.2d, v1.2d
+; CHECK-NOSVE-NEXT: cmhi v2.2d, v3.2d, v2.2d
+; CHECK-NOSVE-NEXT: cmhi v4.2d, v3.2d, v4.2d
+; CHECK-NOSVE-NEXT: cmhi v5.2d, v3.2d, v5.2d
+; CHECK-NOSVE-NEXT: cmhi v6.2d, v3.2d, v6.2d
+; CHECK-NOSVE-NEXT: cmhi v7.2d, v3.2d, v7.2d
+; CHECK-NOSVE-NEXT: cmhi v3.2d, v3.2d, v16.2d
+; CHECK-NOSVE-NEXT: uzp1 v0.4s, v1.4s, v0.4s
+; CHECK-NOSVE-NEXT: cmp x9, #0
+; CHECK-NOSVE-NEXT: uzp1 v1.4s, v4.4s, v2.4s
+; CHECK-NOSVE-NEXT: cset w8, eq
+; CHECK-NOSVE-NEXT: uzp1 v2.4s, v6.4s, v5.4s
+; CHECK-NOSVE-NEXT: uzp1 v3.4s, v3.4s, v7.4s
+; CHECK-NOSVE-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NOSVE-NEXT: uzp1 v1.8h, v3.8h, v2.8h
+; CHECK-NOSVE-NEXT: uzp1 v0.16b, v1.16b, v0.16b
+; CHECK-NOSVE-NEXT: dup v1.16b, w8
+; CHECK-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b
+; CHECK-NOSVE-NEXT: ret
+entry:
+ %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 1, i1 0)
+ ret <16 x i1> %0
+}
+
+define <8 x i1> @whilerw_16(i64 %a, i64 %b) {
+; CHECK-SVE-LABEL: whilerw_16:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: whilerw p0.b, x0, x1
+; CHECK-SVE-NEXT: mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT: // kill: def $d0 killed $d0 killed $z0
+; CHECK-SVE-NEXT: ret
+;
+; CHECK-NOSVE-LABEL: whilerw_16:
+; CHECK-NOSVE: // %bb.0: // %entry
+; CHECK-NOSVE-NEXT: subs x8, x1, x0
+; CHECK-NOSVE-NEXT: adrp x9, .LCPI5_0
+; CHECK-NOSVE-NEXT: adrp x10, .LCPI5_1
+; CHECK-NOSVE-NEXT: cneg x8, x8, mi
+; CHECK-NOSVE-NEXT: adrp x11, .LCPI5_2
+; CHECK-NOSVE-NEXT: ldr q1, [x9, :lo12:.LCPI5_0]
+; CHECK-NOSVE-NEXT: add x8, x8, x8, lsr #63
+; CHECK-NOSVE-NEXT: adrp x9, .LCPI5_3
+; CHECK-NOSVE-NEXT: ldr q2, [x10, :lo12:.LCPI5_1]
+; CHECK-NOSVE-NEXT: ldr q3, [x11, :lo12:.LCPI5_2]
+; CHECK-NOSVE-NEXT: ldr q4, [x9, :lo12:.LCPI5_3]
+; CHECK-NOSVE-NEXT: asr x8, x8, #1
+; CHECK-NOSVE-NEXT: dup v0.2d, x8
+; CHECK-NOSVE-NEXT: cmp x8, #0
+; CHECK-NOSVE-NEXT: cset w8, eq
+; CHECK-NOSVE-NEXT: cmhi v1.2d, v0.2d, v1.2d
+; CHECK-NOSVE-NEXT: cmhi v2.2d, v0.2d, v2.2d
+; CHECK-NOSVE-NEXT: cmhi v3.2d, v0.2d, v3.2d
+; CHECK-NOSVE-NEXT: cmhi v0.2d, v0.2d, v4.2d
+; CHECK-NOSVE-NEXT: uzp1 v1.4s, v2.4s, v1.4s
+; CHECK-NOSVE-NEXT: uzp1 v0.4s, v0.4s, v3.4s
+; CHECK-NOSVE-NEXT: uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-NOSVE-NEXT: dup v1.8b, w8
+; CHECK-NOSVE-NEXT: xtn v0.8b, v0.8h
+; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
+; CHECK-NOSVE-NEXT: ret
+entry:
+ %0 = call <8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 2, i1 0)
+ ret <8 x i1> %0
+}
+
+define <4 x i1> @whilerw_32(i64 %a, i64 %b) {
+; CHECK-SVE-LABEL: whilerw_32:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: whilerw p0.h, x0, x1
+; CHECK-SVE-NEXT: mov z0.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT: // kill: def $d0 killed $d0 killed $z0
+; CHECK-SVE-NEXT: ret
+;
+; CHECK-NOSVE-LABEL: whilerw_32:
+; CHECK-NOSVE: // %bb.0: // %entry
+; CHECK-NOSVE-NEXT: subs x9, x1, x0
+; CHECK-NOSVE-NEXT: adrp x8, .LCPI6_0
+; CHECK-NOSVE-NEXT: cneg x9, x9, mi
+; CHECK-NOSVE-NEXT: ldr q1, [x8, :lo12:.LCPI6_0]
+; CHECK-NOSVE-NEXT: add x10, x9, #3
+; CHECK-NOSVE-NEXT: cmp x9, #0
+; CHECK-NOSVE-NEXT: csel x9, x10, x9, lt
+; CHECK-NOSVE-NEXT: adrp x10, .LCPI6_1
+; CHECK-NOSVE-NEXT: asr x9, x9, #2
+; CHECK-NOSVE-NEXT: ldr q2, [x10, :lo12:.LCPI6_1]
+; CHECK-NOSVE-NEXT: dup v0.2d, x9
+; CHECK-NOSVE-NEXT: cmp x9, #0
+; CHECK-NOSVE-NEXT: cset w8, eq
+; CHECK-NOSVE-NEXT: cmhi v1.2d, v0.2d, v1.2d
+; CHECK-NOSVE-NEXT: cmhi v0.2d, v0.2d, v2.2d
+; CHECK-NOSVE-NEXT: uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-NOSVE-NEXT: dup v1.4h, w8
+; CHECK-NOSVE-NEXT: xtn v0.4h, v0.4s
+; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
+; CHECK-NOSVE-NEXT: ret
+entry:
+ %0 = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 4, i1 0)
+ ret <4 x i1> %0
+}
+
+define <2 x i1> @whilerw_64(i64 %a, i64 %b) {
+; CHECK-SVE-LABEL: whilerw_64:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: whilerw p0.s, x0, x1
+; CHECK-SVE-NEXT: mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT: // kill: def $d0 killed $d0 killed $z0
+; CHECK-SVE-NEXT: ret
+;
+; CHECK-NOSVE-LABEL: whilerw_64:
+; CHECK-NOSVE: // %bb.0: // %entry
+; CHECK-NOSVE-NEXT: subs x9, x1, x0
+; CHECK-NOSVE-NEXT: adrp x8, .LCPI7_0
+; CHECK-NOSVE-NEXT: cneg x9, x9, mi
+; CHECK-NOSVE-NEXT: ldr q1, [x8, :lo12:.LCPI7_0]
+; CHECK-NOSVE-NEXT: add x10, x9, #7
+; CHECK-NOSVE-NEXT: cmp x9, #0
+; CHECK-NOSVE-NEXT: csel x9, x10, x9, lt
+; CHECK-NOSVE-NEXT: asr x9, x9, #3
+; CHECK-NOSVE-NEXT: dup v0.2d, x9
+; CHECK-NOSVE-NEXT: cmp x9, #0
+; CHECK-NOSVE-NEXT: cset w8, eq
+; CHECK-NOSVE-NEXT: cmhi v0.2d, v0.2d, v1.2d
+; CHECK-NOSVE-NEXT: dup v1.2s, w8
+; CHECK-NOSVE-NEXT: xtn v0.2s, v0.2d
+; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
+; CHECK-NOSVE-NEXT: ret
+entry:
+ %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 8, i1 0)
+ ret <2 x i1> %0
+}
+
+define <16 x i1> @not_whilewr_wrong_eltsize(i64 %a, i64 %b) {
+; CHECK-SVE-LABEL: not_whilewr_wrong_eltsize:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: sub x8, x1, x0
+; CHECK-SVE-NEXT: add x8, x8, x8, lsr #63
+; CHECK-SVE-NEXT: asr x8, x8, #1
+; CHECK-SVE-NEXT: cmp x8, #1
+; CHECK-SVE-NEXT: cset w9, lt
+; CHECK-SVE-NEXT: whilelo p0.b, #0, x8
+; CHECK-SVE-NEXT: dup v0.16b, w9
+; CHECK-SVE-NEXT: mov z1.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT: orr v0.16b, v1.16b, v0.16b
+; CHECK-SVE-NEXT: ret
+;
+; CHECK-NOSVE-LABEL: not_whilewr_wrong_eltsize:
+; CHECK-NOSVE: // %bb.0: // %entry
+; CHECK-NOSVE-NEXT: sub x8, x1, x0
+; CHECK-NOSVE-NEXT: adrp x9, .LCPI8_0
+; CHECK-NOSVE-NEXT: adrp x10, .LCPI8_1
+; CHECK-NOSVE-NEXT: add x8, x8, x8, lsr #63
+; CHECK-NOSVE-NEXT: ldr q0, [x9, :lo12:.LCPI8_0]
+; CHECK-NOSVE-NEXT: adrp x9, .LCPI8_2
+; CHECK-NOSVE-NEXT: ldr q2, [x9, :lo12:.LCPI8_2]
+; CHECK-NOSVE-NEXT: adrp x9, .LCPI8_4
+; CHECK-NOSVE-NEXT: ldr q1, [x10, :lo12:.LCPI8_1]
+; CHECK-NOSVE-NEXT: asr x8, x8, #1
+; CHECK-NOSVE-NEXT: adrp x10, .LCPI8_3
+; CHECK-NOSVE-NEXT: ldr q5, [x9, :lo12:.LCPI8_4]
+; CHECK-NOSVE-NEXT: adrp x9, .LCPI8_6
+; CHECK-NOSVE-NEXT: ldr q3, [x10, :lo12:.LCPI8_3]
+; CHECK-NOSVE-NEXT: adrp x10, .LCPI8_5
+; CHECK-NOSVE-NEXT: dup v4.2d, x8
+; CHECK-NOSVE-NEXT: ldr q7, [x9, :lo12:.LCPI8_6]
+; CHECK-NOSVE-NEXT: adrp x9, .LCPI8_7
+; CHECK-NOSVE-NEXT: ldr q6, [x10, :lo12:.LCPI8_5]
+; CHECK-NOSVE-NEXT: ldr q16, [x9, :lo12:.LCPI8_7]
+; CHECK-NOSVE-NEXT: cmp x8, #1
+; CHECK-NOSVE-NEXT: cset w8, lt
+; CHECK-NOSVE-NEXT: cmhi v0.2d, v4.2d, v0.2d
+; CHECK-NOSVE-NEXT: cmhi v1.2d, v4.2d, v1.2d
+; CHECK-NOSVE-NEXT: cmhi v2.2d, v4.2d, v2.2d
+; CHECK-NOSVE-NEXT: cmhi v3.2d, v4.2d, v3.2d
+; CHECK-NOSVE-NEXT: cmhi v5.2d, v4.2d, v5.2d
+; CHECK-NOSVE-NEXT: cmhi v6.2d, v4.2d, v6.2d
+; CHECK-NOSVE-NEXT: cmhi v7.2d, v4.2d, v7.2d
+; CHECK-NOSVE-NEXT: cmhi v4.2d, v4.2d, v16.2d
+; CHECK-NOSVE-NEXT: uzp1 v0.4s, v1.4s, v0.4s
+; CHECK-NOSVE-NEXT: uzp1 v1.4s, v3.4s, v2.4s
+; CHECK-NOSVE-NEXT: uzp1 v2.4s, v6.4s, v5.4s
+; CHECK-NOSVE-NEXT: uzp1 v3.4s, v4.4s, v7.4s
+; CHECK-NOSVE-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NOSVE-NEXT: uzp1 v1.8h, v3.8h, v2.8h
+; CHECK-NOSVE-NEXT: uzp1 v0.16b, v1.16b, v0.16b
+; CHECK-NOSVE-NEXT: dup v1.16b, w8
+; CHECK-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b
+; CHECK-NOSVE-NEXT: ret
+entry:
+ %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 2, i1 1)
+ ret <16 x i1> %0
+}
+
+define <2 x i1> @not_whilerw_ptr32(i32 %a, i32 %b) {
+; CHECK-SVE-LABEL: not_whilerw_ptr32:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: subs w8, w1, w0
+; CHECK-SVE-NEXT: cneg w8, w8, mi
+; CHECK-SVE-NEXT: add w9, w8, #7
+; CHECK-SVE-NEXT: cmp w8, #0
+; CHECK-SVE-NEXT: csel w8, w9, w8, lt
+; CHECK-SVE-NEXT: asr w8, w8, #3
+; CHECK-SVE-NEXT: cmp w8, #0
+; CHECK-SVE-NEXT: cset w9, eq
+; CHECK-SVE-NEXT: whilelo p0.s, #0, w8
+; CHECK-SVE-NEXT: dup v0.2s, w9
+; CHECK-SVE-NEXT: mov z1.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-SVE-NEXT: orr v0.8b, v1.8b, v0.8b
+; CHECK-SVE-NEXT: ret
+;
+; CHECK-NOSVE-LABEL: not_whilerw_ptr32:
+; CHECK-NOSVE: // %bb.0: // %entry
+; CHECK-NOSVE-NEXT: subs w9, w1, w0
+; CHECK-NOSVE-NEXT: adrp x8, .LCPI9_0
+; CHECK-NOSVE-NEXT: cneg w9, w9, mi
+; CHECK-NOSVE-NEXT: ldr d1, [x8, :lo12:.LCPI9_0]
+; CHECK-NOSVE-NEXT: add w10, w9, #7
+; CHECK-NOSVE-NEXT: cmp w9, #0
+; CHECK-NOSVE-NEXT: csel w9, w10, w9, lt
+; CHECK-NOSVE-NEXT: asr w9, w9, #3
+; CHECK-NOSVE-NEXT: dup v0.2s, w9
+; CHECK-NOSVE-NEXT: cmp w9, #0
+; CHECK-NOSVE-NEXT: cset w8, eq
+; CHECK-NOSVE-NEXT: dup v2.2s, w8
+; CHECK-NOSVE-NEXT: cmhi v0.2s, v0.2s, v1.2s
+; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v2.8b
+; CHECK-NOSVE-NEXT: ret
+entry:
+ %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i32.i32(i32 %a, i32 %b, i32 8, i1 0)
+ ret <2 x i1> %0
+}
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
new file mode 100644
index 0000000000000..be5ec8b2a82bf
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
@@ -0,0 +1,195 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=aarch64 -mattr=+sve2 %s -o - | FileCheck %s --check-prefix=CHECK-SVE2
+; RUN: llc -mtriple=aarch64 -mattr=+sve %s -o - | FileCheck %s --check-prefix=CHECK-SVE
+
+define <vscale x 16 x i1> @whilewr_8(i64 %a, i64 %b) {
+; CHECK-SVE2-LABEL: whilewr_8:
+; CHECK-SVE2: // %bb.0: // %entry
+; CHECK-SVE2-NEXT: whilewr p0.b, x0, x1
+; CHECK-SVE2-NEXT: ret
+;
+; CHECK-SVE-LABEL: whilewr_8:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: sub x8, x1, x0
+; CHECK-SVE-NEXT: cmp x8, #1
+; CHECK-SVE-NEXT: cset w9, lt
+; CHECK-SVE-NEXT: whilelo p0.b, #0, x8
+; CHECK-SVE-NEXT: sbfx x8, x9, #0, #1
+; CHECK-SVE-NEXT: whilelo p1.b, xzr, x8
+; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
+; CHECK-SVE-NEXT: ret
+entry:
+ %0 = call <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 1, i1 1)
+ ret <vscale x 16 x i1> %0
+}
+
+define <vscale x 8 x i1> @whilewr_16(i64 %a, i64 %b) {
+; CHECK-SVE2-LABEL: whilewr_16:
+; CHECK-SVE2: // %bb.0: // %entry
+; CHECK-SVE2-NEXT: whilewr p0.h, x0, x1
+; CHECK-SVE2-NEXT: ret
+;
+; CHECK-SVE-LABEL: whilewr_16:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: sub x8, x1, x0
+; CHECK-SVE-NEXT: add x8, x8, x8, lsr #63
+; CHECK-SVE-NEXT: asr x8, x8, #1
+; CHECK-SVE-NEXT: cmp x8, #1
+; CHECK-SVE-NEXT: cset w9, lt
+; CHECK-SVE-NEXT: whilelo p0.h, #0, x8
+; CHECK-SVE-NEXT: sbfx x8, x9, #0, #1
+; CHECK-SVE-NEXT: whilelo p1.h, xzr, x8
+; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
+; CHECK-SVE-NEXT: ret
+entry:
+ %0 = call <vscale x 8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 2, i1 1)
+ ret <vscale x 8 x i1> %0
+}
+
+define <vscale x 4 x i1> @whilewr_32(i64 %a, i64 %b) {
+; CHECK-SVE2-LABEL: whilewr_32:
+; CHECK-SVE2: // %bb.0: // %entry
+; CHECK-SVE2-NEXT: whilewr p0.s, x0, x1
+; CHECK-SVE2-NEXT: ret
+;
+; CHECK-SVE-LABEL: whilewr_32:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: sub x8, x1, x0
+; CHECK-SVE-NEXT: add x9, x8, #3
+; CHECK-SVE-NEXT: cmp x8, #0
+; CHECK-SVE-NEXT: csel x8, x9, x8, lt
+; CHECK-SVE-NEXT: asr x8, x8, #2
+; CHECK-SVE-NEXT: cmp x8, #1
+; CHECK-SVE-NEXT: cset w9, lt
+; CHECK-SVE-NEXT: whilelo p1.s, #0, x8
+; CHECK-SVE-NEXT: sbfx x9, x9, #0, #1
+; CHECK-SVE-NEXT: whilelo p0.s, xzr, x9
+; CHECK-SVE-NEXT: mov p0.b, p1/m, p1.b
+; CHECK-SVE-NEXT: ret
+entry:
+ %0 = call <vscale x 4 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 4, i1 1)
+ ret <vscale x 4 x i1> %0
+}
+
+define <vscale x 2 x i1> @whilewr_64(i64 %a, i64 %b) {
+; CHECK-SVE2-LABEL: whilewr_64:
+; CHECK-SVE2: // %bb.0: // %entry
+; CHECK-SVE2-NEXT: whilewr p0.d, x0, x1
+; CHECK-SVE2-NEXT: ret
+;
+; CHECK-SVE-LABEL: whilewr_64:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: sub x8, x1, x0
+; CHECK-SVE-NEXT: add x9, x8, #7
+; CHECK-SVE-NEXT: cmp x8, #0
+; CHECK-SVE-NEXT: csel x8, x9, x8, lt
+; CHECK-SVE-NEXT: asr x8, x8, #3
+; CHECK-SVE-NEXT: cmp x8, #1
+; CHECK-SVE-NEXT: cset w9, lt
+; CHECK-SVE-NEXT: whilelo p1.d, #0, x8
+; CHECK-SVE-NEXT: sbfx x9, x9, #0, #1
+; CHECK-SVE-NEXT: whilelo p0.d, xzr, x9
+; CHECK-SVE-NEXT: mov p0.b, p1/m, p1.b
+; CHECK-SVE-NEXT: ret
+entry:
+ %0 = call <vscale x 2 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 8, i1 1)
+ ret <vscale x 2 x i1> %0
+}
+
+define <vscale x 16 x i1> @whilerw_8(i64 %a, i64 %b) {
+; CHECK-SVE2-LABEL: whilerw_8:
+; CHECK-SVE2: // %bb.0: // %entry
+; CHECK-SVE2-NEXT: whilerw p0.b, x0, x1
+; CHECK-SVE2-NEXT: ret
+;
+; CHECK-SVE-LABEL: whilerw_8:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: subs x8, x1, x0
+; CHECK-SVE-NEXT: cneg x8, x8, mi
+; CHECK-SVE-NEXT: cmp x8, #0
+; CHECK-SVE-NEXT: cset w9, eq
+; CHECK-SVE-NEXT: whilelo p0.b, #0, x8
+; CHECK-SVE-NEXT: sbfx x8, x9, #0, #1
+; CHECK-SVE-NEXT: whilelo p1.b, xzr, x8
+; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
+; CHECK-SVE-NEXT: ret
+entry:
+ %0 = call <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 1, i1 0)
+ ret <vscale x 16 x i1> %0
+}
+
+define <vscale x 8 x i1> @whilerw_16(i64 %a, i64 %b) {
+; CHECK-SVE2-LABEL: whilerw_16:
+; CHECK-SVE2: // %bb.0: // %entry
+; CHECK-SVE2-NEXT: whilerw p0.h, x0, x1
+; CHECK-SVE2-NEXT: ret
+;
+; CHECK-SVE-LABEL: whilerw_16:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: subs x8, x1, x0
+; CHECK-SVE-NEXT: cneg x8, x8, mi
+; CHECK-SVE-NEXT: add x8, x8, x8, lsr #63
+; CHECK-SVE-NEXT: asr x8, x8, #1
+; CHECK-SVE-NEXT: cmp x8, #0
+; CHECK-SVE-NEXT: cset w9, eq
+; CHECK-SVE-NEXT: whilelo p0.h, #0, x8
+; CHECK-SVE-NEXT: sbfx x8, x9, #0, #1
+; CHECK-SVE-NEXT: whilelo p1.h, xzr, x8
+; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
+; CHECK-SVE-NEXT: ret
+entry:
+ %0 = call <vscale x 8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 2, i1 0)
+ ret <vscale x 8 x i1> %0
+}
+
+define <vscale x 4 x i1> @whilerw_32(i64 %a, i64 %b) {
+; CHECK-SVE2-LABEL: whilerw_32:
+; CHECK-SVE2: // %bb.0: // %entry
+; CHECK-SVE2-NEXT: whilerw p0.s, x0, x1
+; CHECK-SVE2-NEXT: ret
+;
+; CHECK-SVE-LABEL: whilerw_32:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: subs x8, x1, x0
+; CHECK-SVE-NEXT: cneg x8, x8, mi
+; CHECK-SVE-NEXT: add x9, x8, #3
+; CHECK-SVE-NEXT: cmp x8, #0
+; CHECK-SVE-NEXT: csel x8, x9, x8, lt
+; CHECK-SVE-NEXT: asr x8, x8, #2
+; CHECK-SVE-NEXT: cmp x8, #0
+; CHECK-SVE-NEXT: cset w9, eq
+; CHECK-SVE-NEXT: whilelo p1.s, #0, x8
+; CHECK-SVE-NEXT: sbfx x9, x9, #0, #1
+; CHECK-SVE-NEXT: whilelo p0.s, xzr, x9
+; CHECK-SVE-NEXT: mov p0.b, p1/m, p1.b
+; CHECK-SVE-NEXT: ret
+entry:
+ %0 = call <vscale x 4 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 4, i1 0)
+ ret <vscale x 4 x i1> %0
+}
+
+define <vscale x 2 x i1> @whilerw_64(i64 %a, i64 %b) {
+; CHECK-SVE2-LABEL: whilerw_64:
+; CHECK-SVE2: // %bb.0: // %entry
+; CHECK-SVE2-NEXT: whilerw p0.d, x0, x1
+; CHECK-SVE2-NEXT: ret
+;
+; CHECK-SVE-LABEL: whilerw_64:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: subs x8, x1, x0
+; CHECK-SVE-NEXT: cneg x8, x8, mi
+; CHECK-SVE-NEXT: add x9, x8, #7
+; CHECK-SVE-NEXT: cmp x8, #0
+; CHECK-SVE-NEXT: csel x8, x9, x8, lt
+; CHECK-SVE-NEXT: asr x8, x8, #3
+; CHECK-SVE-NEXT: cmp x8, #0
+; CHECK-SVE-NEXT: cset w9, eq
+; CHECK-SVE-NEXT: whilelo p1.d, #0, x8
+; CHECK-SVE-NEXT: sbfx x9, x9, #0, #1
+; CHECK-SVE-NEXT: whilelo p0.d, xzr, x9
+; CHECK-SVE-NEXT: mov p0.b, p1/m, p1.b
+; CHECK-SVE-NEXT: ret
+entry:
+ %0 = call <vscale x 2 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 8, i1 0)
+ ret <vscale x 2 x i1> %0
+}
>From 732398188a890e79197c61ed9c338310246f9a49 Mon Sep 17 00:00:00 2001
From: Sam Tebbs <samuel.tebbs at arm.com>
Date: Fri, 10 Jan 2025 11:37:37 +0000
Subject: [PATCH 02/25] Rework lowering location
---
llvm/include/llvm/CodeGen/ISDOpcodes.h | 5 +
.../SelectionDAG/LegalizeIntegerTypes.cpp | 22 ++
llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h | 2 +
.../SelectionDAG/LegalizeVectorOps.cpp | 41 ++++
.../SelectionDAG/SelectionDAGBuilder.cpp | 53 +----
.../SelectionDAG/SelectionDAGDumper.cpp | 2 +
llvm/lib/CodeGen/TargetLoweringBase.cpp | 3 +
.../Target/AArch64/AArch64ISelLowering.cpp | 96 +++++++-
llvm/lib/Target/AArch64/AArch64ISelLowering.h | 1 +
llvm/test/CodeGen/AArch64/alias_mask.ll | 120 ++--------
.../CodeGen/AArch64/alias_mask_scalable.ll | 210 ++++++++++++++----
11 files changed, 358 insertions(+), 197 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index 47a1aec3da06a..aa9f75343f0a2 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -1556,6 +1556,11 @@ enum NodeType {
// bits conform to getBooleanContents similar to the SETCC operator.
GET_ACTIVE_LANE_MASK,
+ // The `llvm.experimental.get.alias.lane.mask.*` intrinsics
+ // Operands: Load pointer, Store pointer, Element size, Write after read
+ // Output: Mask
+ EXPERIMENTAL_ALIAS_LANE_MASK,
+
// llvm.clear_cache intrinsic
// Operands: Input Chain, Start Address, End Address
// Outputs: Output Chain
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 2bcca91f6f81a..82fa70c578f34 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -55,6 +55,9 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
N->dump(&DAG); dbgs() << "\n";
#endif
report_fatal_error("Do not know how to promote this operator!");
+ case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
+ Res = PromoteIntRes_EXPERIMENTAL_ALIAS_LANE_MASK(N);
+ break;
case ISD::MERGE_VALUES:Res = PromoteIntRes_MERGE_VALUES(N, ResNo); break;
case ISD::AssertSext: Res = PromoteIntRes_AssertSext(N); break;
case ISD::AssertZext: Res = PromoteIntRes_AssertZext(N); break;
@@ -369,6 +372,14 @@ SDValue DAGTypeLegalizer::PromoteIntRes_MERGE_VALUES(SDNode *N,
return GetPromotedInteger(Op);
}
+SDValue
+DAGTypeLegalizer::PromoteIntRes_EXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N) {
+ EVT VT = N->getValueType(0);
+ EVT NewVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
+ return DAG.getNode(ISD::EXPERIMENTAL_ALIAS_LANE_MASK, SDLoc(N), NewVT,
+ N->ops());
+}
+
SDValue DAGTypeLegalizer::PromoteIntRes_AssertSext(SDNode *N) {
// Sign-extend the new bits, and continue the assertion.
SDValue Op = SExtPromotedInteger(N->getOperand(0));
@@ -2095,6 +2106,9 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
case ISD::PARTIAL_REDUCE_SMLA:
Res = PromoteIntOp_PARTIAL_REDUCE_MLA(N);
break;
+ case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
+ Res = DAGTypeLegalizer::PromoteIntOp_EXPERIMENTAL_ALIAS_LANE_MASK(N, OpNo);
+ break;
}
// If the result is null, the sub-method took care of registering results etc.
@@ -2896,6 +2910,14 @@ SDValue DAGTypeLegalizer::PromoteIntOp_PARTIAL_REDUCE_MLA(SDNode *N) {
return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
}
+SDValue
+DAGTypeLegalizer::PromoteIntOp_EXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N,
+ unsigned OpNo) {
+ SmallVector<SDValue, 4> NewOps(N->ops());
+ NewOps[OpNo] = GetPromotedInteger(N->getOperand(OpNo));
+ return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
+}
+
//===----------------------------------------------------------------------===//
// Integer Result Expansion
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index dd9af47da5287..5f81daa431e6e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -381,6 +381,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
SDValue PromoteIntRes_VECTOR_FIND_LAST_ACTIVE(SDNode *N);
SDValue PromoteIntRes_GET_ACTIVE_LANE_MASK(SDNode *N);
SDValue PromoteIntRes_PARTIAL_REDUCE_MLA(SDNode *N);
+ SDValue PromoteIntRes_EXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N);
// Integer Operand Promotion.
bool PromoteIntegerOperand(SDNode *N, unsigned OpNo);
@@ -434,6 +435,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
SDValue PromoteIntOp_VECTOR_FIND_LAST_ACTIVE(SDNode *N, unsigned OpNo);
SDValue PromoteIntOp_GET_ACTIVE_LANE_MASK(SDNode *N);
SDValue PromoteIntOp_PARTIAL_REDUCE_MLA(SDNode *N);
+ SDValue PromoteIntOp_EXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N, unsigned OpNo);
void SExtOrZExtPromotedOperands(SDValue &LHS, SDValue &RHS);
void PromoteSetCCOperands(SDValue &LHS,SDValue &RHS, ISD::CondCode Code);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 910a40e5b5141..fb8eb0f423fa3 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -138,6 +138,7 @@ class VectorLegalizer {
SDValue ExpandVP_FNEG(SDNode *Node);
SDValue ExpandVP_FABS(SDNode *Node);
SDValue ExpandVP_FCOPYSIGN(SDNode *Node);
+ SDValue ExpandEXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N);
SDValue ExpandSELECT(SDNode *Node);
std::pair<SDValue, SDValue> ExpandLoad(SDNode *N);
SDValue ExpandStore(SDNode *N);
@@ -469,6 +470,7 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
case ISD::VECTOR_COMPRESS:
case ISD::SCMP:
case ISD::UCMP:
+ case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
break;
case ISD::SMULFIX:
@@ -1262,6 +1264,9 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
case ISD::UCMP:
Results.push_back(TLI.expandCMP(Node, DAG));
return;
+ case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
+ Results.push_back(ExpandEXPERIMENTAL_ALIAS_LANE_MASK(Node));
+ return;
case ISD::FADD:
case ISD::FMUL:
@@ -1767,6 +1772,42 @@ SDValue VectorLegalizer::ExpandVP_FCOPYSIGN(SDNode *Node) {
return DAG.getNode(ISD::BITCAST, DL, VT, CopiedSign);
}
+SDValue VectorLegalizer::ExpandEXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N) {
+ SDLoc DL(N);
+ SDValue SourceValue = N->getOperand(0);
+ SDValue SinkValue = N->getOperand(1);
+ SDValue EltSize = N->getOperand(2);
+
+ bool IsWriteAfterRead =
+ cast<ConstantSDNode>(N->getOperand(3))->getZExtValue() != 0;
+ auto VT = N->getValueType(0);
+ auto PtrVT = SourceValue->getValueType(0);
+
+ SDValue Diff = DAG.getNode(ISD::SUB, DL, PtrVT, SinkValue, SourceValue);
+ if (!IsWriteAfterRead)
+ Diff = DAG.getNode(ISD::ABS, DL, PtrVT, Diff);
+
+ Diff = DAG.getNode(ISD::SDIV, DL, PtrVT, Diff, EltSize);
+ SDValue Zero = DAG.getTargetConstant(0, DL, PtrVT);
+
+ // If the difference is positive then some elements may alias
+ auto CmpVT = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
+ Diff.getValueType());
+ SDValue Cmp = DAG.getSetCC(DL, CmpVT, Diff, Zero,
+ IsWriteAfterRead ? ISD::SETLE : ISD::SETEQ);
+
+ EVT SplatTY =
+ EVT::getVectorVT(*DAG.getContext(), PtrVT, VT.getVectorElementCount());
+ SDValue DiffSplat = DAG.getSplat(SplatTY, DL, Diff);
+ SDValue VectorStep = DAG.getStepVector(DL, SplatTY);
+ SDValue DiffMask =
+ DAG.getSetCC(DL, VT, VectorStep, DiffSplat, ISD::CondCode::SETULT);
+
+ // Splat the compare result then OR it with a lane mask
+ SDValue Splat = DAG.getSplat(VT, DL, Cmp);
+ return DAG.getNode(ISD::OR, DL, VT, DiffMask, Splat);
+}
+
void VectorLegalizer::ExpandFP_TO_UINT(SDNode *Node,
SmallVectorImpl<SDValue> &Results) {
// Attempt to expand using TargetLowering.
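
For the read-after-write case (%writeAfterRead == 0), the expansion above
instead takes the absolute element distance, and only a distance of zero
enables every lane; roughly, in IR terms:

    %diff.bytes = sub i64 %ptrB, %ptrA
    %dist = call i64 @llvm.abs.i64(i64 %diff.bytes, i1 false)
    %diff = sdiv i64 %dist, 4
    ; Only a zero element distance forces all lanes on in this case.
    %allsafe = icmp eq i64 %diff, 0
    ; ... then splat %allsafe and %diff, compare the step vector ult
    ; %diff.splat, and or the two masks, as in the write-after-read sketch
    ; given earlier in this mail.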
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index a62c6834e8fed..f88c7ddb0c867 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -8245,54 +8245,13 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
return;
}
case Intrinsic::experimental_get_alias_lane_mask: {
- SDValue SourceValue = getValue(I.getOperand(0));
- SDValue SinkValue = getValue(I.getOperand(1));
- SDValue EltSize = getValue(I.getOperand(2));
- bool IsWriteAfterRead =
- cast<ConstantSDNode>(getValue(I.getOperand(3)))->getZExtValue() != 0;
auto IntrinsicVT = EVT::getEVT(I.getType());
- auto PtrVT = SourceValue->getValueType(0);
-
- if (!TLI.shouldExpandGetAliasLaneMask(
- IntrinsicVT, PtrVT,
- cast<ConstantSDNode>(EltSize)->getSExtValue())) {
- visitTargetIntrinsic(I, Intrinsic);
- return;
- }
-
- SDValue Diff = DAG.getNode(ISD::SUB, sdl, PtrVT, SinkValue, SourceValue);
- if (!IsWriteAfterRead)
- Diff = DAG.getNode(ISD::ABS, sdl, PtrVT, Diff);
-
- Diff = DAG.getNode(ISD::SDIV, sdl, PtrVT, Diff, EltSize);
- SDValue Zero = DAG.getTargetConstant(0, sdl, PtrVT);
-
- // If the difference is positive then some elements may alias
- auto CmpVT =
- TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), PtrVT);
- SDValue Cmp = DAG.getSetCC(sdl, CmpVT, Diff, Zero,
- IsWriteAfterRead ? ISD::SETLE : ISD::SETEQ);
-
- // Splat the compare result then OR it with a lane mask
- SDValue Splat = DAG.getSplat(IntrinsicVT, sdl, Cmp);
-
- SDValue DiffMask;
- // Don't emit an active lane mask if the target doesn't support it
- if (TLI.shouldExpandGetActiveLaneMask(IntrinsicVT, PtrVT)) {
- EVT VecTy = EVT::getVectorVT(*DAG.getContext(), PtrVT,
- IntrinsicVT.getVectorElementCount());
- SDValue DiffSplat = DAG.getSplat(VecTy, sdl, Diff);
- SDValue VectorStep = DAG.getStepVector(sdl, VecTy);
- DiffMask = DAG.getSetCC(sdl, IntrinsicVT, VectorStep, DiffSplat,
- ISD::CondCode::SETULT);
- } else {
- DiffMask = DAG.getNode(
- ISD::INTRINSIC_WO_CHAIN, sdl, IntrinsicVT,
- DAG.getTargetConstant(Intrinsic::get_active_lane_mask, sdl, MVT::i64),
- Zero, Diff);
- }
- SDValue Or = DAG.getNode(ISD::OR, sdl, IntrinsicVT, DiffMask, Splat);
- setValue(&I, Or);
+ SmallVector<SDValue, 4> Ops;
+ for (auto &Op : I.operands())
+ Ops.push_back(getValue(Op));
+ SDValue Mask =
+ DAG.getNode(ISD::EXPERIMENTAL_ALIAS_LANE_MASK, sdl, IntrinsicVT, Ops);
+ setValue(&I, Mask);
}
}
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index 539f583ea361f..f04a007ddb1ea 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -585,6 +585,8 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
return "partial_reduce_umla";
case ISD::PARTIAL_REDUCE_SMLA:
return "partial_reduce_smla";
+ case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
+ return "alias_mask";
// Vector Predication
#define BEGIN_REGISTER_VP_SDNODE(SDID, LEGALARG, NAME, ...) \
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 935afaf9dd550..2ab7a85662211 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -839,6 +839,9 @@ void TargetLoweringBase::initActions() {
// Masked vector extracts default to expand.
setOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE, VT, Expand);
+ // Aliasing lane masks default to expand.
+ setOperationAction(ISD::EXPERIMENTAL_ALIAS_LANE_MASK, VT, Expand);
+
// FP environment operations default to expand.
setOperationAction(ISD::GET_FPENV, VT, Expand);
setOperationAction(ISD::SET_FPENV, VT, Expand);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index f2e4c87a73079..800c3a97294a6 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1899,6 +1899,15 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
}
}
+ // Handle the non-aliasing lanes mask
+ if (Subtarget->hasSVE2() ||
+ (Subtarget->hasSME() && Subtarget->isStreaming())) {
+ for (auto VT : {MVT::v2i32, MVT::v4i16, MVT::v8i8, MVT::v16i8, MVT::nxv2i1,
+ MVT::nxv4i1, MVT::nxv8i1, MVT::nxv16i1}) {
+ setOperationAction(ISD::EXPERIMENTAL_ALIAS_LANE_MASK, VT, Custom);
+ }
+ }
+
// Handle operations that are only available in non-streaming SVE mode.
if (Subtarget->isSVEAvailable()) {
for (auto VT : {MVT::nxv16i8, MVT::nxv8i16, MVT::nxv4i32, MVT::nxv2i64,
@@ -5103,6 +5112,65 @@ SDValue AArch64TargetLowering::LowerFSINCOS(SDValue Op,
static MVT getSVEContainerType(EVT ContentTy);
+SDValue AArch64TargetLowering::LowerALIAS_LANE_MASK(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ unsigned IntrinsicID = 0;
+ uint64_t EltSize = Op.getOperand(2)->getAsZExtVal();
+ bool IsWriteAfterRead = Op.getOperand(3)->getAsZExtVal() == 1;
+ EVT VT = Op.getValueType();
+ MVT SimpleVT = VT.getSimpleVT();
+ // Make sure that the promoted mask size and element size match
+ switch (EltSize) {
+ case 1:
+ assert((SimpleVT == MVT::v16i8 || SimpleVT == MVT::nxv16i1) &&
+ "Unexpected mask or element size");
+ IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_b
+ : Intrinsic::aarch64_sve_whilerw_b;
+ break;
+ case 2:
+ assert((SimpleVT == MVT::v8i8 || SimpleVT == MVT::nxv8i1) &&
+ "Unexpected mask or element size");
+ IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_h
+ : Intrinsic::aarch64_sve_whilerw_h;
+ break;
+ case 4:
+ assert((SimpleVT == MVT::v4i16 || SimpleVT == MVT::nxv4i1) &&
+ "Unexpected mask or element size");
+ IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_s
+ : Intrinsic::aarch64_sve_whilerw_s;
+ break;
+ case 8:
+ assert((SimpleVT == MVT::v2i32 || SimpleVT == MVT::nxv2i1) &&
+ "Unexpected mask or element size");
+ IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_d
+ : Intrinsic::aarch64_sve_whilerw_d;
+ break;
+ default:
+ llvm_unreachable("Unexpected element size for get.alias.lane.mask");
+ break;
+ }
+ SDValue ID = DAG.getTargetConstant(IntrinsicID, DL, MVT::i64);
+
+ if (VT.isScalableVector())
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, ID, Op.getOperand(0),
+ Op.getOperand(1));
+
+ // We can use the SVE whilewr/whilerw instruction to lower this
+ // intrinsic by creating the appropriate sequence of scalable vector
+ // operations and then extracting a fixed-width subvector from the scalable
+ // vector.
+
+ EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
+ EVT WhileVT = ContainerVT.changeElementType(MVT::i1);
+
+ SDValue Mask = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, WhileVT, ID,
+ Op.getOperand(0), Op.getOperand(1));
+ SDValue MaskAsInt = DAG.getNode(ISD::SIGN_EXTEND, DL, ContainerVT, Mask);
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, MaskAsInt,
+ DAG.getVectorIdxConstant(0, DL));
+}
+
SDValue AArch64TargetLowering::LowerBITCAST(SDValue Op,
SelectionDAG &DAG) const {
EVT OpVT = Op.getValueType();
@@ -7257,6 +7325,8 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
default:
llvm_unreachable("unimplemented operand");
return SDValue();
+ case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
+ return LowerALIAS_LANE_MASK(Op, DAG);
case ISD::BITCAST:
return LowerBITCAST(Op, DAG);
case ISD::GlobalAddress:
@@ -19598,9 +19668,10 @@ static SDValue getPTest(SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op,
AArch64CC::CondCode Cond);
static bool isPredicateCCSettingOp(SDValue N) {
- if ((N.getOpcode() == ISD::SETCC) ||
+ if ((N.getOpcode() == ISD::SETCC ||
// get_active_lane_mask is lowered to a whilelo instruction.
- (N.getOpcode() == ISD::GET_ACTIVE_LANE_MASK) ||
+ N.getOpcode() == ISD::GET_ACTIVE_LANE_MASK ||
+ N.getOpcode() == ISD::EXPERIMENTAL_ALIAS_LANE_MASK) ||
(N.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
(N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilege ||
N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilegt ||
@@ -19609,10 +19680,7 @@ static bool isPredicateCCSettingOp(SDValue N) {
N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilele ||
N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelo ||
N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilels ||
- N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelt ||
- // get_alias_lane_mask is lowered to a whilewr/rw instruction.
- N.getConstantOperandVal(0) ==
- Intrinsic::experimental_get_alias_lane_mask)))
+ N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelt)))
return true;
return false;
@@ -27561,6 +27629,22 @@ void AArch64TargetLowering::ReplaceNodeResults(
// CONCAT_VECTORS -- but delegate to common code for result type
// legalisation
return;
+ case ISD::EXPERIMENTAL_ALIAS_LANE_MASK: {
+ EVT VT = N->getValueType(0);
+ if (!VT.isFixedLengthVector() || VT.getVectorElementType() != MVT::i1)
+ return;
+
+ // NOTE: Only trivial type promotion is supported.
+ EVT NewVT = getTypeToTransformTo(*DAG.getContext(), VT);
+ if (NewVT.getVectorNumElements() != VT.getVectorNumElements())
+ return;
+
+ SDLoc DL(N);
+ auto V =
+ DAG.getNode(ISD::EXPERIMENTAL_ALIAS_LANE_MASK, DL, NewVT, N->ops());
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
+ return;
+ }
case ISD::INTRINSIC_WO_CHAIN: {
EVT VT = N->getValueType(0);
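The fixed-length path above relies on the mask reaching LowerALIAS_LANE_MASK with its promoted integer type, which is what the v16i8/v8i8/v4i16/v2i32 asserts encode; scalable masks map directly onto the SVE2 intrinsics. A minimal sketch of the direct case (intrinsic mangling follows the LangRef declarations; the expected instruction is taken from the whilewr_8 CHECK lines in alias_mask_scalable.ll):

    define <vscale x 16 x i1> @sketch(i64 %a, i64 %b) {
    entry:
      ; With +sve2 this selects to a single "whilewr p0.b, x0, x1".
      %m = call <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.nxv16i1.i64.i64(i64 %a, i64 %b, i64 1, i1 1)
      ret <vscale x 16 x i1> %m
    }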
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index a66ae33f7ea89..dfab9a3646037 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -713,6 +713,7 @@ class AArch64TargetLowering : public TargetLowering {
SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerALIAS_LANE_MASK(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVSCALE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
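The ReplaceNodeResults case above only handles trivial result promotion, where the promoted type keeps the lane count. A dump-style sketch of the rewrite it performs, assuming a v4i1 mask promotes to v4i16 on AArch64 (node names and value numbers are illustrative):

    ; Before: the illegal v4i1 result is requested directly.
    ;   t4: v4i1 = alias_lane_mask t2, t3, Constant:i64<4>, Constant:i1<1>
    ; After: the node is recreated with the promoted type and truncated back.
    ;   t5: v4i16 = alias_lane_mask t2, t3, Constant:i64<4>, Constant:i1<1>
    ;   t6: v4i1 = truncate t5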
diff --git a/llvm/test/CodeGen/AArch64/alias_mask.ll b/llvm/test/CodeGen/AArch64/alias_mask.ll
index 84a22822f1702..9b344f03da077 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask.ll
@@ -48,10 +48,12 @@ define <16 x i1> @whilewr_8(i64 %a, i64 %b) {
; CHECK-NOSVE-NEXT: uzp1 v1.8h, v2.8h, v3.8h
; CHECK-NOSVE-NEXT: uzp1 v0.16b, v1.16b, v0.16b
; CHECK-NOSVE-NEXT: dup v1.16b, w8
+; CHECK-NOSVE-NEXT: shl v0.16b, v0.16b, #7
+; CHECK-NOSVE-NEXT: cmlt v0.16b, v0.16b, #0
; CHECK-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 1, i1 1)
+ %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1.i64.i64(i64 %a, i64 %b, i64 1, i1 1)
ret <16 x i1> %0
}
@@ -88,6 +90,8 @@ define <8 x i1> @whilewr_16(i64 %a, i64 %b) {
; CHECK-NOSVE-NEXT: uzp1 v0.8h, v0.8h, v1.8h
; CHECK-NOSVE-NEXT: dup v1.8b, w8
; CHECK-NOSVE-NEXT: xtn v0.8b, v0.8h
+; CHECK-NOSVE-NEXT: shl v0.8b, v0.8b, #7
+; CHECK-NOSVE-NEXT: cmlt v0.8b, v0.8b, #0
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
@@ -125,7 +129,7 @@ define <4 x i1> @whilewr_32(i64 %a, i64 %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 4, i1 1)
+ %0 = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i64(i64 %a, i64 %b, i64 4, i1 1)
ret <4 x i1> %0
}
@@ -155,7 +159,7 @@ define <2 x i1> @whilewr_64(i64 %a, i64 %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 8, i1 1)
+ %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1.i64.i64(i64 %a, i64 %b, i64 8, i1 1)
ret <2 x i1> %0
}
@@ -206,10 +210,12 @@ define <16 x i1> @whilerw_8(i64 %a, i64 %b) {
; CHECK-NOSVE-NEXT: uzp1 v1.8h, v3.8h, v2.8h
; CHECK-NOSVE-NEXT: uzp1 v0.16b, v1.16b, v0.16b
; CHECK-NOSVE-NEXT: dup v1.16b, w8
+; CHECK-NOSVE-NEXT: shl v0.16b, v0.16b, #7
+; CHECK-NOSVE-NEXT: cmlt v0.16b, v0.16b, #0
; CHECK-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 1, i1 0)
+ %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1.i64.i64(i64 %a, i64 %b, i64 1, i1 0)
ret <16 x i1> %0
}
@@ -247,6 +253,8 @@ define <8 x i1> @whilerw_16(i64 %a, i64 %b) {
; CHECK-NOSVE-NEXT: uzp1 v0.8h, v0.8h, v1.8h
; CHECK-NOSVE-NEXT: dup v1.8b, w8
; CHECK-NOSVE-NEXT: xtn v0.8b, v0.8h
+; CHECK-NOSVE-NEXT: shl v0.8b, v0.8b, #7
+; CHECK-NOSVE-NEXT: cmlt v0.8b, v0.8b, #0
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
@@ -285,7 +293,7 @@ define <4 x i1> @whilerw_32(i64 %a, i64 %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 4, i1 0)
+ %0 = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i64(i64 %a, i64 %b, i64 4, i1 0)
ret <4 x i1> %0
}
@@ -316,106 +324,6 @@ define <2 x i1> @whilerw_64(i64 %a, i64 %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 8, i1 0)
- ret <2 x i1> %0
-}
-
-define <16 x i1> @not_whilewr_wrong_eltsize(i64 %a, i64 %b) {
-; CHECK-SVE-LABEL: not_whilewr_wrong_eltsize:
-; CHECK-SVE: // %bb.0: // %entry
-; CHECK-SVE-NEXT: sub x8, x1, x0
-; CHECK-SVE-NEXT: add x8, x8, x8, lsr #63
-; CHECK-SVE-NEXT: asr x8, x8, #1
-; CHECK-SVE-NEXT: cmp x8, #1
-; CHECK-SVE-NEXT: cset w9, lt
-; CHECK-SVE-NEXT: whilelo p0.b, #0, x8
-; CHECK-SVE-NEXT: dup v0.16b, w9
-; CHECK-SVE-NEXT: mov z1.b, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-SVE-NEXT: orr v0.16b, v1.16b, v0.16b
-; CHECK-SVE-NEXT: ret
-;
-; CHECK-NOSVE-LABEL: not_whilewr_wrong_eltsize:
-; CHECK-NOSVE: // %bb.0: // %entry
-; CHECK-NOSVE-NEXT: sub x8, x1, x0
-; CHECK-NOSVE-NEXT: adrp x9, .LCPI8_0
-; CHECK-NOSVE-NEXT: adrp x10, .LCPI8_1
-; CHECK-NOSVE-NEXT: add x8, x8, x8, lsr #63
-; CHECK-NOSVE-NEXT: ldr q0, [x9, :lo12:.LCPI8_0]
-; CHECK-NOSVE-NEXT: adrp x9, .LCPI8_2
-; CHECK-NOSVE-NEXT: ldr q2, [x9, :lo12:.LCPI8_2]
-; CHECK-NOSVE-NEXT: adrp x9, .LCPI8_4
-; CHECK-NOSVE-NEXT: ldr q1, [x10, :lo12:.LCPI8_1]
-; CHECK-NOSVE-NEXT: asr x8, x8, #1
-; CHECK-NOSVE-NEXT: adrp x10, .LCPI8_3
-; CHECK-NOSVE-NEXT: ldr q5, [x9, :lo12:.LCPI8_4]
-; CHECK-NOSVE-NEXT: adrp x9, .LCPI8_6
-; CHECK-NOSVE-NEXT: ldr q3, [x10, :lo12:.LCPI8_3]
-; CHECK-NOSVE-NEXT: adrp x10, .LCPI8_5
-; CHECK-NOSVE-NEXT: dup v4.2d, x8
-; CHECK-NOSVE-NEXT: ldr q7, [x9, :lo12:.LCPI8_6]
-; CHECK-NOSVE-NEXT: adrp x9, .LCPI8_7
-; CHECK-NOSVE-NEXT: ldr q6, [x10, :lo12:.LCPI8_5]
-; CHECK-NOSVE-NEXT: ldr q16, [x9, :lo12:.LCPI8_7]
-; CHECK-NOSVE-NEXT: cmp x8, #1
-; CHECK-NOSVE-NEXT: cset w8, lt
-; CHECK-NOSVE-NEXT: cmhi v0.2d, v4.2d, v0.2d
-; CHECK-NOSVE-NEXT: cmhi v1.2d, v4.2d, v1.2d
-; CHECK-NOSVE-NEXT: cmhi v2.2d, v4.2d, v2.2d
-; CHECK-NOSVE-NEXT: cmhi v3.2d, v4.2d, v3.2d
-; CHECK-NOSVE-NEXT: cmhi v5.2d, v4.2d, v5.2d
-; CHECK-NOSVE-NEXT: cmhi v6.2d, v4.2d, v6.2d
-; CHECK-NOSVE-NEXT: cmhi v7.2d, v4.2d, v7.2d
-; CHECK-NOSVE-NEXT: cmhi v4.2d, v4.2d, v16.2d
-; CHECK-NOSVE-NEXT: uzp1 v0.4s, v1.4s, v0.4s
-; CHECK-NOSVE-NEXT: uzp1 v1.4s, v3.4s, v2.4s
-; CHECK-NOSVE-NEXT: uzp1 v2.4s, v6.4s, v5.4s
-; CHECK-NOSVE-NEXT: uzp1 v3.4s, v4.4s, v7.4s
-; CHECK-NOSVE-NEXT: uzp1 v0.8h, v1.8h, v0.8h
-; CHECK-NOSVE-NEXT: uzp1 v1.8h, v3.8h, v2.8h
-; CHECK-NOSVE-NEXT: uzp1 v0.16b, v1.16b, v0.16b
-; CHECK-NOSVE-NEXT: dup v1.16b, w8
-; CHECK-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b
-; CHECK-NOSVE-NEXT: ret
-entry:
- %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 2, i1 1)
- ret <16 x i1> %0
-}
-
-define <2 x i1> @not_whilerw_ptr32(i32 %a, i32 %b) {
-; CHECK-SVE-LABEL: not_whilerw_ptr32:
-; CHECK-SVE: // %bb.0: // %entry
-; CHECK-SVE-NEXT: subs w8, w1, w0
-; CHECK-SVE-NEXT: cneg w8, w8, mi
-; CHECK-SVE-NEXT: add w9, w8, #7
-; CHECK-SVE-NEXT: cmp w8, #0
-; CHECK-SVE-NEXT: csel w8, w9, w8, lt
-; CHECK-SVE-NEXT: asr w8, w8, #3
-; CHECK-SVE-NEXT: cmp w8, #0
-; CHECK-SVE-NEXT: cset w9, eq
-; CHECK-SVE-NEXT: whilelo p0.s, #0, w8
-; CHECK-SVE-NEXT: dup v0.2s, w9
-; CHECK-SVE-NEXT: mov z1.s, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-SVE-NEXT: orr v0.8b, v1.8b, v0.8b
-; CHECK-SVE-NEXT: ret
-;
-; CHECK-NOSVE-LABEL: not_whilerw_ptr32:
-; CHECK-NOSVE: // %bb.0: // %entry
-; CHECK-NOSVE-NEXT: subs w9, w1, w0
-; CHECK-NOSVE-NEXT: adrp x8, .LCPI9_0
-; CHECK-NOSVE-NEXT: cneg w9, w9, mi
-; CHECK-NOSVE-NEXT: ldr d1, [x8, :lo12:.LCPI9_0]
-; CHECK-NOSVE-NEXT: add w10, w9, #7
-; CHECK-NOSVE-NEXT: cmp w9, #0
-; CHECK-NOSVE-NEXT: csel w9, w10, w9, lt
-; CHECK-NOSVE-NEXT: asr w9, w9, #3
-; CHECK-NOSVE-NEXT: dup v0.2s, w9
-; CHECK-NOSVE-NEXT: cmp w9, #0
-; CHECK-NOSVE-NEXT: cset w8, eq
-; CHECK-NOSVE-NEXT: dup v2.2s, w8
-; CHECK-NOSVE-NEXT: cmhi v0.2s, v0.2s, v1.2s
-; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v2.8b
-; CHECK-NOSVE-NEXT: ret
-entry:
- %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i32.i32(i32 %a, i32 %b, i32 8, i1 0)
+ %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1.i64.i64(i64 %a, i64 %b, i64 8, i1 0)
ret <2 x i1> %0
}
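The shl #7 / cmlt #0 pairs added to the CHECK-NOSVE output are the usual boolean-to-mask normalisation: the compare tree leaves a 0-or-1 byte per lane, and shifting bit 0 into the sign bit then comparing signed-less-than-zero widens it to a full 0-or-minus-one lane before the final orr. The equivalent IR idiom is roughly (a sketch, not part of the tests):

    ; %bool holds 0 or 1 per byte; %mask holds 0 or -1 per byte.
    %hi = shl <8 x i8> %bool, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
    %neg = icmp slt <8 x i8> %hi, zeroinitializer
    %mask = sext <8 x i1> %neg to <8 x i8>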
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
index be5ec8b2a82bf..a7c9c5e3cdd33 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
@@ -10,16 +10,57 @@ define <vscale x 16 x i1> @whilewr_8(i64 %a, i64 %b) {
;
; CHECK-SVE-LABEL: whilewr_8:
; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-SVE-NEXT: addvl sp, sp, #-1
+; CHECK-SVE-NEXT: str p7, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK-SVE-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK-SVE-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-SVE-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-SVE-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-SVE-NEXT: .cfi_offset w29, -16
+; CHECK-SVE-NEXT: index z0.d, #0, #1
; CHECK-SVE-NEXT: sub x8, x1, x0
+; CHECK-SVE-NEXT: ptrue p0.d
+; CHECK-SVE-NEXT: mov z2.d, x8
+; CHECK-SVE-NEXT: mov z1.d, z0.d
+; CHECK-SVE-NEXT: mov z3.d, z0.d
+; CHECK-SVE-NEXT: cmphi p1.d, p0/z, z2.d, z0.d
+; CHECK-SVE-NEXT: incd z0.d, all, mul #4
+; CHECK-SVE-NEXT: incd z1.d
+; CHECK-SVE-NEXT: incd z3.d, all, mul #2
+; CHECK-SVE-NEXT: cmphi p5.d, p0/z, z2.d, z0.d
+; CHECK-SVE-NEXT: mov z4.d, z1.d
+; CHECK-SVE-NEXT: cmphi p2.d, p0/z, z2.d, z1.d
+; CHECK-SVE-NEXT: incd z1.d, all, mul #4
+; CHECK-SVE-NEXT: cmphi p3.d, p0/z, z2.d, z3.d
+; CHECK-SVE-NEXT: incd z3.d, all, mul #4
+; CHECK-SVE-NEXT: incd z4.d, all, mul #2
+; CHECK-SVE-NEXT: cmphi p6.d, p0/z, z2.d, z1.d
+; CHECK-SVE-NEXT: cmphi p7.d, p0/z, z2.d, z3.d
+; CHECK-SVE-NEXT: uzp1 p1.s, p1.s, p2.s
+; CHECK-SVE-NEXT: cmphi p4.d, p0/z, z2.d, z4.d
+; CHECK-SVE-NEXT: incd z4.d, all, mul #4
+; CHECK-SVE-NEXT: uzp1 p2.s, p5.s, p6.s
+; CHECK-SVE-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-SVE-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-SVE-NEXT: cmphi p0.d, p0/z, z2.d, z4.d
+; CHECK-SVE-NEXT: uzp1 p3.s, p3.s, p4.s
; CHECK-SVE-NEXT: cmp x8, #1
-; CHECK-SVE-NEXT: cset w9, lt
-; CHECK-SVE-NEXT: whilelo p0.b, #0, x8
-; CHECK-SVE-NEXT: sbfx x8, x9, #0, #1
+; CHECK-SVE-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-SVE-NEXT: cset w8, lt
+; CHECK-SVE-NEXT: uzp1 p1.h, p1.h, p3.h
+; CHECK-SVE-NEXT: sbfx x8, x8, #0, #1
+; CHECK-SVE-NEXT: uzp1 p0.s, p7.s, p0.s
+; CHECK-SVE-NEXT: ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-SVE-NEXT: uzp1 p0.h, p2.h, p0.h
+; CHECK-SVE-NEXT: uzp1 p0.b, p1.b, p0.b
; CHECK-SVE-NEXT: whilelo p1.b, xzr, x8
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
+; CHECK-SVE-NEXT: addvl sp, sp, #1
+; CHECK-SVE-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 1, i1 1)
+ %0 = call <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1.i64.i64(i64 %a, i64 %b, i64 1, i1 1)
ret <vscale x 16 x i1> %0
}
@@ -31,13 +72,28 @@ define <vscale x 8 x i1> @whilewr_16(i64 %a, i64 %b) {
;
; CHECK-SVE-LABEL: whilewr_16:
; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: index z0.d, #0, #1
; CHECK-SVE-NEXT: sub x8, x1, x0
+; CHECK-SVE-NEXT: ptrue p0.d
; CHECK-SVE-NEXT: add x8, x8, x8, lsr #63
; CHECK-SVE-NEXT: asr x8, x8, #1
+; CHECK-SVE-NEXT: mov z1.d, z0.d
+; CHECK-SVE-NEXT: mov z2.d, z0.d
+; CHECK-SVE-NEXT: mov z3.d, x8
+; CHECK-SVE-NEXT: incd z1.d
+; CHECK-SVE-NEXT: incd z2.d, all, mul #2
+; CHECK-SVE-NEXT: cmphi p1.d, p0/z, z3.d, z0.d
+; CHECK-SVE-NEXT: mov z4.d, z1.d
+; CHECK-SVE-NEXT: cmphi p2.d, p0/z, z3.d, z1.d
+; CHECK-SVE-NEXT: cmphi p3.d, p0/z, z3.d, z2.d
+; CHECK-SVE-NEXT: incd z4.d, all, mul #2
+; CHECK-SVE-NEXT: uzp1 p1.s, p1.s, p2.s
+; CHECK-SVE-NEXT: cmphi p0.d, p0/z, z3.d, z4.d
; CHECK-SVE-NEXT: cmp x8, #1
-; CHECK-SVE-NEXT: cset w9, lt
-; CHECK-SVE-NEXT: whilelo p0.h, #0, x8
-; CHECK-SVE-NEXT: sbfx x8, x9, #0, #1
+; CHECK-SVE-NEXT: cset w8, lt
+; CHECK-SVE-NEXT: sbfx x8, x8, #0, #1
+; CHECK-SVE-NEXT: uzp1 p0.s, p3.s, p0.s
+; CHECK-SVE-NEXT: uzp1 p0.h, p1.h, p0.h
; CHECK-SVE-NEXT: whilelo p1.h, xzr, x8
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
@@ -54,20 +110,27 @@ define <vscale x 4 x i1> @whilewr_32(i64 %a, i64 %b) {
;
; CHECK-SVE-LABEL: whilewr_32:
; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: index z0.d, #0, #1
; CHECK-SVE-NEXT: sub x8, x1, x0
+; CHECK-SVE-NEXT: ptrue p0.d
; CHECK-SVE-NEXT: add x9, x8, #3
; CHECK-SVE-NEXT: cmp x8, #0
; CHECK-SVE-NEXT: csel x8, x9, x8, lt
; CHECK-SVE-NEXT: asr x8, x8, #2
+; CHECK-SVE-NEXT: mov z1.d, z0.d
+; CHECK-SVE-NEXT: mov z2.d, x8
+; CHECK-SVE-NEXT: incd z1.d
+; CHECK-SVE-NEXT: cmphi p1.d, p0/z, z2.d, z0.d
+; CHECK-SVE-NEXT: cmphi p0.d, p0/z, z2.d, z1.d
; CHECK-SVE-NEXT: cmp x8, #1
-; CHECK-SVE-NEXT: cset w9, lt
-; CHECK-SVE-NEXT: whilelo p1.s, #0, x8
-; CHECK-SVE-NEXT: sbfx x9, x9, #0, #1
-; CHECK-SVE-NEXT: whilelo p0.s, xzr, x9
-; CHECK-SVE-NEXT: mov p0.b, p1/m, p1.b
+; CHECK-SVE-NEXT: cset w8, lt
+; CHECK-SVE-NEXT: sbfx x8, x8, #0, #1
+; CHECK-SVE-NEXT: uzp1 p0.s, p1.s, p0.s
+; CHECK-SVE-NEXT: whilelo p1.s, xzr, x8
+; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 4 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 4, i1 1)
+ %0 = call <vscale x 4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i64(i64 %a, i64 %b, i64 4, i1 1)
ret <vscale x 4 x i1> %0
}
@@ -80,19 +143,22 @@ define <vscale x 2 x i1> @whilewr_64(i64 %a, i64 %b) {
; CHECK-SVE-LABEL: whilewr_64:
; CHECK-SVE: // %bb.0: // %entry
; CHECK-SVE-NEXT: sub x8, x1, x0
+; CHECK-SVE-NEXT: index z0.d, #0, #1
+; CHECK-SVE-NEXT: ptrue p0.d
; CHECK-SVE-NEXT: add x9, x8, #7
; CHECK-SVE-NEXT: cmp x8, #0
; CHECK-SVE-NEXT: csel x8, x9, x8, lt
; CHECK-SVE-NEXT: asr x8, x8, #3
+; CHECK-SVE-NEXT: mov z1.d, x8
+; CHECK-SVE-NEXT: cmphi p0.d, p0/z, z1.d, z0.d
; CHECK-SVE-NEXT: cmp x8, #1
-; CHECK-SVE-NEXT: cset w9, lt
-; CHECK-SVE-NEXT: whilelo p1.d, #0, x8
-; CHECK-SVE-NEXT: sbfx x9, x9, #0, #1
-; CHECK-SVE-NEXT: whilelo p0.d, xzr, x9
-; CHECK-SVE-NEXT: mov p0.b, p1/m, p1.b
+; CHECK-SVE-NEXT: cset w8, lt
+; CHECK-SVE-NEXT: sbfx x8, x8, #0, #1
+; CHECK-SVE-NEXT: whilelo p1.d, xzr, x8
+; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 2 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 8, i1 1)
+ %0 = call <vscale x 2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1.i64.i64(i64 %a, i64 %b, i64 8, i1 1)
ret <vscale x 2 x i1> %0
}
@@ -104,17 +170,60 @@ define <vscale x 16 x i1> @whilerw_8(i64 %a, i64 %b) {
;
; CHECK-SVE-LABEL: whilerw_8:
; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-SVE-NEXT: addvl sp, sp, #-1
+; CHECK-SVE-NEXT: str p7, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK-SVE-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK-SVE-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-SVE-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-SVE-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-SVE-NEXT: .cfi_offset w29, -16
+; CHECK-SVE-NEXT: index z0.d, #0, #1
; CHECK-SVE-NEXT: subs x8, x1, x0
+; CHECK-SVE-NEXT: ptrue p0.d
; CHECK-SVE-NEXT: cneg x8, x8, mi
+; CHECK-SVE-NEXT: mov z1.d, x8
+; CHECK-SVE-NEXT: mov z2.d, z0.d
+; CHECK-SVE-NEXT: mov z4.d, z0.d
+; CHECK-SVE-NEXT: mov z5.d, z0.d
+; CHECK-SVE-NEXT: cmphi p2.d, p0/z, z1.d, z0.d
+; CHECK-SVE-NEXT: incd z2.d
+; CHECK-SVE-NEXT: incd z4.d, all, mul #2
+; CHECK-SVE-NEXT: incd z5.d, all, mul #4
+; CHECK-SVE-NEXT: mov z3.d, z2.d
+; CHECK-SVE-NEXT: cmphi p1.d, p0/z, z1.d, z2.d
+; CHECK-SVE-NEXT: incd z2.d, all, mul #4
+; CHECK-SVE-NEXT: cmphi p3.d, p0/z, z1.d, z4.d
+; CHECK-SVE-NEXT: incd z4.d, all, mul #4
+; CHECK-SVE-NEXT: cmphi p4.d, p0/z, z1.d, z5.d
+; CHECK-SVE-NEXT: incd z3.d, all, mul #2
+; CHECK-SVE-NEXT: cmphi p5.d, p0/z, z1.d, z2.d
+; CHECK-SVE-NEXT: cmphi p7.d, p0/z, z1.d, z4.d
+; CHECK-SVE-NEXT: uzp1 p1.s, p2.s, p1.s
+; CHECK-SVE-NEXT: mov z0.d, z3.d
+; CHECK-SVE-NEXT: cmphi p6.d, p0/z, z1.d, z3.d
+; CHECK-SVE-NEXT: uzp1 p2.s, p4.s, p5.s
+; CHECK-SVE-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-SVE-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-SVE-NEXT: incd z0.d, all, mul #4
+; CHECK-SVE-NEXT: uzp1 p3.s, p3.s, p6.s
+; CHECK-SVE-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-SVE-NEXT: cmphi p0.d, p0/z, z1.d, z0.d
+; CHECK-SVE-NEXT: uzp1 p1.h, p1.h, p3.h
; CHECK-SVE-NEXT: cmp x8, #0
-; CHECK-SVE-NEXT: cset w9, eq
-; CHECK-SVE-NEXT: whilelo p0.b, #0, x8
-; CHECK-SVE-NEXT: sbfx x8, x9, #0, #1
+; CHECK-SVE-NEXT: cset w8, eq
+; CHECK-SVE-NEXT: sbfx x8, x8, #0, #1
+; CHECK-SVE-NEXT: uzp1 p0.s, p7.s, p0.s
+; CHECK-SVE-NEXT: ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-SVE-NEXT: uzp1 p0.h, p2.h, p0.h
+; CHECK-SVE-NEXT: uzp1 p0.b, p1.b, p0.b
; CHECK-SVE-NEXT: whilelo p1.b, xzr, x8
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
+; CHECK-SVE-NEXT: addvl sp, sp, #1
+; CHECK-SVE-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 1, i1 0)
+ %0 = call <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1.i64.i64(i64 %a, i64 %b, i64 1, i1 0)
ret <vscale x 16 x i1> %0
}
@@ -126,14 +235,29 @@ define <vscale x 8 x i1> @whilerw_16(i64 %a, i64 %b) {
;
; CHECK-SVE-LABEL: whilerw_16:
; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: index z0.d, #0, #1
; CHECK-SVE-NEXT: subs x8, x1, x0
+; CHECK-SVE-NEXT: ptrue p0.d
; CHECK-SVE-NEXT: cneg x8, x8, mi
; CHECK-SVE-NEXT: add x8, x8, x8, lsr #63
+; CHECK-SVE-NEXT: mov z1.d, z0.d
+; CHECK-SVE-NEXT: mov z2.d, z0.d
; CHECK-SVE-NEXT: asr x8, x8, #1
+; CHECK-SVE-NEXT: mov z3.d, x8
+; CHECK-SVE-NEXT: incd z1.d
+; CHECK-SVE-NEXT: incd z2.d, all, mul #2
+; CHECK-SVE-NEXT: cmphi p1.d, p0/z, z3.d, z0.d
+; CHECK-SVE-NEXT: mov z4.d, z1.d
+; CHECK-SVE-NEXT: cmphi p2.d, p0/z, z3.d, z1.d
+; CHECK-SVE-NEXT: cmphi p3.d, p0/z, z3.d, z2.d
+; CHECK-SVE-NEXT: incd z4.d, all, mul #2
+; CHECK-SVE-NEXT: uzp1 p1.s, p1.s, p2.s
+; CHECK-SVE-NEXT: cmphi p0.d, p0/z, z3.d, z4.d
; CHECK-SVE-NEXT: cmp x8, #0
-; CHECK-SVE-NEXT: cset w9, eq
-; CHECK-SVE-NEXT: whilelo p0.h, #0, x8
-; CHECK-SVE-NEXT: sbfx x8, x9, #0, #1
+; CHECK-SVE-NEXT: cset w8, eq
+; CHECK-SVE-NEXT: sbfx x8, x8, #0, #1
+; CHECK-SVE-NEXT: uzp1 p0.s, p3.s, p0.s
+; CHECK-SVE-NEXT: uzp1 p0.h, p1.h, p0.h
; CHECK-SVE-NEXT: whilelo p1.h, xzr, x8
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
@@ -150,21 +274,28 @@ define <vscale x 4 x i1> @whilerw_32(i64 %a, i64 %b) {
;
; CHECK-SVE-LABEL: whilerw_32:
; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: index z0.d, #0, #1
; CHECK-SVE-NEXT: subs x8, x1, x0
+; CHECK-SVE-NEXT: ptrue p0.d
; CHECK-SVE-NEXT: cneg x8, x8, mi
; CHECK-SVE-NEXT: add x9, x8, #3
; CHECK-SVE-NEXT: cmp x8, #0
; CHECK-SVE-NEXT: csel x8, x9, x8, lt
+; CHECK-SVE-NEXT: mov z1.d, z0.d
; CHECK-SVE-NEXT: asr x8, x8, #2
+; CHECK-SVE-NEXT: mov z2.d, x8
+; CHECK-SVE-NEXT: incd z1.d
+; CHECK-SVE-NEXT: cmphi p1.d, p0/z, z2.d, z0.d
+; CHECK-SVE-NEXT: cmphi p0.d, p0/z, z2.d, z1.d
; CHECK-SVE-NEXT: cmp x8, #0
-; CHECK-SVE-NEXT: cset w9, eq
-; CHECK-SVE-NEXT: whilelo p1.s, #0, x8
-; CHECK-SVE-NEXT: sbfx x9, x9, #0, #1
-; CHECK-SVE-NEXT: whilelo p0.s, xzr, x9
-; CHECK-SVE-NEXT: mov p0.b, p1/m, p1.b
+; CHECK-SVE-NEXT: cset w8, eq
+; CHECK-SVE-NEXT: sbfx x8, x8, #0, #1
+; CHECK-SVE-NEXT: uzp1 p0.s, p1.s, p0.s
+; CHECK-SVE-NEXT: whilelo p1.s, xzr, x8
+; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 4 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 4, i1 0)
+ %0 = call <vscale x 4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i64(i64 %a, i64 %b, i64 4, i1 0)
ret <vscale x 4 x i1> %0
}
@@ -177,19 +308,22 @@ define <vscale x 2 x i1> @whilerw_64(i64 %a, i64 %b) {
; CHECK-SVE-LABEL: whilerw_64:
; CHECK-SVE: // %bb.0: // %entry
; CHECK-SVE-NEXT: subs x8, x1, x0
+; CHECK-SVE-NEXT: index z0.d, #0, #1
+; CHECK-SVE-NEXT: ptrue p0.d
; CHECK-SVE-NEXT: cneg x8, x8, mi
; CHECK-SVE-NEXT: add x9, x8, #7
; CHECK-SVE-NEXT: cmp x8, #0
; CHECK-SVE-NEXT: csel x8, x9, x8, lt
; CHECK-SVE-NEXT: asr x8, x8, #3
+; CHECK-SVE-NEXT: mov z1.d, x8
+; CHECK-SVE-NEXT: cmphi p0.d, p0/z, z1.d, z0.d
; CHECK-SVE-NEXT: cmp x8, #0
-; CHECK-SVE-NEXT: cset w9, eq
-; CHECK-SVE-NEXT: whilelo p1.d, #0, x8
-; CHECK-SVE-NEXT: sbfx x9, x9, #0, #1
-; CHECK-SVE-NEXT: whilelo p0.d, xzr, x9
-; CHECK-SVE-NEXT: mov p0.b, p1/m, p1.b
+; CHECK-SVE-NEXT: cset w8, eq
+; CHECK-SVE-NEXT: sbfx x8, x8, #0, #1
+; CHECK-SVE-NEXT: whilelo p1.d, xzr, x8
+; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 2 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 8, i1 0)
+ %0 = call <vscale x 2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1.i64.i64(i64 %a, i64 %b, i64 8, i1 0)
ret <vscale x 2 x i1> %0
}
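Without SVE2's whilewr/whilerw, the scalable masks above are assembled from generic SVE operations: index z0.d, #0, #1 plus incd materialise the lane indices in nxv2i64 chunks, cmphi compares each chunk against the splatted element distance, a uzp1 tree narrows the nxv2i1 predicates down to the final mask width (hence the p4-p7 spills in the byte case), and the cmp/cset/sbfx feeding whilelo ORs in the all-lanes-valid case. Per lane, the sequence computes:

    ; %diff = (%b - %a) / elementSize, computed in x8
    ; %m[i] = (icmp ult i, %diff) || (%diff <= 0)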
>From d98f300d1638d0b2e33f8afe1d1684505095d4b0 Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Wed, 15 Jan 2025 16:16:31 +0000
Subject: [PATCH 03/25] Fix ISD node name string and remove shouldExpand
function
---
.../SelectionDAG/SelectionDAGDumper.cpp | 2 +-
.../Target/AArch64/AArch64ISelLowering.cpp | 19 -------------------
llvm/lib/Target/AArch64/AArch64ISelLowering.h | 3 ---
3 files changed, 1 insertion(+), 23 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index f04a007ddb1ea..19ea942b060a8 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -586,7 +586,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::PARTIAL_REDUCE_SMLA:
return "partial_reduce_smla";
case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
- return "alias_mask";
+ return "alias_lane_mask";
// Vector Predication
#define BEGIN_REGISTER_VP_SDNODE(SDID, LEGALARG, NAME, ...) \
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 800c3a97294a6..6f1b4d11b005d 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2159,25 +2159,6 @@ bool AArch64TargetLowering::shouldExpandGetActiveLaneMask(EVT ResVT,
return false;
}
-bool AArch64TargetLowering::shouldExpandGetAliasLaneMask(
- EVT VT, EVT PtrVT, unsigned EltSize) const {
- if (!Subtarget->hasSVE2())
- return true;
-
- if (PtrVT != MVT::i64)
- return true;
-
- if (VT == MVT::v2i1 || VT == MVT::nxv2i1)
- return EltSize != 8;
- if (VT == MVT::v4i1 || VT == MVT::nxv4i1)
- return EltSize != 4;
- if (VT == MVT::v8i1 || VT == MVT::nxv8i1)
- return EltSize != 2;
- if (VT == MVT::v16i1 || VT == MVT::nxv16i1)
- return EltSize != 1;
- return true;
-}
-
bool AArch64TargetLowering::shouldExpandPartialReductionIntrinsic(
const IntrinsicInst *I) const {
assert(I->getIntrinsicID() ==
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index dfab9a3646037..bf43e98f8b706 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -498,9 +498,6 @@ class AArch64TargetLowering : public TargetLowering {
bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const override;
- bool shouldExpandGetAliasLaneMask(EVT VT, EVT PtrVT,
- unsigned EltSize) const override;
-
bool
shouldExpandPartialReductionIntrinsic(const IntrinsicInst *I) const override;
>From ad85f4ba011e1a6bea9c245c3e4182417f79dbbc Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Thu, 16 Jan 2025 10:24:59 +0000
Subject: [PATCH 04/25] Format
---
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 6f1b4d11b005d..aa42e5c3eda19 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5104,24 +5104,32 @@ SDValue AArch64TargetLowering::LowerALIAS_LANE_MASK(SDValue Op,
// Make sure that the promoted mask size and element size match
switch (EltSize) {
case 1:
+ IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_b
+ : Intrinsic::aarch64_sve_whilerw_b;
assert((SimpleVT == MVT::v16i8 || SimpleVT == MVT::nxv16i1) &&
"Unexpected mask or element size");
IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_b
: Intrinsic::aarch64_sve_whilerw_b;
break;
case 2:
+ IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_h
+ : Intrinsic::aarch64_sve_whilerw_h;
assert((SimpleVT == MVT::v8i8 || SimpleVT == MVT::nxv8i1) &&
"Unexpected mask or element size");
IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_h
: Intrinsic::aarch64_sve_whilerw_h;
break;
case 4:
+ IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_s
+ : Intrinsic::aarch64_sve_whilerw_s;
assert((SimpleVT == MVT::v4i16 || SimpleVT == MVT::nxv4i1) &&
"Unexpected mask or element size");
IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_s
: Intrinsic::aarch64_sve_whilerw_s;
break;
case 8:
+ IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_d
+ : Intrinsic::aarch64_sve_whilerw_d;
assert((SimpleVT == MVT::v2i32 || SimpleVT == MVT::nxv2i1) &&
"Unexpected mask or element size");
IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_d
>From 5dff164c67acfb3291cd9fc7fcd216e243ca6cab Mon Sep 17 00:00:00 2001
From: Sam Tebbs <samuel.tebbs at arm.com>
Date: Mon, 27 Jan 2025 14:17:16 +0000
Subject: [PATCH 05/25] Move promote case
---
llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 82fa70c578f34..5b4bd337321f9 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -55,9 +55,6 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
N->dump(&DAG); dbgs() << "\n";
#endif
report_fatal_error("Do not know how to promote this operator!");
- case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
- Res = PromoteIntRes_EXPERIMENTAL_ALIAS_LANE_MASK(N);
- break;
case ISD::MERGE_VALUES:Res = PromoteIntRes_MERGE_VALUES(N, ResNo); break;
case ISD::AssertSext: Res = PromoteIntRes_AssertSext(N); break;
case ISD::AssertZext: Res = PromoteIntRes_AssertZext(N); break;
@@ -325,6 +322,10 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
Res = PromoteIntRes_VP_REDUCE(N);
break;
+ case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
+ Res = PromoteIntRes_EXPERIMENTAL_ALIAS_LANE_MASK(N);
+ break;
+
case ISD::FREEZE:
Res = PromoteIntRes_FREEZE(N);
break;
>From 60177a7f2b200ecae8bd8e41a7c5da7257ea4b4c Mon Sep 17 00:00:00 2001
From: Sam Tebbs <samuel.tebbs at arm.com>
Date: Mon, 27 Jan 2025 14:17:30 +0000
Subject: [PATCH 06/25] Fix tablegen comment
---
llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 78904997adbdc..ab8fd1d7e894a 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -4065,7 +4065,7 @@ let Predicates = [HasSVE2_or_SME] in {
// SVE2 pointer conflict compare
defm WHILEWR_PXX : sve2_int_while_rr<0b0, "whilewr", AArch64whilewr>;
defm WHILERW_PXX : sve2_int_while_rr<0b1, "whilerw", AArch64whilerw>;
-} // End HasSVE2orSME
+} // End HasSVE2_or_SME
let Predicates = [HasSVEAES, HasNonStreamingSVE2_or_SSVE_AES] in {
// SVE2 crypto destructive binary operations
>From 60e9a1f1b0df08f5c19ba9e90dad39c092ba7185 Mon Sep 17 00:00:00 2001
From: Sam Tebbs <samuel.tebbs at arm.com>
Date: Mon, 27 Jan 2025 14:20:31 +0000
Subject: [PATCH 07/25] Remove DAGTypeLegalizer::
---
llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 5b4bd337321f9..054a69278bf20 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -2108,7 +2108,7 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
Res = PromoteIntOp_PARTIAL_REDUCE_MLA(N);
break;
case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
- Res = DAGTypeLegalizer::PromoteIntOp_EXPERIMENTAL_ALIAS_LANE_MASK(N, OpNo);
+ Res = PromoteIntOp_EXPERIMENTAL_ALIAS_LANE_MASK(N, OpNo);
break;
}
>From eef2bf068d1addc208875ec979e0d4141e56d6b8 Mon Sep 17 00:00:00 2001
From: Sam Tebbs <samuel.tebbs at arm.com>
Date: Mon, 27 Jan 2025 14:20:39 +0000
Subject: [PATCH 08/25] Use getConstantOperandVal
---
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index aa42e5c3eda19..707413a605f07 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5097,8 +5097,8 @@ SDValue AArch64TargetLowering::LowerALIAS_LANE_MASK(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
unsigned IntrinsicID = 0;
- uint64_t EltSize = Op.getOperand(2)->getAsZExtVal();
- bool IsWriteAfterRead = Op.getOperand(3)->getAsZExtVal() == 1;
+ uint64_t EltSize = Op.getConstantOperandVal(2);
+ bool IsWriteAfterRead = Op.getConstantOperandVal(3) == 1;
EVT VT = Op.getValueType();
MVT SimpleVT = VT.getSimpleVT();
// Make sure that the promoted mask size and element size match
>From 161c027699025fcf10ec98855a6e9d71a8bda150 Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Wed, 29 Jan 2025 11:40:32 +0000
Subject: [PATCH 09/25] Remove isPredicateCCSettingOp case
---
.../Target/AArch64/AArch64ISelLowering.cpp | 51 +++++++++----------
1 file changed, 25 insertions(+), 26 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 707413a605f07..85019edaee5ed 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -6407,29 +6407,29 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
}
case Intrinsic::experimental_get_alias_lane_mask: {
unsigned IntrinsicID = 0;
- uint64_t EltSize = Op.getOperand(3)->getAsZExtVal();
- bool IsWriteAfterRead = Op.getOperand(4)->getAsZExtVal() == 1;
- switch (EltSize) {
- case 1:
- IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_b
- : Intrinsic::aarch64_sve_whilerw_b;
- break;
- case 2:
- IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_h
- : Intrinsic::aarch64_sve_whilerw_h;
- break;
- case 4:
- IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_s
- : Intrinsic::aarch64_sve_whilerw_s;
- break;
- case 8:
- IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_d
- : Intrinsic::aarch64_sve_whilerw_d;
- break;
- default:
- llvm_unreachable("Unexpected element size for get.alias.lane.mask");
- break;
- }
+ uint64_t EltSize = Op.getOperand(3)->getAsZExtVal();
+ bool IsWriteAfterRead = Op.getOperand(4)->getAsZExtVal() == 1;
+ switch (EltSize) {
+ case 1:
+ IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_b
+ : Intrinsic::aarch64_sve_whilerw_b;
+ break;
+ case 2:
+ IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_h
+ : Intrinsic::aarch64_sve_whilerw_h;
+ break;
+ case 4:
+ IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_s
+ : Intrinsic::aarch64_sve_whilerw_s;
+ break;
+ case 8:
+ IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_d
+ : Intrinsic::aarch64_sve_whilerw_d;
+ break;
+ default:
+ llvm_unreachable("Unexpected element size for get.alias.lane.mask");
+ break;
+ }
SDValue ID = DAG.getTargetConstant(IntrinsicID, dl, MVT::i64);
EVT VT = Op.getValueType();
@@ -19658,9 +19658,8 @@ static SDValue getPTest(SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op,
static bool isPredicateCCSettingOp(SDValue N) {
if ((N.getOpcode() == ISD::SETCC ||
- // get_active_lane_mask is lowered to a whilelo instruction.
- N.getOpcode() == ISD::GET_ACTIVE_LANE_MASK ||
- N.getOpcode() == ISD::EXPERIMENTAL_ALIAS_LANE_MASK) ||
+ // get_active_lane_mask is lowered to a whilelo instruction.
+ N.getOpcode() == ISD::GET_ACTIVE_LANE_MASK) ||
(N.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
(N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilege ||
N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilegt ||
>From 5841a7946b9cbedbc40e1c1daaf2109d4cb99e77 Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Thu, 30 Jan 2025 14:40:12 +0000
Subject: [PATCH 10/25] Remove overloads for pointer and element size
parameters
---
llvm/docs/LangRef.rst | 12 +++----
llvm/include/llvm/IR/Intrinsics.td | 2 +-
.../SelectionDAG/LegalizeVectorOps.cpp | 11 ++++---
llvm/test/CodeGen/AArch64/alias_mask.ll | 32 +++++++++----------
.../CodeGen/AArch64/alias_mask_scalable.ll | 32 +++++++++----------
5 files changed, 46 insertions(+), 43 deletions(-)
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index cf3816dcc1666..5bc8e45295275 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -23980,10 +23980,10 @@ This is an overloaded intrinsic.
::
- declare <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i64(i64 %ptrA, i64 %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
- declare <8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %ptrA, i64 %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
- declare <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1.i64.i32(i64 %ptrA, i64 %ptrB, i32 immarg %elementSize, i1 immarg %writeAfterRead)
- declare <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.nxv16i1.i64.i32(i64 %ptrA, i64 %ptrB, i32 immarg %elementSize, i1 immarg %writeAfterRead)
+ declare <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+ declare <8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+ declare <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+ declare <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
Overview:
@@ -24023,7 +24023,7 @@ equivalent to:
%m[i] = (icmp ult i, %diff) || (%diff == 0)
where ``%m`` is a vector (mask) of active/inactive lanes with its elements
-indexed by ``i``, and ``%ptrA``, ``%ptrB`` are the two i64 arguments to
+indexed by ``i``, and ``%ptrA``, ``%ptrB`` are the two ptr arguments to
``llvm.experimental.get.alias.lane.mask.*`` and ``%elementSize`` is the first
immediate argument. The ``%writeAfterRead`` argument is expected to be true if
``%ptrB`` is stored to after ``%ptrA`` is read from.
@@ -24049,7 +24049,7 @@ Examples:
.. code-block:: llvm
- %alias.lane.mask = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i32(i64 %ptrA, i64 %ptrB, i32 4, i1 1)
+ %alias.lane.mask = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4, i1 1)
%vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptrA, i32 4, <4 x i1> %alias.lane.mask, <4 x i32> poison)
[...]
call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, <4 x i32>* %ptrB, i32 4, <4 x i1> %alias.lane.mask)
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 3d45375f590ac..3923a20db9bd9 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -2401,7 +2401,7 @@ let IntrProperties = [IntrNoMem, ImmArg<ArgIndex<1>>] in {
def int_experimental_get_alias_lane_mask:
DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [llvm_anyint_ty, LLVMMatchType<1>, llvm_anyint_ty, llvm_i1_ty],
+ [llvm_anyptr_ty, LLVMMatchType<1>, llvm_i64_ty, llvm_i1_ty],
[IntrNoMem, IntrNoSync, IntrWillReturn, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
def int_get_active_lane_mask:
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index fb8eb0f423fa3..74963622ffbf1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -1778,8 +1778,7 @@ SDValue VectorLegalizer::ExpandEXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N) {
SDValue SinkValue = N->getOperand(1);
SDValue EltSize = N->getOperand(2);
- bool IsWriteAfterRead =
- cast<ConstantSDNode>(N->getOperand(3))->getZExtValue() != 0;
+ bool IsWriteAfterRead = N->getConstantOperandVal(3) != 0;
auto VT = N->getValueType(0);
auto PtrVT = SourceValue->getValueType(0);
@@ -1788,14 +1787,15 @@ SDValue VectorLegalizer::ExpandEXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N) {
Diff = DAG.getNode(ISD::ABS, DL, PtrVT, Diff);
Diff = DAG.getNode(ISD::SDIV, DL, PtrVT, Diff, EltSize);
- SDValue Zero = DAG.getTargetConstant(0, DL, PtrVT);
// If the difference is positive then some elements may alias
auto CmpVT = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
Diff.getValueType());
+ SDValue Zero = DAG.getTargetConstant(0, DL, PtrVT);
SDValue Cmp = DAG.getSetCC(DL, CmpVT, Diff, Zero,
IsWriteAfterRead ? ISD::SETLE : ISD::SETEQ);
+ // Create the lane mask
EVT SplatTY =
EVT::getVectorVT(*DAG.getContext(), PtrVT, VT.getVectorElementCount());
SDValue DiffSplat = DAG.getSplat(SplatTY, DL, Diff);
@@ -1803,7 +1803,10 @@ SDValue VectorLegalizer::ExpandEXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N) {
SDValue DiffMask =
DAG.getSetCC(DL, VT, VectorStep, DiffSplat, ISD::CondCode::SETULT);
- // Splat the compare result then OR it with a lane mask
+ // Splat the compare result then OR it with the lane mask
+ auto VTElementTy = VT.getVectorElementType();
+ if (CmpVT.getScalarSizeInBits() < VTElementTy.getScalarSizeInBits())
+ Cmp = DAG.getNode(ISD::ZERO_EXTEND, DL, VTElementTy, Cmp);
SDValue Splat = DAG.getSplat(VT, DL, Cmp);
return DAG.getNode(ISD::OR, DL, VT, DiffMask, Splat);
}
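For reference, a straight-line IR sketch of what this expansion computes for a <4 x i1> mask with element size 4 and writeAfterRead true (value names are illustrative; %a.int and %b.int stand for the incoming pointer operands):

    %diff.bytes = sub i64 %b.int, %a.int
    %diff = sdiv i64 %diff.bytes, 4
    ; Every lane is alias-free when the write is at or before the read.
    %none = icmp sle i64 %diff, 0
    ; Lane i is alias-free when i <u %diff.
    %d.ins = insertelement <4 x i64> poison, i64 %diff, i64 0
    %d.splat = shufflevector <4 x i64> %d.ins, <4 x i64> poison, <4 x i32> zeroinitializer
    %lane.ok = icmp ult <4 x i64> <i64 0, i64 1, i64 2, i64 3>, %d.splat
    %n.ins = insertelement <4 x i1> poison, i1 %none, i64 0
    %n.splat = shufflevector <4 x i1> %n.ins, <4 x i1> poison, <4 x i32> zeroinitializer
    %mask = or <4 x i1> %lane.ok, %n.splat

When writeAfterRead is false, the difference is first taken absolute and the scalar test becomes %diff == 0, matching the SETEQ condition above.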
diff --git a/llvm/test/CodeGen/AArch64/alias_mask.ll b/llvm/test/CodeGen/AArch64/alias_mask.ll
index 9b344f03da077..f88baeece0356 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask.ll
@@ -2,7 +2,7 @@
; RUN: llc -mtriple=aarch64 -mattr=+sve2 %s -o - | FileCheck %s --check-prefix=CHECK-SVE
; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s --check-prefix=CHECK-NOSVE
-define <16 x i1> @whilewr_8(i64 %a, i64 %b) {
+define <16 x i1> @whilewr_8(ptr %a, ptr %b) {
; CHECK-SVE-LABEL: whilewr_8:
; CHECK-SVE: // %bb.0: // %entry
; CHECK-SVE-NEXT: whilewr p0.b, x0, x1
@@ -53,11 +53,11 @@ define <16 x i1> @whilewr_8(i64 %a, i64 %b) {
; CHECK-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1.i64.i64(i64 %a, i64 %b, i64 1, i1 1)
+ %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 1)
ret <16 x i1> %0
}
-define <8 x i1> @whilewr_16(i64 %a, i64 %b) {
+define <8 x i1> @whilewr_16(ptr %a, ptr %b) {
; CHECK-SVE-LABEL: whilewr_16:
; CHECK-SVE: // %bb.0: // %entry
; CHECK-SVE-NEXT: whilewr p0.b, x0, x1
@@ -95,11 +95,11 @@ define <8 x i1> @whilewr_16(i64 %a, i64 %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 2, i1 1)
+ %0 = call <8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 1)
ret <8 x i1> %0
}
-define <4 x i1> @whilewr_32(i64 %a, i64 %b) {
+define <4 x i1> @whilewr_32(ptr %a, ptr %b) {
; CHECK-SVE-LABEL: whilewr_32:
; CHECK-SVE: // %bb.0: // %entry
; CHECK-SVE-NEXT: whilewr p0.h, x0, x1
@@ -129,11 +129,11 @@ define <4 x i1> @whilewr_32(i64 %a, i64 %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i64(i64 %a, i64 %b, i64 4, i1 1)
+ %0 = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 1)
ret <4 x i1> %0
}
-define <2 x i1> @whilewr_64(i64 %a, i64 %b) {
+define <2 x i1> @whilewr_64(ptr %a, ptr %b) {
; CHECK-SVE-LABEL: whilewr_64:
; CHECK-SVE: // %bb.0: // %entry
; CHECK-SVE-NEXT: whilewr p0.s, x0, x1
@@ -159,11 +159,11 @@ define <2 x i1> @whilewr_64(i64 %a, i64 %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1.i64.i64(i64 %a, i64 %b, i64 8, i1 1)
+ %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 1)
ret <2 x i1> %0
}
-define <16 x i1> @whilerw_8(i64 %a, i64 %b) {
+define <16 x i1> @whilerw_8(ptr %a, ptr %b) {
; CHECK-SVE-LABEL: whilerw_8:
; CHECK-SVE: // %bb.0: // %entry
; CHECK-SVE-NEXT: whilerw p0.b, x0, x1
@@ -215,11 +215,11 @@ define <16 x i1> @whilerw_8(i64 %a, i64 %b) {
; CHECK-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1.i64.i64(i64 %a, i64 %b, i64 1, i1 0)
+ %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 0)
ret <16 x i1> %0
}
-define <8 x i1> @whilerw_16(i64 %a, i64 %b) {
+define <8 x i1> @whilerw_16(ptr %a, ptr %b) {
; CHECK-SVE-LABEL: whilerw_16:
; CHECK-SVE: // %bb.0: // %entry
; CHECK-SVE-NEXT: whilerw p0.b, x0, x1
@@ -258,11 +258,11 @@ define <8 x i1> @whilerw_16(i64 %a, i64 %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 2, i1 0)
+ %0 = call <8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 0)
ret <8 x i1> %0
}
-define <4 x i1> @whilerw_32(i64 %a, i64 %b) {
+define <4 x i1> @whilerw_32(ptr %a, ptr %b) {
; CHECK-SVE-LABEL: whilerw_32:
; CHECK-SVE: // %bb.0: // %entry
; CHECK-SVE-NEXT: whilerw p0.h, x0, x1
@@ -293,11 +293,11 @@ define <4 x i1> @whilerw_32(i64 %a, i64 %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i64(i64 %a, i64 %b, i64 4, i1 0)
+ %0 = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 0)
ret <4 x i1> %0
}
-define <2 x i1> @whilerw_64(i64 %a, i64 %b) {
+define <2 x i1> @whilerw_64(ptr %a, ptr %b) {
; CHECK-SVE-LABEL: whilerw_64:
; CHECK-SVE: // %bb.0: // %entry
; CHECK-SVE-NEXT: whilerw p0.s, x0, x1
@@ -324,6 +324,6 @@ define <2 x i1> @whilerw_64(i64 %a, i64 %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1.i64.i64(i64 %a, i64 %b, i64 8, i1 0)
+ %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 0)
ret <2 x i1> %0
}
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
index a7c9c5e3cdd33..3d0f293b4687a 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
@@ -2,7 +2,7 @@
; RUN: llc -mtriple=aarch64 -mattr=+sve2 %s -o - | FileCheck %s --check-prefix=CHECK-SVE2
; RUN: llc -mtriple=aarch64 -mattr=+sve %s -o - | FileCheck %s --check-prefix=CHECK-SVE
-define <vscale x 16 x i1> @whilewr_8(i64 %a, i64 %b) {
+define <vscale x 16 x i1> @whilewr_8(ptr %a, ptr %b) {
; CHECK-SVE2-LABEL: whilewr_8:
; CHECK-SVE2: // %bb.0: // %entry
; CHECK-SVE2-NEXT: whilewr p0.b, x0, x1
@@ -60,11 +60,11 @@ define <vscale x 16 x i1> @whilewr_8(i64 %a, i64 %b) {
; CHECK-SVE-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1.i64.i64(i64 %a, i64 %b, i64 1, i1 1)
+ %0 = call <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 1)
ret <vscale x 16 x i1> %0
}
-define <vscale x 8 x i1> @whilewr_16(i64 %a, i64 %b) {
+define <vscale x 8 x i1> @whilewr_16(ptr %a, ptr %b) {
; CHECK-SVE2-LABEL: whilewr_16:
; CHECK-SVE2: // %bb.0: // %entry
; CHECK-SVE2-NEXT: whilewr p0.h, x0, x1
@@ -98,11 +98,11 @@ define <vscale x 8 x i1> @whilewr_16(i64 %a, i64 %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 2, i1 1)
+ %0 = call <vscale x 8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 1)
ret <vscale x 8 x i1> %0
}
-define <vscale x 4 x i1> @whilewr_32(i64 %a, i64 %b) {
+define <vscale x 4 x i1> @whilewr_32(ptr %a, ptr %b) {
; CHECK-SVE2-LABEL: whilewr_32:
; CHECK-SVE2: // %bb.0: // %entry
; CHECK-SVE2-NEXT: whilewr p0.s, x0, x1
@@ -130,11 +130,11 @@ define <vscale x 4 x i1> @whilewr_32(i64 %a, i64 %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i64(i64 %a, i64 %b, i64 4, i1 1)
+ %0 = call <vscale x 4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 1)
ret <vscale x 4 x i1> %0
}
-define <vscale x 2 x i1> @whilewr_64(i64 %a, i64 %b) {
+define <vscale x 2 x i1> @whilewr_64(ptr %a, ptr %b) {
; CHECK-SVE2-LABEL: whilewr_64:
; CHECK-SVE2: // %bb.0: // %entry
; CHECK-SVE2-NEXT: whilewr p0.d, x0, x1
@@ -158,11 +158,11 @@ define <vscale x 2 x i1> @whilewr_64(i64 %a, i64 %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1.i64.i64(i64 %a, i64 %b, i64 8, i1 1)
+ %0 = call <vscale x 2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 1)
ret <vscale x 2 x i1> %0
}
-define <vscale x 16 x i1> @whilerw_8(i64 %a, i64 %b) {
+define <vscale x 16 x i1> @whilerw_8(ptr %a, ptr %b) {
; CHECK-SVE2-LABEL: whilerw_8:
; CHECK-SVE2: // %bb.0: // %entry
; CHECK-SVE2-NEXT: whilerw p0.b, x0, x1
@@ -223,11 +223,11 @@ define <vscale x 16 x i1> @whilerw_8(i64 %a, i64 %b) {
; CHECK-SVE-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1.i64.i64(i64 %a, i64 %b, i64 1, i1 0)
+ %0 = call <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 0)
ret <vscale x 16 x i1> %0
}
-define <vscale x 8 x i1> @whilerw_16(i64 %a, i64 %b) {
+define <vscale x 8 x i1> @whilerw_16(ptr %a, ptr %b) {
; CHECK-SVE2-LABEL: whilerw_16:
; CHECK-SVE2: // %bb.0: // %entry
; CHECK-SVE2-NEXT: whilerw p0.h, x0, x1
@@ -262,11 +262,11 @@ define <vscale x 8 x i1> @whilerw_16(i64 %a, i64 %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1.i64.i64(i64 %a, i64 %b, i64 2, i1 0)
+ %0 = call <vscale x 8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 0)
ret <vscale x 8 x i1> %0
}
-define <vscale x 4 x i1> @whilerw_32(i64 %a, i64 %b) {
+define <vscale x 4 x i1> @whilerw_32(ptr %a, ptr %b) {
; CHECK-SVE2-LABEL: whilerw_32:
; CHECK-SVE2: // %bb.0: // %entry
; CHECK-SVE2-NEXT: whilerw p0.s, x0, x1
@@ -295,11 +295,11 @@ define <vscale x 4 x i1> @whilerw_32(i64 %a, i64 %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1.i64.i64(i64 %a, i64 %b, i64 4, i1 0)
+ %0 = call <vscale x 4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 0)
ret <vscale x 4 x i1> %0
}
-define <vscale x 2 x i1> @whilerw_64(i64 %a, i64 %b) {
+define <vscale x 2 x i1> @whilerw_64(ptr %a, ptr %b) {
; CHECK-SVE2-LABEL: whilerw_64:
; CHECK-SVE2: // %bb.0: // %entry
; CHECK-SVE2-NEXT: whilerw p0.d, x0, x1
@@ -324,6 +324,6 @@ define <vscale x 2 x i1> @whilerw_64(i64 %a, i64 %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1.i64.i64(i64 %a, i64 %b, i64 8, i1 0)
+ %0 = call <vscale x 2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 0)
ret <vscale x 2 x i1> %0
}
>From 7243ccc8efdd91272416bf402fcb9ebf5b8ac04f Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Thu, 30 Jan 2025 15:16:57 +0000
Subject: [PATCH 11/25] Clarify elementSize and writeAfterRead = 0
---
llvm/docs/LangRef.rst | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 5bc8e45295275..78cf359b43c65 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -24002,6 +24002,7 @@ The final two are immediates and the result is a vector with the i1 element type
Semantics:
""""""""""
+``%elementSize`` is the size of the accessed elements in bytes.
The intrinsic will return poison if ``%ptrA`` and ``%ptrB`` are within
VF * ``%elementSize`` of each other and ``%ptrA`` + VF * ``%elementSize`` wraps.
In other cases when ``%writeAfterRead`` is true, the
@@ -24026,7 +24027,8 @@ where ``%m`` is a vector (mask) of active/inactive lanes with its elements
indexed by ``i``, and ``%ptrA``, ``%ptrB`` are the two ptr arguments to
``llvm.experimental.get.alias.lane.mask.*`` and ``%elementSize`` is the first
immediate argument. The ``%writeAfterRead`` argument is expected to be true if
-``%ptrB`` is stored to after ``%ptrA`` is read from.
+``%ptrB`` is stored to after ``%ptrA`` is read from, otherwise it is false for
+a read after write.
The above is equivalent to:
::
>From 1a264e861b23d11615da27693fb9da6ecb527f0f Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Thu, 30 Jan 2025 15:23:46 +0000
Subject: [PATCH 12/25] Add i=0 to VF-1
---
llvm/docs/LangRef.rst | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 78cf359b43c65..cce357facf5e7 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -24024,11 +24024,11 @@ equivalent to:
%m[i] = (icmp ult i, %diff) || (%diff == 0)
where ``%m`` is a vector (mask) of active/inactive lanes with its elements
-indexed by ``i``, and ``%ptrA``, ``%ptrB`` are the two ptr arguments to
-``llvm.experimental.get.alias.lane.mask.*`` and ``%elementSize`` is the first
-immediate argument. The ``%writeAfterRead`` argument is expected to be true if
-``%ptrB`` is stored to after ``%ptrA`` is read from, otherwise it is false for
-a read after write.
+indexed by ``i`` (i = 0 to VF - 1), and ``%ptrA``, ``%ptrB`` are the two ptr
+arguments to ``llvm.experimental.get.alias.lane.mask.*`` and ``%elementSize``
+is the first immediate argument. The ``%writeAfterRead`` argument is expected
+to be true if ``%ptrB`` is stored to after ``%ptrA`` is read from, otherwise
+it is false for a read after write.
The above is equivalent to:
::
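A worked example of the semantics as now documented: with VF = 4, %elementSize = 4, %writeAfterRead = 1 and the store address 8 bytes above the load address, %diff = 2, so lanes 0 and 1 are enabled while lanes 2 and 3 (whose loads would read bytes this iteration also stores) are masked off:

    ; %ptrB - %ptrA = 8, %elementSize = 4  =>  %diff = 2
    ; %m[i] = (icmp ult i, 2) || (2 <= 0)  =>  %m = <i1 1, i1 1, i1 0, i1 0>
    %m = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4, i1 1)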
>From dab5d3e115069b437dbe5ea6b8bd443244e270c7 Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Thu, 30 Jan 2025 16:08:47 +0000
Subject: [PATCH 13/25] Rename to get.nonalias.lane.mask
---
llvm/docs/LangRef.rst | 30 +++++++++----------
llvm/include/llvm/CodeGen/ISDOpcodes.h | 2 +-
llvm/include/llvm/IR/Intrinsics.td | 4 +--
.../SelectionDAG/LegalizeIntegerTypes.cpp | 16 +++++------
llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h | 5 ++--
.../SelectionDAG/LegalizeVectorOps.cpp | 10 +++----
.../SelectionDAG/SelectionDAGBuilder.cpp | 6 ++--
.../SelectionDAG/SelectionDAGDumper.cpp | 2 +-
llvm/lib/CodeGen/TargetLoweringBase.cpp | 4 +--
.../Target/AArch64/AArch64ISelLowering.cpp | 19 +++++++------
llvm/lib/Target/AArch64/AArch64ISelLowering.h | 2 +-
llvm/test/CodeGen/AArch64/alias_mask.ll | 16 +++++------
.../CodeGen/AArch64/alias_mask_scalable.ll | 16 +++++------
13 files changed, 67 insertions(+), 65 deletions(-)
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index cce357facf5e7..c70c3d1eccd14 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -23969,9 +23969,9 @@ Examples:
%active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 %elem0, i64 429)
%wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %3, i32 4, <4 x i1> %active.lane.mask, <4 x i32> poison)
-.. _int_experimental_get_alias_lane_mask:
+.. _int_experimental_get_nonalias_lane_mask:
-'``llvm.experimental.get.alias.lane.mask.*``' Intrinsics
+'``llvm.experimental.get.nonalias.lane.mask.*``' Intrinsics
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
@@ -23980,16 +23980,16 @@ This is an overloaded intrinsic.
::
- declare <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
- declare <8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
- declare <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
- declare <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+ declare <4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+ declare <8 x i1> @llvm.experimental.get.nonalias.lane.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+ declare <16 x i1> @llvm.experimental.get.nonalias.lane.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+ declare <vscale x 16 x i1> @llvm.experimental.get.nonalias.lane.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
Overview:
"""""""""
-Create a mask representing lanes that do or not overlap between two pointers
+Create a mask enabling lanes that do not overlap between two pointers
across one vector loop iteration.
@@ -24006,7 +24006,7 @@ Semantics:
The intrinsic will return poison if ``%ptrA`` and ``%ptrB`` are within
VF * ``%elementSize`` of each other and ``%ptrA`` + VF * ``%elementSize`` wraps.
In other cases when ``%writeAfterRead`` is true, the
-'``llvm.experimental.get.alias.lane.mask.*``' intrinsics are semantically
+'``llvm.experimental.get.nonalias.lane.mask.*``' intrinsics are semantically
equivalent to:
::
@@ -24015,7 +24015,7 @@ equivalent to:
%m[i] = (icmp ult i, %diff) || (%diff <= 0)
When the return value is not poison and ``%writeAfterRead`` is false, the
-'``llvm.experimental.get.alias.lane.mask.*``' intrinsics are semantically
+'``llvm.experimental.get.nonalias.lane.mask.*``' intrinsics are semantically
equivalent to:
::
@@ -24025,7 +24025,7 @@ equivalent to:
where ``%m`` is a vector (mask) of active/inactive lanes with its elements
indexed by ``i`` (i = 0 to VF - 1), and ``%ptrA``, ``%ptrB`` are the two ptr
-arguments to ``llvm.experimental.get.alias.lane.mask.*`` and ``%elementSize``
+arguments to ``llvm.experimental.get.nonalias.lane.mask.*`` and ``%elementSize``
is the first immediate argument. The ``%writeAfterRead`` argument is expected
to be true if ``%ptrB`` is stored to after ``%ptrA`` is read from, otherwise
it is false for a read after write.
@@ -24033,7 +24033,7 @@ The above is equivalent to:
::
- %m = @llvm.experimental.get.alias.lane.mask(%ptrA, %ptrB, %elementSize, %writeAfterRead)
+ %m = @llvm.experimental.get.nonalias.lane.mask(%ptrA, %ptrB, %elementSize, %writeAfterRead)
This can, for example, be emitted by the loop vectorizer in which case
``%ptrA`` is a pointer that is read from within the loop, and ``%ptrB`` is a
@@ -24051,10 +24051,10 @@ Examples:
.. code-block:: llvm
- %alias.lane.mask = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4, i1 1)
- %vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptrA, i32 4, <4 x i1> %alias.lane.mask, <4 x i32> poison)
+ %nonalias.lane.mask = call <4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4, i1 1)
+ %vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptrA, i32 4, <4 x i1> %nonalias.lane.mask, <4 x i32> poison)
[...]
- call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, <4 x i32>* %ptrB, i32 4, <4 x i1> %alias.lane.mask)
+ call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, <4 x i32>* %ptrB, i32 4, <4 x i1> %nonalias.lane.mask)
.. _int_experimental_vp_splice:
diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index aa9f75343f0a2..cd60ce40ec090 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -1559,7 +1559,7 @@ enum NodeType {
// The `llvm.experimental.get.alias.lane.mask.*` intrinsics
// Operands: Load pointer, Store pointer, Element size, Write after read
// Output: Mask
- EXPERIMENTAL_ALIAS_LANE_MASK,
+ EXPERIMENTAL_NONALIAS_LANE_MASK,
// llvm.clear_cache intrinsic
// Operands: Input Chain, Start Address, End Address
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 3923a20db9bd9..0ab3bffdaf87b 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -2399,9 +2399,9 @@ let IntrProperties = [IntrNoMem, ImmArg<ArgIndex<1>>] in {
llvm_i32_ty]>;
}
-def int_experimental_get_alias_lane_mask:
+def int_experimental_get_nonalias_lane_mask:
DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [llvm_anyptr_ty, LLVMMatchType<1>, llvm_i64_ty, llvm_i1_ty],
+ [llvm_ptr_ty, llvm_ptr_ty, llvm_i64_ty, llvm_i1_ty],
[IntrNoMem, IntrNoSync, IntrWillReturn, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
def int_get_active_lane_mask:
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 054a69278bf20..e3664afd68dac 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -322,8 +322,8 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
Res = PromoteIntRes_VP_REDUCE(N);
break;
- case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
- Res = PromoteIntRes_EXPERIMENTAL_ALIAS_LANE_MASK(N);
+ case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK:
+ Res = PromoteIntRes_EXPERIMENTAL_NONALIAS_LANE_MASK(N);
break;
case ISD::FREEZE:
@@ -374,10 +374,10 @@ SDValue DAGTypeLegalizer::PromoteIntRes_MERGE_VALUES(SDNode *N,
}
SDValue
-DAGTypeLegalizer::PromoteIntRes_EXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N) {
+DAGTypeLegalizer::PromoteIntRes_EXPERIMENTAL_NONALIAS_LANE_MASK(SDNode *N) {
EVT VT = N->getValueType(0);
EVT NewVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
- return DAG.getNode(ISD::EXPERIMENTAL_ALIAS_LANE_MASK, SDLoc(N), NewVT,
+ return DAG.getNode(ISD::EXPERIMENTAL_NONALIAS_LANE_MASK, SDLoc(N), NewVT,
N->ops());
}
@@ -2107,8 +2107,8 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
case ISD::PARTIAL_REDUCE_SMLA:
Res = PromoteIntOp_PARTIAL_REDUCE_MLA(N);
break;
- case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
- Res = PromoteIntOp_EXPERIMENTAL_ALIAS_LANE_MASK(N, OpNo);
+ case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK:
+ Res = PromoteIntOp_EXPERIMENTAL_NONALIAS_LANE_MASK(N, OpNo);
break;
}
@@ -2912,8 +2912,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_PARTIAL_REDUCE_MLA(SDNode *N) {
}
SDValue
-DAGTypeLegalizer::PromoteIntOp_EXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N,
- unsigned OpNo) {
+DAGTypeLegalizer::PromoteIntOp_EXPERIMENTAL_NONALIAS_LANE_MASK(SDNode *N,
+ unsigned OpNo) {
SmallVector<SDValue, 4> NewOps(N->ops());
NewOps[OpNo] = GetPromotedInteger(N->getOperand(OpNo));
return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index 5f81daa431e6e..2811478f99b3d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -381,7 +381,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
SDValue PromoteIntRes_VECTOR_FIND_LAST_ACTIVE(SDNode *N);
SDValue PromoteIntRes_GET_ACTIVE_LANE_MASK(SDNode *N);
SDValue PromoteIntRes_PARTIAL_REDUCE_MLA(SDNode *N);
- SDValue PromoteIntRes_EXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N);
+ SDValue PromoteIntRes_EXPERIMENTAL_NONALIAS_LANE_MASK(SDNode *N);
// Integer Operand Promotion.
bool PromoteIntegerOperand(SDNode *N, unsigned OpNo);
@@ -435,7 +435,8 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
SDValue PromoteIntOp_VECTOR_FIND_LAST_ACTIVE(SDNode *N, unsigned OpNo);
SDValue PromoteIntOp_GET_ACTIVE_LANE_MASK(SDNode *N);
SDValue PromoteIntOp_PARTIAL_REDUCE_MLA(SDNode *N);
- SDValue PromoteIntOp_EXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N, unsigned OpNo);
+ SDValue PromoteIntOp_EXPERIMENTAL_NONALIAS_LANE_MASK(SDNode *N,
+ unsigned OpNo);
void SExtOrZExtPromotedOperands(SDValue &LHS, SDValue &RHS);
void PromoteSetCCOperands(SDValue &LHS,SDValue &RHS, ISD::CondCode Code);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 74963622ffbf1..678d1cc74456b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -138,7 +138,7 @@ class VectorLegalizer {
SDValue ExpandVP_FNEG(SDNode *Node);
SDValue ExpandVP_FABS(SDNode *Node);
SDValue ExpandVP_FCOPYSIGN(SDNode *Node);
- SDValue ExpandEXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N);
+ SDValue ExpandEXPERIMENTAL_NONALIAS_LANE_MASK(SDNode *N);
SDValue ExpandSELECT(SDNode *Node);
std::pair<SDValue, SDValue> ExpandLoad(SDNode *N);
SDValue ExpandStore(SDNode *N);
@@ -470,7 +470,7 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
case ISD::VECTOR_COMPRESS:
case ISD::SCMP:
case ISD::UCMP:
- case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
+ case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK:
Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
break;
case ISD::SMULFIX:
@@ -1264,8 +1264,8 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
case ISD::UCMP:
Results.push_back(TLI.expandCMP(Node, DAG));
return;
- case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
- Results.push_back(ExpandEXPERIMENTAL_ALIAS_LANE_MASK(Node));
+ case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK:
+ Results.push_back(ExpandEXPERIMENTAL_NONALIAS_LANE_MASK(Node));
return;
case ISD::FADD:
@@ -1772,7 +1772,7 @@ SDValue VectorLegalizer::ExpandVP_FCOPYSIGN(SDNode *Node) {
return DAG.getNode(ISD::BITCAST, DL, VT, CopiedSign);
}
-SDValue VectorLegalizer::ExpandEXPERIMENTAL_ALIAS_LANE_MASK(SDNode *N) {
+SDValue VectorLegalizer::ExpandEXPERIMENTAL_NONALIAS_LANE_MASK(SDNode *N) {
SDLoc DL(N);
SDValue SourceValue = N->getOperand(0);
SDValue SinkValue = N->getOperand(1);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index f88c7ddb0c867..162ead1246e8c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -8244,13 +8244,13 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
visitVectorExtractLastActive(I, Intrinsic);
return;
}
- case Intrinsic::experimental_get_alias_lane_mask: {
+ case Intrinsic::experimental_get_nonalias_lane_mask: {
auto IntrinsicVT = EVT::getEVT(I.getType());
SmallVector<SDValue, 4> Ops;
for (auto &Op : I.operands())
Ops.push_back(getValue(Op));
- SDValue Mask =
- DAG.getNode(ISD::EXPERIMENTAL_ALIAS_LANE_MASK, sdl, IntrinsicVT, Ops);
+ SDValue Mask = DAG.getNode(ISD::EXPERIMENTAL_NONALIAS_LANE_MASK, sdl,
+ IntrinsicVT, Ops);
setValue(&I, Mask);
}
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index 19ea942b060a8..bb5984baa90c4 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -585,7 +585,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
return "partial_reduce_umla";
case ISD::PARTIAL_REDUCE_SMLA:
return "partial_reduce_smla";
- case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
+ case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK:
return "alias_lane_mask";
// Vector Predication
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 2ab7a85662211..f33fcb8effa01 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -839,8 +839,8 @@ void TargetLoweringBase::initActions() {
// Masked vector extracts default to expand.
setOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE, VT, Expand);
- // Aliasing lanes mask default to expand
- setOperationAction(ISD::EXPERIMENTAL_ALIAS_LANE_MASK, VT, Expand);
+ // Non-aliasing lanes mask default to expand
+ setOperationAction(ISD::EXPERIMENTAL_NONALIAS_LANE_MASK, VT, Expand);
// FP environment operations default to expand.
setOperationAction(ISD::GET_FPENV, VT, Expand);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 85019edaee5ed..2b5d871ee81f1 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1904,7 +1904,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
(Subtarget->hasSME() && Subtarget->isStreaming())) {
for (auto VT : {MVT::v2i32, MVT::v4i16, MVT::v8i8, MVT::v16i8, MVT::nxv2i1,
MVT::nxv4i1, MVT::nxv8i1, MVT::nxv16i1}) {
- setOperationAction(ISD::EXPERIMENTAL_ALIAS_LANE_MASK, VT, Custom);
+ setOperationAction(ISD::EXPERIMENTAL_NONALIAS_LANE_MASK, VT, Custom);
}
}
@@ -5093,8 +5093,9 @@ SDValue AArch64TargetLowering::LowerFSINCOS(SDValue Op,
static MVT getSVEContainerType(EVT ContentTy);
-SDValue AArch64TargetLowering::LowerALIAS_LANE_MASK(SDValue Op,
- SelectionDAG &DAG) const {
+SDValue
+AArch64TargetLowering::LowerNONALIAS_LANE_MASK(SDValue Op,
+ SelectionDAG &DAG) const {
SDLoc DL(Op);
unsigned IntrinsicID = 0;
uint64_t EltSize = Op.getConstantOperandVal(2);
@@ -6405,7 +6406,7 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
return DAG.getNode(AArch64ISD::USDOT, dl, Op.getValueType(),
Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
}
- case Intrinsic::experimental_get_alias_lane_mask: {
+ case Intrinsic::experimental_get_nonalias_lane_mask: {
unsigned IntrinsicID = 0;
uint64_t EltSize = Op.getOperand(3)->getAsZExtVal();
bool IsWriteAfterRead = Op.getOperand(4)->getAsZExtVal() == 1;
@@ -7314,8 +7315,8 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
default:
llvm_unreachable("unimplemented operand");
return SDValue();
- case ISD::EXPERIMENTAL_ALIAS_LANE_MASK:
- return LowerALIAS_LANE_MASK(Op, DAG);
+ case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK:
+ return LowerNONALIAS_LANE_MASK(Op, DAG);
case ISD::BITCAST:
return LowerBITCAST(Op, DAG);
case ISD::GlobalAddress:
@@ -27617,7 +27618,7 @@ void AArch64TargetLowering::ReplaceNodeResults(
// CONCAT_VECTORS -- but delegate to common code for result type
// legalisation
return;
- case ISD::EXPERIMENTAL_ALIAS_LANE_MASK: {
+ case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK: {
EVT VT = N->getValueType(0);
if (!VT.isFixedLengthVector() || VT.getVectorElementType() != MVT::i1)
return;
@@ -27629,7 +27630,7 @@ void AArch64TargetLowering::ReplaceNodeResults(
SDLoc DL(N);
auto V =
- DAG.getNode(ISD::EXPERIMENTAL_ALIAS_LANE_MASK, DL, NewVT, N->ops());
+ DAG.getNode(ISD::EXPERIMENTAL_NONALIAS_LANE_MASK, DL, NewVT, N->ops());
Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
return;
}
@@ -27689,7 +27690,7 @@ void AArch64TargetLowering::ReplaceNodeResults(
return;
}
case Intrinsic::experimental_vector_match:
- case Intrinsic::experimental_get_alias_lane_mask: {
+ case Intrinsic::experimental_get_nonalias_lane_mask: {
if (!VT.isFixedLengthVector() || VT.getVectorElementType() != MVT::i1)
return;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index bf43e98f8b706..e9057ad7ac0bd 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -710,7 +710,7 @@ class AArch64TargetLowering : public TargetLowering {
SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerALIAS_LANE_MASK(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerNONALIAS_LANE_MASK(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVSCALE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/test/CodeGen/AArch64/alias_mask.ll b/llvm/test/CodeGen/AArch64/alias_mask.ll
index f88baeece0356..5ef6b588fe767 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask.ll
@@ -53,7 +53,7 @@ define <16 x i1> @whilewr_8(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 1)
+ %0 = call <16 x i1> @llvm.experimental.get.nonalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 1)
ret <16 x i1> %0
}
@@ -95,7 +95,7 @@ define <8 x i1> @whilewr_16(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 1)
+ %0 = call <8 x i1> @llvm.experimental.get.nonalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 1)
ret <8 x i1> %0
}
@@ -129,7 +129,7 @@ define <4 x i1> @whilewr_32(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 1)
+ %0 = call <4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 1)
ret <4 x i1> %0
}
@@ -159,7 +159,7 @@ define <2 x i1> @whilewr_64(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 1)
+ %0 = call <2 x i1> @llvm.experimental.get.nonalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 1)
ret <2 x i1> %0
}
@@ -215,7 +215,7 @@ define <16 x i1> @whilerw_8(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 0)
+ %0 = call <16 x i1> @llvm.experimental.get.nonalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 0)
ret <16 x i1> %0
}
@@ -258,7 +258,7 @@ define <8 x i1> @whilerw_16(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 0)
+ %0 = call <8 x i1> @llvm.experimental.get.nonalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 0)
ret <8 x i1> %0
}
@@ -293,7 +293,7 @@ define <4 x i1> @whilerw_32(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 0)
+ %0 = call <4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 0)
ret <4 x i1> %0
}
@@ -324,6 +324,6 @@ define <2 x i1> @whilerw_64(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 0)
+ %0 = call <2 x i1> @llvm.experimental.get.nonalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 0)
ret <2 x i1> %0
}
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
index 3d0f293b4687a..6884f14d685b5 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
@@ -60,7 +60,7 @@ define <vscale x 16 x i1> @whilewr_8(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 1)
+ %0 = call <vscale x 16 x i1> @llvm.experimental.get.nonalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 1)
ret <vscale x 16 x i1> %0
}
@@ -98,7 +98,7 @@ define <vscale x 8 x i1> @whilewr_16(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 1)
+ %0 = call <vscale x 8 x i1> @llvm.experimental.get.nonalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 1)
ret <vscale x 8 x i1> %0
}
@@ -130,7 +130,7 @@ define <vscale x 4 x i1> @whilewr_32(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 1)
+ %0 = call <vscale x 4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 1)
ret <vscale x 4 x i1> %0
}
@@ -158,7 +158,7 @@ define <vscale x 2 x i1> @whilewr_64(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 1)
+ %0 = call <vscale x 2 x i1> @llvm.experimental.get.nonalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 1)
ret <vscale x 2 x i1> %0
}
@@ -223,7 +223,7 @@ define <vscale x 16 x i1> @whilerw_8(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 16 x i1> @llvm.experimental.get.alias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 0)
+ %0 = call <vscale x 16 x i1> @llvm.experimental.get.nonalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 0)
ret <vscale x 16 x i1> %0
}
@@ -262,7 +262,7 @@ define <vscale x 8 x i1> @whilerw_16(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 8 x i1> @llvm.experimental.get.alias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 0)
+ %0 = call <vscale x 8 x i1> @llvm.experimental.get.nonalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 0)
ret <vscale x 8 x i1> %0
}
@@ -295,7 +295,7 @@ define <vscale x 4 x i1> @whilerw_32(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 4 x i1> @llvm.experimental.get.alias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 0)
+ %0 = call <vscale x 4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 0)
ret <vscale x 4 x i1> %0
}
@@ -324,6 +324,6 @@ define <vscale x 2 x i1> @whilerw_64(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 2 x i1> @llvm.experimental.get.alias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 0)
+ %0 = call <vscale x 2 x i1> @llvm.experimental.get.nonalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 0)
ret <vscale x 2 x i1> %0
}
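To illustrate the renamed intrinsic end-to-end, here is a minimal sketch of a single vector iteration with a write-after-read hazard, assuming VF = 4 and 4-byte elements. The function name and the opaque-pointer mangling of the masked load/store intrinsics are illustrative assumptions, not taken from the patch:

  ; Sketch: disable any lanes of %src/%dst that would overlap within one
  ; iteration, then predicate the load and store with that mask.
  define void @copy_one_iteration(ptr %src, ptr %dst) {
  entry:
    %mask = call <4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %src, ptr %dst, i64 4, i1 1)
    %vec = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %src, i32 4, <4 x i1> %mask, <4 x i32> poison)
    call void @llvm.masked.store.v4i32.p0(<4 x i32> %vec, ptr %dst, i32 4, <4 x i1> %mask)
    ret void
  }

  declare <4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr, ptr, i64 immarg, i1 immarg)
  declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
  declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>)
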
>From 561f2d3bcf05b9edad2ae5fced7b986dc3592278 Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Thu, 30 Jan 2025 16:15:26 +0000
Subject: [PATCH 14/25] Fix pointer types in example
---
llvm/docs/LangRef.rst | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index c70c3d1eccd14..cbb1e1909e98c 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -24052,9 +24052,9 @@ Examples:
.. code-block:: llvm
%nonalias.lane.mask = call <4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4, i1 1)
- %vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptrA, i32 4, <4 x i1> %nonalias.lane.mask, <4 x i32> poison)
+ %vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(ptr %ptrA, i32 4, <4 x i1> %nonalias.lane.mask, <4 x i32> poison)
[...]
- call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, <4 x i32>* %ptrB, i32 4, <4 x i1> %nonalias.lane.mask)
+ call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, ptr %ptrB, i32 4, <4 x i1> %nonalias.lane.mask)
.. _int_experimental_vp_splice:
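The ``%diff``-based semantics quoted in the LangRef hunks above can also be written out directly. Below is a hedged, scalarised sketch of the write-after-read mask for VF = 4 and a 4-byte element size, following ``%m[i] = (icmp ult i, %diff) || (%diff <= 0)``; the function and value names are illustrative, and splats are built with the usual insertelement/shufflevector idiom:

  define <4 x i1> @war_mask_expanded(ptr %ptrA, ptr %ptrB) {
  entry:
    ; %diff = (%ptrB - %ptrA) / elementSize, as a signed quantity
    %a = ptrtoint ptr %ptrA to i64
    %b = ptrtoint ptr %ptrB to i64
    %sub = sub i64 %b, %a
    %diff = sdiv i64 %sub, 4
    ; lane i is active when i < %diff (unsigned comparison) ...
    %diff.ins = insertelement <4 x i64> poison, i64 %diff, i64 0
    %diff.splat = shufflevector <4 x i64> %diff.ins, <4 x i64> poison, <4 x i32> zeroinitializer
    %lane.lt = icmp ult <4 x i64> <i64 0, i64 1, i64 2, i64 3>, %diff.splat
    ; ... or when %diff <= 0, i.e. there is no forward overlap at all
    %nonpos = icmp sle i64 %diff, 0
    %nonpos.ins = insertelement <4 x i1> poison, i1 %nonpos, i64 0
    %nonpos.splat = shufflevector <4 x i1> %nonpos.ins, <4 x i1> poison, <4 x i32> zeroinitializer
    %m = or <4 x i1> %lane.lt, %nonpos.splat
    ret <4 x i1> %m
  }
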
>From e3d6ce7ec5234d24b2b7a389e1d1396949311a72 Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Thu, 30 Jan 2025 16:15:35 +0000
Subject: [PATCH 15/25] Remove shouldExpandGetAliasLaneMask
---
llvm/include/llvm/CodeGen/TargetLowering.h | 7 -------
1 file changed, 7 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 622876c81505f..9c453f51e129d 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -482,13 +482,6 @@ class LLVM_ABI TargetLoweringBase {
return true;
}
- /// Return true if the @llvm.experimental.get.alias.lane.mask intrinsic should
- /// be expanded using generic code in SelectionDAGBuilder.
- virtual bool shouldExpandGetAliasLaneMask(EVT VT, EVT PtrVT,
- unsigned EltSize) const {
- return true;
- }
-
virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF,
bool IsScalable) const {
return true;
>From 22687ffb79a4a0eb518d2563e956d66a66d3eea6 Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Thu, 30 Jan 2025 16:25:35 +0000
Subject: [PATCH 16/25] Lower to ISD node rather than intrinsic
---
.../Target/AArch64/AArch64ISelLowering.cpp | 19 +++++--------------
1 file changed, 5 insertions(+), 14 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 2b5d871ee81f1..c7a26792c2834 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5097,40 +5097,33 @@ SDValue
AArch64TargetLowering::LowerNONALIAS_LANE_MASK(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
- unsigned IntrinsicID = 0;
uint64_t EltSize = Op.getConstantOperandVal(2);
bool IsWriteAfterRead = Op.getConstantOperandVal(3) == 1;
+ unsigned Opcode =
+ IsWriteAfterRead ? AArch64ISD::WHILEWR : AArch64ISD::WHILERW;
EVT VT = Op.getValueType();
MVT SimpleVT = VT.getSimpleVT();
// Make sure that the promoted mask size and element size match
switch (EltSize) {
case 1:
- IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_b
- : Intrinsic::aarch64_sve_whilerw_b;
assert((SimpleVT == MVT::v16i8 || SimpleVT == MVT::nxv16i1) &&
"Unexpected mask or element size");
IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_b
: Intrinsic::aarch64_sve_whilerw_b;
break;
case 2:
- IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_h
- : Intrinsic::aarch64_sve_whilerw_h;
assert((SimpleVT == MVT::v8i8 || SimpleVT == MVT::nxv8i1) &&
"Unexpected mask or element size");
IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_h
: Intrinsic::aarch64_sve_whilerw_h;
break;
case 4:
- IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_s
- : Intrinsic::aarch64_sve_whilerw_s;
assert((SimpleVT == MVT::v4i16 || SimpleVT == MVT::nxv4i1) &&
"Unexpected mask or element size");
IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_s
: Intrinsic::aarch64_sve_whilerw_s;
break;
case 8:
- IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_d
- : Intrinsic::aarch64_sve_whilerw_d;
assert((SimpleVT == MVT::v2i32 || SimpleVT == MVT::nxv2i1) &&
"Unexpected mask or element size");
IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_d
@@ -5140,11 +5133,9 @@ AArch64TargetLowering::LowerNONALIAS_LANE_MASK(SDValue Op,
llvm_unreachable("Unexpected element size for get.alias.lane.mask");
break;
}
- SDValue ID = DAG.getTargetConstant(IntrinsicID, DL, MVT::i64);
if (VT.isScalableVector())
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, ID, Op.getOperand(0),
- Op.getOperand(1));
+ return DAG.getNode(Opcode, DL, VT, Op.getOperand(0), Op.getOperand(1));
// We can use the SVE whilewr/whilerw instruction to lower this
// intrinsic by creating the appropriate sequence of scalable vector
@@ -5154,8 +5145,8 @@ AArch64TargetLowering::LowerNONALIAS_LANE_MASK(SDValue Op,
EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
EVT WhileVT = ContainerVT.changeElementType(MVT::i1);
- SDValue Mask = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, WhileVT, ID,
- Op.getOperand(0), Op.getOperand(1));
+ SDValue Mask =
+ DAG.getNode(Opcode, DL, WhileVT, Op.getOperand(0), Op.getOperand(1));
SDValue MaskAsInt = DAG.getNode(ISD::SIGN_EXTEND, DL, ContainerVT, Mask);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, MaskAsInt,
DAG.getVectorIdxConstant(0, DL));
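With the lowering above emitting the WHILEWR/WHILERW nodes directly, the scalable forms should select straight to the SVE instructions. A hedged sketch of the expected selection for the byte-element, write-after-read case follows; the CHECK-style comment is an expectation under these assumptions rather than output copied from the tests, and the ``.nxv16i1`` suffix follows the standard mangling for a scalable result type:

  define <vscale x 16 x i1> @whilewr_direct(ptr %a, ptr %b) {
  ; Expected selection (assumption): whilewr p0.b, x0, x1
  entry:
    %m = call <vscale x 16 x i1> @llvm.experimental.get.nonalias.lane.mask.nxv16i1(ptr %a, ptr %b, i64 1, i1 1)
    ret <vscale x 16 x i1> %m
  }
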
>From 836c34cf902c59578d395d0a99e4a0107b884aaf Mon Sep 17 00:00:00 2001
From: Sam Tebbs <samuel.tebbs at arm.com>
Date: Fri, 31 Jan 2025 14:24:11 +0000
Subject: [PATCH 17/25] Rename to noalias
---
llvm/docs/LangRef.rst | 28 +++++++++----------
llvm/include/llvm/CodeGen/ISDOpcodes.h | 2 +-
llvm/include/llvm/IR/Intrinsics.td | 2 +-
.../SelectionDAG/LegalizeIntegerTypes.cpp | 16 +++++------
llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h | 5 ++--
.../SelectionDAG/LegalizeVectorOps.cpp | 10 +++----
.../SelectionDAG/SelectionDAGBuilder.cpp | 6 ++--
.../SelectionDAG/SelectionDAGDumper.cpp | 2 +-
llvm/lib/CodeGen/TargetLoweringBase.cpp | 2 +-
.../Target/AArch64/AArch64ISelLowering.cpp | 25 ++++++-----------
llvm/lib/Target/AArch64/AArch64ISelLowering.h | 2 +-
llvm/test/CodeGen/AArch64/alias_mask.ll | 16 +++++------
.../CodeGen/AArch64/alias_mask_scalable.ll | 16 +++++------
13 files changed, 61 insertions(+), 71 deletions(-)
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index cbb1e1909e98c..91f1824754ac6 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -23969,10 +23969,10 @@ Examples:
%active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 %elem0, i64 429)
%wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %3, i32 4, <4 x i1> %active.lane.mask, <4 x i32> poison)
-.. _int_experimental_get_nonalias_lane_mask:
+.. _int_experimental_get_noalias_lane_mask:
-'``llvm.experimental.get.nonalias.lane.mask.*``' Intrinsics
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+'``llvm.experimental.get.noalias.lane.mask.*``' Intrinsics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
@@ -23980,10 +23980,10 @@ This is an overloaded intrinsic.
::
- declare <4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
- declare <8 x i1> @llvm.experimental.get.nonalias.lane.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
- declare <16 x i1> @llvm.experimental.get.nonalias.lane.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
- declare <vscale x 16 x i1> @llvm.experimental.get.nonalias.lane.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+ declare <4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+ declare <8 x i1> @llvm.experimental.get.noalias.lane.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+ declare <16 x i1> @llvm.experimental.get.noalias.lane.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+ declare <vscale x 16 x i1> @llvm.experimental.get.noalias.lane.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
Overview:
@@ -24006,7 +24006,7 @@ Semantics:
The intrinsic will return poison if ``%ptrA`` and ``%ptrB`` are within
VF * ``%elementSize`` of each other and ``%ptrA`` + VF * ``%elementSize`` wraps.
In other cases when ``%writeAfterRead`` is true, the
-'``llvm.experimental.get.nonalias.lane.mask.*``' intrinsics are semantically
+'``llvm.experimental.get.noalias.lane.mask.*``' intrinsics are semantically
equivalent to:
::
@@ -24015,7 +24015,7 @@ equivalent to:
%m[i] = (icmp ult i, %diff) || (%diff <= 0)
When the return value is not poison and ``%writeAfterRead`` is false, the
-'``llvm.experimental.get.nonalias.lane.mask.*``' intrinsics are semantically
+'``llvm.experimental.get.noalias.lane.mask.*``' intrinsics are semantically
equivalent to:
::
@@ -24025,7 +24025,7 @@ equivalent to:
where ``%m`` is a vector (mask) of active/inactive lanes with its elements
indexed by ``i`` (i = 0 to VF - 1), and ``%ptrA``, ``%ptrB`` are the two ptr
-arguments to ``llvm.experimental.get.nonalias.lane.mask.*`` and ``%elementSize``
+arguments to ``llvm.experimental.get.noalias.lane.mask.*`` and ``%elementSize``
is the first immediate argument. The ``%writeAfterRead`` argument is expected
to be true if ``%ptrB`` is stored to after ``%ptrA`` is read from, otherwise
it is false for a read after write.
@@ -24033,7 +24033,7 @@ The above is equivalent to:
::
- %m = @llvm.experimental.get.nonalias.lane.mask(%ptrA, %ptrB, %elementSize, %writeAfterRead)
+ %m = @llvm.experimental.get.noalias.lane.mask(%ptrA, %ptrB, %elementSize, %writeAfterRead)
This can, for example, be emitted by the loop vectorizer in which case
``%ptrA`` is a pointer that is read from within the loop, and ``%ptrB`` is a
@@ -24051,10 +24051,10 @@ Examples:
.. code-block:: llvm
- %nonalias.lane.mask = call <4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4, i1 1)
- %vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(ptr %ptrA, i32 4, <4 x i1> %nonalias.lane.mask, <4 x i32> poison)
+ %noalias.lane.mask = call <4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4, i1 1)
+ %vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(ptr %ptrA, i32 4, <4 x i1> %noalias.lane.mask, <4 x i32> poison)
[...]
- call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, ptr %ptrB, i32 4, <4 x i1> %nonalias.lane.mask)
+ call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, ptr %ptrB, i32 4, <4 x i1> %noalias.lane.mask)
.. _int_experimental_vp_splice:
diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index cd60ce40ec090..e76c5368f3b92 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -1559,7 +1559,7 @@ enum NodeType {
// The `llvm.experimental.get.alias.lane.mask.*` intrinsics
// Operands: Load pointer, Store pointer, Element size, Write after read
// Output: Mask
- EXPERIMENTAL_NONALIAS_LANE_MASK,
+ EXPERIMENTAL_NOALIAS_LANE_MASK,
// llvm.clear_cache intrinsic
// Operands: Input Chain, Start Address, End Address
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 0ab3bffdaf87b..99eceb14b5103 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -2399,7 +2399,7 @@ let IntrProperties = [IntrNoMem, ImmArg<ArgIndex<1>>] in {
llvm_i32_ty]>;
}
-def int_experimental_get_nonalias_lane_mask:
+def int_experimental_get_noalias_lane_mask:
DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[llvm_ptr_ty, llvm_ptr_ty, llvm_i64_ty, llvm_i1_ty],
[IntrNoMem, IntrNoSync, IntrWillReturn, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index e3664afd68dac..1d4a545042d93 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -322,8 +322,8 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
Res = PromoteIntRes_VP_REDUCE(N);
break;
- case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK:
- Res = PromoteIntRes_EXPERIMENTAL_NONALIAS_LANE_MASK(N);
+ case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK:
+ Res = PromoteIntRes_EXPERIMENTAL_NOALIAS_LANE_MASK(N);
break;
case ISD::FREEZE:
@@ -374,10 +374,10 @@ SDValue DAGTypeLegalizer::PromoteIntRes_MERGE_VALUES(SDNode *N,
}
SDValue
-DAGTypeLegalizer::PromoteIntRes_EXPERIMENTAL_NONALIAS_LANE_MASK(SDNode *N) {
+DAGTypeLegalizer::PromoteIntRes_EXPERIMENTAL_NOALIAS_LANE_MASK(SDNode *N) {
EVT VT = N->getValueType(0);
EVT NewVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
- return DAG.getNode(ISD::EXPERIMENTAL_NONALIAS_LANE_MASK, SDLoc(N), NewVT,
+ return DAG.getNode(ISD::EXPERIMENTAL_NOALIAS_LANE_MASK, SDLoc(N), NewVT,
N->ops());
}
@@ -2107,8 +2107,8 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
case ISD::PARTIAL_REDUCE_SMLA:
Res = PromoteIntOp_PARTIAL_REDUCE_MLA(N);
break;
- case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK:
- Res = PromoteIntOp_EXPERIMENTAL_NONALIAS_LANE_MASK(N, OpNo);
+ case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK:
+ Res = PromoteIntOp_EXPERIMENTAL_NOALIAS_LANE_MASK(N, OpNo);
break;
}
@@ -2912,8 +2912,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_PARTIAL_REDUCE_MLA(SDNode *N) {
}
SDValue
-DAGTypeLegalizer::PromoteIntOp_EXPERIMENTAL_NONALIAS_LANE_MASK(SDNode *N,
- unsigned OpNo) {
+DAGTypeLegalizer::PromoteIntOp_EXPERIMENTAL_NOALIAS_LANE_MASK(SDNode *N,
+ unsigned OpNo) {
SmallVector<SDValue, 4> NewOps(N->ops());
NewOps[OpNo] = GetPromotedInteger(N->getOperand(OpNo));
return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index 2811478f99b3d..a3896eca6c5e2 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -381,7 +381,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
SDValue PromoteIntRes_VECTOR_FIND_LAST_ACTIVE(SDNode *N);
SDValue PromoteIntRes_GET_ACTIVE_LANE_MASK(SDNode *N);
SDValue PromoteIntRes_PARTIAL_REDUCE_MLA(SDNode *N);
- SDValue PromoteIntRes_EXPERIMENTAL_NONALIAS_LANE_MASK(SDNode *N);
+ SDValue PromoteIntRes_EXPERIMENTAL_NOALIAS_LANE_MASK(SDNode *N);
// Integer Operand Promotion.
bool PromoteIntegerOperand(SDNode *N, unsigned OpNo);
@@ -435,8 +435,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
SDValue PromoteIntOp_VECTOR_FIND_LAST_ACTIVE(SDNode *N, unsigned OpNo);
SDValue PromoteIntOp_GET_ACTIVE_LANE_MASK(SDNode *N);
SDValue PromoteIntOp_PARTIAL_REDUCE_MLA(SDNode *N);
- SDValue PromoteIntOp_EXPERIMENTAL_NONALIAS_LANE_MASK(SDNode *N,
- unsigned OpNo);
+ SDValue PromoteIntOp_EXPERIMENTAL_NOALIAS_LANE_MASK(SDNode *N, unsigned OpNo);
void SExtOrZExtPromotedOperands(SDValue &LHS, SDValue &RHS);
void PromoteSetCCOperands(SDValue &LHS,SDValue &RHS, ISD::CondCode Code);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 678d1cc74456b..0d938d9b141ba 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -138,7 +138,7 @@ class VectorLegalizer {
SDValue ExpandVP_FNEG(SDNode *Node);
SDValue ExpandVP_FABS(SDNode *Node);
SDValue ExpandVP_FCOPYSIGN(SDNode *Node);
- SDValue ExpandEXPERIMENTAL_NONALIAS_LANE_MASK(SDNode *N);
+ SDValue ExpandEXPERIMENTAL_NOALIAS_LANE_MASK(SDNode *N);
SDValue ExpandSELECT(SDNode *Node);
std::pair<SDValue, SDValue> ExpandLoad(SDNode *N);
SDValue ExpandStore(SDNode *N);
@@ -470,7 +470,7 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
case ISD::VECTOR_COMPRESS:
case ISD::SCMP:
case ISD::UCMP:
- case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK:
+ case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK:
Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
break;
case ISD::SMULFIX:
@@ -1264,8 +1264,8 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
case ISD::UCMP:
Results.push_back(TLI.expandCMP(Node, DAG));
return;
- case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK:
- Results.push_back(ExpandEXPERIMENTAL_NONALIAS_LANE_MASK(Node));
+ case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK:
+ Results.push_back(ExpandEXPERIMENTAL_NOALIAS_LANE_MASK(Node));
return;
case ISD::FADD:
@@ -1772,7 +1772,7 @@ SDValue VectorLegalizer::ExpandVP_FCOPYSIGN(SDNode *Node) {
return DAG.getNode(ISD::BITCAST, DL, VT, CopiedSign);
}
-SDValue VectorLegalizer::ExpandEXPERIMENTAL_NONALIAS_LANE_MASK(SDNode *N) {
+SDValue VectorLegalizer::ExpandEXPERIMENTAL_NOALIAS_LANE_MASK(SDNode *N) {
SDLoc DL(N);
SDValue SourceValue = N->getOperand(0);
SDValue SinkValue = N->getOperand(1);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 162ead1246e8c..0b98e84922ebf 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -8244,13 +8244,13 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
visitVectorExtractLastActive(I, Intrinsic);
return;
}
- case Intrinsic::experimental_get_nonalias_lane_mask: {
+ case Intrinsic::experimental_get_noalias_lane_mask: {
auto IntrinsicVT = EVT::getEVT(I.getType());
SmallVector<SDValue, 4> Ops;
for (auto &Op : I.operands())
Ops.push_back(getValue(Op));
- SDValue Mask = DAG.getNode(ISD::EXPERIMENTAL_NONALIAS_LANE_MASK, sdl,
- IntrinsicVT, Ops);
+ SDValue Mask =
+ DAG.getNode(ISD::EXPERIMENTAL_NOALIAS_LANE_MASK, sdl, IntrinsicVT, Ops);
setValue(&I, Mask);
}
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index bb5984baa90c4..ee454b4600171 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -585,7 +585,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
return "partial_reduce_umla";
case ISD::PARTIAL_REDUCE_SMLA:
return "partial_reduce_smla";
- case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK:
+ case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK:
return "alias_lane_mask";
// Vector Predication
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index f33fcb8effa01..6ff7fec7ae7b0 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -840,7 +840,7 @@ void TargetLoweringBase::initActions() {
setOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE, VT, Expand);
// Non-aliasing lanes mask default to expand
- setOperationAction(ISD::EXPERIMENTAL_NONALIAS_LANE_MASK, VT, Expand);
+ setOperationAction(ISD::EXPERIMENTAL_NOALIAS_LANE_MASK, VT, Expand);
// FP environment operations default to expand.
setOperationAction(ISD::GET_FPENV, VT, Expand);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index c7a26792c2834..132cfb209dff7 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1904,7 +1904,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
(Subtarget->hasSME() && Subtarget->isStreaming())) {
for (auto VT : {MVT::v2i32, MVT::v4i16, MVT::v8i8, MVT::v16i8, MVT::nxv2i1,
MVT::nxv4i1, MVT::nxv8i1, MVT::nxv16i1}) {
- setOperationAction(ISD::EXPERIMENTAL_NONALIAS_LANE_MASK, VT, Custom);
+ setOperationAction(ISD::EXPERIMENTAL_NOALIAS_LANE_MASK, VT, Custom);
}
}
@@ -5093,9 +5093,8 @@ SDValue AArch64TargetLowering::LowerFSINCOS(SDValue Op,
static MVT getSVEContainerType(EVT ContentTy);
-SDValue
-AArch64TargetLowering::LowerNONALIAS_LANE_MASK(SDValue Op,
- SelectionDAG &DAG) const {
+SDValue AArch64TargetLowering::LowerNOALIAS_LANE_MASK(SDValue Op,
+ SelectionDAG &DAG) const {
SDLoc DL(Op);
uint64_t EltSize = Op.getConstantOperandVal(2);
bool IsWriteAfterRead = Op.getConstantOperandVal(3) == 1;
@@ -5108,26 +5107,18 @@ AArch64TargetLowering::LowerNONALIAS_LANE_MASK(SDValue Op,
case 1:
assert((SimpleVT == MVT::v16i8 || SimpleVT == MVT::nxv16i1) &&
"Unexpected mask or element size");
- IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_b
- : Intrinsic::aarch64_sve_whilerw_b;
break;
case 2:
assert((SimpleVT == MVT::v8i8 || SimpleVT == MVT::nxv8i1) &&
"Unexpected mask or element size");
- IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_h
- : Intrinsic::aarch64_sve_whilerw_h;
break;
case 4:
assert((SimpleVT == MVT::v4i16 || SimpleVT == MVT::nxv4i1) &&
"Unexpected mask or element size");
- IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_s
- : Intrinsic::aarch64_sve_whilerw_s;
break;
case 8:
assert((SimpleVT == MVT::v2i32 || SimpleVT == MVT::nxv2i1) &&
"Unexpected mask or element size");
- IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_d
- : Intrinsic::aarch64_sve_whilerw_d;
break;
default:
llvm_unreachable("Unexpected element size for get.alias.lane.mask");
@@ -7306,8 +7297,8 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
default:
llvm_unreachable("unimplemented operand");
return SDValue();
- case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK:
- return LowerNONALIAS_LANE_MASK(Op, DAG);
+ case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK:
+ return LowerNOALIAS_LANE_MASK(Op, DAG);
case ISD::BITCAST:
return LowerBITCAST(Op, DAG);
case ISD::GlobalAddress:
@@ -27609,7 +27600,7 @@ void AArch64TargetLowering::ReplaceNodeResults(
// CONCAT_VECTORS -- but delegate to common code for result type
// legalisation
return;
- case ISD::EXPERIMENTAL_NONALIAS_LANE_MASK: {
+ case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK: {
EVT VT = N->getValueType(0);
if (!VT.isFixedLengthVector() || VT.getVectorElementType() != MVT::i1)
return;
@@ -27621,7 +27612,7 @@ void AArch64TargetLowering::ReplaceNodeResults(
SDLoc DL(N);
auto V =
- DAG.getNode(ISD::EXPERIMENTAL_NONALIAS_LANE_MASK, DL, NewVT, N->ops());
+ DAG.getNode(ISD::EXPERIMENTAL_NOALIAS_LANE_MASK, DL, NewVT, N->ops());
Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
return;
}
@@ -27681,7 +27672,7 @@ void AArch64TargetLowering::ReplaceNodeResults(
return;
}
case Intrinsic::experimental_vector_match:
- case Intrinsic::experimental_get_nonalias_lane_mask: {
+ case Intrinsic::experimental_get_noalias_lane_mask: {
if (!VT.isFixedLengthVector() || VT.getVectorElementType() != MVT::i1)
return;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index e9057ad7ac0bd..6550cdf95cd11 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -710,7 +710,7 @@ class AArch64TargetLowering : public TargetLowering {
SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerNONALIAS_LANE_MASK(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerNOALIAS_LANE_MASK(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVSCALE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/test/CodeGen/AArch64/alias_mask.ll b/llvm/test/CodeGen/AArch64/alias_mask.ll
index 5ef6b588fe767..21eff3b11c001 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask.ll
@@ -53,7 +53,7 @@ define <16 x i1> @whilewr_8(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <16 x i1> @llvm.experimental.get.nonalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 1)
+ %0 = call <16 x i1> @llvm.experimental.get.noalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 1)
ret <16 x i1> %0
}
@@ -95,7 +95,7 @@ define <8 x i1> @whilewr_16(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <8 x i1> @llvm.experimental.get.nonalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 1)
+ %0 = call <8 x i1> @llvm.experimental.get.noalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 1)
ret <8 x i1> %0
}
@@ -129,7 +129,7 @@ define <4 x i1> @whilewr_32(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 1)
+ %0 = call <4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 1)
ret <4 x i1> %0
}
@@ -159,7 +159,7 @@ define <2 x i1> @whilewr_64(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <2 x i1> @llvm.experimental.get.nonalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 1)
+ %0 = call <2 x i1> @llvm.experimental.get.noalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 1)
ret <2 x i1> %0
}
@@ -215,7 +215,7 @@ define <16 x i1> @whilerw_8(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <16 x i1> @llvm.experimental.get.nonalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 0)
+ %0 = call <16 x i1> @llvm.experimental.get.noalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 0)
ret <16 x i1> %0
}
@@ -258,7 +258,7 @@ define <8 x i1> @whilerw_16(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <8 x i1> @llvm.experimental.get.nonalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 0)
+ %0 = call <8 x i1> @llvm.experimental.get.noalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 0)
ret <8 x i1> %0
}
@@ -293,7 +293,7 @@ define <4 x i1> @whilerw_32(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 0)
+ %0 = call <4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 0)
ret <4 x i1> %0
}
@@ -324,6 +324,6 @@ define <2 x i1> @whilerw_64(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <2 x i1> @llvm.experimental.get.nonalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 0)
+ %0 = call <2 x i1> @llvm.experimental.get.noalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 0)
ret <2 x i1> %0
}
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
index 6884f14d685b5..b29619c7f397d 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
@@ -60,7 +60,7 @@ define <vscale x 16 x i1> @whilewr_8(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 16 x i1> @llvm.experimental.get.nonalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 1)
+ %0 = call <vscale x 16 x i1> @llvm.experimental.get.noalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 1)
ret <vscale x 16 x i1> %0
}
@@ -98,7 +98,7 @@ define <vscale x 8 x i1> @whilewr_16(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 8 x i1> @llvm.experimental.get.nonalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 1)
+ %0 = call <vscale x 8 x i1> @llvm.experimental.get.noalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 1)
ret <vscale x 8 x i1> %0
}
@@ -130,7 +130,7 @@ define <vscale x 4 x i1> @whilewr_32(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 1)
+ %0 = call <vscale x 4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 1)
ret <vscale x 4 x i1> %0
}
@@ -158,7 +158,7 @@ define <vscale x 2 x i1> @whilewr_64(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 2 x i1> @llvm.experimental.get.nonalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 1)
+ %0 = call <vscale x 2 x i1> @llvm.experimental.get.noalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 1)
ret <vscale x 2 x i1> %0
}
@@ -223,7 +223,7 @@ define <vscale x 16 x i1> @whilerw_8(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 16 x i1> @llvm.experimental.get.nonalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 0)
+ %0 = call <vscale x 16 x i1> @llvm.experimental.get.noalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 0)
ret <vscale x 16 x i1> %0
}
@@ -262,7 +262,7 @@ define <vscale x 8 x i1> @whilerw_16(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 8 x i1> @llvm.experimental.get.nonalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 0)
+ %0 = call <vscale x 8 x i1> @llvm.experimental.get.noalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 0)
ret <vscale x 8 x i1> %0
}
@@ -295,7 +295,7 @@ define <vscale x 4 x i1> @whilerw_32(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 4 x i1> @llvm.experimental.get.nonalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 0)
+ %0 = call <vscale x 4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 0)
ret <vscale x 4 x i1> %0
}
@@ -324,6 +324,6 @@ define <vscale x 2 x i1> @whilerw_64(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 2 x i1> @llvm.experimental.get.nonalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 0)
+ %0 = call <vscale x 2 x i1> @llvm.experimental.get.noalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 0)
ret <vscale x 2 x i1> %0
}
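For reference, under the ``noalias`` naming the two hazard directions are still distinguished only by the final ``i1`` immediate; the next patch in the series splits this flag into separate ``war``/``raw`` intrinsics. A small sketch, with illustrative names, showing both flavours side by side:

  define void @hazard_flavours(ptr %a, ptr %b) {
  entry:
    ; write-after-read hazard: final immediate is 1 (lowers to whilewr)
    %war = call <16 x i1> @llvm.experimental.get.noalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 1)
    ; read-after-write hazard: final immediate is 0 (lowers to whilerw)
    %raw = call <16 x i1> @llvm.experimental.get.noalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 0)
    ret void
  }
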
>From e6d99090284de0d51a5822f10880b66f7814ef57 Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Wed, 26 Feb 2025 23:39:53 +0000
Subject: [PATCH 18/25] Rename to loop.dependence.raw/war.mask
---
llvm/include/llvm/CodeGen/ISDOpcodes.h | 7 ++---
llvm/include/llvm/IR/Intrinsics.td | 11 +++++---
.../SelectionDAG/LegalizeIntegerTypes.cpp | 20 +++++++-------
llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h | 5 ++--
.../SelectionDAG/LegalizeVectorOps.cpp | 15 ++++++-----
.../SelectionDAG/SelectionDAGBuilder.cpp | 9 ++++---
.../SelectionDAG/SelectionDAGDumper.cpp | 6 +++--
llvm/lib/CodeGen/TargetLoweringBase.cpp | 5 ++--
.../Target/AArch64/AArch64ISelLowering.cpp | 27 ++++++++++++-------
llvm/lib/Target/AArch64/AArch64ISelLowering.h | 2 +-
llvm/test/CodeGen/AArch64/alias_mask.ll | 16 +++++------
.../CodeGen/AArch64/alias_mask_scalable.ll | 16 +++++------
12 files changed, 81 insertions(+), 58 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index e76c5368f3b92..3b911c493f8ca 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -1556,10 +1556,11 @@ enum NodeType {
// bits conform to getBooleanContents similar to the SETCC operator.
GET_ACTIVE_LANE_MASK,
- // The `llvm.experimental.get.alias.lane.mask.*` intrinsics
- // Operands: Load pointer, Store pointer, Element size, Write after read
+ // The `llvm.experimental.loop.dependence.{war, raw}.mask` intrinsics
+ // Operands: Load pointer, Store pointer, Element size
// Output: Mask
- EXPERIMENTAL_NOALIAS_LANE_MASK,
+ EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK,
+ EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK,
// llvm.clear_cache intrinsic
// Operands: Input Chain, Start Address, End Address
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 99eceb14b5103..3061ffd10b6cb 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -2399,10 +2399,15 @@ let IntrProperties = [IntrNoMem, ImmArg<ArgIndex<1>>] in {
llvm_i32_ty]>;
}
-def int_experimental_get_noalias_lane_mask:
+def int_experimental_loop_dependence_raw_mask:
DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [llvm_ptr_ty, llvm_ptr_ty, llvm_i64_ty, llvm_i1_ty],
- [IntrNoMem, IntrNoSync, IntrWillReturn, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
+ [llvm_ptr_ty, llvm_ptr_ty, llvm_i64_ty],
+ [IntrNoMem, IntrNoSync, IntrWillReturn, ImmArg<ArgIndex<2>>]>;
+
+def int_experimental_loop_dependence_war_mask:
+ DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+ [llvm_ptr_ty, llvm_ptr_ty, llvm_i64_ty],
+ [IntrNoMem, IntrNoSync, IntrWillReturn, ImmArg<ArgIndex<2>>]>;
def int_get_active_lane_mask:
DefaultAttrsIntrinsic<[llvm_anyvector_ty],
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 1d4a545042d93..ed30f16fe4d5c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -322,8 +322,9 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
Res = PromoteIntRes_VP_REDUCE(N);
break;
- case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK:
- Res = PromoteIntRes_EXPERIMENTAL_NOALIAS_LANE_MASK(N);
+ case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
+ case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK:
+ Res = PromoteIntRes_EXPERIMENTAL_LOOP_DEPENDENCE_MASK(N);
break;
case ISD::FREEZE:
@@ -374,11 +375,10 @@ SDValue DAGTypeLegalizer::PromoteIntRes_MERGE_VALUES(SDNode *N,
}
SDValue
-DAGTypeLegalizer::PromoteIntRes_EXPERIMENTAL_NOALIAS_LANE_MASK(SDNode *N) {
+DAGTypeLegalizer::PromoteIntRes_EXPERIMENTAL_LOOP_DEPENDENCE_MASK(SDNode *N) {
EVT VT = N->getValueType(0);
EVT NewVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
- return DAG.getNode(ISD::EXPERIMENTAL_NOALIAS_LANE_MASK, SDLoc(N), NewVT,
- N->ops());
+ return DAG.getNode(N->getOpcode(), SDLoc(N), NewVT, N->ops());
}
SDValue DAGTypeLegalizer::PromoteIntRes_AssertSext(SDNode *N) {
@@ -2107,8 +2107,9 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
case ISD::PARTIAL_REDUCE_SMLA:
Res = PromoteIntOp_PARTIAL_REDUCE_MLA(N);
break;
- case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK:
- Res = PromoteIntOp_EXPERIMENTAL_NOALIAS_LANE_MASK(N, OpNo);
+ case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK:
+ case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
+ Res = PromoteIntOp_EXPERIMENTAL_LOOP_DEPENDENCE_MASK(N, OpNo);
break;
}
@@ -2911,9 +2912,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_PARTIAL_REDUCE_MLA(SDNode *N) {
return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
}
-SDValue
-DAGTypeLegalizer::PromoteIntOp_EXPERIMENTAL_NOALIAS_LANE_MASK(SDNode *N,
- unsigned OpNo) {
+SDValue DAGTypeLegalizer::PromoteIntOp_EXPERIMENTAL_LOOP_DEPENDENCE_MASK(
+ SDNode *N, unsigned OpNo) {
SmallVector<SDValue, 4> NewOps(N->ops());
NewOps[OpNo] = GetPromotedInteger(N->getOperand(OpNo));
return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index a3896eca6c5e2..b9cfce5878b78 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -381,7 +381,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
SDValue PromoteIntRes_VECTOR_FIND_LAST_ACTIVE(SDNode *N);
SDValue PromoteIntRes_GET_ACTIVE_LANE_MASK(SDNode *N);
SDValue PromoteIntRes_PARTIAL_REDUCE_MLA(SDNode *N);
- SDValue PromoteIntRes_EXPERIMENTAL_NOALIAS_LANE_MASK(SDNode *N);
+ SDValue PromoteIntRes_EXPERIMENTAL_LOOP_DEPENDENCE_MASK(SDNode *N);
// Integer Operand Promotion.
bool PromoteIntegerOperand(SDNode *N, unsigned OpNo);
@@ -435,7 +435,8 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
SDValue PromoteIntOp_VECTOR_FIND_LAST_ACTIVE(SDNode *N, unsigned OpNo);
SDValue PromoteIntOp_GET_ACTIVE_LANE_MASK(SDNode *N);
SDValue PromoteIntOp_PARTIAL_REDUCE_MLA(SDNode *N);
- SDValue PromoteIntOp_EXPERIMENTAL_NOALIAS_LANE_MASK(SDNode *N, unsigned OpNo);
+ SDValue PromoteIntOp_EXPERIMENTAL_LOOP_DEPENDENCE_MASK(SDNode *N,
+ unsigned OpNo);
void SExtOrZExtPromotedOperands(SDValue &LHS, SDValue &RHS);
void PromoteSetCCOperands(SDValue &LHS,SDValue &RHS, ISD::CondCode Code);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 0d938d9b141ba..b394fadeefa94 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -138,7 +138,7 @@ class VectorLegalizer {
SDValue ExpandVP_FNEG(SDNode *Node);
SDValue ExpandVP_FABS(SDNode *Node);
SDValue ExpandVP_FCOPYSIGN(SDNode *Node);
- SDValue ExpandEXPERIMENTAL_NOALIAS_LANE_MASK(SDNode *N);
+ SDValue ExpandLOOP_DEPENDENCE_MASK(SDNode *N);
SDValue ExpandSELECT(SDNode *Node);
std::pair<SDValue, SDValue> ExpandLoad(SDNode *N);
SDValue ExpandStore(SDNode *N);
@@ -470,7 +470,8 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
case ISD::VECTOR_COMPRESS:
case ISD::SCMP:
case ISD::UCMP:
- case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK:
+ case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
+ case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK:
Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
break;
case ISD::SMULFIX:
@@ -1264,8 +1265,9 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
case ISD::UCMP:
Results.push_back(TLI.expandCMP(Node, DAG));
return;
- case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK:
- Results.push_back(ExpandEXPERIMENTAL_NOALIAS_LANE_MASK(Node));
+ case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
+ case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK:
+ Results.push_back(ExpandLOOP_DEPENDENCE_MASK(Node));
return;
case ISD::FADD:
@@ -1772,13 +1774,14 @@ SDValue VectorLegalizer::ExpandVP_FCOPYSIGN(SDNode *Node) {
return DAG.getNode(ISD::BITCAST, DL, VT, CopiedSign);
}
-SDValue VectorLegalizer::ExpandEXPERIMENTAL_NOALIAS_LANE_MASK(SDNode *N) {
+SDValue VectorLegalizer::ExpandLOOP_DEPENDENCE_MASK(SDNode *N) {
SDLoc DL(N);
SDValue SourceValue = N->getOperand(0);
SDValue SinkValue = N->getOperand(1);
SDValue EltSize = N->getOperand(2);
- bool IsWriteAfterRead = N->getConstantOperandVal(3) != 0;
+ bool IsWriteAfterRead =
+ N->getOpcode() == ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK;
auto VT = N->getValueType(0);
auto PtrVT = SourceValue->getValueType(0);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 0b98e84922ebf..bc806c40e2911 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -8244,13 +8244,16 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
visitVectorExtractLastActive(I, Intrinsic);
return;
}
- case Intrinsic::experimental_get_noalias_lane_mask: {
+ case Intrinsic::experimental_loop_dependence_war_mask:
+ case Intrinsic::experimental_loop_dependence_raw_mask: {
auto IntrinsicVT = EVT::getEVT(I.getType());
SmallVector<SDValue, 4> Ops;
for (auto &Op : I.operands())
Ops.push_back(getValue(Op));
- SDValue Mask =
- DAG.getNode(ISD::EXPERIMENTAL_NOALIAS_LANE_MASK, sdl, IntrinsicVT, Ops);
+ unsigned ID = Intrinsic == Intrinsic::experimental_loop_dependence_war_mask
+ ? ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK
+ : ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK;
+ SDValue Mask = DAG.getNode(ID, sdl, IntrinsicVT, Ops);
setValue(&I, Mask);
}
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index ee454b4600171..8894ace590335 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -585,8 +585,10 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
return "partial_reduce_umla";
case ISD::PARTIAL_REDUCE_SMLA:
return "partial_reduce_smla";
- case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK:
- return "alias_lane_mask";
+ case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
+ return "loop_dep_war";
+ case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK:
+ return "loop_dep_raw";
// Vector Predication
#define BEGIN_REGISTER_VP_SDNODE(SDID, LEGALARG, NAME, ...) \
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 6ff7fec7ae7b0..d2ab106eb5d5d 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -839,8 +839,9 @@ void TargetLoweringBase::initActions() {
// Masked vector extracts default to expand.
setOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE, VT, Expand);
- // Non-aliasing lanes mask default to expand
- setOperationAction(ISD::EXPERIMENTAL_NOALIAS_LANE_MASK, VT, Expand);
+ // Lane masks with non-aliasing lanes enabled default to expand
+ setOperationAction(ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK, VT, Expand);
+ setOperationAction(ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK, VT, Expand);
// FP environment operations default to expand.
setOperationAction(ISD::GET_FPENV, VT, Expand);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 132cfb209dff7..ac7deb830730c 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1904,7 +1904,10 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
(Subtarget->hasSME() && Subtarget->isStreaming())) {
for (auto VT : {MVT::v2i32, MVT::v4i16, MVT::v8i8, MVT::v16i8, MVT::nxv2i1,
MVT::nxv4i1, MVT::nxv8i1, MVT::nxv16i1}) {
- setOperationAction(ISD::EXPERIMENTAL_NOALIAS_LANE_MASK, VT, Custom);
+ setOperationAction(ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK, VT,
+ Custom);
+ setOperationAction(ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK, VT,
+ Custom);
}
}
@@ -5093,11 +5096,13 @@ SDValue AArch64TargetLowering::LowerFSINCOS(SDValue Op,
static MVT getSVEContainerType(EVT ContentTy);
-SDValue AArch64TargetLowering::LowerNOALIAS_LANE_MASK(SDValue Op,
- SelectionDAG &DAG) const {
+SDValue
+AArch64TargetLowering::LowerLOOP_DEPENDENCE_MASK(SDValue Op,
+ SelectionDAG &DAG) const {
SDLoc DL(Op);
uint64_t EltSize = Op.getConstantOperandVal(2);
- bool IsWriteAfterRead = Op.getConstantOperandVal(3) == 1;
+ bool IsWriteAfterRead =
+ Op.getOpcode() == ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK;
unsigned Opcode =
IsWriteAfterRead ? AArch64ISD::WHILEWR : AArch64ISD::WHILERW;
EVT VT = Op.getValueType();
@@ -7297,8 +7302,9 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
default:
llvm_unreachable("unimplemented operand");
return SDValue();
- case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK:
- return LowerNOALIAS_LANE_MASK(Op, DAG);
+ case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK:
+ case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
+ return LowerLOOP_DEPENDENCE_MASK(Op, DAG);
case ISD::BITCAST:
return LowerBITCAST(Op, DAG);
case ISD::GlobalAddress:
@@ -27600,7 +27606,8 @@ void AArch64TargetLowering::ReplaceNodeResults(
// CONCAT_VECTORS -- but delegate to common code for result type
// legalisation
return;
- case ISD::EXPERIMENTAL_NOALIAS_LANE_MASK: {
+ case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
+ case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK: {
EVT VT = N->getValueType(0);
if (!VT.isFixedLengthVector() || VT.getVectorElementType() != MVT::i1)
return;
@@ -27611,8 +27618,7 @@ void AArch64TargetLowering::ReplaceNodeResults(
return;
SDLoc DL(N);
- auto V =
- DAG.getNode(ISD::EXPERIMENTAL_NOALIAS_LANE_MASK, DL, NewVT, N->ops());
+ auto V = DAG.getNode(N->getOpcode(), DL, NewVT, N->ops());
Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
return;
}
@@ -27672,7 +27678,8 @@ void AArch64TargetLowering::ReplaceNodeResults(
return;
}
case Intrinsic::experimental_vector_match:
- case Intrinsic::experimental_get_noalias_lane_mask: {
+ case Intrinsic::experimental_loop_dependence_raw_mask:
+ case Intrinsic::experimental_loop_dependence_war_mask: {
if (!VT.isFixedLengthVector() || VT.getVectorElementType() != MVT::i1)
return;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 6550cdf95cd11..a0a91e54d8a60 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -710,7 +710,7 @@ class AArch64TargetLowering : public TargetLowering {
SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerNOALIAS_LANE_MASK(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerLOOP_DEPENDENCE_MASK(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVSCALE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/test/CodeGen/AArch64/alias_mask.ll b/llvm/test/CodeGen/AArch64/alias_mask.ll
index 21eff3b11c001..3248cb2de2644 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask.ll
@@ -53,7 +53,7 @@ define <16 x i1> @whilewr_8(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <16 x i1> @llvm.experimental.get.noalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 1)
+ %0 = call <16 x i1> @llvm.experimental.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 1)
ret <16 x i1> %0
}
@@ -95,7 +95,7 @@ define <8 x i1> @whilewr_16(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <8 x i1> @llvm.experimental.get.noalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 1)
+ %0 = call <8 x i1> @llvm.experimental.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 2)
ret <8 x i1> %0
}
@@ -129,7 +129,7 @@ define <4 x i1> @whilewr_32(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 1)
+ %0 = call <4 x i1> @llvm.experimental.loop.dependence.war.mask.v4i1(ptr %a, ptr %b, i64 4)
ret <4 x i1> %0
}
@@ -159,7 +159,7 @@ define <2 x i1> @whilewr_64(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <2 x i1> @llvm.experimental.get.noalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 1)
+ %0 = call <2 x i1> @llvm.experimental.loop.dependence.war.mask.v2i1(ptr %a, ptr %b, i64 8)
ret <2 x i1> %0
}
@@ -215,7 +215,7 @@ define <16 x i1> @whilerw_8(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <16 x i1> @llvm.experimental.get.noalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 0)
+ %0 = call <16 x i1> @llvm.experimental.loop.dependence.raw.mask.v16i1(ptr %a, ptr %b, i64 1)
ret <16 x i1> %0
}
@@ -258,7 +258,7 @@ define <8 x i1> @whilerw_16(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <8 x i1> @llvm.experimental.get.noalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 0)
+ %0 = call <8 x i1> @llvm.experimental.loop.dependence.raw.mask.v8i1(ptr %a, ptr %b, i64 2)
ret <8 x i1> %0
}
@@ -293,7 +293,7 @@ define <4 x i1> @whilerw_32(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 0)
+ %0 = call <4 x i1> @llvm.experimental.loop.dependence.raw.mask.v4i1(ptr %a, ptr %b, i64 4)
ret <4 x i1> %0
}
@@ -324,6 +324,6 @@ define <2 x i1> @whilerw_64(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <2 x i1> @llvm.experimental.get.noalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 0)
+ %0 = call <2 x i1> @llvm.experimental.loop.dependence.raw.mask.v2i1(ptr %a, ptr %b, i64 8)
ret <2 x i1> %0
}
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
index b29619c7f397d..5a7c3180e2807 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
@@ -60,7 +60,7 @@ define <vscale x 16 x i1> @whilewr_8(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 16 x i1> @llvm.experimental.get.noalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 1)
+ %0 = call <vscale x 16 x i1> @llvm.experimental.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 1)
ret <vscale x 16 x i1> %0
}
@@ -98,7 +98,7 @@ define <vscale x 8 x i1> @whilewr_16(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 8 x i1> @llvm.experimental.get.noalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 1)
+ %0 = call <vscale x 8 x i1> @llvm.experimental.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 2)
ret <vscale x 8 x i1> %0
}
@@ -130,7 +130,7 @@ define <vscale x 4 x i1> @whilewr_32(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 1)
+ %0 = call <vscale x 4 x i1> @llvm.experimental.loop.dependence.war.mask.v4i1(ptr %a, ptr %b, i64 4)
ret <vscale x 4 x i1> %0
}
@@ -158,7 +158,7 @@ define <vscale x 2 x i1> @whilewr_64(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 2 x i1> @llvm.experimental.get.noalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 1)
+ %0 = call <vscale x 2 x i1> @llvm.experimental.loop.dependence.war.mask.v2i1(ptr %a, ptr %b, i64 8)
ret <vscale x 2 x i1> %0
}
@@ -223,7 +223,7 @@ define <vscale x 16 x i1> @whilerw_8(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 16 x i1> @llvm.experimental.get.noalias.lane.mask.v16i1(ptr %a, ptr %b, i64 1, i1 0)
+ %0 = call <vscale x 16 x i1> @llvm.experimental.loop.dependence.raw.mask.v16i1(ptr %a, ptr %b, i64 1)
ret <vscale x 16 x i1> %0
}
@@ -262,7 +262,7 @@ define <vscale x 8 x i1> @whilerw_16(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 8 x i1> @llvm.experimental.get.noalias.lane.mask.v8i1(ptr %a, ptr %b, i64 2, i1 0)
+ %0 = call <vscale x 8 x i1> @llvm.experimental.loop.dependence.raw.mask.v8i1(ptr %a, ptr %b, i64 2)
ret <vscale x 8 x i1> %0
}
@@ -295,7 +295,7 @@ define <vscale x 4 x i1> @whilerw_32(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %a, ptr %b, i64 4, i1 0)
+ %0 = call <vscale x 4 x i1> @llvm.experimental.loop.dependence.raw.mask.v4i1(ptr %a, ptr %b, i64 4)
ret <vscale x 4 x i1> %0
}
@@ -324,6 +324,6 @@ define <vscale x 2 x i1> @whilerw_64(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 2 x i1> @llvm.experimental.get.noalias.lane.mask.v2i1(ptr %a, ptr %b, i64 8, i1 0)
+ %0 = call <vscale x 2 x i1> @llvm.experimental.loop.dependence.raw.mask.v2i1(ptr %a, ptr %b, i64 8)
ret <vscale x 2 x i1> %0
}
>From baae4c674d0b2e34cfa337d4f870e5bbc5624106 Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Mon, 10 Mar 2025 13:29:48 +0000
Subject: [PATCH 19/25] Rename in langref
---
llvm/docs/LangRef.rst | 47 +++++++++++++++++++------------------------
1 file changed, 21 insertions(+), 26 deletions(-)
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 91f1824754ac6..c7eb95e5931f7 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -23969,10 +23969,12 @@ Examples:
%active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 %elem0, i64 429)
%wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %3, i32 4, <4 x i1> %active.lane.mask, <4 x i32> poison)
-.. _int_experimental_get_noalias_lane_mask:
-'``llvm.experimental.get.noalias.lane.mask.*``' Intrinsics
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+.. _int_experimental_loop_dependence_war_mask:
+.. _int_experimental_loop_dependence_raw_mask:
+
+'``llvm.experimental.loop.dependence.raw.mask.*``' and '``llvm.experimental.loop.dependence.war.mask.*``' Intrinsics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
@@ -23980,10 +23982,10 @@ This is an overloaded intrinsic.
::
- declare <4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
- declare <8 x i1> @llvm.experimental.get.noalias.lane.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
- declare <16 x i1> @llvm.experimental.get.noalias.lane.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
- declare <vscale x 16 x i1> @llvm.experimental.get.noalias.lane.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize, i1 immarg %writeAfterRead)
+ declare <4 x i1> @llvm.experimental.loop.dependence.raw.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+ declare <8 x i1> @llvm.experimental.loop.dependence.war.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+ declare <16 x i1> @llvm.experimental.loop.dependence.raw.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+ declare <vscale x 16 x i1> @llvm.experimental.loop.dependence.war.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
Overview:
@@ -23996,8 +23998,8 @@ across one vector loop iteration.
Arguments:
""""""""""
-The first two arguments have the same scalar integer type.
-The final two are immediates and the result is a vector with the i1 element type.
+The first two arguments have the same pointer type.
+The final one is an immediate and the result is a vector with the i1 element type.
Semantics:
""""""""""
@@ -24005,8 +24007,7 @@ Semantics:
``%elementSize`` is the size of the accessed elements in bytes.
The intrinsic will return poison if ``%ptrA`` and ``%ptrB`` are within
VF * ``%elementSize`` of each other and ``%ptrA`` + VF * ``%elementSize`` wraps.
-In other cases when ``%writeAfterRead`` is true, the
-'``llvm.experimental.get.noalias.lane.mask.*``' intrinsics are semantically
+The '``llvm.experimental.loop.dependence.war.mask*``' intrinsics are semantically
equivalent to:
::
@@ -24014,9 +24015,8 @@ equivalent to:
%diff = (%ptrB - %ptrA) / %elementSize
%m[i] = (icmp ult i, %diff) || (%diff <= 0)
-When the return value is not poison and ``%writeAfterRead`` is false, the
-'``llvm.experimental.get.noalias.lane.mask.*``' intrinsics are semantically
-equivalent to:
+When the return value is not poison the '``llvm.experimental.loop.dependence.raw.mask.*``'
+intrinsics are semantically equivalent to:
::
@@ -24025,15 +24025,10 @@ equivalent to:
where ``%m`` is a vector (mask) of active/inactive lanes with its elements
indexed by ``i`` (i = 0 to VF - 1), and ``%ptrA``, ``%ptrB`` are the two ptr
-arguments to ``llvm.experimental.get.noalias.lane.mask.*`` and ``%elementSize``
-is the first immediate argument. The ``%writeAfterRead`` argument is expected
-to be true if ``%ptrB`` is stored to after ``%ptrA`` is read from, otherwise
-it is false for a read after write.
-The above is equivalent to:
-
-::
-
- %m = @llvm.experimental.get.noalias.lane.mask(%ptrA, %ptrB, %elementSize, %writeAfterRead)
+arguments to ``llvm.experimental.loop.dependence.{raw,war}.mask.*`` and ``%elementSize``
+is the first immediate argument. The ``war`` variant is expected to be used when
+``%ptrB`` is stored to after ``%ptrA`` is read from, otherwise the ``raw`` variant is
+expected to be used.
This can, for example, be emitted by the loop vectorizer in which case
``%ptrA`` is a pointer that is read from within the loop, and ``%ptrB`` is a
@@ -24051,10 +24046,10 @@ Examples:
.. code-block:: llvm
- %noalias.lane.mask = call <4 x i1> @llvm.experimental.get.noalias.lane.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4, i1 1)
- %vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(ptr %ptrA, i32 4, <4 x i1> %noalias.lane.mask, <4 x i32> poison)
+ %loop.dependence.mask = call <4 x i1> @llvm.experimental.loop.dependence.war.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4)
+ %vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(ptr %ptrA, i32 4, <4 x i1> %loop.dependence.mask, <4 x i32> poison)
[...]
- call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, ptr %ptrB, i32 4, <4 x i1> %noalias.lane.mask)
+ call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, ptr %ptrB, i32 4, <4 x i1> %loop.dependence.mask)
.. _int_experimental_vp_splice:
>From 6ecb0d3973cea8fb461d0409b04f6b041025578e Mon Sep 17 00:00:00 2001
From: Sam Tebbs <samuel.tebbs at arm.com>
Date: Fri, 21 Mar 2025 17:47:00 +0000
Subject: [PATCH 20/25] Reword argument description
---
llvm/docs/LangRef.rst | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index c7eb95e5931f7..5ff21685d0c61 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -23998,8 +23998,8 @@ across one vector loop iteration.
Arguments:
""""""""""
-The first two arguments have the same pointer type.
-The final one is an immediate and the result is a vector with the i1 element type.
+The first two arguments are pointers and the last argument is an immediate.
+The result is a vector with the i1 element type.
Semantics:
""""""""""
>From 8a295fd2f413488bdf2a960ee41fbef9c9af9051 Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Tue, 20 May 2025 15:47:45 +0100
Subject: [PATCH 21/25] Fixup langref
---
llvm/docs/LangRef.rst | 112 ++++++++++++++++++++++++++++--------------
1 file changed, 76 insertions(+), 36 deletions(-)
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 5ff21685d0c61..b425c70b9da29 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -23971,10 +23971,9 @@ Examples:
.. _int_experimental_loop_dependence_war_mask:
-.. _int_experimental_loop_dependence_raw_mask:
-'``llvm.experimental.loop.dependence.raw.mask.*``' and '``llvm.experimental.loop.dependence.war.mask.*``' Intrinsics
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+'``llvm.experimental.loop.dependence.war.mask.*``' Intrinsics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
@@ -23982,18 +23981,22 @@ This is an overloaded intrinsic.
::
- declare <4 x i1> @llvm.experimental.loop.dependence.raw.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+ declare <4 x i1> @llvm.experimental.loop.dependence.war.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
declare <8 x i1> @llvm.experimental.loop.dependence.war.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
- declare <16 x i1> @llvm.experimental.loop.dependence.raw.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+ declare <16 x i1> @llvm.experimental.loop.dependence.war.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
declare <vscale x 16 x i1> @llvm.experimental.loop.dependence.war.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
Overview:
"""""""""
-Create a mask enabling lanes that do not overlap between two pointers
-across one vector loop iteration.
+Given a scalar load from ``%ptrA``, followed by a scalar store to ``%ptrB``, this
+intrinsic generates a mask where an active lane indicates that there is no
+write-after-read hazard for that lane.
+A write-after-read hazard occurs when a write-after-read sequence for a given
+lane in a vector ends up being executed as a read-after-write sequence due to
+pointer aliasing.
Arguments:
""""""""""
@@ -24005,51 +24008,88 @@ Semantics:
""""""""""
``%elementSize`` is the size of the accessed elements in bytes.
-The intrinsic will return poison if ``%ptrA`` and ``%ptrB`` are within
-VF * ``%elementSize`` of each other and ``%ptrA`` + VF * ``%elementSize`` wraps.
-The '``llvm.experimental.loop.dependence.war.mask*``' intrinsics are semantically
-equivalent to:
+The intrinsic returns ``poison`` if the distance between ``%ptrA`` and ``%ptrB``
+is smaller than ``VF * %elementSize`` and either ``%ptrA + VF * %elementSize``
+or ``%ptrB + VF * %elementSize`` wraps.
+An element of the result mask is active when no write-after-read hazard occurs
+for its lane, meaning that:
-::
+* (ptrB - ptrA) <= 0 (guarantees that all lanes are loaded before any stores are
+ committed), or
+* (ptrB - ptrA) >= elementSize * lane (guarantees that this lane is loaded
+ before the store to the same address is committed)
- %diff = (%ptrB - %ptrA) / %elementSize
- %m[i] = (icmp ult i, %diff) || (%diff <= 0)
+Examples:
+"""""""""
-When the return value is not poison the '``llvm.experimental.loop.dependence.raw.mask.*``'
-intrinsics are semantically equivalent to:
+.. code-block:: llvm
+
+ %loop.dependence.mask = call <4 x i1> @llvm.experimental.loop.dependence.war.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4)
+ %vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(ptr %ptrA, i32 4, <4 x i1> %loop.dependence.mask, <4 x i32> poison)
+ [...]
+ call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, ptr %ptrB, i32 4, <4 x i1> %loop.dependence.mask)
+
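+As a worked example of the conditions above (with hypothetical values chosen
+purely for illustration), assume ``%ptrA`` is 100, ``%ptrB`` is 108,
+``%elementSize`` is 4 and VF is 4. Then ``%ptrB - %ptrA`` is 8, which is
+positive, so per the conditions above a lane is active only if ``8 >= 4 * lane``:
+
+.. code-block:: llvm
+
+  ; lane 0: 8 >= 0  -> active
+  ; lane 1: 8 >= 4  -> active
+  ; lane 2: 8 >= 8  -> active
+  ; lane 3: 8 >= 12 -> inactive
+  %war.mask = call <4 x i1> @llvm.experimental.loop.dependence.war.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4)
+  ; %war.mask is <i1 1, i1 1, i1 1, i1 0>
+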
+.. _int_experimental_loop_dependence_raw_mask:
+
+'``llvm.experimental.loop.dependence.raw.mask.*``' Intrinsics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+This is an overloaded intrinsic.
::
- %diff = abs(%ptrB - %ptrA) / %elementSize
- %m[i] = (icmp ult i, %diff) || (%diff == 0)
+ declare <4 x i1> @llvm.experimental.loop.dependence.raw.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+ declare <8 x i1> @llvm.experimental.loop.dependence.raw.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+ declare <16 x i1> @llvm.experimental.loop.dependence.raw.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+ declare <vscale x 16 x i1> @llvm.experimental.loop.dependence.raw.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+
-where ``%m`` is a vector (mask) of active/inactive lanes with its elements
-indexed by ``i`` (i = 0 to VF - 1), and ``%ptrA``, ``%ptrB`` are the two ptr
-arguments to ``llvm.experimental.loop.dependence.{raw,war}.mask.*`` and ``%elementSize``
-is the first immediate argument. The ``war`` variant is expected to be used when
-``%ptrB`` is stored to after ``%ptrA`` is read from, otherwise the ``raw`` variant is
-expected to be used.
+Overview:
+"""""""""
-This can, for example, be emitted by the loop vectorizer in which case
-``%ptrA`` is a pointer that is read from within the loop, and ``%ptrB`` is a
-pointer that is stored to within the loop.
-If the difference between these pointers is less than the vector factor, then
-they overlap (alias) within a loop iteration.
-An example is if ``%ptrA`` is 20 and ``%ptrB`` is 23 with a vector factor of 8,
-then lanes 3, 4, 5, 6 and 7 of the vector loaded from ``%ptrA``
-share addresses with lanes 0, 1, 2, 3, 4 and 5 from the vector stored to at
-``%ptrB``.
+Given a scalar store to ``%ptrA``, followed by a scalar load from ``%ptrB``, this
+intrinsic generates a mask where an active lane indicates that there is no
+read-after-write hazard for that lane and that the lane does not introduce any
+new store-to-load forwarding hazard.
+A read-after-write hazard occurs when a read-after-write sequence for a given
+lane in a vector ends up being executed as a write-after-read sequence due to
+pointer aliasing.
+
+
+Arguments:
+""""""""""
+
+The first two arguments are pointers and the last argument is an immediate.
+The result is a vector with the i1 element type.
+
+Semantics:
+""""""""""
+
+``%elementSize`` is the size of the accessed elements in bytes.
+The intrinsic returns ``poison`` if the distance between ``%ptrA`` and ``%ptrB``
+is smaller than ``VF * %elementSize`` and either ``%ptrA + VF * %elementSize``
+or ``%ptrB + VF * %elementSize`` wraps.
+An element of the result mask is active when no read-after-write hazard occurs
+for its lane, meaning that:
+
+ abs(ptrB - ptrA) >= elementSize * lane (guarantees that the store of this lane
+ is committed before loading from this address)
+
+Note that the case where (ptrB - ptrA) < 0 does not result in any
+read-after-write hazards, but may introduce new store-to-load-forwarding stalls
+where both the store and load partially access the same addresses.
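+
+As a worked example of the condition above (with hypothetical values chosen
+purely for illustration), assume ``%ptrA`` is 100, ``%ptrB`` is 108,
+``%elementSize`` is 4 and VF is 4. Then ``abs(%ptrB - %ptrA)`` is 8, so a
+lane is active only if ``8 >= 4 * lane``:
+
+.. code-block:: llvm
+
+  ; lanes 0, 1 and 2 satisfy 8 >= 0, 8 >= 4 and 8 >= 8 -> active
+  ; lane 3 fails 8 >= 12 -> inactive
+  %raw.mask = call <4 x i1> @llvm.experimental.loop.dependence.raw.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4)
+  ; %raw.mask is <i1 1, i1 1, i1 1, i1 0>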
Examples:
"""""""""
.. code-block:: llvm
- %loop.dependence.mask = call <4 x i1> @llvm.experimental.loop.dependence.war.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4)
- %vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(ptr %ptrA, i32 4, <4 x i1> %loop.dependence.mask, <4 x i32> poison)
+ %loop.dependence.mask = call <4 x i1> @llvm.experimental.loop.dependence.raw.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4)
+ call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, ptr %ptrA, i32 4, <4 x i1> %loop.dependence.mask)
[...]
- call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, ptr %ptrB, i32 4, <4 x i1> %loop.dependence.mask)
+ %vecB = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(ptr %ptrB, i32 4, <4 x i1> %loop.dependence.mask, <4 x i32> poison)
.. _int_experimental_vp_splice:
>From b9616cb02e97a68c887617cd43e77d09084be360 Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Tue, 20 May 2025 15:50:40 +0100
Subject: [PATCH 22/25] IsWriteAfterRead -> IsReadAfterWrite and avoid using
ops vector
---
llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp | 10 +++++-----
llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 7 +++----
2 files changed, 8 insertions(+), 9 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index b394fadeefa94..0190baa858190 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -1780,13 +1780,13 @@ SDValue VectorLegalizer::ExpandLOOP_DEPENDENCE_MASK(SDNode *N) {
SDValue SinkValue = N->getOperand(1);
SDValue EltSize = N->getOperand(2);
- bool IsWriteAfterRead =
- N->getOpcode() == ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK;
+ bool IsReadAfterWrite =
+ N->getOpcode() == ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK;
auto VT = N->getValueType(0);
auto PtrVT = SourceValue->getValueType(0);
SDValue Diff = DAG.getNode(ISD::SUB, DL, PtrVT, SinkValue, SourceValue);
- if (!IsWriteAfterRead)
+ if (IsReadAfterWrite)
Diff = DAG.getNode(ISD::ABS, DL, PtrVT, Diff);
Diff = DAG.getNode(ISD::SDIV, DL, PtrVT, Diff, EltSize);
@@ -1796,7 +1796,7 @@ SDValue VectorLegalizer::ExpandLOOP_DEPENDENCE_MASK(SDNode *N) {
Diff.getValueType());
SDValue Zero = DAG.getTargetConstant(0, DL, PtrVT);
SDValue Cmp = DAG.getSetCC(DL, CmpVT, Diff, Zero,
- IsWriteAfterRead ? ISD::SETLE : ISD::SETEQ);
+ IsReadAfterWrite ? ISD::SETEQ : ISD::SETLE);
// Create the lane mask
EVT SplatTY =
@@ -1807,7 +1807,7 @@ SDValue VectorLegalizer::ExpandLOOP_DEPENDENCE_MASK(SDNode *N) {
DAG.getSetCC(DL, VT, VectorStep, DiffSplat, ISD::CondCode::SETULT);
// Splat the compare result then OR it with the lane mask
- auto VTElementTy = VT.getVectorElementType();
+ EVT VTElementTy = VT.getVectorElementType();
if (CmpVT.getScalarSizeInBits() < VTElementTy.getScalarSizeInBits())
Cmp = DAG.getNode(ISD::ZERO_EXTEND, DL, VTElementTy, Cmp);
SDValue Splat = DAG.getSplat(VT, DL, Cmp);
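For readers following this hunk: in scalar terms, ExpandLOOP_DEPENDENCE_MASK builds
something equivalent to the LLVM IR below. This is a minimal sketch of the WAR case
for a <4 x i1> result with a 4-byte element size; the value names are illustrative
and not part of the patch. The RAW case additionally takes the absolute value of the
difference and compares the quotient for equality with zero instead of <= 0:

  %ptrA.int = ptrtoint ptr %a to i64
  %ptrB.int = ptrtoint ptr %b to i64
  %diff.bytes = sub i64 %ptrB.int, %ptrA.int
  %diff = sdiv i64 %diff.bytes, 4                 ; element count between the pointers
  %safe = icmp sle i64 %diff, 0                   ; non-positive distance: no hazard at all
  %d.ins = insertelement <4 x i64> poison, i64 %diff, i64 0
  %d.splat = shufflevector <4 x i64> %d.ins, <4 x i64> poison, <4 x i32> zeroinitializer
  %lane.ok = icmp ult <4 x i64> <i64 0, i64 1, i64 2, i64 3>, %d.splat
  %s.ins = insertelement <4 x i1> poison, i1 %safe, i64 0
  %s.splat = shufflevector <4 x i1> %s.ins, <4 x i1> poison, <4 x i32> zeroinitializer
  %mask = or <4 x i1> %lane.ok, %s.splat          ; step < diff, OR'd with the splatted compare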
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index bc806c40e2911..78a8ec9b92ddb 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -8248,13 +8248,12 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
case Intrinsic::experimental_loop_dependence_raw_mask: {
auto IntrinsicVT = EVT::getEVT(I.getType());
SmallVector<SDValue, 4> Ops;
- for (auto &Op : I.operands())
- Ops.push_back(getValue(Op));
unsigned ID = Intrinsic == Intrinsic::experimental_loop_dependence_war_mask
? ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK
: ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK;
- SDValue Mask = DAG.getNode(ID, sdl, IntrinsicVT, Ops);
- setValue(&I, Mask);
+ setValue(&I,
+ DAG.getNode(ID, sdl, IntrinsicVT, getValue(I.getOperand(0)),
+ getValue(I.getOperand(1)), getValue(I.getOperand(2))));
}
}
}
>From 360d723913c7bc4e85d68737ad73cbc2aca5594c Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs at arm.com>
Date: Tue, 20 May 2025 16:41:07 +0100
Subject: [PATCH 23/25] Extend vXi1 setcc to account for intrinsic VT promotion
---
.../CodeGen/SelectionDAG/LegalizeVectorOps.cpp | 15 ++++++++++++---
llvm/test/CodeGen/AArch64/alias_mask.ll | 8 --------
2 files changed, 12 insertions(+), 11 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 0190baa858190..4abc5230e9bc7 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -1800,14 +1800,23 @@ SDValue VectorLegalizer::ExpandLOOP_DEPENDENCE_MASK(SDNode *N) {
// Create the lane mask
EVT SplatTY =
- EVT::getVectorVT(*DAG.getContext(), PtrVT, VT.getVectorElementCount());
+ EVT::getVectorVT(*DAG.getContext(), PtrVT, VT.getVectorMinNumElements(),
+ VT.isScalableVector());
SDValue DiffSplat = DAG.getSplat(SplatTY, DL, Diff);
SDValue VectorStep = DAG.getStepVector(DL, SplatTY);
+ EVT MaskVT =
+ EVT::getVectorVT(*DAG.getContext(), MVT::i1, VT.getVectorMinNumElements(),
+ VT.isScalableVector());
SDValue DiffMask =
- DAG.getSetCC(DL, VT, VectorStep, DiffSplat, ISD::CondCode::SETULT);
+ DAG.getSetCC(DL, MaskVT, VectorStep, DiffSplat, ISD::CondCode::SETULT);
- // Splat the compare result then OR it with the lane mask
EVT VTElementTy = VT.getVectorElementType();
+ // Extend the diff setcc in case the intrinsic has been promoted to a vector
+ // type with elements larger than i1
+ if (VTElementTy.getScalarSizeInBits() > MaskVT.getScalarSizeInBits())
+ DiffMask = DAG.getNode(ISD::ANY_EXTEND, DL, VT, DiffMask);
+
+ // Splat the compare result then OR it with the lane mask
if (CmpVT.getScalarSizeInBits() < VTElementTy.getScalarSizeInBits())
Cmp = DAG.getNode(ISD::ZERO_EXTEND, DL, VTElementTy, Cmp);
SDValue Splat = DAG.getSplat(VT, DL, Cmp);
diff --git a/llvm/test/CodeGen/AArch64/alias_mask.ll b/llvm/test/CodeGen/AArch64/alias_mask.ll
index 3248cb2de2644..75ab6f62095b6 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask.ll
@@ -48,8 +48,6 @@ define <16 x i1> @whilewr_8(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: uzp1 v1.8h, v2.8h, v3.8h
; CHECK-NOSVE-NEXT: uzp1 v0.16b, v1.16b, v0.16b
; CHECK-NOSVE-NEXT: dup v1.16b, w8
-; CHECK-NOSVE-NEXT: shl v0.16b, v0.16b, #7
-; CHECK-NOSVE-NEXT: cmlt v0.16b, v0.16b, #0
; CHECK-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b
; CHECK-NOSVE-NEXT: ret
entry:
@@ -90,8 +88,6 @@ define <8 x i1> @whilewr_16(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: uzp1 v0.8h, v0.8h, v1.8h
; CHECK-NOSVE-NEXT: dup v1.8b, w8
; CHECK-NOSVE-NEXT: xtn v0.8b, v0.8h
-; CHECK-NOSVE-NEXT: shl v0.8b, v0.8b, #7
-; CHECK-NOSVE-NEXT: cmlt v0.8b, v0.8b, #0
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
@@ -210,8 +206,6 @@ define <16 x i1> @whilerw_8(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: uzp1 v1.8h, v3.8h, v2.8h
; CHECK-NOSVE-NEXT: uzp1 v0.16b, v1.16b, v0.16b
; CHECK-NOSVE-NEXT: dup v1.16b, w8
-; CHECK-NOSVE-NEXT: shl v0.16b, v0.16b, #7
-; CHECK-NOSVE-NEXT: cmlt v0.16b, v0.16b, #0
; CHECK-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b
; CHECK-NOSVE-NEXT: ret
entry:
@@ -253,8 +247,6 @@ define <8 x i1> @whilerw_16(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: uzp1 v0.8h, v0.8h, v1.8h
; CHECK-NOSVE-NEXT: dup v1.8b, w8
; CHECK-NOSVE-NEXT: xtn v0.8b, v0.8h
-; CHECK-NOSVE-NEXT: shl v0.8b, v0.8b, #7
-; CHECK-NOSVE-NEXT: cmlt v0.8b, v0.8b, #0
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
>From d4d8d8cb3a8edf10f9a4743bd651f826c14cb6d4 Mon Sep 17 00:00:00 2001
From: Sam Tebbs <samuel.tebbs at arm.com>
Date: Wed, 21 May 2025 14:35:29 +0100
Subject: [PATCH 24/25] Remove experimental from intrinsic name
---
llvm/docs/LangRef.rst | 28 +++++++++----------
llvm/include/llvm/CodeGen/ISDOpcodes.h | 4 +--
llvm/include/llvm/IR/Intrinsics.td | 4 +--
.../SelectionDAG/LegalizeIntegerTypes.cpp | 19 ++++++-------
llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h | 5 ++--
.../SelectionDAG/LegalizeVectorOps.cpp | 11 ++++----
.../SelectionDAG/SelectionDAGBuilder.cpp | 10 +++----
.../SelectionDAG/SelectionDAGDumper.cpp | 4 +--
llvm/lib/CodeGen/TargetLoweringBase.cpp | 4 +--
.../Target/AArch64/AArch64ISelLowering.cpp | 21 ++++++--------
llvm/test/CodeGen/AArch64/alias_mask.ll | 16 +++++------
.../CodeGen/AArch64/alias_mask_scalable.ll | 16 +++++------
12 files changed, 68 insertions(+), 74 deletions(-)
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index b425c70b9da29..8aaeb9593ce60 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -23970,9 +23970,9 @@ Examples:
%wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %3, i32 4, <4 x i1> %active.lane.mask, <4 x i32> poison)
-.. _int_experimental_loop_dependence_war_mask:
+.. _int_loop_dependence_war_mask:
-'``llvm.experimental.loop.dependence.war.mask.*``' Intrinsics
+'``llvm.loop.dependence.war.mask.*``' Intrinsics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
@@ -23981,10 +23981,10 @@ This is an overloaded intrinsic.
::
- declare <4 x i1> @llvm.experimental.loop.dependence.war.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
- declare <8 x i1> @llvm.experimental.loop.dependence.war.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
- declare <16 x i1> @llvm.experimental.loop.dependence.war.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
- declare <vscale x 16 x i1> @llvm.experimental.loop.dependence.war.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+ declare <4 x i1> @llvm.loop.dependence.war.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+ declare <8 x i1> @llvm.loop.dependence.war.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+ declare <16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+ declare <vscale x 16 x i1> @llvm.loop.dependence.war.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
Overview:
@@ -24024,14 +24024,14 @@ Examples:
.. code-block:: llvm
- %loop.dependence.mask = call <4 x i1> @llvm.experimental.loop.dependence.war.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4)
+ %loop.dependence.mask = call <4 x i1> @llvm.loop.dependence.war.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4)
%vecA = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(ptr %ptrA, i32 4, <4 x i1> %loop.dependence.mask, <4 x i32> poison)
[...]
call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, ptr %ptrB, i32 4, <4 x i1> %loop.dependence.mask)
-.. _int_experimental_loop_dependence_raw_mask:
+.. _int_loop_dependence_raw_mask:
-'``llvm.experimental.loop.dependence.raw.mask.*``' Intrinsics
+'``llvm.loop.dependence.raw.mask.*``' Intrinsics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
@@ -24040,10 +24040,10 @@ This is an overloaded intrinsic.
::
- declare <4 x i1> @llvm.experimental.loop.dependence.raw.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
- declare <8 x i1> @llvm.experimental.loop.dependence.raw.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
- declare <16 x i1> @llvm.experimental.loop.dependence.raw.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
- declare <vscale x 16 x i1> @llvm.experimental.loop.dependence.raw.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+ declare <4 x i1> @llvm.loop.dependence.raw.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+ declare <8 x i1> @llvm.loop.dependence.raw.mask.v8i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+ declare <16 x i1> @llvm.loop.dependence.raw.mask.v16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
+ declare <vscale x 16 x i1> @llvm.loop.dependence.raw.mask.nxv16i1(ptr %ptrA, ptr %ptrB, i64 immarg %elementSize)
Overview:
@@ -24086,7 +24086,7 @@ Examples:
.. code-block:: llvm
- %loop.dependence.mask = call <4 x i1> @llvm.experimental.loop.dependence.raw.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4)
+ %loop.dependence.mask = call <4 x i1> @llvm.loop.dependence.raw.mask.v4i1(ptr %ptrA, ptr %ptrB, i64 4)
call @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %vecA, ptr %ptrA, i32 4, <4 x i1> %loop.dependence.mask)
[...]
%vecB = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(ptr %ptrB, i32 4, <4 x i1> %loop.dependence.mask, <4 x i32> poison)
diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index 3b911c493f8ca..d703bce04e4fc 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -1559,8 +1559,8 @@ enum NodeType {
// The `llvm.experimental.loop.dependence.{war, raw}.mask` intrinsics
// Operands: Load pointer, Store pointer, Element size
// Output: Mask
- EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK,
- EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK,
+ LOOP_DEPENDENCE_WAR_MASK,
+ LOOP_DEPENDENCE_RAW_MASK,
// llvm.clear_cache intrinsic
// Operands: Input Chain, Start Address, End Address
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 3061ffd10b6cb..e91f56d5b53de 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -2399,12 +2399,12 @@ let IntrProperties = [IntrNoMem, ImmArg<ArgIndex<1>>] in {
llvm_i32_ty]>;
}
-def int_experimental_loop_dependence_raw_mask:
+def int_loop_dependence_raw_mask:
DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[llvm_ptr_ty, llvm_ptr_ty, llvm_i64_ty],
[IntrNoMem, IntrNoSync, IntrWillReturn, ImmArg<ArgIndex<2>>]>;
-def int_experimental_loop_dependence_war_mask:
+def int_loop_dependence_war_mask:
DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[llvm_ptr_ty, llvm_ptr_ty, llvm_i64_ty],
[IntrNoMem, IntrNoSync, IntrWillReturn, ImmArg<ArgIndex<2>>]>;
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index ed30f16fe4d5c..e57cc0535ac7c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -322,9 +322,9 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
Res = PromoteIntRes_VP_REDUCE(N);
break;
- case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
- case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK:
- Res = PromoteIntRes_EXPERIMENTAL_LOOP_DEPENDENCE_MASK(N);
+ case ISD::LOOP_DEPENDENCE_WAR_MASK:
+ case ISD::LOOP_DEPENDENCE_RAW_MASK:
+ Res = PromoteIntRes_LOOP_DEPENDENCE_MASK(N);
break;
case ISD::FREEZE:
@@ -374,8 +374,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_MERGE_VALUES(SDNode *N,
return GetPromotedInteger(Op);
}
-SDValue
-DAGTypeLegalizer::PromoteIntRes_EXPERIMENTAL_LOOP_DEPENDENCE_MASK(SDNode *N) {
+SDValue DAGTypeLegalizer::PromoteIntRes_LOOP_DEPENDENCE_MASK(SDNode *N) {
EVT VT = N->getValueType(0);
EVT NewVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
return DAG.getNode(N->getOpcode(), SDLoc(N), NewVT, N->ops());
@@ -2107,9 +2106,9 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
case ISD::PARTIAL_REDUCE_SMLA:
Res = PromoteIntOp_PARTIAL_REDUCE_MLA(N);
break;
- case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK:
- case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
- Res = PromoteIntOp_EXPERIMENTAL_LOOP_DEPENDENCE_MASK(N, OpNo);
+ case ISD::LOOP_DEPENDENCE_RAW_MASK:
+ case ISD::LOOP_DEPENDENCE_WAR_MASK:
+ Res = PromoteIntOp_LOOP_DEPENDENCE_MASK(N, OpNo);
break;
}
@@ -2912,8 +2911,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_PARTIAL_REDUCE_MLA(SDNode *N) {
return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
}
-SDValue DAGTypeLegalizer::PromoteIntOp_EXPERIMENTAL_LOOP_DEPENDENCE_MASK(
- SDNode *N, unsigned OpNo) {
+SDValue DAGTypeLegalizer::PromoteIntOp_LOOP_DEPENDENCE_MASK(SDNode *N,
+ unsigned OpNo) {
SmallVector<SDValue, 4> NewOps(N->ops());
NewOps[OpNo] = GetPromotedInteger(N->getOperand(OpNo));
return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index b9cfce5878b78..bfaccba6f1b2f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -381,7 +381,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
SDValue PromoteIntRes_VECTOR_FIND_LAST_ACTIVE(SDNode *N);
SDValue PromoteIntRes_GET_ACTIVE_LANE_MASK(SDNode *N);
SDValue PromoteIntRes_PARTIAL_REDUCE_MLA(SDNode *N);
- SDValue PromoteIntRes_EXPERIMENTAL_LOOP_DEPENDENCE_MASK(SDNode *N);
+ SDValue PromoteIntRes_LOOP_DEPENDENCE_MASK(SDNode *N);
// Integer Operand Promotion.
bool PromoteIntegerOperand(SDNode *N, unsigned OpNo);
@@ -435,8 +435,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
SDValue PromoteIntOp_VECTOR_FIND_LAST_ACTIVE(SDNode *N, unsigned OpNo);
SDValue PromoteIntOp_GET_ACTIVE_LANE_MASK(SDNode *N);
SDValue PromoteIntOp_PARTIAL_REDUCE_MLA(SDNode *N);
- SDValue PromoteIntOp_EXPERIMENTAL_LOOP_DEPENDENCE_MASK(SDNode *N,
- unsigned OpNo);
+ SDValue PromoteIntOp_LOOP_DEPENDENCE_MASK(SDNode *N, unsigned OpNo);
void SExtOrZExtPromotedOperands(SDValue &LHS, SDValue &RHS);
void PromoteSetCCOperands(SDValue &LHS,SDValue &RHS, ISD::CondCode Code);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 4abc5230e9bc7..959e99cc0e00e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -470,8 +470,8 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
case ISD::VECTOR_COMPRESS:
case ISD::SCMP:
case ISD::UCMP:
- case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
- case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK:
+ case ISD::LOOP_DEPENDENCE_WAR_MASK:
+ case ISD::LOOP_DEPENDENCE_RAW_MASK:
Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
break;
case ISD::SMULFIX:
@@ -1265,8 +1265,8 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
case ISD::UCMP:
Results.push_back(TLI.expandCMP(Node, DAG));
return;
- case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
- case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK:
+ case ISD::LOOP_DEPENDENCE_WAR_MASK:
+ case ISD::LOOP_DEPENDENCE_RAW_MASK:
Results.push_back(ExpandLOOP_DEPENDENCE_MASK(Node));
return;
@@ -1780,8 +1780,7 @@ SDValue VectorLegalizer::ExpandLOOP_DEPENDENCE_MASK(SDNode *N) {
SDValue SinkValue = N->getOperand(1);
SDValue EltSize = N->getOperand(2);
- bool IsReadAfterWrite =
- N->getOpcode() == ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK;
+ bool IsReadAfterWrite = N->getOpcode() == ISD::LOOP_DEPENDENCE_RAW_MASK;
auto VT = N->getValueType(0);
auto PtrVT = SourceValue->getValueType(0);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 78a8ec9b92ddb..279fe65884528 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -8244,13 +8244,13 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
visitVectorExtractLastActive(I, Intrinsic);
return;
}
- case Intrinsic::experimental_loop_dependence_war_mask:
- case Intrinsic::experimental_loop_dependence_raw_mask: {
+ case Intrinsic::loop_dependence_war_mask:
+ case Intrinsic::loop_dependence_raw_mask: {
auto IntrinsicVT = EVT::getEVT(I.getType());
SmallVector<SDValue, 4> Ops;
- unsigned ID = Intrinsic == Intrinsic::experimental_loop_dependence_war_mask
- ? ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK
- : ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK;
+ unsigned ID = Intrinsic == Intrinsic::loop_dependence_war_mask
+ ? ISD::LOOP_DEPENDENCE_WAR_MASK
+ : ISD::LOOP_DEPENDENCE_RAW_MASK;
setValue(&I,
DAG.getNode(ID, sdl, IntrinsicVT, getValue(I.getOperand(0)),
getValue(I.getOperand(1)), getValue(I.getOperand(2))));
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index 8894ace590335..bcb286894faa7 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -585,9 +585,9 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
return "partial_reduce_umla";
case ISD::PARTIAL_REDUCE_SMLA:
return "partial_reduce_smla";
- case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
+ case ISD::LOOP_DEPENDENCE_WAR_MASK:
return "loop_dep_war";
- case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK:
+ case ISD::LOOP_DEPENDENCE_RAW_MASK:
return "loop_dep_raw";
// Vector Predication
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index d2ab106eb5d5d..c8b021c48fb1e 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -840,8 +840,8 @@ void TargetLoweringBase::initActions() {
setOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE, VT, Expand);
// Lane masks with non-aliasing lanes enabled default to expand
- setOperationAction(ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK, VT, Expand);
- setOperationAction(ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK, VT, Expand);
+ setOperationAction(ISD::LOOP_DEPENDENCE_RAW_MASK, VT, Expand);
+ setOperationAction(ISD::LOOP_DEPENDENCE_WAR_MASK, VT, Expand);
// FP environment operations default to expand.
setOperationAction(ISD::GET_FPENV, VT, Expand);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index ac7deb830730c..fdb1bc17dcfcf 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1904,10 +1904,8 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
(Subtarget->hasSME() && Subtarget->isStreaming())) {
for (auto VT : {MVT::v2i32, MVT::v4i16, MVT::v8i8, MVT::v16i8, MVT::nxv2i1,
MVT::nxv4i1, MVT::nxv8i1, MVT::nxv16i1}) {
- setOperationAction(ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK, VT,
- Custom);
- setOperationAction(ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK, VT,
- Custom);
+ setOperationAction(ISD::LOOP_DEPENDENCE_RAW_MASK, VT, Custom);
+ setOperationAction(ISD::LOOP_DEPENDENCE_WAR_MASK, VT, Custom);
}
}
@@ -5101,8 +5099,7 @@ AArch64TargetLowering::LowerLOOP_DEPENDENCE_MASK(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
uint64_t EltSize = Op.getConstantOperandVal(2);
- bool IsWriteAfterRead =
- Op.getOpcode() == ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK;
+ bool IsWriteAfterRead = Op.getOpcode() == ISD::LOOP_DEPENDENCE_WAR_MASK;
unsigned Opcode =
IsWriteAfterRead ? AArch64ISD::WHILEWR : AArch64ISD::WHILERW;
EVT VT = Op.getValueType();
@@ -7302,8 +7299,8 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
default:
llvm_unreachable("unimplemented operand");
return SDValue();
- case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK:
- case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
+ case ISD::LOOP_DEPENDENCE_RAW_MASK:
+ case ISD::LOOP_DEPENDENCE_WAR_MASK:
return LowerLOOP_DEPENDENCE_MASK(Op, DAG);
case ISD::BITCAST:
return LowerBITCAST(Op, DAG);
@@ -27606,8 +27603,8 @@ void AArch64TargetLowering::ReplaceNodeResults(
// CONCAT_VECTORS -- but delegate to common code for result type
// legalisation
return;
- case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_WAR_MASK:
- case ISD::EXPERIMENTAL_LOOP_DEPENDENCE_RAW_MASK: {
+ case ISD::LOOP_DEPENDENCE_WAR_MASK:
+ case ISD::LOOP_DEPENDENCE_RAW_MASK: {
EVT VT = N->getValueType(0);
if (!VT.isFixedLengthVector() || VT.getVectorElementType() != MVT::i1)
return;
@@ -27678,8 +27675,8 @@ void AArch64TargetLowering::ReplaceNodeResults(
return;
}
case Intrinsic::experimental_vector_match:
- case Intrinsic::experimental_loop_dependence_raw_mask:
- case Intrinsic::experimental_loop_dependence_war_mask: {
+ case Intrinsic::loop_dependence_raw_mask:
+ case Intrinsic::loop_dependence_war_mask: {
if (!VT.isFixedLengthVector() || VT.getVectorElementType() != MVT::i1)
return;
diff --git a/llvm/test/CodeGen/AArch64/alias_mask.ll b/llvm/test/CodeGen/AArch64/alias_mask.ll
index 75ab6f62095b6..8b74b7101a740 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask.ll
@@ -51,7 +51,7 @@ define <16 x i1> @whilewr_8(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <16 x i1> @llvm.experimental.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 1)
+ %0 = call <16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 1)
ret <16 x i1> %0
}
@@ -91,7 +91,7 @@ define <8 x i1> @whilewr_16(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <8 x i1> @llvm.experimental.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 2)
+ %0 = call <8 x i1> @llvm.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 2)
ret <8 x i1> %0
}
@@ -125,7 +125,7 @@ define <4 x i1> @whilewr_32(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <4 x i1> @llvm.experimental.loop.dependence.war.mask.v4i1(ptr %a, ptr %b, i64 4)
+ %0 = call <4 x i1> @llvm.loop.dependence.war.mask.v4i1(ptr %a, ptr %b, i64 4)
ret <4 x i1> %0
}
@@ -155,7 +155,7 @@ define <2 x i1> @whilewr_64(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <2 x i1> @llvm.experimental.loop.dependence.war.mask.v2i1(ptr %a, ptr %b, i64 8)
+ %0 = call <2 x i1> @llvm.loop.dependence.war.mask.v2i1(ptr %a, ptr %b, i64 8)
ret <2 x i1> %0
}
@@ -209,7 +209,7 @@ define <16 x i1> @whilerw_8(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.16b, v0.16b, v1.16b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <16 x i1> @llvm.experimental.loop.dependence.raw.mask.v16i1(ptr %a, ptr %b, i64 1)
+ %0 = call <16 x i1> @llvm.loop.dependence.raw.mask.v16i1(ptr %a, ptr %b, i64 1)
ret <16 x i1> %0
}
@@ -250,7 +250,7 @@ define <8 x i1> @whilerw_16(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <8 x i1> @llvm.experimental.loop.dependence.raw.mask.v8i1(ptr %a, ptr %b, i64 2)
+ %0 = call <8 x i1> @llvm.loop.dependence.raw.mask.v8i1(ptr %a, ptr %b, i64 2)
ret <8 x i1> %0
}
@@ -285,7 +285,7 @@ define <4 x i1> @whilerw_32(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <4 x i1> @llvm.experimental.loop.dependence.raw.mask.v4i1(ptr %a, ptr %b, i64 4)
+ %0 = call <4 x i1> @llvm.loop.dependence.raw.mask.v4i1(ptr %a, ptr %b, i64 4)
ret <4 x i1> %0
}
@@ -316,6 +316,6 @@ define <2 x i1> @whilerw_64(ptr %a, ptr %b) {
; CHECK-NOSVE-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NOSVE-NEXT: ret
entry:
- %0 = call <2 x i1> @llvm.experimental.loop.dependence.raw.mask.v2i1(ptr %a, ptr %b, i64 8)
+ %0 = call <2 x i1> @llvm.loop.dependence.raw.mask.v2i1(ptr %a, ptr %b, i64 8)
ret <2 x i1> %0
}
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
index 5a7c3180e2807..47b8e31d8b5be 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
@@ -60,7 +60,7 @@ define <vscale x 16 x i1> @whilewr_8(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 16 x i1> @llvm.experimental.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 1)
+ %0 = call <vscale x 16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 1)
ret <vscale x 16 x i1> %0
}
@@ -98,7 +98,7 @@ define <vscale x 8 x i1> @whilewr_16(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 8 x i1> @llvm.experimental.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 2)
+ %0 = call <vscale x 8 x i1> @llvm.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 2)
ret <vscale x 8 x i1> %0
}
@@ -130,7 +130,7 @@ define <vscale x 4 x i1> @whilewr_32(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 4 x i1> @llvm.experimental.loop.dependence.war.mask.v4i1(ptr %a, ptr %b, i64 4)
+ %0 = call <vscale x 4 x i1> @llvm.loop.dependence.war.mask.v4i1(ptr %a, ptr %b, i64 4)
ret <vscale x 4 x i1> %0
}
@@ -158,7 +158,7 @@ define <vscale x 2 x i1> @whilewr_64(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 2 x i1> @llvm.experimental.loop.dependence.war.mask.v2i1(ptr %a, ptr %b, i64 8)
+ %0 = call <vscale x 2 x i1> @llvm.loop.dependence.war.mask.v2i1(ptr %a, ptr %b, i64 8)
ret <vscale x 2 x i1> %0
}
@@ -223,7 +223,7 @@ define <vscale x 16 x i1> @whilerw_8(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 16 x i1> @llvm.experimental.loop.dependence.raw.mask.v16i1(ptr %a, ptr %b, i64 1)
+ %0 = call <vscale x 16 x i1> @llvm.loop.dependence.raw.mask.v16i1(ptr %a, ptr %b, i64 1)
ret <vscale x 16 x i1> %0
}
@@ -262,7 +262,7 @@ define <vscale x 8 x i1> @whilerw_16(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 8 x i1> @llvm.experimental.loop.dependence.raw.mask.v8i1(ptr %a, ptr %b, i64 2)
+ %0 = call <vscale x 8 x i1> @llvm.loop.dependence.raw.mask.v8i1(ptr %a, ptr %b, i64 2)
ret <vscale x 8 x i1> %0
}
@@ -295,7 +295,7 @@ define <vscale x 4 x i1> @whilerw_32(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 4 x i1> @llvm.experimental.loop.dependence.raw.mask.v4i1(ptr %a, ptr %b, i64 4)
+ %0 = call <vscale x 4 x i1> @llvm.loop.dependence.raw.mask.v4i1(ptr %a, ptr %b, i64 4)
ret <vscale x 4 x i1> %0
}
@@ -324,6 +324,6 @@ define <vscale x 2 x i1> @whilerw_64(ptr %a, ptr %b) {
; CHECK-SVE-NEXT: sel p0.b, p0, p0.b, p1.b
; CHECK-SVE-NEXT: ret
entry:
- %0 = call <vscale x 2 x i1> @llvm.experimental.loop.dependence.raw.mask.v2i1(ptr %a, ptr %b, i64 8)
+ %0 = call <vscale x 2 x i1> @llvm.loop.dependence.raw.mask.v2i1(ptr %a, ptr %b, i64 8)
ret <vscale x 2 x i1> %0
}
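And a scalable counterpart, again only a sketch: it combines the alias mask with the usual trip-count mask the way a vectoriser might. The .nxv16i1 mangling for the <vscale x 16 x i1> result and the @combined_mask wrapper are assumptions; only the intrinsic names and semantics come from the patch.

declare <vscale x 16 x i1> @llvm.loop.dependence.war.mask.nxv16i1(ptr, ptr, i64)
declare <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64, i64)

; A lane is enabled only if it is both inside the trip count and free of
; a write-after-read hazard between %a and %b (element size 1 byte).
define <vscale x 16 x i1> @combined_mask(ptr %a, ptr %b, i64 %i, i64 %n) {
entry:
  %alias = call <vscale x 16 x i1> @llvm.loop.dependence.war.mask.nxv16i1(ptr %a, ptr %b, i64 1)
  %active = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 %i, i64 %n)
  %mask = and <vscale x 16 x i1> %alias, %active
  ret <vscale x 16 x i1> %mask
}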
>From 64a971482fe71b1dd1ca18024e18e2812babe642 Mon Sep 17 00:00:00 2001
From: Sam Tebbs <samuel.tebbs at arm.com>
Date: Wed, 21 May 2025 15:16:06 +0100
Subject: [PATCH 25/25] Clean up vector type creation
---
llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp | 12 ++++--------
.../lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 1 -
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 9 +++++----
3 files changed, 9 insertions(+), 13 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 959e99cc0e00e..8328050fab3b8 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -1791,21 +1791,17 @@ SDValue VectorLegalizer::ExpandLOOP_DEPENDENCE_MASK(SDNode *N) {
Diff = DAG.getNode(ISD::SDIV, DL, PtrVT, Diff, EltSize);
// If the difference is positive then some elements may alias
- auto CmpVT = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
- Diff.getValueType());
+ EVT CmpVT = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
+ Diff.getValueType());
SDValue Zero = DAG.getTargetConstant(0, DL, PtrVT);
SDValue Cmp = DAG.getSetCC(DL, CmpVT, Diff, Zero,
IsReadAfterWrite ? ISD::SETEQ : ISD::SETLE);
// Create the lane mask
- EVT SplatTY =
- EVT::getVectorVT(*DAG.getContext(), PtrVT, VT.getVectorMinNumElements(),
- VT.isScalableVector());
+ EVT SplatTY = VT.changeElementType(PtrVT);
SDValue DiffSplat = DAG.getSplat(SplatTY, DL, Diff);
SDValue VectorStep = DAG.getStepVector(DL, SplatTY);
- EVT MaskVT =
- EVT::getVectorVT(*DAG.getContext(), MVT::i1, VT.getVectorMinNumElements(),
- VT.isScalableVector());
+ EVT MaskVT = VT.changeElementType(MVT::i1);
SDValue DiffMask =
DAG.getSetCC(DL, MaskVT, VectorStep, DiffSplat, ISD::CondCode::SETULT);
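For reference, the expansion this hunk tidies computes, in IR terms, roughly the following for a fixed-width <4 x i1> write-after-read mask with 4-byte elements. This is a behavioural sketch, not the DAG itself, and the final or (all lanes safe when the pointer difference is non-positive) is an assumption inferred from the SETLE compare above.

declare <4 x i64> @llvm.stepvector.v4i64()

define <4 x i1> @expanded_war_mask(ptr %a, ptr %b) {
entry:
  %pa = ptrtoint ptr %a to i64
  %pb = ptrtoint ptr %b to i64
  %sub = sub i64 %pb, %pa
  ; Signed distance between the accesses, in elements.
  %diff = sdiv i64 %sub, 4
  ; Non-positive distance: no forward overlap, every lane is safe.
  %safe = icmp sle i64 %diff, 0
  %d.ins = insertelement <4 x i64> poison, i64 %diff, i64 0
  %d.splat = shufflevector <4 x i64> %d.ins, <4 x i64> poison, <4 x i32> zeroinitializer
  %step = call <4 x i64> @llvm.stepvector.v4i64()
  ; Otherwise lane i is safe only if i < diff.
  %inbounds = icmp ult <4 x i64> %step, %d.splat
  %s.ins = insertelement <4 x i1> poison, i1 %safe, i64 0
  %s.splat = shufflevector <4 x i1> %s.ins, <4 x i1> poison, <4 x i32> zeroinitializer
  %mask = or <4 x i1> %s.splat, %inbounds
  ret <4 x i1> %mask
}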
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 279fe65884528..3fd9ee6a3411c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -8247,7 +8247,6 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
case Intrinsic::loop_dependence_war_mask:
case Intrinsic::loop_dependence_raw_mask: {
auto IntrinsicVT = EVT::getEVT(I.getType());
- SmallVector<SDValue, 4> Ops;
unsigned ID = Intrinsic == Intrinsic::loop_dependence_war_mask
? ISD::LOOP_DEPENDENCE_WAR_MASK
: ISD::LOOP_DEPENDENCE_RAW_MASK;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index fdb1bc17dcfcf..a6cf6636548ea 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -6390,7 +6390,8 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
return DAG.getNode(AArch64ISD::USDOT, dl, Op.getValueType(),
Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
}
- case Intrinsic::experimental_get_nonalias_lane_mask: {
+ case Intrinsic::loop_dependence_war_mask:
+ case Intrinsic::loop_dependence_raw_mask: {
unsigned IntrinsicID = 0;
uint64_t EltSize = Op.getOperand(3)->getAsZExtVal();
bool IsWriteAfterRead = Op.getOperand(4)->getAsZExtVal() == 1;
@@ -19643,9 +19644,9 @@ static SDValue getPTest(SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op,
AArch64CC::CondCode Cond);
static bool isPredicateCCSettingOp(SDValue N) {
- if ((N.getOpcode() == ISD::SETCC ||
- // get_active_lane_mask is lowered to a whilelo instruction.
- N.getOpcode() == ISD::GET_ACTIVE_LANE_MASK) ||
+ if ((N.getOpcode() == ISD::SETCC) ||
+ // get_active_lane_mask is lowered to a whilelo instruction.
+ (N.getOpcode() == ISD::GET_ACTIVE_LANE_MASK) ||
(N.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
(N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilege ||
N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilegt ||