[llvm] b61144b - [AArch64] Allow lowering of more types to GET_ACTIVE_LANE_MASK (#140062)
via llvm-commits
llvm-commits at lists.llvm.org
Tue May 27 03:22:00 PDT 2025
Author: Kerry McLaughlin
Date: 2025-05-27T11:21:57+01:00
New Revision: b61144bf77eedbcb209c0328e8867f693624aacc
URL: https://github.com/llvm/llvm-project/commit/b61144bf77eedbcb209c0328e8867f693624aacc
DIFF: https://github.com/llvm/llvm-project/commit/b61144bf77eedbcb209c0328e8867f693624aacc.diff
LOG: [AArch64] Allow lowering of more types to GET_ACTIVE_LANE_MASK (#140062)
Adds support for promoting the operands and for splitting/widening the
result of the ISD::GET_ACTIVE_LANE_MASK node.
For AArch64, shouldExpandGetActiveLaneMask now returns false for more
types that we know can be legalised.
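For context: llvm.get.active.lane.mask(%base, %n) produces a predicate
whose lane i is active iff %base + i < %n, with the addition evaluated
as if in an arbitrarily wide integer, so lanes whose index calculation
would wrap come out inactive. Below is a minimal scalar model in plain
C++ (activeLaneMask is a hypothetical name, not an LLVM API); the
saturating add mirrors the no-wrap semantics:

#include <cstdint>
#include <vector>

// Scalar model of llvm.get.active.lane.mask(base, n): lane i is active
// iff base + i < n. Saturating the index to UINT64_MAX means an
// overflowing lane can never compare below n, matching the intrinsic's
// no-wrap semantics.
std::vector<bool> activeLaneMask(uint64_t base, uint64_t n,
                                 unsigned lanes) {
  std::vector<bool> mask(lanes);
  for (unsigned i = 0; i < lanes; ++i) {
    uint64_t idx = base > UINT64_MAX - i ? UINT64_MAX : base + i;
    mask[i] = idx < n;
  }
  return mask;
}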
Added:
Modified:
llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll
llvm/test/CodeGen/AArch64/active_lane_mask.ll
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 90af5f2cd8e70..2bcca91f6f81a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -2088,6 +2088,9 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
case ISD::VECTOR_FIND_LAST_ACTIVE:
Res = PromoteIntOp_VECTOR_FIND_LAST_ACTIVE(N, OpNo);
break;
+ case ISD::GET_ACTIVE_LANE_MASK:
+ Res = PromoteIntOp_GET_ACTIVE_LANE_MASK(N);
+ break;
case ISD::PARTIAL_REDUCE_UMLA:
case ISD::PARTIAL_REDUCE_SMLA:
Res = PromoteIntOp_PARTIAL_REDUCE_MLA(N);
@@ -2874,6 +2877,13 @@ SDValue DAGTypeLegalizer::PromoteIntOp_VECTOR_FIND_LAST_ACTIVE(SDNode *N,
return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
}
+SDValue DAGTypeLegalizer::PromoteIntOp_GET_ACTIVE_LANE_MASK(SDNode *N) {
+ SmallVector<SDValue, 1> NewOps(N->ops());
+ NewOps[0] = ZExtPromotedInteger(N->getOperand(0));
+ NewOps[1] = ZExtPromotedInteger(N->getOperand(1));
+ return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
+}
+
SDValue DAGTypeLegalizer::PromoteIntOp_PARTIAL_REDUCE_MLA(SDNode *N) {
SmallVector<SDValue, 1> NewOps(N->ops());
if (N->getOpcode() == ISD::PARTIAL_REDUCE_SMLA) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index cf3a9e23f4878..dd9af47da5287 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -432,6 +432,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
SDValue PromoteIntOp_VP_SPLICE(SDNode *N, unsigned OpNo);
SDValue PromoteIntOp_VECTOR_HISTOGRAM(SDNode *N, unsigned OpNo);
SDValue PromoteIntOp_VECTOR_FIND_LAST_ACTIVE(SDNode *N, unsigned OpNo);
+ SDValue PromoteIntOp_GET_ACTIVE_LANE_MASK(SDNode *N);
SDValue PromoteIntOp_PARTIAL_REDUCE_MLA(SDNode *N);
void SExtOrZExtPromotedOperands(SDValue &LHS, SDValue &RHS);
@@ -985,6 +986,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
void SplitVecRes_FP_TO_XINT_SAT(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_VP_REVERSE(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_PARTIAL_REDUCE_MLA(SDNode *N, SDValue &Lo, SDValue &Hi);
+ void SplitVecRes_GET_ACTIVE_LANE_MASK(SDNode *N, SDValue &Lo, SDValue &Hi);
// Vector Operand Splitting: <128 x ty> -> 2 x <64 x ty>.
bool SplitVectorOperand(SDNode *N, unsigned OpNo);
@@ -1081,6 +1083,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
SDValue WidenVecRes_UNDEF(SDNode *N);
SDValue WidenVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N);
SDValue WidenVecRes_VECTOR_REVERSE(SDNode *N);
+ SDValue WidenVecRes_GET_ACTIVE_LANE_MASK(SDNode *N);
SDValue WidenVecRes_Ternary(SDNode *N);
SDValue WidenVecRes_Binary(SDNode *N);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index c011a0a61d698..4d844f0036a75 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1389,6 +1389,9 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
case ISD::PARTIAL_REDUCE_SMLA:
SplitVecRes_PARTIAL_REDUCE_MLA(N, Lo, Hi);
break;
+ case ISD::GET_ACTIVE_LANE_MASK:
+ SplitVecRes_GET_ACTIVE_LANE_MASK(N, Lo, Hi);
+ break;
}
// If Lo/Hi is null, the sub-method took care of registering results etc.
@@ -3234,6 +3237,22 @@ void DAGTypeLegalizer::SplitVecRes_PARTIAL_REDUCE_MLA(SDNode *N, SDValue &Lo,
Hi = DAG.getNode(Opcode, DL, ResultVT, AccHi, Input1Hi, Input2Hi);
}
+void DAGTypeLegalizer::SplitVecRes_GET_ACTIVE_LANE_MASK(SDNode *N, SDValue &Lo,
+ SDValue &Hi) {
+ SDLoc DL(N);
+ SDValue Op0 = N->getOperand(0);
+ SDValue Op1 = N->getOperand(1);
+ EVT OpVT = Op0.getValueType();
+
+ EVT LoVT, HiVT;
+ std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
+
+ Lo = DAG.getNode(ISD::GET_ACTIVE_LANE_MASK, DL, LoVT, Op0, Op1);
+ SDValue LoElts = DAG.getElementCount(DL, OpVT, LoVT.getVectorElementCount());
+ SDValue HiStartVal = DAG.getNode(ISD::UADDSAT, DL, OpVT, Op0, LoElts);
+ Hi = DAG.getNode(ISD::GET_ACTIVE_LANE_MASK, DL, HiVT, HiStartVal, Op1);
+}
+
void DAGTypeLegalizer::SplitVecRes_VECTOR_DEINTERLEAVE(SDNode *N) {
unsigned Factor = N->getNumOperands();
@@ -4631,6 +4650,9 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
case ISD::VECTOR_REVERSE:
Res = WidenVecRes_VECTOR_REVERSE(N);
break;
+ case ISD::GET_ACTIVE_LANE_MASK:
+ Res = WidenVecRes_GET_ACTIVE_LANE_MASK(N);
+ break;
case ISD::ADD: case ISD::VP_ADD:
case ISD::AND: case ISD::VP_AND:
@@ -6579,6 +6601,11 @@ SDValue DAGTypeLegalizer::WidenVecRes_VECTOR_REVERSE(SDNode *N) {
Mask);
}
+SDValue DAGTypeLegalizer::WidenVecRes_GET_ACTIVE_LANE_MASK(SDNode *N) {
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
+ return DAG.getNode(ISD::GET_ACTIVE_LANE_MASK, SDLoc(N), NVT, N->ops());
+}
+
SDValue DAGTypeLegalizer::WidenVecRes_SETCC(SDNode *N) {
assert(N->getValueType(0).isVector() &&
N->getOperand(0).getValueType().isVector() &&
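The new SplitVecRes_GET_ACTIVE_LANE_MASK above keeps the original start
index for the low half and starts the high half at the start index plus
the low half's element count, computed with ISD::UADDSAT so that an
overflowing high start saturates (yielding an all-false high mask)
instead of wrapping. A scalar sketch of the same decomposition, reusing
the hypothetical activeLaneMask model shown after the commit message:

#include <cstdint>
#include <utility>
#include <vector>

// Split a 2*L-lane mask into two L-lane halves: Lo starts at base, Hi
// starts at the saturating sum base + L (UADDSAT in the DAG), so the
// concatenation Lo:Hi equals the original 2*L-lane mask.
std::pair<std::vector<bool>, std::vector<bool>>
splitActiveLaneMask(uint64_t base, uint64_t n, unsigned L) {
  uint64_t hiStart = base > UINT64_MAX - L ? UINT64_MAX : base + L;
  return {activeLaneMask(base, n, L), activeLaneMask(hiStart, n, L)};
}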
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 4dacd2273306e..f0a703be35207 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2102,18 +2102,18 @@ void AArch64TargetLowering::addTypeForNEON(MVT VT) {
bool AArch64TargetLowering::shouldExpandGetActiveLaneMask(EVT ResVT,
EVT OpVT) const {
// Only SVE has a 1:1 mapping from intrinsic -> instruction (whilelo).
- if (!Subtarget->hasSVE())
+ if (!Subtarget->hasSVE() || ResVT.getVectorElementType() != MVT::i1)
return true;
- // We can only support legal predicate result types. We can use the SVE
- // whilelo instruction for generating fixed-width predicates too.
- if (ResVT != MVT::nxv2i1 && ResVT != MVT::nxv4i1 && ResVT != MVT::nxv8i1 &&
- ResVT != MVT::nxv16i1 && ResVT != MVT::v2i1 && ResVT != MVT::v4i1 &&
- ResVT != MVT::v8i1 && ResVT != MVT::v16i1)
+ // Only support illegal types if the result is scalable and min elements > 1.
+ if (ResVT.getVectorMinNumElements() == 1 ||
+ (ResVT.isFixedLengthVector() && (ResVT.getVectorNumElements() > 16 ||
+ (OpVT != MVT::i32 && OpVT != MVT::i64))))
return true;
- // The whilelo instruction only works with i32 or i64 scalar inputs.
- if (OpVT != MVT::i32 && OpVT != MVT::i64)
+ // 32 & 64 bit operands are supported. We can promote anything < 64 bits,
+ // but anything larger should be expanded.
+ if (OpVT.getFixedSizeInBits() > 64)
return true;
return false;
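The rewritten shouldExpandGetActiveLaneMask no longer enumerates the
legal predicate types; it only rejects shapes the type legaliser still
cannot reach. A hypothetical standalone restatement of the policy it
encodes (assuming SVE is present; true means "expand in generic code"):

// Not the LLVM API: a plain struct standing in for EVT, for
// illustration only.
struct MaskVT {
  bool Scalable;    // scalable (nxv...) rather than fixed-width
  unsigned MinElts; // minimum element count of the result
  unsigned EltBits; // element width in bits
};

bool shouldExpand(MaskVT Res, unsigned OpBits) {
  if (Res.EltBits != 1) // result must be an i1 predicate
    return true;
  if (Res.MinElts == 1) // single-element results cannot be split/widened
    return true;
  if (!Res.Scalable &&
      (Res.MinElts > 16 || (OpBits != 32 && OpBits != 64)))
    return true; // fixed-width: only the whilelo-legal shapes
  return OpBits > 64; // operands up to 64 bits can be promoted
}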
diff --git a/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll b/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll
index 1716f48f4b042..ec84c58bf9681 100644
--- a/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll
@@ -920,8 +920,8 @@ define void @get_lane_mask() #0 {
; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of 1 for: %mask_nxv8i1_i32 = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i32(i32 undef, i32 undef)
; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of 1 for: %mask_nxv4i1_i32 = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 undef, i32 undef)
; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of 1 for: %mask_nxv2i1_i32 = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i32(i32 undef, i32 undef)
-; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of RThru:48 CodeSize:33 Lat:33 SizeLat:33 for: %mask_nxv32i1_i64 = call <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i64(i64 undef, i64 undef)
-; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of RThru:6 CodeSize:5 Lat:5 SizeLat:5 for: %mask_nxv16i1_i16 = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i16(i16 undef, i16 undef)
+; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of 2 for: %mask_nxv32i1_i64 = call <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i64(i64 undef, i64 undef)
+; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of 1 for: %mask_nxv16i1_i16 = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i16(i16 undef, i16 undef)
; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of 32 for: %mask_v16i1_i64 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i64(i64 undef, i64 undef)
; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of 16 for: %mask_v8i1_i64 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i64(i64 undef, i64 undef)
; CHECK-VSCALE-1-NEXT: Cost Model: Found costs of 8 for: %mask_v4i1_i64 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 undef, i64 undef)
@@ -943,8 +943,8 @@ define void @get_lane_mask() #0 {
; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of 1 for: %mask_nxv8i1_i32 = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i32(i32 undef, i32 undef)
; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of 1 for: %mask_nxv4i1_i32 = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 undef, i32 undef)
; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of 1 for: %mask_nxv2i1_i32 = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i32(i32 undef, i32 undef)
-; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of RThru:48 CodeSize:33 Lat:33 SizeLat:33 for: %mask_nxv32i1_i64 = call <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i64(i64 undef, i64 undef)
-; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of RThru:6 CodeSize:5 Lat:5 SizeLat:5 for: %mask_nxv16i1_i16 = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i16(i16 undef, i16 undef)
+; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of 2 for: %mask_nxv32i1_i64 = call <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i64(i64 undef, i64 undef)
+; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of 1 for: %mask_nxv16i1_i16 = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i16(i16 undef, i16 undef)
; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of 32 for: %mask_v16i1_i64 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i64(i64 undef, i64 undef)
; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of 16 for: %mask_v8i1_i64 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i64(i64 undef, i64 undef)
; CHECK-VSCALE-2-NEXT: Cost Model: Found costs of 8 for: %mask_v4i1_i64 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 undef, i64 undef)
@@ -966,8 +966,8 @@ define void @get_lane_mask() #0 {
; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of 1 for: %mask_nxv8i1_i32 = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i32(i32 undef, i32 undef)
; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of 1 for: %mask_nxv4i1_i32 = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 undef, i32 undef)
; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of 1 for: %mask_nxv2i1_i32 = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i32(i32 undef, i32 undef)
-; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of RThru:48 CodeSize:33 Lat:33 SizeLat:33 for: %mask_nxv32i1_i64 = call <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i64(i64 undef, i64 undef)
-; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of RThru:6 CodeSize:5 Lat:5 SizeLat:5 for: %mask_nxv16i1_i16 = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i16(i16 undef, i16 undef)
+; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of 2 for: %mask_nxv32i1_i64 = call <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i64(i64 undef, i64 undef)
+; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of 1 for: %mask_nxv16i1_i16 = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i16(i16 undef, i16 undef)
; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of 32 for: %mask_v16i1_i64 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i64(i64 undef, i64 undef)
; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of 16 for: %mask_v8i1_i64 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i64(i64 undef, i64 undef)
; TYPE_BASED_ONLY-NEXT: Cost Model: Found costs of 8 for: %mask_v4i1_i64 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 undef, i64 undef)
diff --git a/llvm/test/CodeGen/AArch64/active_lane_mask.ll b/llvm/test/CodeGen/AArch64/active_lane_mask.ll
index 025bbf749fc71..fd34a1498bcd2 100644
--- a/llvm/test/CodeGen/AArch64/active_lane_mask.ll
+++ b/llvm/test/CodeGen/AArch64/active_lane_mask.ll
@@ -78,12 +78,9 @@ define <vscale x 2 x i1> @lane_mask_nxv2i1_i64(i64 %index, i64 %TC) {
define <vscale x 16 x i1> @lane_mask_nxv16i1_i8(i8 %index, i8 %TC) {
; CHECK-LABEL: lane_mask_nxv16i1_i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: index z0.b, #0, #1
-; CHECK-NEXT: mov z1.b, w0
-; CHECK-NEXT: ptrue p0.b
-; CHECK-NEXT: uqadd z0.b, z0.b, z1.b
-; CHECK-NEXT: mov z1.b, w1
-; CHECK-NEXT: cmphi p0.b, p0/z, z1.b, z0.b
+; CHECK-NEXT: and w8, w1, #0xff
+; CHECK-NEXT: and w9, w0, #0xff
+; CHECK-NEXT: whilelo p0.b, w9, w8
; CHECK-NEXT: ret
%active.lane.mask = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i8(i8 %index, i8 %TC)
ret <vscale x 16 x i1> %active.lane.mask
@@ -92,16 +89,9 @@ define <vscale x 16 x i1> @lane_mask_nxv16i1_i8(i8 %index, i8 %TC) {
define <vscale x 8 x i1> @lane_mask_nxv8i1_i8(i8 %index, i8 %TC) {
; CHECK-LABEL: lane_mask_nxv8i1_i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: index z0.h, #0, #1
-; CHECK-NEXT: mov z1.h, w0
-; CHECK-NEXT: ptrue p0.h
-; CHECK-NEXT: and z1.h, z1.h, #0xff
-; CHECK-NEXT: and z0.h, z0.h, #0xff
-; CHECK-NEXT: add z0.h, z0.h, z1.h
-; CHECK-NEXT: mov z1.h, w1
-; CHECK-NEXT: umin z0.h, z0.h, #255
-; CHECK-NEXT: and z1.h, z1.h, #0xff
-; CHECK-NEXT: cmphi p0.h, p0/z, z1.h, z0.h
+; CHECK-NEXT: and w8, w1, #0xff
+; CHECK-NEXT: and w9, w0, #0xff
+; CHECK-NEXT: whilelo p0.h, w9, w8
; CHECK-NEXT: ret
%active.lane.mask = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i8(i8 %index, i8 %TC)
ret <vscale x 8 x i1> %active.lane.mask
@@ -110,16 +100,9 @@ define <vscale x 8 x i1> @lane_mask_nxv8i1_i8(i8 %index, i8 %TC) {
define <vscale x 4 x i1> @lane_mask_nxv4i1_i8(i8 %index, i8 %TC) {
; CHECK-LABEL: lane_mask_nxv4i1_i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: index z0.s, #0, #1
-; CHECK-NEXT: and w8, w0, #0xff
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: mov z1.s, w8
; CHECK-NEXT: and w8, w1, #0xff
-; CHECK-NEXT: and z0.s, z0.s, #0xff
-; CHECK-NEXT: add z0.s, z0.s, z1.s
-; CHECK-NEXT: mov z1.s, w8
-; CHECK-NEXT: umin z0.s, z0.s, #255
-; CHECK-NEXT: cmphi p0.s, p0/z, z1.s, z0.s
+; CHECK-NEXT: and w9, w0, #0xff
+; CHECK-NEXT: whilelo p0.s, w9, w8
; CHECK-NEXT: ret
%active.lane.mask = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i8(i8 %index, i8 %TC)
ret <vscale x 4 x i1> %active.lane.mask
@@ -128,18 +111,9 @@ define <vscale x 4 x i1> @lane_mask_nxv4i1_i8(i8 %index, i8 %TC) {
define <vscale x 2 x i1> @lane_mask_nxv2i1_i8(i8 %index, i8 %TC) {
; CHECK-LABEL: lane_mask_nxv2i1_i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: index z0.d, #0, #1
-; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
-; CHECK-NEXT: and x8, x0, #0xff
-; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: mov z1.d, x8
-; CHECK-NEXT: and x8, x1, #0xff
-; CHECK-NEXT: and z0.d, z0.d, #0xff
-; CHECK-NEXT: add z0.d, z0.d, z1.d
-; CHECK-NEXT: mov z1.d, x8
-; CHECK-NEXT: umin z0.d, z0.d, #255
-; CHECK-NEXT: cmphi p0.d, p0/z, z1.d, z0.d
+; CHECK-NEXT: and w8, w1, #0xff
+; CHECK-NEXT: and w9, w0, #0xff
+; CHECK-NEXT: whilelo p0.d, w9, w8
; CHECK-NEXT: ret
%active.lane.mask = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i8(i8 %index, i8 %TC)
ret <vscale x 2 x i1> %active.lane.mask
@@ -151,56 +125,11 @@ define <vscale x 2 x i1> @lane_mask_nxv2i1_i8(i8 %index, i8 %TC) {
define <vscale x 32 x i1> @lane_mask_nxv32i1_i32(i32 %index, i32 %TC) {
; CHECK-LABEL: lane_mask_nxv32i1_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
-; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: index z0.s, #0, #1
-; CHECK-NEXT: mov z1.s, w0
-; CHECK-NEXT: mov z25.s, w1
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: mov z2.d, z0.d
-; CHECK-NEXT: mov z3.d, z0.d
-; CHECK-NEXT: uqadd z6.s, z0.s, z1.s
-; CHECK-NEXT: incw z0.s, all, mul #4
-; CHECK-NEXT: incw z2.s
-; CHECK-NEXT: incw z3.s, all, mul #2
-; CHECK-NEXT: uqadd z0.s, z0.s, z1.s
-; CHECK-NEXT: cmphi p2.s, p0/z, z25.s, z6.s
-; CHECK-NEXT: mov z4.d, z2.d
-; CHECK-NEXT: uqadd z5.s, z2.s, z1.s
-; CHECK-NEXT: uqadd z7.s, z3.s, z1.s
-; CHECK-NEXT: incw z2.s, all, mul #4
-; CHECK-NEXT: incw z3.s, all, mul #4
-; CHECK-NEXT: cmphi p5.s, p0/z, z25.s, z0.s
-; CHECK-NEXT: incw z4.s, all, mul #2
-; CHECK-NEXT: uqadd z2.s, z2.s, z1.s
-; CHECK-NEXT: uqadd z3.s, z3.s, z1.s
-; CHECK-NEXT: cmphi p1.s, p0/z, z25.s, z5.s
-; CHECK-NEXT: cmphi p3.s, p0/z, z25.s, z7.s
-; CHECK-NEXT: uqadd z24.s, z4.s, z1.s
-; CHECK-NEXT: incw z4.s, all, mul #4
-; CHECK-NEXT: cmphi p6.s, p0/z, z25.s, z2.s
-; CHECK-NEXT: cmphi p7.s, p0/z, z25.s, z3.s
-; CHECK-NEXT: uzp1 p1.h, p2.h, p1.h
-; CHECK-NEXT: uqadd z1.s, z4.s, z1.s
-; CHECK-NEXT: cmphi p4.s, p0/z, z25.s, z24.s
-; CHECK-NEXT: cmphi p0.s, p0/z, z25.s, z1.s
-; CHECK-NEXT: uzp1 p2.h, p3.h, p4.h
-; CHECK-NEXT: uzp1 p3.h, p5.h, p6.h
-; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: uzp1 p4.h, p7.h, p0.h
-; CHECK-NEXT: ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: uzp1 p0.b, p1.b, p2.b
-; CHECK-NEXT: uzp1 p1.b, p3.b, p4.b
-; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: addvl sp, sp, #1
-; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: adds w8, w0, w8
+; CHECK-NEXT: csinv w8, w8, wzr, lo
+; CHECK-NEXT: whilelo p0.b, w0, w1
+; CHECK-NEXT: whilelo p1.b, w8, w1
; CHECK-NEXT: ret
%active.lane.mask = call <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i32(i32 %index, i32 %TC)
ret <vscale x 32 x i1> %active.lane.mask
@@ -209,99 +138,11 @@ define <vscale x 32 x i1> @lane_mask_nxv32i1_i32(i32 %index, i32 %TC) {
define <vscale x 32 x i1> @lane_mask_nxv32i1_i64(i64 %index, i64 %TC) {
; CHECK-LABEL: lane_mask_nxv32i1_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-2
-; CHECK-NEXT: str p9, [sp, #2, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: str p8, [sp, #3, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
-; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: index z5.d, #0, #1
-; CHECK-NEXT: mov z0.d, x0
-; CHECK-NEXT: mov z3.d, x1
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: mov z2.d, z5.d
-; CHECK-NEXT: mov z1.d, z5.d
-; CHECK-NEXT: mov z4.d, z5.d
-; CHECK-NEXT: uqadd z25.d, z5.d, z0.d
-; CHECK-NEXT: incd z5.d, all, mul #8
-; CHECK-NEXT: incd z2.d
-; CHECK-NEXT: incd z1.d, all, mul #2
-; CHECK-NEXT: incd z4.d, all, mul #4
-; CHECK-NEXT: uqadd z5.d, z5.d, z0.d
-; CHECK-NEXT: cmphi p3.d, p0/z, z3.d, z25.d
-; CHECK-NEXT: mov z6.d, z2.d
-; CHECK-NEXT: mov z7.d, z2.d
-; CHECK-NEXT: mov z24.d, z1.d
-; CHECK-NEXT: uqadd z26.d, z2.d, z0.d
-; CHECK-NEXT: uqadd z27.d, z1.d, z0.d
-; CHECK-NEXT: uqadd z28.d, z4.d, z0.d
-; CHECK-NEXT: incd z2.d, all, mul #8
-; CHECK-NEXT: incd z1.d, all, mul #8
-; CHECK-NEXT: incd z4.d, all, mul #8
-; CHECK-NEXT: incd z6.d, all, mul #2
-; CHECK-NEXT: incd z7.d, all, mul #4
-; CHECK-NEXT: incd z24.d, all, mul #4
-; CHECK-NEXT: cmphi p4.d, p0/z, z3.d, z26.d
-; CHECK-NEXT: cmphi p2.d, p0/z, z3.d, z27.d
-; CHECK-NEXT: cmphi p1.d, p0/z, z3.d, z28.d
-; CHECK-NEXT: mov z31.d, z6.d
-; CHECK-NEXT: uqadd z29.d, z6.d, z0.d
-; CHECK-NEXT: uqadd z30.d, z7.d, z0.d
-; CHECK-NEXT: uqadd z8.d, z24.d, z0.d
-; CHECK-NEXT: incd z6.d, all, mul #8
-; CHECK-NEXT: incd z7.d, all, mul #8
-; CHECK-NEXT: incd z24.d, all, mul #8
-; CHECK-NEXT: uqadd z2.d, z2.d, z0.d
-; CHECK-NEXT: uqadd z1.d, z1.d, z0.d
-; CHECK-NEXT: incd z31.d, all, mul #4
-; CHECK-NEXT: uqadd z4.d, z4.d, z0.d
-; CHECK-NEXT: uzp1 p3.s, p3.s, p4.s
-; CHECK-NEXT: cmphi p5.d, p0/z, z3.d, z29.d
-; CHECK-NEXT: cmphi p7.d, p0/z, z3.d, z30.d
-; CHECK-NEXT: uqadd z6.d, z6.d, z0.d
-; CHECK-NEXT: cmphi p6.d, p0/z, z3.d, z8.d
-; CHECK-NEXT: ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: uqadd z7.d, z7.d, z0.d
-; CHECK-NEXT: uqadd z25.d, z31.d, z0.d
-; CHECK-NEXT: incd z31.d, all, mul #8
-; CHECK-NEXT: uqadd z24.d, z24.d, z0.d
-; CHECK-NEXT: cmphi p4.d, p0/z, z3.d, z5.d
-; CHECK-NEXT: uzp1 p2.s, p2.s, p5.s
-; CHECK-NEXT: cmphi p5.d, p0/z, z3.d, z2.d
-; CHECK-NEXT: cmphi p9.d, p0/z, z3.d, z6.d
-; CHECK-NEXT: uqadd z0.d, z31.d, z0.d
-; CHECK-NEXT: uzp1 p1.s, p1.s, p7.s
-; CHECK-NEXT: cmphi p7.d, p0/z, z3.d, z1.d
-; CHECK-NEXT: cmphi p8.d, p0/z, z3.d, z25.d
-; CHECK-NEXT: uzp1 p2.h, p3.h, p2.h
-; CHECK-NEXT: cmphi p3.d, p0/z, z3.d, z7.d
-; CHECK-NEXT: uzp1 p4.s, p4.s, p5.s
-; CHECK-NEXT: uzp1 p5.s, p7.s, p9.s
-; CHECK-NEXT: ldr p9, [sp, #2, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: uzp1 p6.s, p6.s, p8.s
-; CHECK-NEXT: cmphi p8.d, p0/z, z3.d, z4.d
-; CHECK-NEXT: ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: uzp1 p4.h, p4.h, p5.h
-; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: uzp1 p1.h, p1.h, p6.h
-; CHECK-NEXT: cmphi p6.d, p0/z, z3.d, z24.d
-; CHECK-NEXT: cmphi p0.d, p0/z, z3.d, z0.d
-; CHECK-NEXT: uzp1 p3.s, p8.s, p3.s
-; CHECK-NEXT: ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: uzp1 p0.s, p6.s, p0.s
-; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: uzp1 p3.h, p3.h, p0.h
-; CHECK-NEXT: uzp1 p0.b, p2.b, p1.b
-; CHECK-NEXT: uzp1 p1.b, p4.b, p3.b
-; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: addvl sp, sp, #2
-; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: adds x8, x0, x8
+; CHECK-NEXT: csinv x8, x8, xzr, lo
+; CHECK-NEXT: whilelo p0.b, x0, x1
+; CHECK-NEXT: whilelo p1.b, x8, x1
; CHECK-NEXT: ret
%active.lane.mask = call <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i64(i64 %index, i64 %TC)
ret <vscale x 32 x i1> %active.lane.mask
@@ -310,22 +151,45 @@ define <vscale x 32 x i1> @lane_mask_nxv32i1_i64(i64 %index, i64 %TC) {
define <vscale x 32 x i1> @lane_mask_nxv32i1_i8(i8 %index, i8 %TC) {
; CHECK-LABEL: lane_mask_nxv32i1_i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: index z0.b, #0, #1
; CHECK-NEXT: rdvl x8, #1
-; CHECK-NEXT: mov z2.b, w0
-; CHECK-NEXT: mov z1.b, w8
-; CHECK-NEXT: ptrue p1.b
-; CHECK-NEXT: add z1.b, z0.b, z1.b
-; CHECK-NEXT: uqadd z0.b, z0.b, z2.b
-; CHECK-NEXT: uqadd z1.b, z1.b, z2.b
-; CHECK-NEXT: mov z2.b, w1
-; CHECK-NEXT: cmphi p0.b, p1/z, z2.b, z0.b
-; CHECK-NEXT: cmphi p1.b, p1/z, z2.b, z1.b
+; CHECK-NEXT: and w9, w0, #0xff
+; CHECK-NEXT: mov w10, #255 // =0xff
+; CHECK-NEXT: add w8, w9, w8, uxtb
+; CHECK-NEXT: and w11, w1, #0xff
+; CHECK-NEXT: cmp w8, #255
+; CHECK-NEXT: csel w8, w8, w10, lo
+; CHECK-NEXT: whilelo p0.b, w9, w11
+; CHECK-NEXT: whilelo p1.b, w8, w11
; CHECK-NEXT: ret
%active.lane.mask = call <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i8(i8 %index, i8 %TC)
ret <vscale x 32 x i1> %active.lane.mask
}
+define <vscale x 7 x i1> @lane_mask_nxv7i1_i64(i64 %index, i64 %TC) {
+; CHECK-LABEL: lane_mask_nxv7i1_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilelo p0.h, x0, x1
+; CHECK-NEXT: ret
+ %active.lane.mask = call <vscale x 7 x i1> @llvm.get.active.lane.mask.nxv7i1.i64(i64 %index, i64 %TC)
+ ret <vscale x 7 x i1> %active.lane.mask
+}
+
+define <vscale x 1 x i1> @lane_mask_nxv1i1_i32(i32 %index, i32 %TC) {
+; CHECK-LABEL: lane_mask_nxv1i1_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: index z0.s, #0, #1
+; CHECK-NEXT: mov z1.s, w0
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: uqadd z0.s, z0.s, z1.s
+; CHECK-NEXT: mov z1.s, w1
+; CHECK-NEXT: cmphi p0.s, p0/z, z1.s, z0.s
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ret
+ %active.lane.mask = call <vscale x 1 x i1> @llvm.get.active.lane.mask.nxv1i1.i32(i32 %index, i32 %TC)
+ ret <vscale x 1 x i1> %active.lane.mask
+}
+
; UTC_ARGS: --disable
; This test exists to protect against a compiler crash caused by an attempt to
; convert (via changeVectorElementType) an MVT into an EVT, which is impossible.
@@ -539,12 +403,12 @@ entry:
ret <vscale x 16 x i1> %active.lane.mask
}
-
declare <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i32(i32, i32)
declare <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32, i32)
declare <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i32(i32, i32)
declare <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i32(i32, i32)
+declare <vscale x 1 x i1> @llvm.get.active.lane.mask.nxv1i1.i32(i32, i32)
declare <vscale x 64 x i1> @llvm.get.active.lane.mask.nxv64i1.i64(i64, i64)
declare <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i64(i64, i64)
@@ -556,10 +420,10 @@ declare <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64, i64)
declare <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i8(i8, i8)
declare <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i8(i8, i8)
declare <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i8(i8, i8)
+declare <vscale x 7 x i1> @llvm.get.active.lane.mask.nxv7i1.i8(i8, i8)
declare <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i8(i8, i8)
declare <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i8(i8, i8)
-
declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32, i32)
declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)