[llvm] clastb representation in existing IR, and AArch64 codegen (PR #112738)
Graham Hunter via llvm-commits
llvm-commits at lists.llvm.org
Wed Jan 22 08:07:30 PST 2025
https://github.com/huntergr-arm updated https://github.com/llvm/llvm-project/pull/112738
From 1bdd4c172bf5d07fb606a5d9c619c47f545c7511 Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Tue, 14 Jan 2025 13:22:42 +0000
Subject: [PATCH 1/4] [AArch64] Basic ISel for find_last_active
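
Mark ISD::VECTOR_FIND_LAST_ACTIVE as Legal for SVE predicate types and
add fallback ISel patterns that materialize a lane-index vector and use
LASTB to pick out the index of the last active lane. As a minimal
sketch of the effect (adapted from the updated test below; SVE is
assumed to be enabled, and register assignment may differ):

  define i8 @extract_last_i8_scalable(<vscale x 16 x i8> %data,
                                      <vscale x 16 x i1> %mask,
                                      i8 %passthru) {
    %res = call i8 @llvm.experimental.vector.extract.last.active.nxv16i8(
               <vscale x 16 x i8> %data, <vscale x 16 x i1> %mask, i8 %passthru)
    ret i8 %res
  }

The index computation for %mask now selects to

  index z1.b, #0, #1    // z1 = <0, 1, 2, ...>
  lastb w8, p0, z1.b    // w8 = index of last active lane of p0

instead of the previous sel/umaxv/fmov/and sequence. The surrounding
whilels/ptest/csel code is folded away by later patches in this series.
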
---
.../include/llvm/Target/TargetSelectionDAG.td | 3 +
.../Target/AArch64/AArch64ISelLowering.cpp | 1 +
.../lib/Target/AArch64/AArch64SVEInstrInfo.td | 11 ++++
.../AArch64/vector-extract-last-active.ll | 62 +++++--------------
4 files changed, 32 insertions(+), 45 deletions(-)
diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index 7f3c2be90d8207..2c8951154f854f 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -840,6 +840,9 @@ def vector_insert_subvec : SDNode<"ISD::INSERT_SUBVECTOR",
def extract_subvector : SDNode<"ISD::EXTRACT_SUBVECTOR", SDTSubVecExtract, []>;
def insert_subvector : SDNode<"ISD::INSERT_SUBVECTOR", SDTSubVecInsert, []>;
+def find_last_active : SDNode<"ISD::VECTOR_FIND_LAST_ACTIVE",
+ SDTypeProfile<1, 1, []>, []>;
+
// Nodes for intrinsics, you should use the intrinsic itself and let tblgen use
// these internally. Don't reference these directly.
def intrinsic_void : SDNode<"ISD::INTRINSIC_VOID",
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 9a0bb73087980d..0ccc2a5fc6dbb9 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1446,6 +1446,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
setOperationAction(ISD::VECTOR_DEINTERLEAVE, VT, Custom);
setOperationAction(ISD::VECTOR_INTERLEAVE, VT, Custom);
+ setOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE, VT, Legal);
}
}
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 6d5e2697160ab6..40fef376961240 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -3379,6 +3379,17 @@ let Predicates = [HasSVE_or_SME] in {
def : Pat<(i64 (vector_extract nxv2i64:$vec, VectorIndexD:$index)),
(UMOVvi64 (v2i64 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexD:$index)>;
+ // Find index of last active lane. This is a fallback in case we miss the
+ // opportunity to fold into a lastb or clastb directly.
+ def : Pat<(i64 (find_last_active nxv16i1:$P1)),
+ (INSERT_SUBREG (IMPLICIT_DEF), (LASTB_RPZ_B $P1, (INDEX_II_B 0, 1)), sub_32)>;
+ def : Pat<(i64 (find_last_active nxv8i1:$P1)),
+ (INSERT_SUBREG (IMPLICIT_DEF), (LASTB_RPZ_H $P1, (INDEX_II_H 0, 1)), sub_32)>;
+ def : Pat<(i64 (find_last_active nxv4i1:$P1)),
+ (INSERT_SUBREG (IMPLICIT_DEF), (LASTB_RPZ_S $P1, (INDEX_II_S 0, 1)), sub_32)>;
+ def : Pat<(i64 (find_last_active nxv2i1:$P1)),
+ (LASTB_RPZ_D $P1, (INDEX_II_D 0, 1))>;
+
// Move element from the bottom 128-bits of a scalable vector to a single-element vector.
// Alternative case where insertelement is just scalar_to_vector rather than vector_insert.
def : Pat<(v1f64 (scalar_to_vector
diff --git a/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll b/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll
index 3b11e67d072e7a..e4055608a1aeed 100644
--- a/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll
+++ b/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll
@@ -294,12 +294,7 @@ define i8 @extract_last_i8_scalable(<vscale x 16 x i8> %data, <vscale x 16 x i1>
; CHECK-LABEL: extract_last_i8_scalable:
; CHECK: // %bb.0:
; CHECK-NEXT: index z1.b, #0, #1
-; CHECK-NEXT: mov z2.b, #0 // =0x0
-; CHECK-NEXT: ptrue p1.b
-; CHECK-NEXT: sel z1.b, p0, z1.b, z2.b
-; CHECK-NEXT: umaxv b1, p1, z1.b
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: and x8, x8, #0xff
+; CHECK-NEXT: lastb w8, p0, z1.b
; CHECK-NEXT: whilels p1.b, xzr, x8
; CHECK-NEXT: ptest p0, p0.b
; CHECK-NEXT: lastb w8, p1, z0.b
@@ -313,15 +308,11 @@ define i16 @extract_last_i16_scalable(<vscale x 8 x i16> %data, <vscale x 8 x i1
; CHECK-LABEL: extract_last_i16_scalable:
; CHECK: // %bb.0:
; CHECK-NEXT: index z1.h, #0, #1
-; CHECK-NEXT: mov z2.h, #0 // =0x0
+; CHECK-NEXT: lastb w8, p0, z1.h
+; CHECK-NEXT: whilels p1.h, xzr, x8
+; CHECK-NEXT: lastb w8, p1, z0.h
; CHECK-NEXT: ptrue p1.h
-; CHECK-NEXT: sel z1.h, p0, z1.h, z2.h
-; CHECK-NEXT: umaxv h1, p1, z1.h
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: and x8, x8, #0xffff
-; CHECK-NEXT: whilels p2.h, xzr, x8
; CHECK-NEXT: ptest p1, p0.b
-; CHECK-NEXT: lastb w8, p2, z0.h
; CHECK-NEXT: csel w0, w8, w0, ne
; CHECK-NEXT: ret
%res = call i16 @llvm.experimental.vector.extract.last.active.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %mask, i16 %passthru)
@@ -332,15 +323,11 @@ define i32 @extract_last_i32_scalable(<vscale x 4 x i32> %data, <vscale x 4 x i1
; CHECK-LABEL: extract_last_i32_scalable:
; CHECK: // %bb.0:
; CHECK-NEXT: index z1.s, #0, #1
-; CHECK-NEXT: mov z2.s, #0 // =0x0
+; CHECK-NEXT: lastb w8, p0, z1.s
+; CHECK-NEXT: whilels p1.s, xzr, x8
+; CHECK-NEXT: lastb w8, p1, z0.s
; CHECK-NEXT: ptrue p1.s
-; CHECK-NEXT: sel z1.s, p0, z1.s, z2.s
-; CHECK-NEXT: umaxv s1, p1, z1.s
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: mov w8, w8
-; CHECK-NEXT: whilels p2.s, xzr, x8
; CHECK-NEXT: ptest p1, p0.b
-; CHECK-NEXT: lastb w8, p2, z0.s
; CHECK-NEXT: csel w0, w8, w0, ne
; CHECK-NEXT: ret
%res = call i32 @llvm.experimental.vector.extract.last.active.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %mask, i32 %passthru)
@@ -351,14 +338,11 @@ define i64 @extract_last_i64_scalable(<vscale x 2 x i64> %data, <vscale x 2 x i1
; CHECK-LABEL: extract_last_i64_scalable:
; CHECK: // %bb.0:
; CHECK-NEXT: index z1.d, #0, #1
-; CHECK-NEXT: mov z2.d, #0 // =0x0
+; CHECK-NEXT: lastb x8, p0, z1.d
+; CHECK-NEXT: whilels p1.d, xzr, x8
+; CHECK-NEXT: lastb x8, p1, z0.d
; CHECK-NEXT: ptrue p1.d
-; CHECK-NEXT: sel z1.d, p0, z1.d, z2.d
-; CHECK-NEXT: umaxv d1, p1, z1.d
-; CHECK-NEXT: fmov x8, d1
-; CHECK-NEXT: whilels p2.d, xzr, x8
; CHECK-NEXT: ptest p1, p0.b
-; CHECK-NEXT: lastb x8, p2, z0.d
; CHECK-NEXT: csel x0, x8, x0, ne
; CHECK-NEXT: ret
%res = call i64 @llvm.experimental.vector.extract.last.active.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %mask, i64 %passthru)
@@ -369,15 +353,11 @@ define float @extract_last_float_scalable(<vscale x 4 x float> %data, <vscale x
; CHECK-LABEL: extract_last_float_scalable:
; CHECK: // %bb.0:
; CHECK-NEXT: index z2.s, #0, #1
-; CHECK-NEXT: mov z3.s, #0 // =0x0
+; CHECK-NEXT: lastb w8, p0, z2.s
+; CHECK-NEXT: whilels p1.s, xzr, x8
+; CHECK-NEXT: lastb s0, p1, z0.s
; CHECK-NEXT: ptrue p1.s
-; CHECK-NEXT: sel z2.s, p0, z2.s, z3.s
-; CHECK-NEXT: umaxv s2, p1, z2.s
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: mov w8, w8
-; CHECK-NEXT: whilels p2.s, xzr, x8
; CHECK-NEXT: ptest p1, p0.b
-; CHECK-NEXT: lastb s0, p2, z0.s
; CHECK-NEXT: fcsel s0, s0, s1, ne
; CHECK-NEXT: ret
%res = call float @llvm.experimental.vector.extract.last.active.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %mask, float %passthru)
@@ -388,14 +368,11 @@ define double @extract_last_double_scalable(<vscale x 2 x double> %data, <vscale
; CHECK-LABEL: extract_last_double_scalable:
; CHECK: // %bb.0:
; CHECK-NEXT: index z2.d, #0, #1
-; CHECK-NEXT: mov z3.d, #0 // =0x0
+; CHECK-NEXT: lastb x8, p0, z2.d
+; CHECK-NEXT: whilels p1.d, xzr, x8
+; CHECK-NEXT: lastb d0, p1, z0.d
; CHECK-NEXT: ptrue p1.d
-; CHECK-NEXT: sel z2.d, p0, z2.d, z3.d
-; CHECK-NEXT: umaxv d2, p1, z2.d
-; CHECK-NEXT: fmov x8, d2
-; CHECK-NEXT: whilels p2.d, xzr, x8
; CHECK-NEXT: ptest p1, p0.b
-; CHECK-NEXT: lastb d0, p2, z0.d
; CHECK-NEXT: fcsel d0, d0, d1, ne
; CHECK-NEXT: ret
%res = call double @llvm.experimental.vector.extract.last.active.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %mask, double %passthru)
@@ -407,12 +384,7 @@ define i8 @extract_last_i8_scalable_poison_passthru(<vscale x 16 x i8> %data, <v
; CHECK-LABEL: extract_last_i8_scalable_poison_passthru:
; CHECK: // %bb.0:
; CHECK-NEXT: index z1.b, #0, #1
-; CHECK-NEXT: mov z2.b, #0 // =0x0
-; CHECK-NEXT: sel z1.b, p0, z1.b, z2.b
-; CHECK-NEXT: ptrue p0.b
-; CHECK-NEXT: umaxv b1, p0, z1.b
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: and x8, x8, #0xff
+; CHECK-NEXT: lastb w8, p0, z1.b
; CHECK-NEXT: whilels p0.b, xzr, x8
; CHECK-NEXT: lastb w0, p0, z0.b
; CHECK-NEXT: ret
From 1759f0e636b5bd05ca03d69ed44564ee86d7d7dc Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Tue, 14 Jan 2025 13:44:45 +0000
Subject: [PATCH 2/4] Combine extract_vector_elt(find_last_active) -> lastb
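
With the fallback patterns in place, add a DAG combine that recognizes
an extract of the lane found by find_last_active and emits
AArch64ISD::LASTB on the data vector directly:

  extract_vector_elt(Vec, find_last_active(Mask)) --> LASTB Mask, Vec

The combine only fires on SVE targets, once legalization has begun, and
when VECTOR_FIND_LAST_ACTIVE is Legal for the mask's type. For the i8
test case this removes the round trip through a separate index vector
and whilels, leaving (sketch from the updated test):

  lastb w8, p0, z0.b
  ptest p0, p0.b
  csel w0, w8, w0, ne
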
---
.../Target/AArch64/AArch64ISelLowering.cpp | 25 ++++++++++++++
.../AArch64/vector-extract-last-active.ll | 33 ++++---------------
2 files changed, 31 insertions(+), 27 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 0ccc2a5fc6dbb9..585f2193835501 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -19945,6 +19945,29 @@ performLastTrueTestVectorCombine(SDNode *N,
return getPTest(DAG, N->getValueType(0), Pg, N0, AArch64CC::LAST_ACTIVE);
}
+static SDValue
+performExtractLastActiveCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const AArch64Subtarget *Subtarget) {
+ assert(N->getOpcode() == ISD::EXTRACT_VECTOR_ELT);
+ SelectionDAG &DAG = DCI.DAG;
+ SDValue Vec = N->getOperand(0);
+ SDValue Idx = N->getOperand(1);
+
+ if (!Subtarget->hasSVE() || DCI.isBeforeLegalize() ||
+ Idx.getOpcode() != ISD::VECTOR_FIND_LAST_ACTIVE)
+ return SDValue();
+
+ SDValue Mask = Idx.getOperand(0);
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ if (TLI.getOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE, Mask.getValueType())
+ != TargetLowering::Legal)
+ return SDValue();
+
+ return DAG.getNode(AArch64ISD::LASTB, SDLoc(N), N->getValueType(0),
+ Mask, Vec);
+}
+
static SDValue
performExtractVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
const AArch64Subtarget *Subtarget) {
@@ -19953,6 +19976,8 @@ performExtractVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
return Res;
if (SDValue Res = performLastTrueTestVectorCombine(N, DCI, Subtarget))
return Res;
+ if (SDValue Res = performExtractLastActiveCombine(N, DCI, Subtarget))
+ return Res;
SelectionDAG &DAG = DCI.DAG;
SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
diff --git a/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll b/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll
index e4055608a1aeed..b091d4874138f8 100644
--- a/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll
+++ b/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll
@@ -293,11 +293,8 @@ define double @extract_last_double(<2 x double> %data, <2 x i64> %mask, double %
define i8 @extract_last_i8_scalable(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask, i8 %passthru) #0 {
; CHECK-LABEL: extract_last_i8_scalable:
; CHECK: // %bb.0:
-; CHECK-NEXT: index z1.b, #0, #1
-; CHECK-NEXT: lastb w8, p0, z1.b
-; CHECK-NEXT: whilels p1.b, xzr, x8
+; CHECK-NEXT: lastb w8, p0, z0.b
; CHECK-NEXT: ptest p0, p0.b
-; CHECK-NEXT: lastb w8, p1, z0.b
; CHECK-NEXT: csel w0, w8, w0, ne
; CHECK-NEXT: ret
%res = call i8 @llvm.experimental.vector.extract.last.active.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask, i8 %passthru)
@@ -307,10 +304,7 @@ define i8 @extract_last_i8_scalable(<vscale x 16 x i8> %data, <vscale x 16 x i1>
define i16 @extract_last_i16_scalable(<vscale x 8 x i16> %data, <vscale x 8 x i1> %mask, i16 %passthru) #0 {
; CHECK-LABEL: extract_last_i16_scalable:
; CHECK: // %bb.0:
-; CHECK-NEXT: index z1.h, #0, #1
-; CHECK-NEXT: lastb w8, p0, z1.h
-; CHECK-NEXT: whilels p1.h, xzr, x8
-; CHECK-NEXT: lastb w8, p1, z0.h
+; CHECK-NEXT: lastb w8, p0, z0.h
; CHECK-NEXT: ptrue p1.h
; CHECK-NEXT: ptest p1, p0.b
; CHECK-NEXT: csel w0, w8, w0, ne
@@ -322,10 +316,7 @@ define i16 @extract_last_i16_scalable(<vscale x 8 x i16> %data, <vscale x 8 x i1
define i32 @extract_last_i32_scalable(<vscale x 4 x i32> %data, <vscale x 4 x i1> %mask, i32 %passthru) #0 {
; CHECK-LABEL: extract_last_i32_scalable:
; CHECK: // %bb.0:
-; CHECK-NEXT: index z1.s, #0, #1
-; CHECK-NEXT: lastb w8, p0, z1.s
-; CHECK-NEXT: whilels p1.s, xzr, x8
-; CHECK-NEXT: lastb w8, p1, z0.s
+; CHECK-NEXT: lastb w8, p0, z0.s
; CHECK-NEXT: ptrue p1.s
; CHECK-NEXT: ptest p1, p0.b
; CHECK-NEXT: csel w0, w8, w0, ne
@@ -337,10 +328,7 @@ define i32 @extract_last_i32_scalable(<vscale x 4 x i32> %data, <vscale x 4 x i1
define i64 @extract_last_i64_scalable(<vscale x 2 x i64> %data, <vscale x 2 x i1> %mask, i64 %passthru) #0 {
; CHECK-LABEL: extract_last_i64_scalable:
; CHECK: // %bb.0:
-; CHECK-NEXT: index z1.d, #0, #1
-; CHECK-NEXT: lastb x8, p0, z1.d
-; CHECK-NEXT: whilels p1.d, xzr, x8
-; CHECK-NEXT: lastb x8, p1, z0.d
+; CHECK-NEXT: lastb x8, p0, z0.d
; CHECK-NEXT: ptrue p1.d
; CHECK-NEXT: ptest p1, p0.b
; CHECK-NEXT: csel x0, x8, x0, ne
@@ -352,10 +340,7 @@ define i64 @extract_last_i64_scalable(<vscale x 2 x i64> %data, <vscale x 2 x i1
define float @extract_last_float_scalable(<vscale x 4 x float> %data, <vscale x 4 x i1> %mask, float %passthru) #0 {
; CHECK-LABEL: extract_last_float_scalable:
; CHECK: // %bb.0:
-; CHECK-NEXT: index z2.s, #0, #1
-; CHECK-NEXT: lastb w8, p0, z2.s
-; CHECK-NEXT: whilels p1.s, xzr, x8
-; CHECK-NEXT: lastb s0, p1, z0.s
+; CHECK-NEXT: lastb s0, p0, z0.s
; CHECK-NEXT: ptrue p1.s
; CHECK-NEXT: ptest p1, p0.b
; CHECK-NEXT: fcsel s0, s0, s1, ne
@@ -367,10 +352,7 @@ define float @extract_last_float_scalable(<vscale x 4 x float> %data, <vscale x
define double @extract_last_double_scalable(<vscale x 2 x double> %data, <vscale x 2 x i1> %mask, double %passthru) #0 {
; CHECK-LABEL: extract_last_double_scalable:
; CHECK: // %bb.0:
-; CHECK-NEXT: index z2.d, #0, #1
-; CHECK-NEXT: lastb x8, p0, z2.d
-; CHECK-NEXT: whilels p1.d, xzr, x8
-; CHECK-NEXT: lastb d0, p1, z0.d
+; CHECK-NEXT: lastb d0, p0, z0.d
; CHECK-NEXT: ptrue p1.d
; CHECK-NEXT: ptest p1, p0.b
; CHECK-NEXT: fcsel d0, d0, d1, ne
@@ -383,9 +365,6 @@ define double @extract_last_double_scalable(<vscale x 2 x double> %data, <vscale
define i8 @extract_last_i8_scalable_poison_passthru(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask) #0 {
; CHECK-LABEL: extract_last_i8_scalable_poison_passthru:
; CHECK: // %bb.0:
-; CHECK-NEXT: index z1.b, #0, #1
-; CHECK-NEXT: lastb w8, p0, z1.b
-; CHECK-NEXT: whilels p0.b, xzr, x8
; CHECK-NEXT: lastb w0, p0, z0.b
; CHECK-NEXT: ret
%res = call i8 @llvm.experimental.vector.extract.last.active.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask, i8 poison)
From e45669af2b02fc14f9fc9f75fb74106f43a2da1c Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Tue, 14 Jan 2025 15:50:32 +0000
Subject: [PATCH 3/4] Combine csel+lastb -> clastb
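
Fold the remaining compare-and-select into a conditional extract. A
CSEL with an NE condition whose flags come from a PTEST_ANY is
rewritten to AArch64ISD::CLASTB_N when its true operand is a LASTB
governed by the tested predicate, and the test is of that predicate
against itself or against a ptrue (looking through reinterpret casts):

  CSEL (LASTB P, Z), X, NE(PTEST_ANY P) --> CLASTB P, X, Z

With this, the i8 test case collapses to a single instruction:

  clastb w0, p0, w0, z0.b

which extracts the last active element of z0.b, or keeps the passthru
value already in w0 when no lanes are active, matching the semantics
of the extract.last.active intrinsic.
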
---
.../Target/AArch64/AArch64ISelLowering.cpp | 39 +++++++++++++++++++
.../AArch64/vector-extract-last-active.ll | 31 ++++-----------
2 files changed, 47 insertions(+), 23 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 585f2193835501..59cd5d1cdb47d2 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -25058,6 +25058,41 @@ static SDValue reassociateCSELOperandsForCSE(SDNode *N, SelectionDAG &DAG) {
}
}
+static SDValue foldCSELofLASTB(SDNode *Op, SelectionDAG &DAG) {
+ AArch64CC::CondCode OpCC =
+ static_cast<AArch64CC::CondCode>(Op->getConstantOperandVal(2));
+
+ if (OpCC != AArch64CC::NE)
+ return SDValue();
+
+ SDValue PTest = Op->getOperand(3);
+ if (PTest.getOpcode() != AArch64ISD::PTEST_ANY)
+ return SDValue();
+
+ SDValue TruePred = PTest.getOperand(0);
+ SDValue AnyPred = PTest.getOperand(1);
+
+ if (TruePred.getOpcode() == AArch64ISD::REINTERPRET_CAST)
+ TruePred = TruePred.getOperand(0);
+
+ if (AnyPred.getOpcode() == AArch64ISD::REINTERPRET_CAST)
+ AnyPred = AnyPred.getOperand(0);
+
+ if (TruePred != AnyPred && TruePred.getOpcode() != AArch64ISD::PTRUE)
+ return SDValue();
+
+ SDValue LastB = Op->getOperand(0);
+ SDValue Default = Op->getOperand(1);
+
+ if (LastB.getOpcode() != AArch64ISD::LASTB || LastB.getOperand(0) != AnyPred)
+ return SDValue();
+
+ SDValue Vec = LastB.getOperand(1);
+
+ return DAG.getNode(AArch64ISD::CLASTB_N, SDLoc(Op), Op->getValueType(0),
+ AnyPred, Default, Vec);
+}
+
// Optimize CSEL instructions
static SDValue performCSELCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
@@ -25103,6 +25138,10 @@ static SDValue performCSELCombine(SDNode *N,
}
}
+ // CSEL (LASTB P, Z), X, NE(ANY P) -> CLASTB P, X, Z
+ if (SDValue CondLast = foldCSELofLASTB(N, DAG))
+ return CondLast;
+
return performCONDCombine(N, DCI, DAG, 2, 3);
}
diff --git a/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll b/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll
index b091d4874138f8..e9593ce014d234 100644
--- a/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll
+++ b/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll
@@ -293,9 +293,7 @@ define double @extract_last_double(<2 x double> %data, <2 x i64> %mask, double %
define i8 @extract_last_i8_scalable(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask, i8 %passthru) #0 {
; CHECK-LABEL: extract_last_i8_scalable:
; CHECK: // %bb.0:
-; CHECK-NEXT: lastb w8, p0, z0.b
-; CHECK-NEXT: ptest p0, p0.b
-; CHECK-NEXT: csel w0, w8, w0, ne
+; CHECK-NEXT: clastb w0, p0, w0, z0.b
; CHECK-NEXT: ret
%res = call i8 @llvm.experimental.vector.extract.last.active.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask, i8 %passthru)
ret i8 %res
@@ -304,10 +302,7 @@ define i8 @extract_last_i8_scalable(<vscale x 16 x i8> %data, <vscale x 16 x i1>
define i16 @extract_last_i16_scalable(<vscale x 8 x i16> %data, <vscale x 8 x i1> %mask, i16 %passthru) #0 {
; CHECK-LABEL: extract_last_i16_scalable:
; CHECK: // %bb.0:
-; CHECK-NEXT: lastb w8, p0, z0.h
-; CHECK-NEXT: ptrue p1.h
-; CHECK-NEXT: ptest p1, p0.b
-; CHECK-NEXT: csel w0, w8, w0, ne
+; CHECK-NEXT: clastb w0, p0, w0, z0.h
; CHECK-NEXT: ret
%res = call i16 @llvm.experimental.vector.extract.last.active.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %mask, i16 %passthru)
ret i16 %res
@@ -316,10 +311,7 @@ define i16 @extract_last_i16_scalable(<vscale x 8 x i16> %data, <vscale x 8 x i1
define i32 @extract_last_i32_scalable(<vscale x 4 x i32> %data, <vscale x 4 x i1> %mask, i32 %passthru) #0 {
; CHECK-LABEL: extract_last_i32_scalable:
; CHECK: // %bb.0:
-; CHECK-NEXT: lastb w8, p0, z0.s
-; CHECK-NEXT: ptrue p1.s
-; CHECK-NEXT: ptest p1, p0.b
-; CHECK-NEXT: csel w0, w8, w0, ne
+; CHECK-NEXT: clastb w0, p0, w0, z0.s
; CHECK-NEXT: ret
%res = call i32 @llvm.experimental.vector.extract.last.active.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %mask, i32 %passthru)
ret i32 %res
@@ -328,10 +320,7 @@ define i32 @extract_last_i32_scalable(<vscale x 4 x i32> %data, <vscale x 4 x i1
define i64 @extract_last_i64_scalable(<vscale x 2 x i64> %data, <vscale x 2 x i1> %mask, i64 %passthru) #0 {
; CHECK-LABEL: extract_last_i64_scalable:
; CHECK: // %bb.0:
-; CHECK-NEXT: lastb x8, p0, z0.d
-; CHECK-NEXT: ptrue p1.d
-; CHECK-NEXT: ptest p1, p0.b
-; CHECK-NEXT: csel x0, x8, x0, ne
+; CHECK-NEXT: clastb x0, p0, x0, z0.d
; CHECK-NEXT: ret
%res = call i64 @llvm.experimental.vector.extract.last.active.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %mask, i64 %passthru)
ret i64 %res
@@ -340,10 +329,8 @@ define i64 @extract_last_i64_scalable(<vscale x 2 x i64> %data, <vscale x 2 x i1
define float @extract_last_float_scalable(<vscale x 4 x float> %data, <vscale x 4 x i1> %mask, float %passthru) #0 {
; CHECK-LABEL: extract_last_float_scalable:
; CHECK: // %bb.0:
-; CHECK-NEXT: lastb s0, p0, z0.s
-; CHECK-NEXT: ptrue p1.s
-; CHECK-NEXT: ptest p1, p0.b
-; CHECK-NEXT: fcsel s0, s0, s1, ne
+; CHECK-NEXT: clastb s1, p0, s1, z0.s
+; CHECK-NEXT: fmov s0, s1
; CHECK-NEXT: ret
%res = call float @llvm.experimental.vector.extract.last.active.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %mask, float %passthru)
ret float %res
@@ -352,10 +339,8 @@ define float @extract_last_float_scalable(<vscale x 4 x float> %data, <vscale x
define double @extract_last_double_scalable(<vscale x 2 x double> %data, <vscale x 2 x i1> %mask, double %passthru) #0 {
; CHECK-LABEL: extract_last_double_scalable:
; CHECK: // %bb.0:
-; CHECK-NEXT: lastb d0, p0, z0.d
-; CHECK-NEXT: ptrue p1.d
-; CHECK-NEXT: ptest p1, p0.b
-; CHECK-NEXT: fcsel d0, d0, d1, ne
+; CHECK-NEXT: clastb d1, p0, d1, z0.d
+; CHECK-NEXT: fmov d0, d1
; CHECK-NEXT: ret
%res = call double @llvm.experimental.vector.extract.last.active.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %mask, double %passthru)
ret double %res
From e7bfbf611292e4ad8f5dadb4049005452156cfc4 Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Wed, 22 Jan 2025 15:58:17 +0000
Subject: [PATCH 4/4] Formatting
---
.../include/llvm/Target/TargetSelectionDAG.td | 4 ++--
.../Target/AArch64/AArch64ISelLowering.cpp | 11 +++++------
.../lib/Target/AArch64/AArch64SVEInstrInfo.td | 19 +++++++++++--------
3 files changed, 18 insertions(+), 16 deletions(-)
diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index 2c8951154f854f..44046129f73aba 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -840,8 +840,8 @@ def vector_insert_subvec : SDNode<"ISD::INSERT_SUBVECTOR",
def extract_subvector : SDNode<"ISD::EXTRACT_SUBVECTOR", SDTSubVecExtract, []>;
def insert_subvector : SDNode<"ISD::INSERT_SUBVECTOR", SDTSubVecInsert, []>;
-def find_last_active : SDNode<"ISD::VECTOR_FIND_LAST_ACTIVE",
- SDTypeProfile<1, 1, []>, []>;
+def find_last_active
+ : SDNode<"ISD::VECTOR_FIND_LAST_ACTIVE", SDTypeProfile<1, 1, []>, []>;
// Nodes for intrinsics, you should use the intrinsic itself and let tblgen use
// these internally. Don't reference these directly.
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 59cd5d1cdb47d2..f3c97471d5fb78 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -19946,8 +19946,7 @@ performLastTrueTestVectorCombine(SDNode *N,
}
static SDValue
-performExtractLastActiveCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
+performExtractLastActiveCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
const AArch64Subtarget *Subtarget) {
assert(N->getOpcode() == ISD::EXTRACT_VECTOR_ELT);
SelectionDAG &DAG = DCI.DAG;
@@ -19960,12 +19959,12 @@ performExtractLastActiveCombine(SDNode *N,
SDValue Mask = Idx.getOperand(0);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- if (TLI.getOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE, Mask.getValueType())
- != TargetLowering::Legal)
+ if (TLI.getOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE,
+ Mask.getValueType()) != TargetLowering::Legal)
return SDValue();
- return DAG.getNode(AArch64ISD::LASTB, SDLoc(N), N->getValueType(0),
- Mask, Vec);
+ return DAG.getNode(AArch64ISD::LASTB, SDLoc(N), N->getValueType(0), Mask,
+ Vec);
}
static SDValue
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 40fef376961240..a3ef7e3baa76c6 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -3381,14 +3381,17 @@ let Predicates = [HasSVE_or_SME] in {
// Find index of last active lane. This is a fallback in case we miss the
// opportunity to fold into a lastb or clastb directly.
- def : Pat<(i64 (find_last_active nxv16i1:$P1)),
- (INSERT_SUBREG (IMPLICIT_DEF), (LASTB_RPZ_B $P1, (INDEX_II_B 0, 1)), sub_32)>;
- def : Pat<(i64 (find_last_active nxv8i1:$P1)),
- (INSERT_SUBREG (IMPLICIT_DEF), (LASTB_RPZ_H $P1, (INDEX_II_H 0, 1)), sub_32)>;
- def : Pat<(i64 (find_last_active nxv4i1:$P1)),
- (INSERT_SUBREG (IMPLICIT_DEF), (LASTB_RPZ_S $P1, (INDEX_II_S 0, 1)), sub_32)>;
- def : Pat<(i64 (find_last_active nxv2i1:$P1)),
- (LASTB_RPZ_D $P1, (INDEX_II_D 0, 1))>;
+ def : Pat<(i64(find_last_active nxv16i1:$P1)),
+ (INSERT_SUBREG(IMPLICIT_DEF), (LASTB_RPZ_B $P1, (INDEX_II_B 0, 1)),
+ sub_32)>;
+ def : Pat<(i64(find_last_active nxv8i1:$P1)),
+ (INSERT_SUBREG(IMPLICIT_DEF), (LASTB_RPZ_H $P1, (INDEX_II_H 0, 1)),
+ sub_32)>;
+ def : Pat<(i64(find_last_active nxv4i1:$P1)),
+ (INSERT_SUBREG(IMPLICIT_DEF), (LASTB_RPZ_S $P1, (INDEX_II_S 0, 1)),
+ sub_32)>;
+ def : Pat<(i64(find_last_active nxv2i1:$P1)), (LASTB_RPZ_D $P1, (INDEX_II_D 0,
+ 1))>;
// Move element from the bottom 128-bits of a scalable vector to a single-element vector.
// Alternative case where insertelement is just scalar_to_vector rather than vector_insert.