[llvm] clastb representation in existing IR, and AArch64 codegen (PR #112738)

Graham Hunter via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 5 09:20:34 PST 2025


https://github.com/huntergr-arm updated https://github.com/llvm/llvm-project/pull/112738

From 5807025914a65d265d88967227fc003af6589f6d Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Tue, 14 Jan 2025 13:22:42 +0000
Subject: [PATCH 01/10] [AArch64] Basic ISel for find_last_active

---
 .../include/llvm/Target/TargetSelectionDAG.td |  3 +
 .../Target/AArch64/AArch64ISelLowering.cpp    |  1 +
 .../lib/Target/AArch64/AArch64SVEInstrInfo.td | 11 ++++
 .../AArch64/vector-extract-last-active.ll     | 62 +++++--------------
 4 files changed, 32 insertions(+), 45 deletions(-)

diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index 7f3c2be90d82072..2c8951154f854fc 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -840,6 +840,9 @@ def vector_insert_subvec : SDNode<"ISD::INSERT_SUBVECTOR",
 def extract_subvector : SDNode<"ISD::EXTRACT_SUBVECTOR", SDTSubVecExtract, []>;
 def insert_subvector : SDNode<"ISD::INSERT_SUBVECTOR", SDTSubVecInsert, []>;
 
+def find_last_active : SDNode<"ISD::VECTOR_FIND_LAST_ACTIVE",
+                              SDTypeProfile<1, 1, []>, []>;
+
 // Nodes for intrinsics, you should use the intrinsic itself and let tblgen use
 // these internally.  Don't reference these directly.
 def intrinsic_void : SDNode<"ISD::INTRINSIC_VOID",
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 8617377ffc55b58..aacff045d99fe53 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1451,6 +1451,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
       setOperationAction(ISD::VECTOR_DEINTERLEAVE, VT, Custom);
       setOperationAction(ISD::VECTOR_INTERLEAVE, VT, Custom);
+      setOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE, VT, Legal);
     }
   }
 
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 524fccb8d43e66f..02062f7e98a6855 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -3379,6 +3379,17 @@ let Predicates = [HasSVE_or_SME] in {
   def : Pat<(i64 (vector_extract nxv2i64:$vec, VectorIndexD:$index)),
             (UMOVvi64 (v2i64 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexD:$index)>;
 
+  // Find index of last active lane. This is a fallback in case we miss the
+  // opportunity to fold into a lastb or clastb directly.
+  def : Pat<(i64 (find_last_active nxv16i1:$P1)),
+            (INSERT_SUBREG (IMPLICIT_DEF), (LASTB_RPZ_B $P1, (INDEX_II_B 0, 1)), sub_32)>;
+  def : Pat<(i64 (find_last_active nxv8i1:$P1)),
+            (INSERT_SUBREG (IMPLICIT_DEF), (LASTB_RPZ_H $P1, (INDEX_II_H 0, 1)), sub_32)>;
+  def : Pat<(i64 (find_last_active nxv4i1:$P1)),
+            (INSERT_SUBREG (IMPLICIT_DEF), (LASTB_RPZ_S $P1, (INDEX_II_S 0, 1)), sub_32)>;
+  def : Pat<(i64 (find_last_active nxv2i1:$P1)),
+            (LASTB_RPZ_D $P1, (INDEX_II_D 0, 1))>;
+
   // Move element from the bottom 128-bits of a scalable vector to a single-element vector.
   // Alternative case where insertelement is just scalar_to_vector rather than vector_insert.
   def : Pat<(v1f64 (scalar_to_vector
diff --git a/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll b/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll
index 3b11e67d072e7a2..e4055608a1aeedd 100644
--- a/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll
+++ b/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll
@@ -294,12 +294,7 @@ define i8 @extract_last_i8_scalable(<vscale x 16 x i8> %data, <vscale x 16 x i1>
 ; CHECK-LABEL: extract_last_i8_scalable:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z1.b, #0, #1
-; CHECK-NEXT:    mov z2.b, #0 // =0x0
-; CHECK-NEXT:    ptrue p1.b
-; CHECK-NEXT:    sel z1.b, p0, z1.b, z2.b
-; CHECK-NEXT:    umaxv b1, p1, z1.b
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    and x8, x8, #0xff
+; CHECK-NEXT:    lastb w8, p0, z1.b
 ; CHECK-NEXT:    whilels p1.b, xzr, x8
 ; CHECK-NEXT:    ptest p0, p0.b
 ; CHECK-NEXT:    lastb w8, p1, z0.b
@@ -313,15 +308,11 @@ define i16 @extract_last_i16_scalable(<vscale x 8 x i16> %data, <vscale x 8 x i1
 ; CHECK-LABEL: extract_last_i16_scalable:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z1.h, #0, #1
-; CHECK-NEXT:    mov z2.h, #0 // =0x0
+; CHECK-NEXT:    lastb w8, p0, z1.h
+; CHECK-NEXT:    whilels p1.h, xzr, x8
+; CHECK-NEXT:    lastb w8, p1, z0.h
 ; CHECK-NEXT:    ptrue p1.h
-; CHECK-NEXT:    sel z1.h, p0, z1.h, z2.h
-; CHECK-NEXT:    umaxv h1, p1, z1.h
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    and x8, x8, #0xffff
-; CHECK-NEXT:    whilels p2.h, xzr, x8
 ; CHECK-NEXT:    ptest p1, p0.b
-; CHECK-NEXT:    lastb w8, p2, z0.h
 ; CHECK-NEXT:    csel w0, w8, w0, ne
 ; CHECK-NEXT:    ret
   %res = call i16 @llvm.experimental.vector.extract.last.active.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %mask, i16 %passthru)
@@ -332,15 +323,11 @@ define i32 @extract_last_i32_scalable(<vscale x 4 x i32> %data, <vscale x 4 x i1
 ; CHECK-LABEL: extract_last_i32_scalable:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z1.s, #0, #1
-; CHECK-NEXT:    mov z2.s, #0 // =0x0
+; CHECK-NEXT:    lastb w8, p0, z1.s
+; CHECK-NEXT:    whilels p1.s, xzr, x8
+; CHECK-NEXT:    lastb w8, p1, z0.s
 ; CHECK-NEXT:    ptrue p1.s
-; CHECK-NEXT:    sel z1.s, p0, z1.s, z2.s
-; CHECK-NEXT:    umaxv s1, p1, z1.s
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov w8, w8
-; CHECK-NEXT:    whilels p2.s, xzr, x8
 ; CHECK-NEXT:    ptest p1, p0.b
-; CHECK-NEXT:    lastb w8, p2, z0.s
 ; CHECK-NEXT:    csel w0, w8, w0, ne
 ; CHECK-NEXT:    ret
   %res = call i32 @llvm.experimental.vector.extract.last.active.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %mask, i32 %passthru)
@@ -351,14 +338,11 @@ define i64 @extract_last_i64_scalable(<vscale x 2 x i64> %data, <vscale x 2 x i1
 ; CHECK-LABEL: extract_last_i64_scalable:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z1.d, #0, #1
-; CHECK-NEXT:    mov z2.d, #0 // =0x0
+; CHECK-NEXT:    lastb x8, p0, z1.d
+; CHECK-NEXT:    whilels p1.d, xzr, x8
+; CHECK-NEXT:    lastb x8, p1, z0.d
 ; CHECK-NEXT:    ptrue p1.d
-; CHECK-NEXT:    sel z1.d, p0, z1.d, z2.d
-; CHECK-NEXT:    umaxv d1, p1, z1.d
-; CHECK-NEXT:    fmov x8, d1
-; CHECK-NEXT:    whilels p2.d, xzr, x8
 ; CHECK-NEXT:    ptest p1, p0.b
-; CHECK-NEXT:    lastb x8, p2, z0.d
 ; CHECK-NEXT:    csel x0, x8, x0, ne
 ; CHECK-NEXT:    ret
   %res = call i64 @llvm.experimental.vector.extract.last.active.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %mask, i64 %passthru)
@@ -369,15 +353,11 @@ define float @extract_last_float_scalable(<vscale x 4 x float> %data, <vscale x
 ; CHECK-LABEL: extract_last_float_scalable:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z2.s, #0, #1
-; CHECK-NEXT:    mov z3.s, #0 // =0x0
+; CHECK-NEXT:    lastb w8, p0, z2.s
+; CHECK-NEXT:    whilels p1.s, xzr, x8
+; CHECK-NEXT:    lastb s0, p1, z0.s
 ; CHECK-NEXT:    ptrue p1.s
-; CHECK-NEXT:    sel z2.s, p0, z2.s, z3.s
-; CHECK-NEXT:    umaxv s2, p1, z2.s
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    mov w8, w8
-; CHECK-NEXT:    whilels p2.s, xzr, x8
 ; CHECK-NEXT:    ptest p1, p0.b
-; CHECK-NEXT:    lastb s0, p2, z0.s
 ; CHECK-NEXT:    fcsel s0, s0, s1, ne
 ; CHECK-NEXT:    ret
   %res = call float @llvm.experimental.vector.extract.last.active.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %mask, float %passthru)
@@ -388,14 +368,11 @@ define double @extract_last_double_scalable(<vscale x 2 x double> %data, <vscale
 ; CHECK-LABEL: extract_last_double_scalable:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z2.d, #0, #1
-; CHECK-NEXT:    mov z3.d, #0 // =0x0
+; CHECK-NEXT:    lastb x8, p0, z2.d
+; CHECK-NEXT:    whilels p1.d, xzr, x8
+; CHECK-NEXT:    lastb d0, p1, z0.d
 ; CHECK-NEXT:    ptrue p1.d
-; CHECK-NEXT:    sel z2.d, p0, z2.d, z3.d
-; CHECK-NEXT:    umaxv d2, p1, z2.d
-; CHECK-NEXT:    fmov x8, d2
-; CHECK-NEXT:    whilels p2.d, xzr, x8
 ; CHECK-NEXT:    ptest p1, p0.b
-; CHECK-NEXT:    lastb d0, p2, z0.d
 ; CHECK-NEXT:    fcsel d0, d0, d1, ne
 ; CHECK-NEXT:    ret
   %res = call double @llvm.experimental.vector.extract.last.active.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %mask, double %passthru)
@@ -407,12 +384,7 @@ define i8 @extract_last_i8_scalable_poison_passthru(<vscale x 16 x i8> %data, <v
 ; CHECK-LABEL: extract_last_i8_scalable_poison_passthru:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z1.b, #0, #1
-; CHECK-NEXT:    mov z2.b, #0 // =0x0
-; CHECK-NEXT:    sel z1.b, p0, z1.b, z2.b
-; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    umaxv b1, p0, z1.b
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    and x8, x8, #0xff
+; CHECK-NEXT:    lastb w8, p0, z1.b
 ; CHECK-NEXT:    whilels p0.b, xzr, x8
 ; CHECK-NEXT:    lastb w0, p0, z0.b
 ; CHECK-NEXT:    ret
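
For context on the fallback patterns above: as the updated CHECK lines show, the last-active index is now materialised with index + lastb instead of sel/umaxv, while the element extract and passthru select still go through whilels/lastb and ptest/csel. A minimal sketch of IR that reaches this path, reusing the intrinsic name and attributes from the test file in this patch; the function name and llc invocation below are made up for illustration, not taken from the test's RUN line.

; Sketch only -- mirrors the nxv16i8 test above.
; Try e.g.:  llc -mtriple=aarch64 -o - sketch.ll
define i8 @sketch_extract_last_i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask, i8 %passthru) #0 {
  ; With only this patch applied, the index is materialised as:
  ;   index z1.b, #0, #1 ; lastb w8, p0, z1.b
  ; and the element extract + passthru select still use whilels/lastb/ptest/csel.
  %res = call i8 @llvm.experimental.vector.extract.last.active.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask, i8 %passthru)
  ret i8 %res
}

declare i8 @llvm.experimental.vector.extract.last.active.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8)

attributes #0 = { "target-features"="+sve" vscale_range(1, 16) }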

From 793989c712659587419b2c9646b47ee08a385ff4 Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Tue, 14 Jan 2025 13:44:45 +0000
Subject: [PATCH 02/10] Combine extract_vector_elt(find_last_active) -> lastb

---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 25 ++++++++++++++
 .../AArch64/vector-extract-last-active.ll     | 33 ++++---------------
 2 files changed, 31 insertions(+), 27 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index aacff045d99fe53..f5c785e6278a8d6 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -19731,6 +19731,29 @@ performLastTrueTestVectorCombine(SDNode *N,
   return getPTest(DAG, N->getValueType(0), Pg, N0, AArch64CC::LAST_ACTIVE);
 }
 
+static SDValue
+performExtractLastActiveCombine(SDNode *N,
+                                TargetLowering::DAGCombinerInfo &DCI,
+                                const AArch64Subtarget *Subtarget) {
+  assert(N->getOpcode() == ISD::EXTRACT_VECTOR_ELT);
+  SelectionDAG &DAG = DCI.DAG;
+  SDValue Vec = N->getOperand(0);
+  SDValue Idx = N->getOperand(1);
+
+  if (!Subtarget->hasSVE() || DCI.isBeforeLegalize() ||
+      Idx.getOpcode() != ISD::VECTOR_FIND_LAST_ACTIVE)
+    return SDValue();
+
+  SDValue Mask = Idx.getOperand(0);
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  if (TLI.getOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE, Mask.getValueType())
+      != TargetLowering::Legal)
+    return SDValue();
+
+  return DAG.getNode(AArch64ISD::LASTB, SDLoc(N), N->getValueType(0),
+                     Mask, Vec);
+}
+
 static SDValue
 performExtractVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
                                const AArch64Subtarget *Subtarget) {
@@ -19739,6 +19762,8 @@ performExtractVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
     return Res;
   if (SDValue Res = performLastTrueTestVectorCombine(N, DCI, Subtarget))
     return Res;
+  if (SDValue Res = performExtractLastActiveCombine(N, DCI, Subtarget))
+    return Res;
 
   SelectionDAG &DAG = DCI.DAG;
   SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
diff --git a/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll b/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll
index e4055608a1aeedd..b091d4874138f84 100644
--- a/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll
+++ b/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll
@@ -293,11 +293,8 @@ define double @extract_last_double(<2 x double> %data, <2 x i64> %mask, double %
 define i8 @extract_last_i8_scalable(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask, i8 %passthru) #0 {
 ; CHECK-LABEL: extract_last_i8_scalable:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    index z1.b, #0, #1
-; CHECK-NEXT:    lastb w8, p0, z1.b
-; CHECK-NEXT:    whilels p1.b, xzr, x8
+; CHECK-NEXT:    lastb w8, p0, z0.b
 ; CHECK-NEXT:    ptest p0, p0.b
-; CHECK-NEXT:    lastb w8, p1, z0.b
 ; CHECK-NEXT:    csel w0, w8, w0, ne
 ; CHECK-NEXT:    ret
   %res = call i8 @llvm.experimental.vector.extract.last.active.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask, i8 %passthru)
@@ -307,10 +304,7 @@ define i8 @extract_last_i8_scalable(<vscale x 16 x i8> %data, <vscale x 16 x i1>
 define i16 @extract_last_i16_scalable(<vscale x 8 x i16> %data, <vscale x 8 x i1> %mask, i16 %passthru) #0 {
 ; CHECK-LABEL: extract_last_i16_scalable:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    index z1.h, #0, #1
-; CHECK-NEXT:    lastb w8, p0, z1.h
-; CHECK-NEXT:    whilels p1.h, xzr, x8
-; CHECK-NEXT:    lastb w8, p1, z0.h
+; CHECK-NEXT:    lastb w8, p0, z0.h
 ; CHECK-NEXT:    ptrue p1.h
 ; CHECK-NEXT:    ptest p1, p0.b
 ; CHECK-NEXT:    csel w0, w8, w0, ne
@@ -322,10 +316,7 @@ define i16 @extract_last_i16_scalable(<vscale x 8 x i16> %data, <vscale x 8 x i1
 define i32 @extract_last_i32_scalable(<vscale x 4 x i32> %data, <vscale x 4 x i1> %mask, i32 %passthru) #0 {
 ; CHECK-LABEL: extract_last_i32_scalable:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    index z1.s, #0, #1
-; CHECK-NEXT:    lastb w8, p0, z1.s
-; CHECK-NEXT:    whilels p1.s, xzr, x8
-; CHECK-NEXT:    lastb w8, p1, z0.s
+; CHECK-NEXT:    lastb w8, p0, z0.s
 ; CHECK-NEXT:    ptrue p1.s
 ; CHECK-NEXT:    ptest p1, p0.b
 ; CHECK-NEXT:    csel w0, w8, w0, ne
@@ -337,10 +328,7 @@ define i32 @extract_last_i32_scalable(<vscale x 4 x i32> %data, <vscale x 4 x i1
 define i64 @extract_last_i64_scalable(<vscale x 2 x i64> %data, <vscale x 2 x i1> %mask, i64 %passthru) #0 {
 ; CHECK-LABEL: extract_last_i64_scalable:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    index z1.d, #0, #1
-; CHECK-NEXT:    lastb x8, p0, z1.d
-; CHECK-NEXT:    whilels p1.d, xzr, x8
-; CHECK-NEXT:    lastb x8, p1, z0.d
+; CHECK-NEXT:    lastb x8, p0, z0.d
 ; CHECK-NEXT:    ptrue p1.d
 ; CHECK-NEXT:    ptest p1, p0.b
 ; CHECK-NEXT:    csel x0, x8, x0, ne
@@ -352,10 +340,7 @@ define i64 @extract_last_i64_scalable(<vscale x 2 x i64> %data, <vscale x 2 x i1
 define float @extract_last_float_scalable(<vscale x 4 x float> %data, <vscale x 4 x i1> %mask, float %passthru) #0 {
 ; CHECK-LABEL: extract_last_float_scalable:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    index z2.s, #0, #1
-; CHECK-NEXT:    lastb w8, p0, z2.s
-; CHECK-NEXT:    whilels p1.s, xzr, x8
-; CHECK-NEXT:    lastb s0, p1, z0.s
+; CHECK-NEXT:    lastb s0, p0, z0.s
 ; CHECK-NEXT:    ptrue p1.s
 ; CHECK-NEXT:    ptest p1, p0.b
 ; CHECK-NEXT:    fcsel s0, s0, s1, ne
@@ -367,10 +352,7 @@ define float @extract_last_float_scalable(<vscale x 4 x float> %data, <vscale x
 define double @extract_last_double_scalable(<vscale x 2 x double> %data, <vscale x 2 x i1> %mask, double %passthru) #0 {
 ; CHECK-LABEL: extract_last_double_scalable:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    index z2.d, #0, #1
-; CHECK-NEXT:    lastb x8, p0, z2.d
-; CHECK-NEXT:    whilels p1.d, xzr, x8
-; CHECK-NEXT:    lastb d0, p1, z0.d
+; CHECK-NEXT:    lastb d0, p0, z0.d
 ; CHECK-NEXT:    ptrue p1.d
 ; CHECK-NEXT:    ptest p1, p0.b
 ; CHECK-NEXT:    fcsel d0, d0, d1, ne
@@ -383,9 +365,6 @@ define double @extract_last_double_scalable(<vscale x 2 x double> %data, <vscale
 define i8 @extract_last_i8_scalable_poison_passthru(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask) #0 {
 ; CHECK-LABEL: extract_last_i8_scalable_poison_passthru:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    index z1.b, #0, #1
-; CHECK-NEXT:    lastb w8, p0, z1.b
-; CHECK-NEXT:    whilels p0.b, xzr, x8
 ; CHECK-NEXT:    lastb w0, p0, z0.b
 ; CHECK-NEXT:    ret
   %res = call i8 @llvm.experimental.vector.extract.last.active.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask, i8 poison)
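
The combine in this patch recognises an EXTRACT_VECTOR_ELT whose index operand is a VECTOR_FIND_LAST_ACTIVE of a legal predicate type and emits AArch64ISD::LASTB on the data vector directly, so the index/whilels round trip disappears. The effect is clearest with a poison passthru, where no ptest/csel guard is needed either; a sketch assuming the same intrinsic and attributes as the sketch above (function name again made up):

; Sketch only -- matches the updated poison-passthru test: the whole call now
; selects to a single data-vector extract:
;   lastb w0, p0, z0.b
;   ret
define i8 @sketch_extract_last_i8_poison(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask) #0 {
  %res = call i8 @llvm.experimental.vector.extract.last.active.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask, i8 poison)
  ret i8 %res
}

declare i8 @llvm.experimental.vector.extract.last.active.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8)

attributes #0 = { "target-features"="+sve" vscale_range(1, 16) }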

From 2ae4f71718337fd32ab408527aaa42133a743598 Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Tue, 14 Jan 2025 15:50:32 +0000
Subject: [PATCH 03/10] Combine csel+lastb -> clastb

---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 39 +++++++++++++++++++
 .../AArch64/vector-extract-last-active.ll     | 31 ++++-----------
 2 files changed, 47 insertions(+), 23 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index f5c785e6278a8d6..db751fb5f16eb7b 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -24878,6 +24878,41 @@ static SDValue reassociateCSELOperandsForCSE(SDNode *N, SelectionDAG &DAG) {
   }
 }
 
+static SDValue foldCSELofLASTB(SDNode *Op, SelectionDAG &DAG) {
+  AArch64CC::CondCode OpCC =
+      static_cast<AArch64CC::CondCode>(Op->getConstantOperandVal(2));
+
+  if (OpCC != AArch64CC::NE)
+    return SDValue();
+
+  SDValue PTest = Op->getOperand(3);
+  if (PTest.getOpcode() != AArch64ISD::PTEST_ANY)
+    return SDValue();
+
+  SDValue TruePred = PTest.getOperand(0);
+  SDValue AnyPred = PTest.getOperand(1);
+
+  if (TruePred.getOpcode() == AArch64ISD::REINTERPRET_CAST)
+    TruePred = TruePred.getOperand(0);
+
+  if (AnyPred.getOpcode() == AArch64ISD::REINTERPRET_CAST)
+    AnyPred = AnyPred.getOperand(0);
+
+  if (TruePred != AnyPred && TruePred.getOpcode() != AArch64ISD::PTRUE)
+    return SDValue();
+
+  SDValue LastB = Op->getOperand(0);
+  SDValue Default = Op->getOperand(1);
+
+  if (LastB.getOpcode() != AArch64ISD::LASTB || LastB.getOperand(0) != AnyPred)
+    return SDValue();
+
+  SDValue Vec = LastB.getOperand(1);
+
+  return DAG.getNode(AArch64ISD::CLASTB_N, SDLoc(Op), Op->getValueType(0),
+                     AnyPred, Default, Vec);
+}
+
 // Optimize CSEL instructions
 static SDValue performCSELCombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI,
@@ -24923,6 +24958,10 @@ static SDValue performCSELCombine(SDNode *N,
     }
   }
 
+  // CSEL (LASTB P, Z), X, NE(ANY P) -> CLASTB P, X, Z
+  if (SDValue CondLast = foldCSELofLASTB(N, DAG))
+    return CondLast;
+
   return performCONDCombine(N, DCI, DAG, 2, 3);
 }
 
diff --git a/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll b/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll
index b091d4874138f84..e9593ce014d234d 100644
--- a/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll
+++ b/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll
@@ -293,9 +293,7 @@ define double @extract_last_double(<2 x double> %data, <2 x i64> %mask, double %
 define i8 @extract_last_i8_scalable(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask, i8 %passthru) #0 {
 ; CHECK-LABEL: extract_last_i8_scalable:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    lastb w8, p0, z0.b
-; CHECK-NEXT:    ptest p0, p0.b
-; CHECK-NEXT:    csel w0, w8, w0, ne
+; CHECK-NEXT:    clastb w0, p0, w0, z0.b
 ; CHECK-NEXT:    ret
   %res = call i8 @llvm.experimental.vector.extract.last.active.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask, i8 %passthru)
   ret i8 %res
@@ -304,10 +302,7 @@ define i8 @extract_last_i8_scalable(<vscale x 16 x i8> %data, <vscale x 16 x i1>
 define i16 @extract_last_i16_scalable(<vscale x 8 x i16> %data, <vscale x 8 x i1> %mask, i16 %passthru) #0 {
 ; CHECK-LABEL: extract_last_i16_scalable:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    lastb w8, p0, z0.h
-; CHECK-NEXT:    ptrue p1.h
-; CHECK-NEXT:    ptest p1, p0.b
-; CHECK-NEXT:    csel w0, w8, w0, ne
+; CHECK-NEXT:    clastb w0, p0, w0, z0.h
 ; CHECK-NEXT:    ret
   %res = call i16 @llvm.experimental.vector.extract.last.active.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %mask, i16 %passthru)
   ret i16 %res
@@ -316,10 +311,7 @@ define i16 @extract_last_i16_scalable(<vscale x 8 x i16> %data, <vscale x 8 x i1
 define i32 @extract_last_i32_scalable(<vscale x 4 x i32> %data, <vscale x 4 x i1> %mask, i32 %passthru) #0 {
 ; CHECK-LABEL: extract_last_i32_scalable:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    lastb w8, p0, z0.s
-; CHECK-NEXT:    ptrue p1.s
-; CHECK-NEXT:    ptest p1, p0.b
-; CHECK-NEXT:    csel w0, w8, w0, ne
+; CHECK-NEXT:    clastb w0, p0, w0, z0.s
 ; CHECK-NEXT:    ret
   %res = call i32 @llvm.experimental.vector.extract.last.active.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %mask, i32 %passthru)
   ret i32 %res
@@ -328,10 +320,7 @@ define i32 @extract_last_i32_scalable(<vscale x 4 x i32> %data, <vscale x 4 x i1
 define i64 @extract_last_i64_scalable(<vscale x 2 x i64> %data, <vscale x 2 x i1> %mask, i64 %passthru) #0 {
 ; CHECK-LABEL: extract_last_i64_scalable:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    lastb x8, p0, z0.d
-; CHECK-NEXT:    ptrue p1.d
-; CHECK-NEXT:    ptest p1, p0.b
-; CHECK-NEXT:    csel x0, x8, x0, ne
+; CHECK-NEXT:    clastb x0, p0, x0, z0.d
 ; CHECK-NEXT:    ret
   %res = call i64 @llvm.experimental.vector.extract.last.active.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %mask, i64 %passthru)
   ret i64 %res
@@ -340,10 +329,8 @@ define i64 @extract_last_i64_scalable(<vscale x 2 x i64> %data, <vscale x 2 x i1
 define float @extract_last_float_scalable(<vscale x 4 x float> %data, <vscale x 4 x i1> %mask, float %passthru) #0 {
 ; CHECK-LABEL: extract_last_float_scalable:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    lastb s0, p0, z0.s
-; CHECK-NEXT:    ptrue p1.s
-; CHECK-NEXT:    ptest p1, p0.b
-; CHECK-NEXT:    fcsel s0, s0, s1, ne
+; CHECK-NEXT:    clastb s1, p0, s1, z0.s
+; CHECK-NEXT:    fmov s0, s1
 ; CHECK-NEXT:    ret
   %res = call float @llvm.experimental.vector.extract.last.active.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %mask, float %passthru)
   ret float %res
@@ -352,10 +339,8 @@ define float @extract_last_float_scalable(<vscale x 4 x float> %data, <vscale x
 define double @extract_last_double_scalable(<vscale x 2 x double> %data, <vscale x 2 x i1> %mask, double %passthru) #0 {
 ; CHECK-LABEL: extract_last_double_scalable:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    lastb d0, p0, z0.d
-; CHECK-NEXT:    ptrue p1.d
-; CHECK-NEXT:    ptest p1, p0.b
-; CHECK-NEXT:    fcsel d0, d0, d1, ne
+; CHECK-NEXT:    clastb d1, p0, d1, z0.d
+; CHECK-NEXT:    fmov d0, d1
 ; CHECK-NEXT:    ret
   %res = call double @llvm.experimental.vector.extract.last.active.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %mask, double %passthru)
   ret double %res
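
This patch folds the remaining csel/fcsel of a LASTB result against its passthru, guarded by a PTEST_ANY of the same (or an all-true) predicate, into a single AArch64ISD::CLASTB_N node. The integer tests collapse to one clastb; in the FP tests a register move remains, since clastb destructively updates the passthru register rather than the return register. A sketch of the float case, assuming the same intrinsic family and attributes as above (function name made up):

; Sketch only -- mirrors the nxv4f32 test above, which now produces:
;   clastb s1, p0, s1, z0.s
;   fmov   s0, s1
;   ret
define float @sketch_extract_last_float(<vscale x 4 x float> %data, <vscale x 4 x i1> %mask, float %passthru) #0 {
  %res = call float @llvm.experimental.vector.extract.last.active.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %mask, float %passthru)
  ret float %res
}

declare float @llvm.experimental.vector.extract.last.active.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float)

attributes #0 = { "target-features"="+sve" vscale_range(1, 16) }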

From 4d30b3a2692794278d41f40df8b1699a086b42e7 Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Wed, 22 Jan 2025 15:58:17 +0000
Subject: [PATCH 04/10] Formatting

---
 .../include/llvm/Target/TargetSelectionDAG.td |  4 ++--
 .../Target/AArch64/AArch64ISelLowering.cpp    | 11 +++++------
 .../lib/Target/AArch64/AArch64SVEInstrInfo.td | 19 +++++++++++--------
 3 files changed, 18 insertions(+), 16 deletions(-)

diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index 2c8951154f854fc..44046129f73abac 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -840,8 +840,8 @@ def vector_insert_subvec : SDNode<"ISD::INSERT_SUBVECTOR",
 def extract_subvector : SDNode<"ISD::EXTRACT_SUBVECTOR", SDTSubVecExtract, []>;
 def insert_subvector : SDNode<"ISD::INSERT_SUBVECTOR", SDTSubVecInsert, []>;
 
-def find_last_active : SDNode<"ISD::VECTOR_FIND_LAST_ACTIVE",
-                              SDTypeProfile<1, 1, []>, []>;
+def find_last_active
+    : SDNode<"ISD::VECTOR_FIND_LAST_ACTIVE", SDTypeProfile<1, 1, []>, []>;
 
 // Nodes for intrinsics, you should use the intrinsic itself and let tblgen use
 // these internally.  Don't reference these directly.
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index db751fb5f16eb7b..1268eff7b65f931 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -19732,8 +19732,7 @@ performLastTrueTestVectorCombine(SDNode *N,
 }
 
 static SDValue
-performExtractLastActiveCombine(SDNode *N,
-                                TargetLowering::DAGCombinerInfo &DCI,
+performExtractLastActiveCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
                                 const AArch64Subtarget *Subtarget) {
   assert(N->getOpcode() == ISD::EXTRACT_VECTOR_ELT);
   SelectionDAG &DAG = DCI.DAG;
@@ -19746,12 +19745,12 @@ performExtractLastActiveCombine(SDNode *N,
 
   SDValue Mask = Idx.getOperand(0);
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-  if (TLI.getOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE, Mask.getValueType())
-      != TargetLowering::Legal)
+  if (TLI.getOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE,
+                             Mask.getValueType()) != TargetLowering::Legal)
     return SDValue();
 
-  return DAG.getNode(AArch64ISD::LASTB, SDLoc(N), N->getValueType(0),
-                     Mask, Vec);
+  return DAG.getNode(AArch64ISD::LASTB, SDLoc(N), N->getValueType(0), Mask,
+                     Vec);
 }
 
 static SDValue
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 02062f7e98a6855..28aecd14e33fae5 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -3381,14 +3381,17 @@ let Predicates = [HasSVE_or_SME] in {
 
   // Find index of last active lane. This is a fallback in case we miss the
   // opportunity to fold into a lastb or clastb directly.
-  def : Pat<(i64 (find_last_active nxv16i1:$P1)),
-            (INSERT_SUBREG (IMPLICIT_DEF), (LASTB_RPZ_B $P1, (INDEX_II_B 0, 1)), sub_32)>;
-  def : Pat<(i64 (find_last_active nxv8i1:$P1)),
-            (INSERT_SUBREG (IMPLICIT_DEF), (LASTB_RPZ_H $P1, (INDEX_II_H 0, 1)), sub_32)>;
-  def : Pat<(i64 (find_last_active nxv4i1:$P1)),
-            (INSERT_SUBREG (IMPLICIT_DEF), (LASTB_RPZ_S $P1, (INDEX_II_S 0, 1)), sub_32)>;
-  def : Pat<(i64 (find_last_active nxv2i1:$P1)),
-            (LASTB_RPZ_D $P1, (INDEX_II_D 0, 1))>;
+  def : Pat<(i64(find_last_active nxv16i1:$P1)),
+            (INSERT_SUBREG(IMPLICIT_DEF), (LASTB_RPZ_B $P1, (INDEX_II_B 0, 1)),
+                sub_32)>;
+  def : Pat<(i64(find_last_active nxv8i1:$P1)),
+            (INSERT_SUBREG(IMPLICIT_DEF), (LASTB_RPZ_H $P1, (INDEX_II_H 0, 1)),
+                sub_32)>;
+  def : Pat<(i64(find_last_active nxv4i1:$P1)),
+            (INSERT_SUBREG(IMPLICIT_DEF), (LASTB_RPZ_S $P1, (INDEX_II_S 0, 1)),
+                sub_32)>;
+  def : Pat<(i64(find_last_active nxv2i1:$P1)), (LASTB_RPZ_D $P1, (INDEX_II_D 0,
+                                                                      1))>;
 
   // Move element from the bottom 128-bits of a scalable vector to a single-element vector.
   // Alternative case where insertelement is just scalar_to_vector rather than vector_insert.

From 15c49745e09fa11e1d0ce5ae2583b8c4a01c6048 Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Wed, 5 Feb 2025 13:37:06 +0000
Subject: [PATCH 05/10] Type constraints for find last active ISD node

---
 llvm/include/llvm/Target/TargetSelectionDAG.td | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index 44046129f73abac..e8b5bc7ba630955 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -841,7 +841,7 @@ def extract_subvector : SDNode<"ISD::EXTRACT_SUBVECTOR", SDTSubVecExtract, []>;
 def insert_subvector : SDNode<"ISD::INSERT_SUBVECTOR", SDTSubVecInsert, []>;
 
 def find_last_active
-    : SDNode<"ISD::VECTOR_FIND_LAST_ACTIVE", SDTypeProfile<1, 1, []>, []>;
+    : SDNode<"ISD::VECTOR_FIND_LAST_ACTIVE", SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVec<1>]>, []>;
 
 // Nodes for intrinsics, you should use the intrinsic itself and let tblgen use
 // these internally.  Don't reference these directly.

From 5d74593b1ea4c2abb6aa5cfc3303524669e5551d Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Wed, 5 Feb 2025 15:20:32 +0000
Subject: [PATCH 06/10] Restrict combine by element types

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp   | 10 ++++++++--
 .../CodeGen/AArch64/vector-extract-last-active.ll | 15 +++++++++++++++
 2 files changed, 23 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 1268eff7b65f931..6222fb923beef4c 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -19739,8 +19739,14 @@ performExtractLastActiveCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
   SDValue Vec = N->getOperand(0);
   SDValue Idx = N->getOperand(1);
 
-  if (!Subtarget->hasSVE() || DCI.isBeforeLegalize() ||
-      Idx.getOpcode() != ISD::VECTOR_FIND_LAST_ACTIVE)
+  if (DCI.isBeforeLegalize() || Idx.getOpcode() != ISD::VECTOR_FIND_LAST_ACTIVE)
+    return SDValue();
+
+  // Only legal for 8, 16, 32, and 64 bit element types.
+  EVT EltVT = Vec.getValueType().getVectorElementType();
+  if (!is_contained(
+          ArrayRef({MVT::i8, MVT::i16, MVT::i32, MVT::i64, MVT::f32, MVT::f64}),
+          EltVT.getSimpleVT().SimpleTy))
     return SDValue();
 
   SDValue Mask = Idx.getOperand(0);
diff --git a/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll b/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll
index e9593ce014d234d..af9b75a52d9d7cf 100644
--- a/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll
+++ b/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll
@@ -356,6 +356,20 @@ define i8 @extract_last_i8_scalable_poison_passthru(<vscale x 16 x i8> %data, <v
   ret i8 %res
 }
 
+;; (c)lastb doesn't exist for predicate types; check we get functional codegen
+define i1 @extract_last_i1_scalable(<vscale x 16 x i1> %data, <vscale x 16 x i1> %mask) #0 {
+; CHECK-LABEL: extract_last_i1_scalable:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.b, p0/z, #1 // =0x1
+; CHECK-NEXT:    ptest p1, p1.b
+; CHECK-NEXT:    cset w9, ne
+; CHECK-NEXT:    lastb w8, p1, z0.b
+; CHECK-NEXT:    and w0, w9, w8
+; CHECK-NEXT:    ret
+  %res = call i1 @llvm.experimental.vector.extract.last.active.nxv16i1(<vscale x 16 x i1> %data, <vscale x 16 x i1> %mask, i1 false)
+  ret i1 %res
+}
+
 declare i8 @llvm.experimental.vector.extract.last.active.v16i8(<16 x i8>, <16 x i1>, i8)
 declare i16 @llvm.experimental.vector.extract.last.active.v8i16(<8 x i16>, <8 x i1>, i16)
 declare i32 @llvm.experimental.vector.extract.last.active.v4i32(<4 x i32>, <4 x i1>, i32)
@@ -368,5 +382,6 @@ declare i32 @llvm.experimental.vector.extract.last.active.nxv4i32(<vscale x 4 x
 declare i64 @llvm.experimental.vector.extract.last.active.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64)
 declare float @llvm.experimental.vector.extract.last.active.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float)
 declare double @llvm.experimental.vector.extract.last.active.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double)
+declare i1 @llvm.experimental.vector.extract.last.active.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, i1)
 
 attributes #0 = { "target-features"="+sve" vscale_range(1, 16) }
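
The new i1 test documents the fallback when the element type has no (c)lastb form: the predicate data is expanded to a 0/1 byte vector, the last mask-active byte is pulled out with lastb, and a ptest/cset of the mask selects between that and the false passthru. A sketch with the observed codegen annotated (same attributes as above; the function name is made up, and the comments map each instruction from the CHECK lines to its role):

; Sketch only -- mirrors the new nxv16i1 test.
define i1 @sketch_extract_last_i1(<vscale x 16 x i1> %data, <vscale x 16 x i1> %mask) #0 {
  ; mov   z0.b, p0/z, #1     -- %data widened to a 0/1 byte vector
  ; lastb w8, p1, z0.b       -- last %mask-active byte of that vector
  ; ptest p1, p1.b / cset w9 -- is any %mask lane active?
  ; and   w0, w9, w8         -- if not, the result is the i1 false passthru
  %res = call i1 @llvm.experimental.vector.extract.last.active.nxv16i1(<vscale x 16 x i1> %data, <vscale x 16 x i1> %mask, i1 false)
  ret i1 %res
}

declare i1 @llvm.experimental.vector.extract.last.active.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, i1)

attributes #0 = { "target-features"="+sve" vscale_range(1, 16) }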

From 6e375c4210649ba2ffb26dfc3bfa8b71529cdd6b Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Wed, 5 Feb 2025 15:57:02 +0000
Subject: [PATCH 07/10] Don't flag nxv1i1 as legal for extract last

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 6222fb923beef4c..55f4399d56d8248 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1451,8 +1451,9 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
       setOperationAction(ISD::VECTOR_DEINTERLEAVE, VT, Custom);
       setOperationAction(ISD::VECTOR_INTERLEAVE, VT, Custom);
-      setOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE, VT, Legal);
     }
+    for (auto VT: {MVT::nxv16i1, MVT::nxv8i1, MVT::nxv4i1, MVT::nxv2i1})
+      setOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE, VT, Legal);
   }
 
   if (Subtarget->isSVEorStreamingSVEAvailable()) {

From 40eb936924623d15cd528f406a7c4562609c44bb Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Wed, 5 Feb 2025 15:57:23 +0000
Subject: [PATCH 08/10] Use isOperationLegal

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 55f4399d56d8248..817306d75a1738f 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -19752,8 +19752,7 @@ performExtractLastActiveCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
 
   SDValue Mask = Idx.getOperand(0);
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-  if (TLI.getOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE,
-                             Mask.getValueType()) != TargetLowering::Legal)
+  if (!TLI.isOperationLegal(ISD::VECTOR_FIND_LAST_ACTIVE, Mask.getValueType()))
     return SDValue();
 
   return DAG.getNode(AArch64ISD::LASTB, SDLoc(N), N->getValueType(0), Mask,

From 997ae1a26deeef0b5fa1cd81dd98eecb50678225 Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Wed, 5 Feb 2025 16:11:56 +0000
Subject: [PATCH 09/10] Remove single-use variable

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 817306d75a1738f..3c0a9a05d3a5151 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -24912,10 +24912,8 @@ static SDValue foldCSELofLASTB(SDNode *Op, SelectionDAG &DAG) {
   if (LastB.getOpcode() != AArch64ISD::LASTB || LastB.getOperand(0) != AnyPred)
     return SDValue();
 
-  SDValue Vec = LastB.getOperand(1);
-
   return DAG.getNode(AArch64ISD::CLASTB_N, SDLoc(Op), Op->getValueType(0),
-                     AnyPred, Default, Vec);
+                     AnyPred, Default, LastB.getOperand(1));
 }
 
 // Optimize CSEL instructions

From cf5b283d8997580c5a05aad7309735448d40b455 Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Wed, 5 Feb 2025 16:17:24 +0000
Subject: [PATCH 10/10] Formatting

---
 llvm/include/llvm/Target/TargetSelectionDAG.td  | 3 ++-
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index e8b5bc7ba630955..42a5fbec95174e1 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -841,7 +841,8 @@ def extract_subvector : SDNode<"ISD::EXTRACT_SUBVECTOR", SDTSubVecExtract, []>;
 def insert_subvector : SDNode<"ISD::INSERT_SUBVECTOR", SDTSubVecInsert, []>;
 
 def find_last_active
-    : SDNode<"ISD::VECTOR_FIND_LAST_ACTIVE", SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVec<1>]>, []>;
+    : SDNode<"ISD::VECTOR_FIND_LAST_ACTIVE",
+             SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVec<1>]>, []>;
 
 // Nodes for intrinsics, you should use the intrinsic itself and let tblgen use
 // these internally.  Don't reference these directly.
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 3c0a9a05d3a5151..7e97c75f06863a0 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1452,7 +1452,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::VECTOR_DEINTERLEAVE, VT, Custom);
       setOperationAction(ISD::VECTOR_INTERLEAVE, VT, Custom);
     }
-    for (auto VT: {MVT::nxv16i1, MVT::nxv8i1, MVT::nxv4i1, MVT::nxv2i1})
+    for (auto VT : {MVT::nxv16i1, MVT::nxv8i1, MVT::nxv4i1, MVT::nxv2i1})
       setOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE, VT, Legal);
   }
 


