[llvm] [AArch64][SVE] Rework VECTOR_COMPRESS lowering (PR #171162)

Benjamin Maxwell via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 12 06:26:46 PST 2025


https://github.com/MacDue updated https://github.com/llvm/llvm-project/pull/171162

>From 82f8d105369acde820035fa26e37b6c03153bce4 Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell <benjamin.maxwell at arm.com>
Date: Mon, 8 Dec 2025 17:35:19 +0000
Subject: [PATCH 1/4] [AArch64][SVE] Rework VECTOR_COMPRESS lowering
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This removes the use of `LowerVECTOR_COMPRESS` in `ReplaceNodeResults`
(which was used to promote illegal integer VTs) and instead marks only
the legal VTs as "Custom", allowing standard type legalization to
handle the rest.
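As a sketch of the kind of case that previously went through
`ReplaceNodeResults` (a hypothetical example, not taken from the
tests): an illegal integer VT such as nxv4i16 should now be promoted
by the common type legalizer before reaching the custom lowering:

  declare <vscale x 4 x i16> @llvm.experimental.vector.compress.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, <vscale x 4 x i16>)

  define <vscale x 4 x i16> @compress_nxv4i16(<vscale x 4 x i16> %vec, <vscale x 4 x i1> %mask) {
    ; nxv4i16 is not a legal SVE type; standard type legalization promotes
    ; this node to nxv4i32, which is one of the VTs marked "Custom" below.
    %out = call <vscale x 4 x i16> @llvm.experimental.vector.compress.nxv4i16(<vscale x 4 x i16> %vec, <vscale x 4 x i1> %mask, <vscale x 4 x i16> poison)
    ret <vscale x 4 x i16> %out
  }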

This patch also simplifies the lowering by using the existing
fixed-length <-> SVE conversion helpers.
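Concretely, the fixed-length operands are moved into SVE containers
via `convertToScalableVector`/`convertFixedMaskToScalableVector` and
the result is taken back out with `convertFromScalableVector`. A
minimal example, mirroring the existing sve-vector-compress.ll tests:

  declare <4 x i32> @llvm.experimental.vector.compress.v4i32(<4 x i32>, <4 x i1>, <4 x i32>)

  define <4 x i32> @compress_v4i32(<4 x i32> %vec, <4 x i1> %mask) {
    ; lowered by inserting %vec/%mask into nxv4i32/nxv4i1 containers,
    ; compressing with compact.s, and extracting the low 128 bits back out
    %out = call <4 x i32> @llvm.experimental.vector.compress.v4i32(<4 x i32> %vec, <4 x i1> %mask, <4 x i32> poison)
    ret <4 x i32> %out
  }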

This was intended to be an NFC, but it appears to have caused some
minor code-gen changes/improvements.
---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 115 ++++++------------
 llvm/lib/Target/AArch64/AArch64ISelLowering.h |   2 +
 .../CodeGen/AArch64/sve-vector-compress.ll    |  39 +++---
 3 files changed, 57 insertions(+), 99 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index d1441a744eee8..7767d06190c7f 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1993,22 +1993,25 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
 
     // We can lower types that have <vscale x {2|4}> elements to compact.
+    for (auto VT :
+         {MVT::nxv2i64, MVT::nxv2f32, MVT::nxv2f64, MVT::nxv4i32, MVT::nxv4f32})
+      setOperationAction(ISD::VECTOR_COMPRESS, VT, Custom);
+
+    // If we have SVE, we can use SVE logic for legal NEON vectors in the lowest
+    // bits of the SVE register.
+    for (auto VT : {MVT::v2i32, MVT::v2i64, MVT::v2f32, MVT::v2f64, MVT::v4i16,
+                    MVT::v4i32, MVT::v4f32})
+      setOperationAction(ISD::VECTOR_COMPRESS, VT, Custom);
+
     for (auto VT : {MVT::nxv2i8, MVT::nxv2i16, MVT::nxv2i32, MVT::nxv2i64,
                     MVT::nxv2f32, MVT::nxv2f64, MVT::nxv4i8, MVT::nxv4i16,
                     MVT::nxv4i32, MVT::nxv4f32}) {
-      setOperationAction(ISD::VECTOR_COMPRESS, VT, Custom);
       // Use a custom lowering for masked stores that could be a supported
       // compressing store. Note: These types still use the normal (Legal)
       // lowering for non-compressing masked stores.
       setOperationAction(ISD::MSTORE, VT, Custom);
     }
 
-    // If we have SVE, we can use SVE logic for legal (or smaller than legal)
-    // NEON vectors in the lowest bits of the SVE register.
-    for (auto VT : {MVT::v2i8, MVT::v2i16, MVT::v2i32, MVT::v2i64, MVT::v2f32,
-                    MVT::v2f64, MVT::v4i8, MVT::v4i16, MVT::v4i32, MVT::v4f32})
-      setOperationAction(ISD::VECTOR_COMPRESS, VT, Custom);
-
     // Histcnt is SVE2 only
     if (Subtarget->hasSVE2()) {
       setOperationAction(ISD::EXPERIMENTAL_VECTOR_HISTOGRAM, MVT::nxv4i32,
@@ -7442,60 +7445,43 @@ static SDValue convertFromSVEContainerType(SDLoc DL, SDValue Vec, EVT VecVT,
   return Vec;
 }
 
-SDValue AArch64TargetLowering::LowerVECTOR_COMPRESS(SDValue Op,
-                                                    SelectionDAG &DAG) const {
+SDValue AArch64TargetLowering::LowerFixedLengthVectorCompressToSVE(
+    SDValue Op, SelectionDAG &DAG) const {
   SDLoc DL(Op);
-  SDValue Vec = Op.getOperand(0);
-  SDValue Mask = Op.getOperand(1);
-  SDValue Passthru = Op.getOperand(2);
-  EVT VecVT = Vec.getValueType();
-  EVT MaskVT = Mask.getValueType();
-  EVT ElmtVT = VecVT.getVectorElementType();
-  const bool IsFixedLength = VecVT.isFixedLengthVector();
-  const bool HasPassthru = !Passthru.isUndef();
-  unsigned MinElmts = VecVT.getVectorElementCount().getKnownMinValue();
-  EVT FixedVecVT = MVT::getVectorVT(ElmtVT.getSimpleVT(), MinElmts);
+  EVT VT = Op.getValueType();
 
-  assert(VecVT.isVector() && "Input to VECTOR_COMPRESS must be vector.");
+  EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
+  SDValue Vec = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
+  SDValue Mask = convertFixedMaskToScalableVector(Op.getOperand(1), DAG);
+  SDValue Passthru =
+      convertToScalableVector(DAG, ContainerVT, Op.getOperand(2));
 
-  if (!Subtarget->isSVEAvailable())
-    return SDValue();
+  SDValue Result =
+      DAG.getNode(ISD::VECTOR_COMPRESS, DL, ContainerVT, Vec, Mask, Passthru);
+  return convertFromScalableVector(DAG, VT, Result);
+}
 
-  if (IsFixedLength && VecVT.getSizeInBits().getFixedValue() > 128)
-    return SDValue();
+SDValue AArch64TargetLowering::LowerVECTOR_COMPRESS(SDValue Op,
+                                                    SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  EVT VT = Op.getValueType();
+  if (VT.isFixedLengthVector())
+    return LowerFixedLengthVectorCompressToSVE(Op, DAG);
 
-  // Only <vscale x {4|2} x {i32|i64}> supported for compact.
-  if (MinElmts != 2 && MinElmts != 4)
+  if (!Subtarget->isSVEAvailable())
     return SDValue();
 
-  // We can use the SVE register containing the NEON vector in its lowest bits.
-  if (IsFixedLength) {
-    EVT ScalableVecVT =
-        MVT::getScalableVectorVT(ElmtVT.getSimpleVT(), MinElmts);
-    EVT ScalableMaskVT = MVT::getScalableVectorVT(
-        MaskVT.getVectorElementType().getSimpleVT(), MinElmts);
-
-    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ScalableVecVT,
-                      DAG.getUNDEF(ScalableVecVT), Vec,
-                      DAG.getConstant(0, DL, MVT::i64));
-    Mask = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ScalableMaskVT,
-                       DAG.getUNDEF(ScalableMaskVT), Mask,
-                       DAG.getConstant(0, DL, MVT::i64));
-    Mask = DAG.getNode(ISD::TRUNCATE, DL,
-                       ScalableMaskVT.changeVectorElementType(MVT::i1), Mask);
-    Passthru = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ScalableVecVT,
-                           DAG.getUNDEF(ScalableVecVT), Passthru,
-                           DAG.getConstant(0, DL, MVT::i64));
+  SDValue Vec = Op.getOperand(0);
+  SDValue Mask = Op.getOperand(1);
+  SDValue Passthru = Op.getOperand(2);
+  EVT MaskVT = Mask.getValueType();
 
-    VecVT = Vec.getValueType();
-    MaskVT = Mask.getValueType();
-  }
+  assert(VT.isVector() && "Input to VECTOR_COMPRESS must be vector.");
 
   // Get legal type for compact instruction
-  EVT ContainerVT = getSVEContainerType(VecVT);
+  EVT ContainerVT = getSVEContainerType(VT);
 
-  // Convert to 32 or 64 bits for smaller types, as these are the only supported
-  // sizes for compact.
+  // Convert to a packed 32/64-bit SVE vector of the same element count as VT.
   Vec = convertToSVEContainerType(DL, Vec, ContainerVT, DAG);
 
   SDValue Compressed = DAG.getNode(
@@ -7504,7 +7490,8 @@ SDValue AArch64TargetLowering::LowerVECTOR_COMPRESS(SDValue Op,
       Vec);
 
   // compact fills with 0s, so if our passthru is all 0s, do nothing here.
-  if (HasPassthru && !ISD::isConstantSplatVectorAllZeros(Passthru.getNode())) {
+  if (!Passthru.isUndef() &&
+      !ISD::isConstantSplatVectorAllZeros(Passthru.getNode())) {
     SDValue Offset = DAG.getNode(
         ISD::INTRINSIC_WO_CHAIN, DL, MVT::i64,
         DAG.getTargetConstant(Intrinsic::aarch64_sve_cntp, DL, MVT::i64), Mask,
@@ -7516,28 +7503,10 @@ SDValue AArch64TargetLowering::LowerVECTOR_COMPRESS(SDValue Op,
         DAG.getConstant(0, DL, MVT::i64), Offset);
 
     Compressed =
-        DAG.getNode(ISD::VSELECT, DL, VecVT, IndexMask, Compressed, Passthru);
-  }
-
-  // If we changed the element type before, we need to convert it back.
-  if (ElmtVT.isFloatingPoint())
-    Compressed = convertFromSVEContainerType(DL, Compressed, VecVT, DAG);
-
-  // Extracting from a legal SVE type before truncating produces better code.
-  if (IsFixedLength) {
-    EVT FixedSubVector = VecVT.isInteger()
-                             ? FixedVecVT.changeVectorElementType(
-                                   ContainerVT.getVectorElementType())
-                             : FixedVecVT;
-    Compressed = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, FixedSubVector,
-                             Compressed, DAG.getConstant(0, DL, MVT::i64));
-    VecVT = FixedVecVT;
+        DAG.getNode(ISD::VSELECT, DL, VT, IndexMask, Compressed, Passthru);
   }
 
-  if (VecVT.isInteger())
-    Compressed = DAG.getNode(ISD::TRUNCATE, DL, VecVT, Compressed);
-
-  return Compressed;
+  return convertFromSVEContainerType(DL, Compressed, VT, DAG);
 }
 
 // Generate SUBS and CSEL for integer abs.
@@ -29320,10 +29289,6 @@ void AArch64TargetLowering::ReplaceNodeResults(
   case ISD::VECREDUCE_UMIN:
     Results.push_back(LowerVECREDUCE(SDValue(N, 0), DAG));
     return;
-  case ISD::VECTOR_COMPRESS:
-    if (SDValue Res = LowerVECTOR_COMPRESS(SDValue(N, 0), DAG))
-      Results.push_back(Res);
-    return;
   case ISD::ADD:
   case ISD::FADD:
     ReplaceAddWithADDP(N, Results, DAG, Subtarget);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index e8c026d989eb8..deddaa0e63ade 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -623,6 +623,8 @@ class AArch64TargetLowering : public TargetLowering {
   SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) const;
 
   SDValue LowerVECTOR_COMPRESS(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerFixedLengthVectorCompressToSVE(SDValue Op,
+                                              SelectionDAG &DAG) const;
 
   SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/test/CodeGen/AArch64/sve-vector-compress.ll b/llvm/test/CodeGen/AArch64/sve-vector-compress.ll
index f700dee0fb2e4..559c842a60791 100644
--- a/llvm/test/CodeGen/AArch64/sve-vector-compress.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vector-compress.ll
@@ -194,11 +194,9 @@ define <4 x i32> @test_compress_v4i32_with_sve(<4 x i32> %vec, <4 x i1> %mask) {
 ; CHECK-LABEL: test_compress_v4i32_with_sve:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ushll v1.4s, v1.4h, #0
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
 ; CHECK-NEXT:    shl v1.4s, v1.4s, #31
-; CHECK-NEXT:    cmlt v1.4s, v1.4s, #0
-; CHECK-NEXT:    and z1.s, z1.s, #0x1
 ; CHECK-NEXT:    cmpne p0.s, p0/z, z1.s, #0
 ; CHECK-NEXT:    compact z0.s, p0, z0.s
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
@@ -212,14 +210,12 @@ define <1 x i32> @test_compress_v1i32_with_sve(<1 x i32> %vec, <1 x i1> %mask) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    sbfx w8, w0, #0, #1
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    ushll v0.2d, v0.2s, #0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    mov v1.s[0], w8
-; CHECK-NEXT:    ushll v1.2d, v1.2s, #0
-; CHECK-NEXT:    and z1.d, z1.d, #0x1
-; CHECK-NEXT:    cmpne p0.d, p0/z, z1.d, #0
-; CHECK-NEXT:    compact z0.d, p0, z0.d
-; CHECK-NEXT:    xtn v0.2s, v0.2d
+; CHECK-NEXT:    cmpne p0.s, p0/z, z1.s, #0
+; CHECK-NEXT:    compact z0.s, p0, z0.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
     %out = call <1 x i32> @llvm.experimental.vector.compress(<1 x i32> %vec, <1 x i1> %mask, <1 x i32> poison)
     ret <1 x i32> %out
@@ -231,7 +227,7 @@ define <4 x double> @test_compress_v4f64_with_sve(<4 x double> %vec, <4 x i1> %m
 ; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    ushll v2.4s, v2.4h, #0
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
 ; CHECK-NEXT:    ushll v3.2d, v2.2s, #0
@@ -242,14 +238,10 @@ define <4 x double> @test_compress_v4f64_with_sve(<4 x double> %vec, <4 x i1> %m
 ; CHECK-NEXT:    lsr x9, x8, #32
 ; CHECK-NEXT:    eor w8, w8, w9
 ; CHECK-NEXT:    mov x9, sp
-; CHECK-NEXT:    cmlt v3.2d, v3.2d, #0
-; CHECK-NEXT:    cmlt v4.2d, v4.2d, #0
-; CHECK-NEXT:    and x8, x8, #0x3
-; CHECK-NEXT:    lsl x8, x8, #3
-; CHECK-NEXT:    and z3.d, z3.d, #0x1
-; CHECK-NEXT:    and z4.d, z4.d, #0x1
 ; CHECK-NEXT:    cmpne p1.d, p0/z, z3.d, #0
 ; CHECK-NEXT:    cmpne p0.d, p0/z, z4.d, #0
+; CHECK-NEXT:    and x8, x8, #0x3
+; CHECK-NEXT:    lsl x8, x8, #3
 ; CHECK-NEXT:    compact z0.d, p1, z0.d
 ; CHECK-NEXT:    compact z1.d, p0, z1.d
 ; CHECK-NEXT:    str q0, [sp]
@@ -263,13 +255,12 @@ define <4 x double> @test_compress_v4f64_with_sve(<4 x double> %vec, <4 x i1> %m
 define <2 x i16> @test_compress_v2i16_with_sve(<2 x i16> %vec, <2 x i1> %mask) {
 ; CHECK-LABEL: test_compress_v2i16_with_sve:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ushll v1.2d, v1.2s, #0
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    ushll v0.2d, v0.2s, #0
-; CHECK-NEXT:    and z1.d, z1.d, #0x1
-; CHECK-NEXT:    cmpne p0.d, p0/z, z1.d, #0
-; CHECK-NEXT:    compact z0.d, p0, z0.d
-; CHECK-NEXT:    xtn v0.2s, v0.2d
+; CHECK-NEXT:    shl v1.2s, v1.2s, #31
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    cmpne p0.s, p0/z, z1.s, #0
+; CHECK-NEXT:    compact z0.s, p0, z0.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
     %out = call <2 x i16> @llvm.experimental.vector.compress(<2 x i16> %vec, <2 x i1> %mask, <2 x i16> poison)
     ret <2 x i16> %out

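Note: as the comment in the lowering above says, compact zeroes the
lanes past the packed elements, so an all-zeros passthru needs no
select. A short sketch of that fast path (hand-written, for
illustration only):

  declare <vscale x 4 x i32> @llvm.experimental.vector.compress.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)

  define <vscale x 4 x i32> @compress_zero_passthru(<vscale x 4 x i32> %vec, <vscale x 4 x i1> %mask) {
    ; with a zeroinitializer passthru the compact result is returned
    ; directly; the cntp/lane-mask/vselect sequence is skipped entirely
    %out = call <vscale x 4 x i32> @llvm.experimental.vector.compress.nxv4i32(<vscale x 4 x i32> %vec, <vscale x 4 x i1> %mask, <vscale x 4 x i32> zeroinitializer)
    ret <vscale x 4 x i32> %out
  }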
>From 0f533172c9818b8d408e5e8cff1e7312a128d878 Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell <benjamin.maxwell at arm.com>
Date: Tue, 9 Dec 2025 12:31:23 +0000
Subject: [PATCH 2/4] Fixups

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 7767d06190c7f..ce67e260720ef 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1999,8 +1999,8 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
 
     // If we have SVE, we can use SVE logic for legal NEON vectors in the lowest
     // bits of the SVE register.
-    for (auto VT : {MVT::v2i32, MVT::v2i64, MVT::v2f32, MVT::v2f64, MVT::v4i16,
-                    MVT::v4i32, MVT::v4f32})
+    for (auto VT : {MVT::v2i32, MVT::v2i64, MVT::v2f32, MVT::v2f64, MVT::v4i32,
+                    MVT::v4f32})
       setOperationAction(ISD::VECTOR_COMPRESS, VT, Custom);
 
     for (auto VT : {MVT::nxv2i8, MVT::nxv2i16, MVT::nxv2i32, MVT::nxv2i64,
@@ -7480,6 +7480,7 @@ SDValue AArch64TargetLowering::LowerVECTOR_COMPRESS(SDValue Op,
 
   // Get legal type for compact instruction
   EVT ContainerVT = getSVEContainerType(VT);
+  assert(ContainerVT == MVT::nxv4i32 || ContainerVT == MVT::nxv2i64);
 
   // Convert to a packed 32/64-bit SVE vector of the same element count as VT.
   Vec = convertToSVEContainerType(DL, Vec, ContainerVT, DAG);

>From 6601fcbc855c3e99e339ef033d5ed6c5658b58a9 Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell <benjamin.maxwell at arm.com>
Date: Fri, 12 Dec 2025 14:16:35 +0000
Subject: [PATCH 3/4] Fixups

---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 72 ++++---------------
 llvm/lib/Target/AArch64/AArch64ISelLowering.h |  4 +-
 llvm/lib/Target/AArch64/SVEInstrFormats.td    |  1 +
 3 files changed, 18 insertions(+), 59 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index ce67e260720ef..de491ada10d3a 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1994,13 +1994,13 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
 
     // We can lower types that have <vscale x {2|4}> elements to compact.
     for (auto VT :
-         {MVT::nxv2i64, MVT::nxv2f32, MVT::nxv2f64, MVT::nxv4i32, MVT::nxv4f32})
+         {MVT::nxv4i32, MVT::nxv2i64, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv2f64})
       setOperationAction(ISD::VECTOR_COMPRESS, VT, Custom);
 
     // If we have SVE, we can use SVE logic for legal NEON vectors in the lowest
     // bits of the SVE register.
-    for (auto VT : {MVT::v2i32, MVT::v2i64, MVT::v2f32, MVT::v2f64, MVT::v4i32,
-                    MVT::v4f32})
+    for (auto VT : {MVT::v2i32, MVT::v4i32, MVT::v2i64, MVT::v2f32, MVT::v4f32,
+                    MVT::v2f64})
       setOperationAction(ISD::VECTOR_COMPRESS, VT, Custom);
 
     for (auto VT : {MVT::nxv2i8, MVT::nxv2i16, MVT::nxv2i32, MVT::nxv2i64,
@@ -7414,37 +7414,6 @@ SDValue AArch64TargetLowering::LowerLOAD(SDValue Op,
   return SDValue();
 }
 
-// Convert to ContainerVT with no-op casts where possible.
-static SDValue convertToSVEContainerType(SDLoc DL, SDValue Vec, EVT ContainerVT,
-                                         SelectionDAG &DAG) {
-  EVT VecVT = Vec.getValueType();
-  if (VecVT.isFloatingPoint()) {
-    // Use no-op casts for floating-point types.
-    EVT PackedVT = getPackedSVEVectorVT(VecVT.getScalarType());
-    Vec = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, PackedVT, Vec);
-    Vec = DAG.getNode(AArch64ISD::NVCAST, DL, ContainerVT, Vec);
-  } else {
-    // Extend integers (may not be a no-op).
-    Vec = DAG.getNode(ISD::ANY_EXTEND, DL, ContainerVT, Vec);
-  }
-  return Vec;
-}
-
-// Convert to VecVT with no-op casts where possible.
-static SDValue convertFromSVEContainerType(SDLoc DL, SDValue Vec, EVT VecVT,
-                                           SelectionDAG &DAG) {
-  if (VecVT.isFloatingPoint()) {
-    // Use no-op casts for floating-point types.
-    EVT PackedVT = getPackedSVEVectorVT(VecVT.getScalarType());
-    Vec = DAG.getNode(AArch64ISD::NVCAST, DL, PackedVT, Vec);
-    Vec = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, VecVT, Vec);
-  } else {
-    // Truncate integers (may not be a no-op).
-    Vec = DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
-  }
-  return Vec;
-}
-
 SDValue AArch64TargetLowering::LowerFixedLengthVectorCompressToSVE(
     SDValue Op, SelectionDAG &DAG) const {
   SDLoc DL(Op);
@@ -7476,38 +7445,27 @@ SDValue AArch64TargetLowering::LowerVECTOR_COMPRESS(SDValue Op,
   SDValue Passthru = Op.getOperand(2);
   EVT MaskVT = Mask.getValueType();
 
-  assert(VT.isVector() && "Input to VECTOR_COMPRESS must be vector.");
-
-  // Get legal type for compact instruction
-  EVT ContainerVT = getSVEContainerType(VT);
-  assert(ContainerVT == MVT::nxv4i32 || ContainerVT == MVT::nxv2i64);
-
-  // Convert to a packed 32/64-bit SVE vector of the same element count as VT.
-  Vec = convertToSVEContainerType(DL, Vec, ContainerVT, DAG);
-
   SDValue Compressed = DAG.getNode(
       ISD::INTRINSIC_WO_CHAIN, DL, Vec.getValueType(),
       DAG.getTargetConstant(Intrinsic::aarch64_sve_compact, DL, MVT::i64), Mask,
       Vec);
 
   // compact fills with 0s, so if our passthru is all 0s, do nothing here.
-  if (!Passthru.isUndef() &&
-      !ISD::isConstantSplatVectorAllZeros(Passthru.getNode())) {
-    SDValue Offset = DAG.getNode(
-        ISD::INTRINSIC_WO_CHAIN, DL, MVT::i64,
-        DAG.getTargetConstant(Intrinsic::aarch64_sve_cntp, DL, MVT::i64), Mask,
-        Mask);
+  if (Passthru.isUndef() ||
+      ISD::isConstantSplatVectorAllZeros(Passthru.getNode()))
+    return Compressed;
 
-    SDValue IndexMask = DAG.getNode(
-        ISD::INTRINSIC_WO_CHAIN, DL, MaskVT,
-        DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo, DL, MVT::i64),
-        DAG.getConstant(0, DL, MVT::i64), Offset);
+  SDValue Offset = DAG.getNode(
+      ISD::INTRINSIC_WO_CHAIN, DL, MVT::i64,
+      DAG.getTargetConstant(Intrinsic::aarch64_sve_cntp, DL, MVT::i64), Mask,
+      Mask);
 
-    Compressed =
-        DAG.getNode(ISD::VSELECT, DL, VT, IndexMask, Compressed, Passthru);
-  }
+  SDValue IndexMask = DAG.getNode(
+      ISD::INTRINSIC_WO_CHAIN, DL, MaskVT,
+      DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo, DL, MVT::i64),
+      DAG.getConstant(0, DL, MVT::i64), Offset);
 
-  return convertFromSVEContainerType(DL, Compressed, VT, DAG);
+  return DAG.getNode(ISD::VSELECT, DL, VT, IndexMask, Compressed, Passthru);
 }
 
 // Generate SUBS and CSEL for integer abs.
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index deddaa0e63ade..cdcfe3cd6fd22 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -623,8 +623,6 @@ class AArch64TargetLowering : public TargetLowering {
   SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) const;
 
   SDValue LowerVECTOR_COMPRESS(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerFixedLengthVectorCompressToSVE(SDValue Op,
-                                              SelectionDAG &DAG) const;
 
   SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
@@ -798,6 +796,8 @@ class AArch64TargetLowering : public TargetLowering {
   SDValue LowerFixedLengthVECTOR_SHUFFLEToSVE(SDValue Op,
                                               SelectionDAG &DAG) const;
   SDValue LowerFixedLengthBuildVectorToSVE(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerFixedLengthVectorCompressToSVE(SDValue Op,
+                                              SelectionDAG &DAG) const;
 
   SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                         SmallVectorImpl<SDNode *> &Created) const override;
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index 1f031f91f70e7..c7221cc31fb6d 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -7983,6 +7983,7 @@ multiclass sve_int_perm_compact_sd<string asm, SDPatternOperator op> {
   def : SVE_2_Op_Pat<nxv4f32, op, nxv4i1, nxv4f32, !cast<Instruction>(NAME # _S)>;
   def : SVE_2_Op_Pat<nxv2i64, op, nxv2i1, nxv2i64, !cast<Instruction>(NAME # _D)>;
   def : SVE_2_Op_Pat<nxv2f64, op, nxv2i1, nxv2f64, !cast<Instruction>(NAME # _D)>;
+  def : SVE_2_Op_Pat<nxv2f32, op, nxv2i1, nxv2f32, !cast<Instruction>(NAME # _D)>;
 }
 
 multiclass sve_int_perm_compact_bh<string asm, SDPatternOperator op> {

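The new `SVE_2_Op_Pat` above should let an nxv2f32 compress select the
.d form of compact directly (keeping each float in the low half of a
64-bit element) instead of going through reinterpret casts. A small
example, assuming that pattern:

  declare <vscale x 2 x float> @llvm.experimental.vector.compress.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, <vscale x 2 x float>)

  define <vscale x 2 x float> @compress_nxv2f32(<vscale x 2 x float> %vec, <vscale x 2 x i1> %mask) {
    ; expected to select "compact z0.d, p0, z0.d" via the nxv2f32 pattern
    %out = call <vscale x 2 x float> @llvm.experimental.vector.compress.nxv2f32(<vscale x 2 x float> %vec, <vscale x 2 x i1> %mask, <vscale x 2 x float> poison)
    ret <vscale x 2 x float> %out
  }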
>From f2e239df02af6b6a0aa401174c3c34a839fbf077 Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell <benjamin.maxwell at arm.com>
Date: Fri, 12 Dec 2025 14:24:47 +0000
Subject: [PATCH 4/4] Fixups

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 12 ++++++------
 llvm/lib/Target/AArch64/SVEInstrFormats.td      |  6 +++---
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index de491ada10d3a..d3e8844551910 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -7455,17 +7455,17 @@ SDValue AArch64TargetLowering::LowerVECTOR_COMPRESS(SDValue Op,
       ISD::isConstantSplatVectorAllZeros(Passthru.getNode()))
     return Compressed;
 
-  SDValue Offset = DAG.getNode(
+  SDValue CntActive = DAG.getNode(
       ISD::INTRINSIC_WO_CHAIN, DL, MVT::i64,
       DAG.getTargetConstant(Intrinsic::aarch64_sve_cntp, DL, MVT::i64), Mask,
       Mask);
 
-  SDValue IndexMask = DAG.getNode(
-      ISD::INTRINSIC_WO_CHAIN, DL, MaskVT,
-      DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo, DL, MVT::i64),
-      DAG.getConstant(0, DL, MVT::i64), Offset);
+  SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
+  SDValue CompressedMask =
+      DAG.getNode(ISD::GET_ACTIVE_LANE_MASK, DL, MaskVT, Zero, CntActive);
 
-  return DAG.getNode(ISD::VSELECT, DL, VT, IndexMask, Compressed, Passthru);
+  return DAG.getNode(ISD::VSELECT, DL, VT, CompressedMask, Compressed,
+                     Passthru);
 }
 
 // Generate SUBS and CSEL for integer abs.
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index c7221cc31fb6d..c3ebc8c2ae222 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -9508,7 +9508,7 @@ multiclass sve_fp8_dot<bit bf, ZPRRegOp dstrc, string asm, ValueType vt,
     let mayLoad  = 1;
     let mayStore = 0;
   }
-  
+
   def : SVE_3_Op_Pat<vt, op, vt, nxv16i8, nxv16i8, !cast<Instruction>(NAME)>;
 }
 
@@ -10994,7 +10994,7 @@ class sve2_fp8_cvt_single<bit L, bits<2> opc, string mnemonic,
 
 multiclass sve2_fp8_cvt_single<bit L, bits<2> opc, string mnemonic, ValueType vtd, SDPatternOperator op> {
   def _BtoH : sve2_fp8_cvt_single<L, opc, mnemonic, ZPR16, ZPR8>;
-  
+
   def : SVE_1_Op_Pat<vtd, op, nxv16i8, !cast<Instruction>(NAME # _BtoH)>;
 }
 
@@ -11039,7 +11039,7 @@ class sve2_fp8_down_cvt_single_top<bits<2> opc, string mnemonic, RegisterOperand
   let Constraints = "$Zd = $_Zd";
   let DestructiveInstType = DestructiveOther;
   let ElementSize         = ZPR8.ElementSize;
-  
+
   let Uses     = [FPMR, FPCR];
   let mayLoad  = 1;
   let mayStore = 0;

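For a non-trivial passthru, the final lowering is roughly equivalent
to the IR below, with the target-independent `ISD::GET_ACTIVE_LANE_MASK`
node standing in for the earlier whilelo intrinsic call (a hand-written
sketch, not compiler output):

  declare <vscale x 4 x i32> @llvm.aarch64.sve.compact.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
  declare i64 @llvm.aarch64.sve.cntp.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>)
  declare <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64, i64)

  define <vscale x 4 x i32> @compress_passthru(<vscale x 4 x i32> %vec, <vscale x 4 x i1> %mask, <vscale x 4 x i32> %passthru) {
    ; pack the active elements to the front (inactive lanes become zero)
    %compacted = call <vscale x 4 x i32> @llvm.aarch64.sve.compact.nxv4i32(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %vec)
    ; count the active lanes...
    %n = call i64 @llvm.aarch64.sve.cntp.nxv4i1(<vscale x 4 x i1> %mask, <vscale x 4 x i1> %mask)
    ; ...and build a mask covering the first %n lanes
    %keep = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 %n)
    ; keep the compressed elements, take the passthru everywhere else
    %out = select <vscale x 4 x i1> %keep, <vscale x 4 x i32> %compacted, <vscale x 4 x i32> %passthru
    ret <vscale x 4 x i32> %out
  }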

