[llvm] [AArch64][SVE] Rework VECTOR_COMPRESS lowering (PR #171162)
Benjamin Maxwell via llvm-commits
llvm-commits at lists.llvm.org
Tue Dec 9 04:33:33 PST 2025
https://github.com/MacDue updated https://github.com/llvm/llvm-project/pull/171162
>From 82f8d105369acde820035fa26e37b6c03153bce4 Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell <benjamin.maxwell at arm.com>
Date: Mon, 8 Dec 2025 17:35:19 +0000
Subject: [PATCH 1/2] [AArch64][SVE] Rework VECTOR_COMPRESS lowering
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This removes the use of `LowerVECTOR_COMPRESS` in `ReplaceNodeResults`
(which was used to promote illegal integer VTs) and instead marks only
the legal VTs as "Custom", allowing standard type legalization to handle the rest.
This patch also simplifies the lowering by using the existing
fixed-length <-> SVE conversion helpers.
This was intended to be an NFC, but it appears to have caused some minor
code-gen changes/improvements.
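
For context, a minimal IR sketch of the kind of case affected (the function
name is illustrative; the intrinsic spelling matches the existing tests in
sve-vector-compress.ll): a compress on an illegal integer VT such as
<vscale x 4 x i16> is no longer handled via `ReplaceNodeResults` and is
instead promoted by standard type legalization (e.g. to <vscale x 4 x i32>)
before the Custom SVE lowering emits the compact.

define <vscale x 4 x i16> @compress_nxv4i16(<vscale x 4 x i16> %vec, <vscale x 4 x i1> %mask) {
  %out = call <vscale x 4 x i16> @llvm.experimental.vector.compress(<vscale x 4 x i16> %vec, <vscale x 4 x i1> %mask, <vscale x 4 x i16> poison)
  ret <vscale x 4 x i16> %out
}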
---
.../Target/AArch64/AArch64ISelLowering.cpp | 115 ++++++------------
llvm/lib/Target/AArch64/AArch64ISelLowering.h | 2 +
.../CodeGen/AArch64/sve-vector-compress.ll | 39 +++---
3 files changed, 57 insertions(+), 99 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index d1441a744eee8..7767d06190c7f 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1993,22 +1993,25 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
// We can lower types that have <vscale x {2|4}> elements to compact.
+ for (auto VT :
+ {MVT::nxv2i64, MVT::nxv2f32, MVT::nxv2f64, MVT::nxv4i32, MVT::nxv4f32})
+ setOperationAction(ISD::VECTOR_COMPRESS, VT, Custom);
+
+ // If we have SVE, we can use SVE logic for legal NEON vectors in the lowest
+ // bits of the SVE register.
+ for (auto VT : {MVT::v2i32, MVT::v2i64, MVT::v2f32, MVT::v2f64, MVT::v4i16,
+ MVT::v4i32, MVT::v4f32})
+ setOperationAction(ISD::VECTOR_COMPRESS, VT, Custom);
+
for (auto VT : {MVT::nxv2i8, MVT::nxv2i16, MVT::nxv2i32, MVT::nxv2i64,
MVT::nxv2f32, MVT::nxv2f64, MVT::nxv4i8, MVT::nxv4i16,
MVT::nxv4i32, MVT::nxv4f32}) {
- setOperationAction(ISD::VECTOR_COMPRESS, VT, Custom);
// Use a custom lowering for masked stores that could be a supported
// compressing store. Note: These types still use the normal (Legal)
// lowering for non-compressing masked stores.
setOperationAction(ISD::MSTORE, VT, Custom);
}
- // If we have SVE, we can use SVE logic for legal (or smaller than legal)
- // NEON vectors in the lowest bits of the SVE register.
- for (auto VT : {MVT::v2i8, MVT::v2i16, MVT::v2i32, MVT::v2i64, MVT::v2f32,
- MVT::v2f64, MVT::v4i8, MVT::v4i16, MVT::v4i32, MVT::v4f32})
- setOperationAction(ISD::VECTOR_COMPRESS, VT, Custom);
-
// Histcnt is SVE2 only
if (Subtarget->hasSVE2()) {
setOperationAction(ISD::EXPERIMENTAL_VECTOR_HISTOGRAM, MVT::nxv4i32,
@@ -7442,60 +7445,43 @@ static SDValue convertFromSVEContainerType(SDLoc DL, SDValue Vec, EVT VecVT,
return Vec;
}
-SDValue AArch64TargetLowering::LowerVECTOR_COMPRESS(SDValue Op,
- SelectionDAG &DAG) const {
+SDValue AArch64TargetLowering::LowerFixedLengthVectorCompressToSVE(
+ SDValue Op, SelectionDAG &DAG) const {
SDLoc DL(Op);
- SDValue Vec = Op.getOperand(0);
- SDValue Mask = Op.getOperand(1);
- SDValue Passthru = Op.getOperand(2);
- EVT VecVT = Vec.getValueType();
- EVT MaskVT = Mask.getValueType();
- EVT ElmtVT = VecVT.getVectorElementType();
- const bool IsFixedLength = VecVT.isFixedLengthVector();
- const bool HasPassthru = !Passthru.isUndef();
- unsigned MinElmts = VecVT.getVectorElementCount().getKnownMinValue();
- EVT FixedVecVT = MVT::getVectorVT(ElmtVT.getSimpleVT(), MinElmts);
+ EVT VT = Op.getValueType();
- assert(VecVT.isVector() && "Input to VECTOR_COMPRESS must be vector.");
+ EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
+ SDValue Vec = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
+ SDValue Mask = convertFixedMaskToScalableVector(Op.getOperand(1), DAG);
+ SDValue Passthru =
+ convertToScalableVector(DAG, ContainerVT, Op.getOperand(2));
- if (!Subtarget->isSVEAvailable())
- return SDValue();
+ SDValue Result =
+ DAG.getNode(ISD::VECTOR_COMPRESS, DL, ContainerVT, Vec, Mask, Passthru);
+ return convertFromScalableVector(DAG, VT, Result);
+}
- if (IsFixedLength && VecVT.getSizeInBits().getFixedValue() > 128)
- return SDValue();
+SDValue AArch64TargetLowering::LowerVECTOR_COMPRESS(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ EVT VT = Op.getValueType();
+ if (VT.isFixedLengthVector())
+ return LowerFixedLengthVectorCompressToSVE(Op, DAG);
- // Only <vscale x {4|2} x {i32|i64}> supported for compact.
- if (MinElmts != 2 && MinElmts != 4)
+ if (!Subtarget->isSVEAvailable())
return SDValue();
- // We can use the SVE register containing the NEON vector in its lowest bits.
- if (IsFixedLength) {
- EVT ScalableVecVT =
- MVT::getScalableVectorVT(ElmtVT.getSimpleVT(), MinElmts);
- EVT ScalableMaskVT = MVT::getScalableVectorVT(
- MaskVT.getVectorElementType().getSimpleVT(), MinElmts);
-
- Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ScalableVecVT,
- DAG.getUNDEF(ScalableVecVT), Vec,
- DAG.getConstant(0, DL, MVT::i64));
- Mask = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ScalableMaskVT,
- DAG.getUNDEF(ScalableMaskVT), Mask,
- DAG.getConstant(0, DL, MVT::i64));
- Mask = DAG.getNode(ISD::TRUNCATE, DL,
- ScalableMaskVT.changeVectorElementType(MVT::i1), Mask);
- Passthru = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ScalableVecVT,
- DAG.getUNDEF(ScalableVecVT), Passthru,
- DAG.getConstant(0, DL, MVT::i64));
+ SDValue Vec = Op.getOperand(0);
+ SDValue Mask = Op.getOperand(1);
+ SDValue Passthru = Op.getOperand(2);
+ EVT MaskVT = Mask.getValueType();
- VecVT = Vec.getValueType();
- MaskVT = Mask.getValueType();
- }
+ assert(VT.isVector() && "Input to VECTOR_COMPRESS must be vector.");
// Get legal type for compact instruction
- EVT ContainerVT = getSVEContainerType(VecVT);
+ EVT ContainerVT = getSVEContainerType(VT);
- // Convert to 32 or 64 bits for smaller types, as these are the only supported
- // sizes for compact.
+ // Convert to a packed 32/64-bit SVE vector of the same element count as VT.
Vec = convertToSVEContainerType(DL, Vec, ContainerVT, DAG);
SDValue Compressed = DAG.getNode(
@@ -7504,7 +7490,8 @@ SDValue AArch64TargetLowering::LowerVECTOR_COMPRESS(SDValue Op,
Vec);
// compact fills with 0s, so if our passthru is all 0s, do nothing here.
- if (HasPassthru && !ISD::isConstantSplatVectorAllZeros(Passthru.getNode())) {
+ if (!Passthru.isUndef() &&
+ !ISD::isConstantSplatVectorAllZeros(Passthru.getNode())) {
SDValue Offset = DAG.getNode(
ISD::INTRINSIC_WO_CHAIN, DL, MVT::i64,
DAG.getTargetConstant(Intrinsic::aarch64_sve_cntp, DL, MVT::i64), Mask,
@@ -7516,28 +7503,10 @@ SDValue AArch64TargetLowering::LowerVECTOR_COMPRESS(SDValue Op,
DAG.getConstant(0, DL, MVT::i64), Offset);
Compressed =
- DAG.getNode(ISD::VSELECT, DL, VecVT, IndexMask, Compressed, Passthru);
- }
-
- // If we changed the element type before, we need to convert it back.
- if (ElmtVT.isFloatingPoint())
- Compressed = convertFromSVEContainerType(DL, Compressed, VecVT, DAG);
-
- // Extracting from a legal SVE type before truncating produces better code.
- if (IsFixedLength) {
- EVT FixedSubVector = VecVT.isInteger()
- ? FixedVecVT.changeVectorElementType(
- ContainerVT.getVectorElementType())
- : FixedVecVT;
- Compressed = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, FixedSubVector,
- Compressed, DAG.getConstant(0, DL, MVT::i64));
- VecVT = FixedVecVT;
+ DAG.getNode(ISD::VSELECT, DL, VT, IndexMask, Compressed, Passthru);
}
- if (VecVT.isInteger())
- Compressed = DAG.getNode(ISD::TRUNCATE, DL, VecVT, Compressed);
-
- return Compressed;
+ return convertFromSVEContainerType(DL, Compressed, VT, DAG);
}
// Generate SUBS and CSEL for integer abs.
@@ -29320,10 +29289,6 @@ void AArch64TargetLowering::ReplaceNodeResults(
case ISD::VECREDUCE_UMIN:
Results.push_back(LowerVECREDUCE(SDValue(N, 0), DAG));
return;
- case ISD::VECTOR_COMPRESS:
- if (SDValue Res = LowerVECTOR_COMPRESS(SDValue(N, 0), DAG))
- Results.push_back(Res);
- return;
case ISD::ADD:
case ISD::FADD:
ReplaceAddWithADDP(N, Results, DAG, Subtarget);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index e8c026d989eb8..deddaa0e63ade 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -623,6 +623,8 @@ class AArch64TargetLowering : public TargetLowering {
SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVECTOR_COMPRESS(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFixedLengthVectorCompressToSVE(SDValue Op,
+ SelectionDAG &DAG) const;
SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/test/CodeGen/AArch64/sve-vector-compress.ll b/llvm/test/CodeGen/AArch64/sve-vector-compress.ll
index f700dee0fb2e4..559c842a60791 100644
--- a/llvm/test/CodeGen/AArch64/sve-vector-compress.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vector-compress.ll
@@ -194,11 +194,9 @@ define <4 x i32> @test_compress_v4i32_with_sve(<4 x i32> %vec, <4 x i1> %mask) {
; CHECK-LABEL: test_compress_v4i32_with_sve:
; CHECK: // %bb.0:
; CHECK-NEXT: ushll v1.4s, v1.4h, #0
-; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: shl v1.4s, v1.4s, #31
-; CHECK-NEXT: cmlt v1.4s, v1.4s, #0
-; CHECK-NEXT: and z1.s, z1.s, #0x1
; CHECK-NEXT: cmpne p0.s, p0/z, z1.s, #0
; CHECK-NEXT: compact z0.s, p0, z0.s
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
@@ -212,14 +210,12 @@ define <1 x i32> @test_compress_v1i32_with_sve(<1 x i32> %vec, <1 x i1> %mask) {
; CHECK: // %bb.0:
; CHECK-NEXT: movi v1.2d, #0000000000000000
; CHECK-NEXT: sbfx w8, w0, #0, #1
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: ushll v0.2d, v0.2s, #0
+; CHECK-NEXT: ptrue p0.s, vl2
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
; CHECK-NEXT: mov v1.s[0], w8
-; CHECK-NEXT: ushll v1.2d, v1.2s, #0
-; CHECK-NEXT: and z1.d, z1.d, #0x1
-; CHECK-NEXT: cmpne p0.d, p0/z, z1.d, #0
-; CHECK-NEXT: compact z0.d, p0, z0.d
-; CHECK-NEXT: xtn v0.2s, v0.2d
+; CHECK-NEXT: cmpne p0.s, p0/z, z1.s, #0
+; CHECK-NEXT: compact z0.s, p0, z0.s
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: ret
%out = call <1 x i32> @llvm.experimental.vector.compress(<1 x i32> %vec, <1 x i1> %mask, <1 x i32> poison)
ret <1 x i32> %out
@@ -231,7 +227,7 @@ define <4 x double> @test_compress_v4f64_with_sve(<4 x double> %vec, <4 x i1> %m
; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: ushll v2.4s, v2.4h, #0
-; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT: ushll v3.2d, v2.2s, #0
@@ -242,14 +238,10 @@ define <4 x double> @test_compress_v4f64_with_sve(<4 x double> %vec, <4 x i1> %m
; CHECK-NEXT: lsr x9, x8, #32
; CHECK-NEXT: eor w8, w8, w9
; CHECK-NEXT: mov x9, sp
-; CHECK-NEXT: cmlt v3.2d, v3.2d, #0
-; CHECK-NEXT: cmlt v4.2d, v4.2d, #0
-; CHECK-NEXT: and x8, x8, #0x3
-; CHECK-NEXT: lsl x8, x8, #3
-; CHECK-NEXT: and z3.d, z3.d, #0x1
-; CHECK-NEXT: and z4.d, z4.d, #0x1
; CHECK-NEXT: cmpne p1.d, p0/z, z3.d, #0
; CHECK-NEXT: cmpne p0.d, p0/z, z4.d, #0
+; CHECK-NEXT: and x8, x8, #0x3
+; CHECK-NEXT: lsl x8, x8, #3
; CHECK-NEXT: compact z0.d, p1, z0.d
; CHECK-NEXT: compact z1.d, p0, z1.d
; CHECK-NEXT: str q0, [sp]
@@ -263,13 +255,12 @@ define <4 x double> @test_compress_v4f64_with_sve(<4 x double> %vec, <4 x i1> %m
define <2 x i16> @test_compress_v2i16_with_sve(<2 x i16> %vec, <2 x i1> %mask) {
; CHECK-LABEL: test_compress_v2i16_with_sve:
; CHECK: // %bb.0:
-; CHECK-NEXT: ushll v1.2d, v1.2s, #0
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: ushll v0.2d, v0.2s, #0
-; CHECK-NEXT: and z1.d, z1.d, #0x1
-; CHECK-NEXT: cmpne p0.d, p0/z, z1.d, #0
-; CHECK-NEXT: compact z0.d, p0, z0.d
-; CHECK-NEXT: xtn v0.2s, v0.2d
+; CHECK-NEXT: shl v1.2s, v1.2s, #31
+; CHECK-NEXT: ptrue p0.s, vl2
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT: cmpne p0.s, p0/z, z1.s, #0
+; CHECK-NEXT: compact z0.s, p0, z0.s
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: ret
%out = call <2 x i16> @llvm.experimental.vector.compress(<2 x i16> %vec, <2 x i1> %mask, <2 x i16> poison)
ret <2 x i16> %out
>From 0f533172c9818b8d408e5e8cff1e7312a128d878 Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell <benjamin.maxwell at arm.com>
Date: Tue, 9 Dec 2025 12:31:23 +0000
Subject: [PATCH 2/2] Fixups
---
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 7767d06190c7f..ce67e260720ef 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1999,8 +1999,8 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
// If we have SVE, we can use SVE logic for legal NEON vectors in the lowest
// bits of the SVE register.
- for (auto VT : {MVT::v2i32, MVT::v2i64, MVT::v2f32, MVT::v2f64, MVT::v4i16,
- MVT::v4i32, MVT::v4f32})
+ for (auto VT : {MVT::v2i32, MVT::v2i64, MVT::v2f32, MVT::v2f64, MVT::v4i32,
+ MVT::v4f32})
setOperationAction(ISD::VECTOR_COMPRESS, VT, Custom);
for (auto VT : {MVT::nxv2i8, MVT::nxv2i16, MVT::nxv2i32, MVT::nxv2i64,
@@ -7480,6 +7480,7 @@ SDValue AArch64TargetLowering::LowerVECTOR_COMPRESS(SDValue Op,
// Get legal type for compact instruction
EVT ContainerVT = getSVEContainerType(VT);
+ assert(ContainerVT == MVT::nxv4i32 || ContainerVT == MVT::nxv2i64);
// Convert to a packed 32/64-bit SVE vector of the same element count as VT.
Vec = convertToSVEContainerType(DL, Vec, ContainerVT, DAG);
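For readers following the fixed-length path: a minimal reproducer in the style
of the test file above (illustrative only; function name and exact CHECK lines
are not taken from the tests) that now routes through the new
LowerFixedLengthVectorCompressToSVE helper via the legal-NEON-VT Custom hook:

define <2 x double> @compress_v2f64(<2 x double> %vec, <2 x i1> %mask) {
  %out = call <2 x double> @llvm.experimental.vector.compress(<2 x double> %vec, <2 x i1> %mask, <2 x double> poison)
  ret <2 x double> %out
}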