[llvm] e5827e7 - [AArch64] Protect against scalable vectors in performUADDVAddCombine.

David Green via llvm-commits llvm-commits at lists.llvm.org
Sun Oct 12 00:53:30 PDT 2025


Author: David Green
Date: 2025-10-12T08:53:25+01:00
New Revision: e5827e7b90d8edb5eb98e39b37a7d59f71074be5

URL: https://github.com/llvm/llvm-project/commit/e5827e7b90d8edb5eb98e39b37a7d59f71074be5
DIFF: https://github.com/llvm/llvm-project/commit/e5827e7b90d8edb5eb98e39b37a7d59f71074be5.diff

LOG: [AArch64] Protect against scalable vectors in performUADDVAddCombine.

Fixes #162983
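
For context (an elaboration on the log, not part of it): with SVE available, the sdiv in the new test below is legalized through scalable vector types, so the two EXTRACT_SUBVECTOR nodes feeding the final add can extract from a scalable source. The pre-fix combine went on to reason about that source's fixed element count, which is not valid for scalable vectors. A rough reproduction sketch (the exact llc invocation is an assumption; the committed test enables SVE2 via a function attribute instead of -mattr):

    llc -mtriple=aarch64 -o - reduced.ll

where reduced.ll contains the body of the extract_scalable test added below.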

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/test/CodeGen/AArch64/vecreduce-add.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index fbce3b0efbb7c..69651168f8539 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -19093,7 +19093,8 @@ static SDValue performUADDVAddCombine(SDValue A, SelectionDAG &DAG) {
     SDValue Ext1 = Op1.getOperand(0);
     if (Ext0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
         Ext1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
-        Ext0.getOperand(0) != Ext1.getOperand(0))
+        Ext0.getOperand(0) != Ext1.getOperand(0) ||
+        Ext0.getOperand(0).getValueType().isScalableVector())
       return SDValue();
     // Check that the type is twice the add types, and the extracts are from
     // upper/lower parts of the same source.
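
The new bail-out matters because the size check described in the comment above reasons about a fixed element count. A minimal sketch of the idea, with a hypothetical helper name that is not in the LLVM sources (EVT::isScalableVector and EVT::getVectorNumElements are the real APIs; the latter is only meaningful for fixed-width vectors and may assert otherwise):

    #include "llvm/CodeGen/ValueTypes.h"
    using namespace llvm;

    // Hypothetical distillation of the guarded check: refuse scalable
    // sources before doing any fixed-element-count arithmetic.
    static bool isTwiceTheAddType(EVT SrcVT, EVT AddVT) {
      if (SrcVT.isScalableVector())
        return false; // mirrors the new bail-out in performUADDVAddCombine
      return SrcVT.getVectorNumElements() == 2 * AddVT.getVectorNumElements();
    }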

diff --git a/llvm/test/CodeGen/AArch64/vecreduce-add.ll b/llvm/test/CodeGen/AArch64/vecreduce-add.ll
index ee04e41d55046..2d0df562b9a4b 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-add.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-add.ll
@@ -4775,6 +4775,39 @@ entry:
   ret i32 %z5
 }
 
+define i64 @extract_scalable(<2 x i32> %0) "target-features"="+sve2" {
+; CHECK-SD-LABEL: extract_scalable:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    movi v1.2s, #1
+; CHECK-SD-NEXT:    ptrue p0.s, vl2
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-SD-NEXT:    sdivr z0.s, p0/m, z0.s, z1.s
+; CHECK-SD-NEXT:    saddl v0.2d, v0.2s, v0.2s
+; CHECK-SD-NEXT:    addp d0, v0.2d
+; CHECK-SD-NEXT:    fmov x0, d0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: extract_scalable:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    fmov w9, s0
+; CHECK-GI-NEXT:    mov w8, #1 // =0x1
+; CHECK-GI-NEXT:    mov w10, v0.s[1]
+; CHECK-GI-NEXT:    sdiv w9, w8, w9
+; CHECK-GI-NEXT:    sdiv w8, w8, w10
+; CHECK-GI-NEXT:    fmov s0, w9
+; CHECK-GI-NEXT:    mov v0.s[1], w8
+; CHECK-GI-NEXT:    saddl v0.2d, v0.2s, v0.2s
+; CHECK-GI-NEXT:    addp d0, v0.2d
+; CHECK-GI-NEXT:    fmov x0, d0
+; CHECK-GI-NEXT:    ret
+  %2 = sdiv <2 x i32> splat (i32 1), %0
+  %3 = sext <2 x i32> %2 to <2 x i64>
+  %4 = add <2 x i64> %3, %3
+  %5 = tail call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %4)
+  ret i64 %5
+}
+
 declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1 immarg) #1
 declare i16 @llvm.vector.reduce.add.v32i16(<32 x i16>)
 declare i16 @llvm.vector.reduce.add.v24i16(<24 x i16>)
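
The CHECK-SD and CHECK-GI prefixes come from the file's existing RUN lines, which are outside this hunk; they cover the default SelectionDAG pipeline and GlobalISel respectively. They have roughly this shape (the exact triple and flags are assumptions):

    ; RUN: llc -mtriple=aarch64 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
    ; RUN: llc -mtriple=aarch64 -global-isel < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI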


        

