[llvm] [AArch64][SVE] Improve code quality of vector unsigned/signed add reductions. (PR #97339)

Dinar Temirbulatov via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 17 06:22:05 PDT 2024


https://github.com/dtemirbulatov updated https://github.com/llvm/llvm-project/pull/97339

From 3a93f70b5a020b121b0bab7f5cd91ae4260540e0 Mon Sep 17 00:00:00 2001
From: Dinar Temirbulatov <Dinar.Temirbulatov at arm.com>
Date: Mon, 1 Jul 2024 18:32:32 +0000
Subject: [PATCH 1/5] [AArch64][SVE] Improve code quality of vector unsigned
 add reduction.

For SVE we don't have to zero-extend and sum parts of the result before
issuing the UADDV instruction. This change also handles vector types
wider than a legal type more efficiently, and lowers fixed-length vector
types to SVE's UADDV where appropriate.
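
For example (a sketch based on the tests updated below; exact register
allocation may differ), a zero-extended reduction such as:

  %ext = zext <vscale x 16 x i8> %a to <vscale x 16 x i32>
  %red = call i32 @llvm.vector.reduce.add.nxv16i32(<vscale x 16 x i32> %ext)

previously lowered through a chain of uunpklo/uunpkhi unpacks and adds,
and now selects to a single predicated reduction:

  ptrue p0.b
  uaddv d0, p0, z0.b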
---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 100 ++++++++++++-
 llvm/test/CodeGen/AArch64/sve-doublereduct.ll |  41 ++----
 llvm/test/CodeGen/AArch64/sve-int-reduce.ll   |  97 +++++++++++++
 ...-streaming-mode-fixed-length-reductions.ll | 135 ++++++++++++++++++
 4 files changed, 345 insertions(+), 28 deletions(-)
 create mode 100644 llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reductions.ll

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 1b67f2dbe1a3f..219f7833884ab 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -17503,6 +17503,99 @@ static SDValue performVecReduceAddCombineWithUADDLP(SDNode *N,
   return DAG.getNode(ISD::VECREDUCE_ADD, DL, MVT::i32, UADDLP);
 }
 
+static SDValue
+performVecReduceAddZextCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
+                               const AArch64TargetLowering &TLI) {
+  if (N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND)
+    return SDValue();
+
+  SelectionDAG &DAG = DCI.DAG;
+  auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
+  SDNode *ZEXT = N->getOperand(0).getNode();
+  EVT VecVT = ZEXT->getOperand(0).getValueType();
+  SDLoc DL(N);
+
+  SDValue VecOp = ZEXT->getOperand(0);
+  VecVT = VecOp.getValueType();
+  bool IsScalableType = VecVT.isScalableVector();
+
+  if (TLI.isTypeLegal(VecVT)) {
+    if (!IsScalableType &&
+        !TLI.useSVEForFixedLengthVectorVT(
+            VecVT,
+            /*OverrideNEON=*/Subtarget.useSVEForFixedLengthVectors(VecVT)))
+      return SDValue();
+
+    if (!IsScalableType) {
+      EVT ContainerVT = getContainerForFixedLengthVector(DAG, VecVT);
+      VecOp = convertToScalableVector(DAG, ContainerVT, VecOp);
+    }
+    VecVT = VecOp.getValueType();
+    EVT RdxVT = N->getValueType(0);
+    RdxVT = getPackedSVEVectorVT(RdxVT);
+    SDValue Pg = getPredicateForVector(DAG, DL, VecVT);
+    SDValue Res = DAG.getNode(
+        ISD::INTRINSIC_WO_CHAIN, DL, MVT::i64,
+        DAG.getConstant(Intrinsic::aarch64_sve_uaddv, DL, MVT::i64), Pg, VecOp);
+    EVT ResVT = MVT::i64;
+    if (ResVT != N->getValueType(0))
+      Res = DAG.getAnyExtOrTrunc(Res, DL, N->getValueType(0));
+    return Res;
+  }
+
+  SmallVector<SDValue, 4> SplitVals;
+  SmallVector<SDValue, 4> PrevVals;
+  PrevVals.push_back(VecOp);
+  while (true) {
+
+    if (!VecVT.isScalableVector() &&
+        !PrevVals[0].getValueType().getVectorElementCount().isKnownEven())
+      return SDValue();
+
+    for (SDValue Vec : PrevVals) {
+      SDValue Lo, Hi;
+      std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
+      SplitVals.push_back(Lo);
+      SplitVals.push_back(Hi);
+    }
+    if (TLI.isTypeLegal(SplitVals[0].getValueType()))
+      break;
+    PrevVals.clear();
+    std::copy(SplitVals.begin(), SplitVals.end(), std::back_inserter(PrevVals));
+    SplitVals.clear();
+  }
+  SDNode *VecRed = N;
+  EVT ElemType = VecRed->getValueType(0);
+  SmallVector<SDValue, 4> Results;
+
+  if (!IsScalableType &&
+      !TLI.useSVEForFixedLengthVectorVT(
+          SplitVals[0].getValueType(),
+          /*OverrideNEON=*/Subtarget.useSVEForFixedLengthVectors(
+              SplitVals[0].getValueType())))
+    return SDValue();
+
+  for (unsigned Num = 0; Num < SplitVals.size(); ++Num) {
+    SDValue Reg = SplitVals[Num];
+    EVT RdxVT = Reg->getValueType(0);
+    SDValue Pg = getPredicateForVector(DAG, DL, RdxVT);
+    if (!IsScalableType) {
+      EVT ContainerVT = getContainerForFixedLengthVector(DAG, RdxVT);
+      Reg = convertToScalableVector(DAG, ContainerVT, Reg);
+    }
+    SDValue Res = DAG.getNode(
+        ISD::INTRINSIC_WO_CHAIN, DL, MVT::i64,
+        DAG.getConstant(Intrinsic::aarch64_sve_uaddv, DL, MVT::i64), Pg, Reg);
+    if (ElemType != MVT::i64)
+      Res = DAG.getAnyExtOrTrunc(Res, DL, ElemType);
+    Results.push_back(Res);
+  }
+  SDValue ToAdd = Results[0];
+  for (unsigned I = 1; I < SplitVals.size(); ++I)
+    ToAdd = DAG.getNode(ISD::ADD, DL, ElemType, ToAdd, Results[I]);
+  return ToAdd;
+}
+
 // Turn a v8i8/v16i8 extended vecreduce into a udot/sdot and vecreduce
 //   vecreduce.add(ext(A)) to vecreduce.add(DOT(zero, A, one))
 //   vecreduce.add(mul(ext(A), ext(B))) to vecreduce.add(DOT(zero, A, B))
@@ -25188,8 +25281,11 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
     return performInsertVectorEltCombine(N, DCI);
   case ISD::EXTRACT_VECTOR_ELT:
     return performExtractVectorEltCombine(N, DCI, Subtarget);
-  case ISD::VECREDUCE_ADD:
-    return performVecReduceAddCombine(N, DCI.DAG, Subtarget);
+  case ISD::VECREDUCE_ADD: {
+    if (SDValue Val = performVecReduceAddCombine(N, DCI.DAG, Subtarget))
+      return Val;
+    return performVecReduceAddZextCombine(N, DCI, *this);
+  }
   case AArch64ISD::UADDV:
     return performUADDVCombine(N, DAG);
   case AArch64ISD::SMULL:
diff --git a/llvm/test/CodeGen/AArch64/sve-doublereduct.ll b/llvm/test/CodeGen/AArch64/sve-doublereduct.ll
index 7bc31d44bb654..b289dfbec527c 100644
--- a/llvm/test/CodeGen/AArch64/sve-doublereduct.ll
+++ b/llvm/test/CodeGen/AArch64/sve-doublereduct.ll
@@ -103,17 +103,12 @@ define i32 @add_i32(<vscale x 8 x i32> %a, <vscale x 4 x i32> %b) {
 define i16 @add_ext_i16(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
 ; CHECK-LABEL: add_ext_i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    uunpkhi z2.h, z0.b
-; CHECK-NEXT:    uunpklo z0.h, z0.b
-; CHECK-NEXT:    uunpkhi z3.h, z1.b
-; CHECK-NEXT:    uunpklo z1.h, z1.b
-; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    add z0.h, z0.h, z2.h
-; CHECK-NEXT:    add z1.h, z1.h, z3.h
-; CHECK-NEXT:    add z0.h, z0.h, z1.h
-; CHECK-NEXT:    uaddv d0, p0, z0.h
-; CHECK-NEXT:    fmov x0, d0
-; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    uaddv d0, p0, z0.b
+; CHECK-NEXT:    uaddv d1, p0, z1.b
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    fmov w9, s1
+; CHECK-NEXT:    add w0, w8, w9
 ; CHECK-NEXT:    ret
   %ae = zext <vscale x 16 x i8> %a to <vscale x 16 x i16>
   %be = zext <vscale x 16 x i8> %b to <vscale x 16 x i16>
@@ -126,21 +121,15 @@ define i16 @add_ext_i16(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
 define i16 @add_ext_v32i16(<vscale x 32 x i8> %a, <vscale x 16 x i8> %b) {
 ; CHECK-LABEL: add_ext_v32i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    uunpklo z3.h, z1.b
-; CHECK-NEXT:    uunpklo z4.h, z0.b
-; CHECK-NEXT:    uunpkhi z1.h, z1.b
-; CHECK-NEXT:    uunpkhi z0.h, z0.b
-; CHECK-NEXT:    uunpkhi z5.h, z2.b
-; CHECK-NEXT:    uunpklo z2.h, z2.b
-; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    add z0.h, z0.h, z1.h
-; CHECK-NEXT:    add z1.h, z4.h, z3.h
-; CHECK-NEXT:    add z0.h, z1.h, z0.h
-; CHECK-NEXT:    add z1.h, z2.h, z5.h
-; CHECK-NEXT:    add z0.h, z0.h, z1.h
-; CHECK-NEXT:    uaddv d0, p0, z0.h
-; CHECK-NEXT:    fmov x0, d0
-; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    uaddv d1, p0, z1.b
+; CHECK-NEXT:    uaddv d0, p0, z0.b
+; CHECK-NEXT:    uaddv d2, p0, z2.b
+; CHECK-NEXT:    fmov w8, s1
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    add w8, w9, w8
+; CHECK-NEXT:    fmov w9, s2
+; CHECK-NEXT:    add w0, w8, w9
 ; CHECK-NEXT:    ret
   %ae = zext <vscale x 32 x i8> %a to <vscale x 32 x i16>
   %be = zext <vscale x 16 x i8> %b to <vscale x 16 x i16>
diff --git a/llvm/test/CodeGen/AArch64/sve-int-reduce.ll b/llvm/test/CodeGen/AArch64/sve-int-reduce.ll
index 8c1b5225b7f25..4b401731e6f80 100644
--- a/llvm/test/CodeGen/AArch64/sve-int-reduce.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-reduce.ll
@@ -188,6 +188,103 @@ define i64 @uaddv_nxv2i64(<vscale x 2 x i64> %a) {
   ret i64 %res
 }
 
+define i32 @uaddv_nxv16i8_nxv16i32(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: uaddv_nxv16i8_nxv16i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    uaddv d0, p0, z0.b
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+  %1 = zext <vscale x 16 x i8> %a to <vscale x 16 x i32>
+  %2 = call i32 @llvm.vector.reduce.add.nxv16i32(<vscale x 16 x i32> %1)
+  ret i32 %2
+}
+
+define i64 @uaddv_nxv16i16_nxv16i64(<vscale x 16 x i16> %a) {
+; CHECK-LABEL: uaddv_nxv16i16_nxv16i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    uaddv d1, p0, z1.h
+; CHECK-NEXT:    uaddv d0, p0, z0.h
+; CHECK-NEXT:    fmov x8, d1
+; CHECK-NEXT:    fmov x9, d0
+; CHECK-NEXT:    add x0, x9, x8
+; CHECK-NEXT:    ret
+  %1 = zext <vscale x 16 x i16> %a to <vscale x 16 x i64>
+  %2 = call i64 @llvm.vector.reduce.add.nxv16i64(<vscale x 16 x i64> %1)
+  ret i64 %2
+}
+
+define i32 @uaddv_nxv16i16_nxv16i32(<vscale x 32 x i16> %a) {
+; CHECK-LABEL: uaddv_nxv16i16_nxv16i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    uaddv d1, p0, z1.h
+; CHECK-NEXT:    uaddv d0, p0, z0.h
+; CHECK-NEXT:    uaddv d2, p0, z2.h
+; CHECK-NEXT:    uaddv d3, p0, z3.h
+; CHECK-NEXT:    fmov w8, s1
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    add w8, w9, w8
+; CHECK-NEXT:    fmov w9, s2
+; CHECK-NEXT:    add w8, w8, w9
+; CHECK-NEXT:    fmov w9, s3
+; CHECK-NEXT:    add w0, w8, w9
+; CHECK-NEXT:    ret
+  %1 = zext <vscale x 32 x i16> %a to <vscale x 32 x i32>
+  %2 = call i32 @llvm.vector.reduce.add.nxv32i32(<vscale x 32 x i32> %1)
+  ret i32 %2
+}
+
+define i32 @saddv_nxv16i8_nxv16i32(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: saddv_nxv16i8_nxv16i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sunpkhi z1.h, z0.b
+; CHECK-NEXT:    sunpklo z0.h, z0.b
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    sunpklo z2.s, z1.h
+; CHECK-NEXT:    sunpklo z3.s, z0.h
+; CHECK-NEXT:    sunpkhi z1.s, z1.h
+; CHECK-NEXT:    sunpkhi z0.s, z0.h
+; CHECK-NEXT:    add z0.s, z0.s, z1.s
+; CHECK-NEXT:    add z1.s, z3.s, z2.s
+; CHECK-NEXT:    add z0.s, z1.s, z0.s
+; CHECK-NEXT:    uaddv d0, p0, z0.s
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+  %1 = sext <vscale x 16 x i8> %a to <vscale x 16 x i32>
+  %2 = call i32 @llvm.vector.reduce.add.nxv16i32(<vscale x 16 x i32> %1)
+  ret i32 %2
+}
+
+define i32 @uaddv_nxv32i16_nxv32i32(ptr %a) {
+; CHECK-LABEL: uaddv_nxv32i16_nxv32i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z2.h }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT:    ld1h { z3.h }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT:    uaddv d0, p0, z0.h
+; CHECK-NEXT:    uaddv d1, p0, z1.h
+; CHECK-NEXT:    uaddv d2, p0, z2.h
+; CHECK-NEXT:    uaddv d3, p0, z3.h
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    fmov w9, s1
+; CHECK-NEXT:    add w8, w9, w8
+; CHECK-NEXT:    fmov w9, s2
+; CHECK-NEXT:    add w8, w8, w9
+; CHECK-NEXT:    fmov w9, s3
+; CHECK-NEXT:    add w0, w8, w9
+; CHECK-NEXT:    ret
+  %1 = load <vscale x 32 x i16>, ptr %a, align 16
+  %2 = zext <vscale x 32 x i16> %1 to <vscale x 32 x i32>
+  %3 = call i32 @llvm.vector.reduce.add.nxv32i32(<vscale x 32 x i32> %2)
+  ret i32 %3
+}
+
 ; UMINV
 
 define i8 @umin_nxv16i8(<vscale x 16 x i8> %a) {
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reductions.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reductions.ll
new file mode 100644
index 0000000000000..9f076bd3c8e40
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reductions.ll
@@ -0,0 +1,135 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mattr=+sve < %s | FileCheck %s -check-prefixes=CHECK,NO_STREAMING
+; RUN: llc -mattr=+sve -force-streaming-compatible -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128  < %s | FileCheck %s -check-prefixes=CHECK,SVE_128
+; RUN: llc -mattr=+sve -force-streaming-compatible -aarch64-sve-vector-bits-min=256 < %s | FileCheck %s -check-prefixes=CHECK,SVE_MIN_256
+
+target triple = "aarch64-unknown-linux-gnu"
+
+define i32 @reduce_uadd_v16i8(<32 x i8> %a) #0 {
+; NO_STREAMING-LABEL: reduce_uadd_v16i8:
+; NO_STREAMING:       // %bb.0:
+; NO_STREAMING-NEXT:    ushll2 v2.8h, v1.16b, #0
+; NO_STREAMING-NEXT:    ushll2 v3.8h, v0.16b, #0
+; NO_STREAMING-NEXT:    ushll v1.8h, v1.8b, #0
+; NO_STREAMING-NEXT:    ushll v0.8h, v0.8b, #0
+; NO_STREAMING-NEXT:    uaddl2 v4.4s, v3.8h, v2.8h
+; NO_STREAMING-NEXT:    uaddl v2.4s, v3.4h, v2.4h
+; NO_STREAMING-NEXT:    uaddl2 v5.4s, v0.8h, v1.8h
+; NO_STREAMING-NEXT:    uaddl v0.4s, v0.4h, v1.4h
+; NO_STREAMING-NEXT:    add v1.4s, v5.4s, v4.4s
+; NO_STREAMING-NEXT:    add v0.4s, v0.4s, v2.4s
+; NO_STREAMING-NEXT:    add v0.4s, v0.4s, v1.4s
+; NO_STREAMING-NEXT:    addv s0, v0.4s
+; NO_STREAMING-NEXT:    fmov w0, s0
+; NO_STREAMING-NEXT:    ret
+;
+; SVE_128-LABEL: reduce_uadd_v16i8:
+; SVE_128:       // %bb.0:
+; SVE_128-NEXT:    ptrue p0.b
+; SVE_128-NEXT:    // kill: def $q1 killed $q1 def $z1
+; SVE_128-NEXT:    // kill: def $q0 killed $q0 def $z0
+; SVE_128-NEXT:    uaddv d1, p0, z1.b
+; SVE_128-NEXT:    uaddv d0, p0, z0.b
+; SVE_128-NEXT:    fmov x8, d1
+; SVE_128-NEXT:    fmov x9, d0
+; SVE_128-NEXT:    add w0, w9, w8
+; SVE_128-NEXT:    ret
+;
+; SVE_MIN_256-LABEL: reduce_uadd_v16i8:
+; SVE_MIN_256:       // %bb.0:
+; SVE_MIN_256-NEXT:    ptrue p0.b, vl16
+; SVE_MIN_256-NEXT:    // kill: def $q0 killed $q0 def $z0
+; SVE_MIN_256-NEXT:    // kill: def $q1 killed $q1 def $z1
+; SVE_MIN_256-NEXT:    splice z0.b, p0, z0.b, z1.b
+; SVE_MIN_256-NEXT:    ptrue p0.b
+; SVE_MIN_256-NEXT:    uaddv d0, p0, z0.b
+; SVE_MIN_256-NEXT:    fmov x0, d0
+; SVE_MIN_256-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; SVE_MIN_256-NEXT:    ret
+  %1 = zext <32 x i8> %a to <32 x i32>
+  %2 = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %1)
+  ret i32 %2
+}
+
+define i32 @reduce_sadd_v16i8(<32 x i8> %a) #0 {
+; NO_STREAMING-LABEL: reduce_sadd_v16i8:
+; NO_STREAMING:       // %bb.0:
+; NO_STREAMING-NEXT:    sshll2 v2.8h, v1.16b, #0
+; NO_STREAMING-NEXT:    sshll2 v3.8h, v0.16b, #0
+; NO_STREAMING-NEXT:    sshll v1.8h, v1.8b, #0
+; NO_STREAMING-NEXT:    sshll v0.8h, v0.8b, #0
+; NO_STREAMING-NEXT:    saddl2 v4.4s, v3.8h, v2.8h
+; NO_STREAMING-NEXT:    saddl v2.4s, v3.4h, v2.4h
+; NO_STREAMING-NEXT:    saddl2 v5.4s, v0.8h, v1.8h
+; NO_STREAMING-NEXT:    saddl v0.4s, v0.4h, v1.4h
+; NO_STREAMING-NEXT:    add v1.4s, v5.4s, v4.4s
+; NO_STREAMING-NEXT:    add v0.4s, v0.4s, v2.4s
+; NO_STREAMING-NEXT:    add v0.4s, v0.4s, v1.4s
+; NO_STREAMING-NEXT:    addv s0, v0.4s
+; NO_STREAMING-NEXT:    fmov w0, s0
+; NO_STREAMING-NEXT:    ret
+;
+; SVE_128-LABEL: reduce_sadd_v16i8:
+; SVE_128:       // %bb.0:
+; SVE_128-NEXT:    // kill: def $q1 killed $q1 def $z1
+; SVE_128-NEXT:    sunpklo z2.h, z1.b
+; SVE_128-NEXT:    // kill: def $q0 killed $q0 def $z0
+; SVE_128-NEXT:    sunpklo z3.h, z0.b
+; SVE_128-NEXT:    ptrue p0.s
+; SVE_128-NEXT:    ext z1.b, z1.b, z1.b, #8
+; SVE_128-NEXT:    ext z0.b, z0.b, z0.b, #8
+; SVE_128-NEXT:    sunpklo z1.h, z1.b
+; SVE_128-NEXT:    sunpklo z0.h, z0.b
+; SVE_128-NEXT:    sunpklo z4.s, z2.h
+; SVE_128-NEXT:    ext z2.b, z2.b, z2.b, #8
+; SVE_128-NEXT:    sunpklo z6.s, z3.h
+; SVE_128-NEXT:    ext z3.b, z3.b, z3.b, #8
+; SVE_128-NEXT:    mov z5.d, z1.d
+; SVE_128-NEXT:    sunpklo z7.s, z0.h
+; SVE_128-NEXT:    ext z0.b, z0.b, z0.b, #8
+; SVE_128-NEXT:    sunpklo z2.s, z2.h
+; SVE_128-NEXT:    sunpklo z3.s, z3.h
+; SVE_128-NEXT:    add z4.s, z6.s, z4.s
+; SVE_128-NEXT:    ext z5.b, z5.b, z1.b, #8
+; SVE_128-NEXT:    sunpklo z1.s, z1.h
+; SVE_128-NEXT:    sunpklo z0.s, z0.h
+; SVE_128-NEXT:    add z2.s, z3.s, z2.s
+; SVE_128-NEXT:    sunpklo z5.s, z5.h
+; SVE_128-NEXT:    add z1.s, z7.s, z1.s
+; SVE_128-NEXT:    add z0.s, z0.s, z5.s
+; SVE_128-NEXT:    add z1.s, z4.s, z1.s
+; SVE_128-NEXT:    add z0.s, z2.s, z0.s
+; SVE_128-NEXT:    add z0.s, z1.s, z0.s
+; SVE_128-NEXT:    uaddv d0, p0, z0.s
+; SVE_128-NEXT:    fmov x0, d0
+; SVE_128-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; SVE_128-NEXT:    ret
+;
+; SVE_MIN_256-LABEL: reduce_sadd_v16i8:
+; SVE_MIN_256:       // %bb.0:
+; SVE_MIN_256-NEXT:    // kill: def $q1 killed $q1 def $z1
+; SVE_MIN_256-NEXT:    // kill: def $q0 killed $q0 def $z0
+; SVE_MIN_256-NEXT:    sunpklo z2.h, z1.b
+; SVE_MIN_256-NEXT:    sunpklo z3.h, z0.b
+; SVE_MIN_256-NEXT:    ptrue p0.s, vl8
+; SVE_MIN_256-NEXT:    ext z1.b, z1.b, z1.b, #8
+; SVE_MIN_256-NEXT:    ext z0.b, z0.b, z0.b, #8
+; SVE_MIN_256-NEXT:    sunpklo z1.h, z1.b
+; SVE_MIN_256-NEXT:    sunpklo z0.h, z0.b
+; SVE_MIN_256-NEXT:    add z2.h, z3.h, z2.h
+; SVE_MIN_256-NEXT:    add z0.h, z0.h, z1.h
+; SVE_MIN_256-NEXT:    sunpklo z1.s, z2.h
+; SVE_MIN_256-NEXT:    sunpklo z0.s, z0.h
+; SVE_MIN_256-NEXT:    add z0.s, z1.s, z0.s
+; SVE_MIN_256-NEXT:    uaddv d0, p0, z0.s
+; SVE_MIN_256-NEXT:    fmov x0, d0
+; SVE_MIN_256-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; SVE_MIN_256-NEXT:    ret
+  %1 = sext <32 x i8> %a to <32 x i32>
+  %2 = call i32 @llvm.vector.reduce.add.v16i32(<32 x i32> %1)
+  ret i32 %2
+}
+
+attributes #0 = { "target-features"="+sve" }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}

From b48eff857179ebe1dbad092e98c31f93b177d29b Mon Sep 17 00:00:00 2001
From: Dinar Temirbulatov <Dinar.Temirbulatov at arm.com>
Date: Tue, 2 Jul 2024 19:53:01 +0000
Subject: [PATCH 2/5] Addressed review comments.
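
This revision folds the legal-type fast path into the common code path
and renames the SplitVals/PrevVals worklists to ResultValues/PrevValues.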

---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 75 +++++++------------
 1 file changed, 28 insertions(+), 47 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 219f7833884ab..ae17f5a90d14c 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -17518,65 +17518,46 @@ performVecReduceAddZextCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
   SDValue VecOp = ZEXT->getOperand(0);
   VecVT = VecOp.getValueType();
   bool IsScalableType = VecVT.isScalableVector();
+  SmallVector<SDValue, 2> ResultValues;
 
-  if (TLI.isTypeLegal(VecVT)) {
-    if (!IsScalableType &&
-        !TLI.useSVEForFixedLengthVectorVT(
-            VecVT,
-            /*OverrideNEON=*/Subtarget.useSVEForFixedLengthVectors(VecVT)))
-      return SDValue();
-
-    if (!IsScalableType) {
-      EVT ContainerVT = getContainerForFixedLengthVector(DAG, VecVT);
-      VecOp = convertToScalableVector(DAG, ContainerVT, VecOp);
-    }
-    VecVT = VecOp.getValueType();
-    EVT RdxVT = N->getValueType(0);
-    RdxVT = getPackedSVEVectorVT(RdxVT);
-    SDValue Pg = getPredicateForVector(DAG, DL, VecVT);
-    SDValue Res = DAG.getNode(
-        ISD::INTRINSIC_WO_CHAIN, DL, MVT::i64,
-        DAG.getConstant(Intrinsic::aarch64_sve_uaddv, DL, MVT::i64), Pg, VecOp);
-    EVT ResVT = MVT::i64;
-    if (ResVT != N->getValueType(0))
-      Res = DAG.getAnyExtOrTrunc(Res, DL, N->getValueType(0));
-    return Res;
-  }
-
-  SmallVector<SDValue, 4> SplitVals;
-  SmallVector<SDValue, 4> PrevVals;
-  PrevVals.push_back(VecOp);
-  while (true) {
+  if (!TLI.isTypeLegal(VecVT)) {
+    SmallVector<SDValue, 2> PrevValues;
+    PrevValues.push_back(VecOp);
+    while (true) {
 
-    if (!VecVT.isScalableVector() &&
-        !PrevVals[0].getValueType().getVectorElementCount().isKnownEven())
-      return SDValue();
+      if (!VecVT.isScalableVector() &&
+          !PrevValues[0].getValueType().getVectorElementCount().isKnownEven())
+        return SDValue();
 
-    for (SDValue Vec : PrevVals) {
-      SDValue Lo, Hi;
-      std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
-      SplitVals.push_back(Lo);
-      SplitVals.push_back(Hi);
+      for (SDValue Vec : PrevValues) {
+        SDValue Lo, Hi;
+        std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
+        ResultValues.push_back(Lo);
+        ResultValues.push_back(Hi);
+      }
+      if (TLI.isTypeLegal(ResultValues[0].getValueType()))
+        break;
+      PrevValues.clear();
+      std::copy(ResultValues.begin(), ResultValues.end(),
+                std::back_inserter(PrevValues));
+      ResultValues.clear();
     }
-    if (TLI.isTypeLegal(SplitVals[0].getValueType()))
-      break;
-    PrevVals.clear();
-    std::copy(SplitVals.begin(), SplitVals.end(), std::back_inserter(PrevVals));
-    SplitVals.clear();
+  } else {
+    ResultValues.push_back(VecOp);
   }
   SDNode *VecRed = N;
   EVT ElemType = VecRed->getValueType(0);
-  SmallVector<SDValue, 4> Results;
+  SmallVector<SDValue, 2> Results;
 
   if (!IsScalableType &&
       !TLI.useSVEForFixedLengthVectorVT(
-          SplitVals[0].getValueType(),
+          ResultValues[0].getValueType(),
           /*OverrideNEON=*/Subtarget.useSVEForFixedLengthVectors(
-              SplitVals[0].getValueType())))
+              ResultValues[0].getValueType())))
     return SDValue();
 
-  for (unsigned Num = 0; Num < SplitVals.size(); ++Num) {
-    SDValue Reg = SplitVals[Num];
+  for (unsigned Num = 0; Num < ResultValues.size(); ++Num) {
+    SDValue Reg = ResultValues[Num];
     EVT RdxVT = Reg->getValueType(0);
     SDValue Pg = getPredicateForVector(DAG, DL, RdxVT);
     if (!IsScalableType) {
@@ -17591,7 +17572,7 @@ performVecReduceAddZextCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
     Results.push_back(Res);
   }
   SDValue ToAdd = Results[0];
-  for (unsigned I = 1; I < SplitVals.size(); ++I)
+  for (unsigned I = 1; I < ResultValues.size(); ++I)
     ToAdd = DAG.getNode(ISD::ADD, DL, ElemType, ToAdd, Results[I]);
   return ToAdd;
 }

From 251df1ba6961adb0fa6cd3d849bf3aa55590848e Mon Sep 17 00:00:00 2001
From: Dinar Temirbulatov <Dinar.Temirbulatov at arm.com>
Date: Sun, 7 Jul 2024 19:53:13 +0000
Subject: [PATCH 3/5] Added signed reduction support
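
Sign-extended reductions now take the same path via SADDV. A sketch
mirroring the updated saddv_nxv16i8_nxv16i32 test below: the IR

  %1 = sext <vscale x 16 x i8> %a to <vscale x 16 x i32>
  %2 = call i32 @llvm.vector.reduce.add.nxv16i32(<vscale x 16 x i32> %1)

now selects to

  ptrue p0.b
  saddv d0, p0, z0.b

instead of a chain of sunpklo/sunpkhi unpacks and adds.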

---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 19 ++++---
 .../CodeGen/AArch64/sve-fixed-vector-zext.ll  | 44 ++++++++------
 llvm/test/CodeGen/AArch64/sve-int-reduce.ll   | 13 +----
 ...-streaming-mode-fixed-length-reductions.ll | 57 ++++---------------
 4 files changed, 53 insertions(+), 80 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index ae17f5a90d14c..df1f4043f284f 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -17504,10 +17504,12 @@ static SDValue performVecReduceAddCombineWithUADDLP(SDNode *N,
 }
 
 static SDValue
-performVecReduceAddZextCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
-                               const AArch64TargetLowering &TLI) {
-  if (N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND)
+performVecReduceAddExtCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
+                              const AArch64TargetLowering &TLI) {
+  if (N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
+      N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND)
     return SDValue();
+  bool IsSigned = N->getOperand(0).getOpcode() == ISD::SIGN_EXTEND;
 
   SelectionDAG &DAG = DCI.DAG;
   auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
@@ -17564,9 +17566,12 @@ performVecReduceAddZextCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
       EVT ContainerVT = getContainerForFixedLengthVector(DAG, RdxVT);
       Reg = convertToScalableVector(DAG, ContainerVT, Reg);
     }
-    SDValue Res = DAG.getNode(
-        ISD::INTRINSIC_WO_CHAIN, DL, MVT::i64,
-        DAG.getConstant(Intrinsic::aarch64_sve_uaddv, DL, MVT::i64), Pg, Reg);
+    SDValue Res =
+        DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i64,
+                    DAG.getConstant(IsSigned ? Intrinsic::aarch64_sve_saddv
+                                             : Intrinsic::aarch64_sve_uaddv,
+                                    DL, MVT::i64),
+                    Pg, Reg);
     if (ElemType != MVT::i64)
       Res = DAG.getAnyExtOrTrunc(Res, DL, ElemType);
     Results.push_back(Res);
@@ -25265,7 +25270,7 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
   case ISD::VECREDUCE_ADD: {
     if (SDValue Val = performVecReduceAddCombine(N, DCI.DAG, Subtarget))
       return Val;
-    return performVecReduceAddZextCombine(N, DCI, *this);
+    return performVecReduceAddExtCombine(N, DCI, *this);
   }
   case AArch64ISD::UADDV:
     return performUADDVCombine(N, DAG);
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-vector-zext.ll b/llvm/test/CodeGen/AArch64/sve-fixed-vector-zext.ll
index 1ab2589bccd5b..24c817d410301 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-vector-zext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-vector-zext.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 
 ; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mcpu=neoverse-v1 -O3 -aarch64-sve-vector-bits-min=256 -verify-machineinstrs | FileCheck %s --check-prefixes=SVE256
 ; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mcpu=neoverse-v1 -O3 -aarch64-sve-vector-bits-min=128 -verify-machineinstrs | FileCheck %s --check-prefixes=NEON
@@ -6,24 +7,31 @@
 
 define internal i32 @test(ptr nocapture readonly %p1, i32 %i1, ptr nocapture readonly %p2, i32 %i2) {
 ; SVE256-LABEL: test:
-; SVE256:       ld1b    { z0.h }, p0/z,
-; SVE256:       ld1b    { z1.h }, p0/z,
-; SVE256:       sub z0.h, z0.h, z1.h
-; SVE256-NEXT:  sunpklo z1.s, z0.h
-; SVE256-NEXT:  ext z0.b, z0.b, z0.b, #16
-; SVE256-NEXT:  sunpklo z0.s, z0.h
-; SVE256-NEXT:  add z0.s, z1.s, z0.s
-; SVE256-NEXT:  uaddv   d0, p1, z0.s
+; SVE256:       // %bb.0: // %L.entry
+; SVE256-NEXT:    ptrue p0.h, vl16
+; SVE256-NEXT:    mov w9, wzr
+; SVE256-NEXT:    mov w10, wzr
+; SVE256-NEXT:    mov w8, wzr
+; SVE256-NEXT:    mov w11, #-16 // =0xfffffff0
+; SVE256-NEXT:    .p2align 5, , 16
+; SVE256-NEXT:  .LBB0_1: // %L1
+; SVE256-NEXT:    // =>This Inner Loop Header: Depth=1
+; SVE256-NEXT:    sxtw x12, w9
+; SVE256-NEXT:    sxtw x13, w10
+; SVE256-NEXT:    adds w11, w11, #1
+; SVE256-NEXT:    add w10, w10, w3
+; SVE256-NEXT:    ld1b { z0.h }, p0/z, [x0, x12]
+; SVE256-NEXT:    ld1b { z1.h }, p0/z, [x2, x13]
+; SVE256-NEXT:    add w9, w9, w1
+; SVE256-NEXT:    sub z0.h, z0.h, z1.h
+; SVE256-NEXT:    saddv d0, p0, z0.h
+; SVE256-NEXT:    fmov w12, s0
+; SVE256-NEXT:    add w8, w12, w8
+; SVE256-NEXT:    b.lo .LBB0_1
+; SVE256-NEXT:  // %bb.2: // %L2
+; SVE256-NEXT:    mov w0, w8
+; SVE256-NEXT:    ret
 
-; NEON-LABEL: test:
-; NEON:       ldr q0, [x0, w9, sxtw]
-; NEON:       ldr q1, [x2, w10, sxtw]
-; NEON:       usubl2  v2.8h, v0.16b, v1.16b
-; NEON-NEXT:  usubl   v0.8h, v0.8b, v1.8b
-; NEON:       saddl2  v1.4s, v0.8h, v2.8h
-; NEON-NEXT:  saddl   v0.4s, v0.4h, v2.4h
-; NEON-NEXT:  add v0.4s, v0.4s, v1.4s
-; NEON-NEXT:  addv    s0, v0.4s
 
 L.entry:
   br label %L1
@@ -55,3 +63,5 @@ L2:                                          ; preds = %L1
 }
 
 declare  i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; NEON: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/sve-int-reduce.ll b/llvm/test/CodeGen/AArch64/sve-int-reduce.ll
index 4b401731e6f80..7a7a0d353ecff 100644
--- a/llvm/test/CodeGen/AArch64/sve-int-reduce.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-reduce.ll
@@ -240,17 +240,8 @@ define i32 @uaddv_nxv16i16_nxv16i32(<vscale x 32 x i16> %a) {
 define i32 @saddv_nxv16i8_nxv16i32(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: saddv_nxv16i8_nxv16i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sunpkhi z1.h, z0.b
-; CHECK-NEXT:    sunpklo z0.h, z0.b
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    sunpklo z2.s, z1.h
-; CHECK-NEXT:    sunpklo z3.s, z0.h
-; CHECK-NEXT:    sunpkhi z1.s, z1.h
-; CHECK-NEXT:    sunpkhi z0.s, z0.h
-; CHECK-NEXT:    add z0.s, z0.s, z1.s
-; CHECK-NEXT:    add z1.s, z3.s, z2.s
-; CHECK-NEXT:    add z0.s, z1.s, z0.s
-; CHECK-NEXT:    uaddv d0, p0, z0.s
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    saddv d0, p0, z0.b
 ; CHECK-NEXT:    fmov x0, d0
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reductions.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reductions.ll
index 9f076bd3c8e40..9d4355c152c65 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reductions.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reductions.ll
@@ -41,7 +41,7 @@ define i32 @reduce_uadd_v16i8(<32 x i8> %a) #0 {
 ; SVE_MIN_256-NEXT:    // kill: def $q0 killed $q0 def $z0
 ; SVE_MIN_256-NEXT:    // kill: def $q1 killed $q1 def $z1
 ; SVE_MIN_256-NEXT:    splice z0.b, p0, z0.b, z1.b
-; SVE_MIN_256-NEXT:    ptrue p0.b
+; SVE_MIN_256-NEXT:    ptrue p0.b, vl32
 ; SVE_MIN_256-NEXT:    uaddv d0, p0, z0.b
 ; SVE_MIN_256-NEXT:    fmov x0, d0
 ; SVE_MIN_256-NEXT:    // kill: def $w0 killed $w0 killed $x0
@@ -71,57 +71,24 @@ define i32 @reduce_sadd_v16i8(<32 x i8> %a) #0 {
 ;
 ; SVE_128-LABEL: reduce_sadd_v16i8:
 ; SVE_128:       // %bb.0:
+; SVE_128-NEXT:    ptrue p0.b
 ; SVE_128-NEXT:    // kill: def $q1 killed $q1 def $z1
-; SVE_128-NEXT:    sunpklo z2.h, z1.b
 ; SVE_128-NEXT:    // kill: def $q0 killed $q0 def $z0
-; SVE_128-NEXT:    sunpklo z3.h, z0.b
-; SVE_128-NEXT:    ptrue p0.s
-; SVE_128-NEXT:    ext z1.b, z1.b, z1.b, #8
-; SVE_128-NEXT:    ext z0.b, z0.b, z0.b, #8
-; SVE_128-NEXT:    sunpklo z1.h, z1.b
-; SVE_128-NEXT:    sunpklo z0.h, z0.b
-; SVE_128-NEXT:    sunpklo z4.s, z2.h
-; SVE_128-NEXT:    ext z2.b, z2.b, z2.b, #8
-; SVE_128-NEXT:    sunpklo z6.s, z3.h
-; SVE_128-NEXT:    ext z3.b, z3.b, z3.b, #8
-; SVE_128-NEXT:    mov z5.d, z1.d
-; SVE_128-NEXT:    sunpklo z7.s, z0.h
-; SVE_128-NEXT:    ext z0.b, z0.b, z0.b, #8
-; SVE_128-NEXT:    sunpklo z2.s, z2.h
-; SVE_128-NEXT:    sunpklo z3.s, z3.h
-; SVE_128-NEXT:    add z4.s, z6.s, z4.s
-; SVE_128-NEXT:    ext z5.b, z5.b, z1.b, #8
-; SVE_128-NEXT:    sunpklo z1.s, z1.h
-; SVE_128-NEXT:    sunpklo z0.s, z0.h
-; SVE_128-NEXT:    add z2.s, z3.s, z2.s
-; SVE_128-NEXT:    sunpklo z5.s, z5.h
-; SVE_128-NEXT:    add z1.s, z7.s, z1.s
-; SVE_128-NEXT:    add z0.s, z0.s, z5.s
-; SVE_128-NEXT:    add z1.s, z4.s, z1.s
-; SVE_128-NEXT:    add z0.s, z2.s, z0.s
-; SVE_128-NEXT:    add z0.s, z1.s, z0.s
-; SVE_128-NEXT:    uaddv d0, p0, z0.s
-; SVE_128-NEXT:    fmov x0, d0
-; SVE_128-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; SVE_128-NEXT:    saddv d1, p0, z1.b
+; SVE_128-NEXT:    saddv d0, p0, z0.b
+; SVE_128-NEXT:    fmov x8, d1
+; SVE_128-NEXT:    fmov x9, d0
+; SVE_128-NEXT:    add w0, w9, w8
 ; SVE_128-NEXT:    ret
 ;
 ; SVE_MIN_256-LABEL: reduce_sadd_v16i8:
 ; SVE_MIN_256:       // %bb.0:
-; SVE_MIN_256-NEXT:    // kill: def $q1 killed $q1 def $z1
+; SVE_MIN_256-NEXT:    ptrue p0.b, vl16
 ; SVE_MIN_256-NEXT:    // kill: def $q0 killed $q0 def $z0
-; SVE_MIN_256-NEXT:    sunpklo z2.h, z1.b
-; SVE_MIN_256-NEXT:    sunpklo z3.h, z0.b
-; SVE_MIN_256-NEXT:    ptrue p0.s, vl8
-; SVE_MIN_256-NEXT:    ext z1.b, z1.b, z1.b, #8
-; SVE_MIN_256-NEXT:    ext z0.b, z0.b, z0.b, #8
-; SVE_MIN_256-NEXT:    sunpklo z1.h, z1.b
-; SVE_MIN_256-NEXT:    sunpklo z0.h, z0.b
-; SVE_MIN_256-NEXT:    add z2.h, z3.h, z2.h
-; SVE_MIN_256-NEXT:    add z0.h, z0.h, z1.h
-; SVE_MIN_256-NEXT:    sunpklo z1.s, z2.h
-; SVE_MIN_256-NEXT:    sunpklo z0.s, z0.h
-; SVE_MIN_256-NEXT:    add z0.s, z1.s, z0.s
-; SVE_MIN_256-NEXT:    uaddv d0, p0, z0.s
+; SVE_MIN_256-NEXT:    // kill: def $q1 killed $q1 def $z1
+; SVE_MIN_256-NEXT:    splice z0.b, p0, z0.b, z1.b
+; SVE_MIN_256-NEXT:    ptrue p0.b, vl32
+; SVE_MIN_256-NEXT:    saddv d0, p0, z0.b
 ; SVE_MIN_256-NEXT:    fmov x0, d0
 ; SVE_MIN_256-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; SVE_MIN_256-NEXT:    ret

From 5731cf11cf44789601d98fc3900956afc8306a94 Mon Sep 17 00:00:00 2001
From: Dinar Temirbulatov <Dinar.Temirbulatov at arm.com>
Date: Thu, 11 Jul 2024 13:47:44 +0000
Subject: [PATCH 4/5] Addressed review comments.
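
This revision replaces the two ping-pong SmallVectors with a single
std::deque worklist: vectors whose type is still illegal are popped from
the front and their split halves appended to the back, one generation at
a time, until the type at the front of the queue is legal.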

---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 57 ++++++++-----------
 1 file changed, 25 insertions(+), 32 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index df1f4043f284f..d078c32b4f9be 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -95,6 +95,7 @@
 #include <cctype>
 #include <cstdint>
 #include <cstdlib>
+#include <deque>
 #include <iterator>
 #include <limits>
 #include <optional>
@@ -17503,6 +17504,8 @@ static SDValue performVecReduceAddCombineWithUADDLP(SDNode *N,
   return DAG.getNode(ISD::VECREDUCE_ADD, DL, MVT::i32, UADDLP);
 }
 
+// Turn vecreduce_add([sign|zero]_extend(x)) into SVE's SADDV|UADDV
+// instructions.
 static SDValue
 performVecReduceAddExtCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
                               const AArch64TargetLowering &TLI) {
@@ -17513,44 +17516,35 @@ performVecReduceAddExtCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
 
   SelectionDAG &DAG = DCI.DAG;
   auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
-  SDNode *ZEXT = N->getOperand(0).getNode();
-  EVT VecVT = ZEXT->getOperand(0).getValueType();
+  SDValue VecOp = N->getOperand(0).getOperand(0);
   SDLoc DL(N);
 
-  SDValue VecOp = ZEXT->getOperand(0);
-  VecVT = VecOp.getValueType();
-  bool IsScalableType = VecVT.isScalableVector();
-  SmallVector<SDValue, 2> ResultValues;
+  bool IsScalableType = VecOp.getValueType().isScalableVector();
+  std::deque<SDValue> ResultValues;
+  ResultValues.push_back(VecOp);
 
-  if (!TLI.isTypeLegal(VecVT)) {
-    SmallVector<SDValue, 2> PrevValues;
-    PrevValues.push_back(VecOp);
+  // Split the input vectors if not legal.
+  while (!TLI.isTypeLegal(ResultValues.front().getValueType())) {
+    if (!ResultValues.front()
+             .getValueType()
+             .getVectorElementCount()
+             .isKnownEven())
+      return SDValue();
+    EVT CurVT = ResultValues.front().getValueType();
     while (true) {
-
-      if (!VecVT.isScalableVector() &&
-          !PrevValues[0].getValueType().getVectorElementCount().isKnownEven())
-        return SDValue();
-
-      for (SDValue Vec : PrevValues) {
-        SDValue Lo, Hi;
-        std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
-        ResultValues.push_back(Lo);
-        ResultValues.push_back(Hi);
-      }
-      if (TLI.isTypeLegal(ResultValues[0].getValueType()))
+      SDValue Vec = ResultValues.front();
+      if (Vec.getValueType() != CurVT)
         break;
-      PrevValues.clear();
-      std::copy(ResultValues.begin(), ResultValues.end(),
-                std::back_inserter(PrevValues));
-      ResultValues.clear();
+      ResultValues.pop_front();
+      SDValue Lo, Hi;
+      std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
+      ResultValues.push_back(Lo);
+      ResultValues.push_back(Hi);
     }
-  } else {
-    ResultValues.push_back(VecOp);
   }
-  SDNode *VecRed = N;
-  EVT ElemType = VecRed->getValueType(0);
-  SmallVector<SDValue, 2> Results;
 
+  EVT ElemType = N->getValueType(0);
+  SmallVector<SDValue, 2> Results;
   if (!IsScalableType &&
       !TLI.useSVEForFixedLengthVectorVT(
           ResultValues[0].getValueType(),
@@ -17558,8 +17552,7 @@ performVecReduceAddExtCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
               ResultValues[0].getValueType())))
     return SDValue();
 
-  for (unsigned Num = 0; Num < ResultValues.size(); ++Num) {
-    SDValue Reg = ResultValues[Num];
+  for (SDValue Reg : ResultValues) {
     EVT RdxVT = Reg->getValueType(0);
     SDValue Pg = getPredicateForVector(DAG, DL, RdxVT);
     if (!IsScalableType) {

From 65d09007d1cbfdc8b93f579173ec891fc161fbef Mon Sep 17 00:00:00 2001
From: Dinar Temirbulatov <Dinar.Temirbulatov at arm.com>
Date: Wed, 17 Jul 2024 13:17:50 +0000
Subject: [PATCH 5/5] Changes provided by Sander de Smalen
 <sander.desmalen at arm.com>.
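
This revision rewrites the combine to split illegal types by emitting
smaller VECREDUCE_ADD nodes that are recombined with scalar adds, rather
than maintaining an explicit worklist. The intended transform, as the
new comment in the diff describes it:

  i32 (vecreduce_add (zext nxv32i8 %op to nxv32i32))
  ->
  i32 (add
    (i32 vecreduce_add (zext nxv16i8 %op.lo to nxv16i32)),
    (i32 vecreduce_add (zext nxv16i8 %op.hi to nxv16i32)))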

---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 94 +++++++++----------
 llvm/test/CodeGen/AArch64/double_reduct.ll    |  9 +-
 llvm/test/CodeGen/AArch64/sve-int-reduce.ll   | 32 +++----
 ...-streaming-mode-fixed-length-reductions.ll | 24 ++---
 llvm/test/CodeGen/AArch64/vecreduce-add.ll    | 52 +++++-----
 5 files changed, 100 insertions(+), 111 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index d078c32b4f9be..1e2fc81b5648d 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -17517,62 +17517,56 @@ performVecReduceAddExtCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
   SelectionDAG &DAG = DCI.DAG;
   auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
   SDValue VecOp = N->getOperand(0).getOperand(0);
+  EVT VecOpVT = VecOp.getValueType();
   SDLoc DL(N);
 
-  bool IsScalableType = VecOp.getValueType().isScalableVector();
-  std::deque<SDValue> ResultValues;
-  ResultValues.push_back(VecOp);
-
-  // Split the input vectors if not legal.
-  while (!TLI.isTypeLegal(ResultValues.front().getValueType())) {
-    if (!ResultValues.front()
-             .getValueType()
-             .getVectorElementCount()
-             .isKnownEven())
-      return SDValue();
-    EVT CurVT = ResultValues.front().getValueType();
-    while (true) {
-      SDValue Vec = ResultValues.front();
-      if (Vec.getValueType() != CurVT)
-        break;
-      ResultValues.pop_front();
-      SDValue Lo, Hi;
-      std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
-      ResultValues.push_back(Lo);
-      ResultValues.push_back(Hi);
-    }
+  // Split the input vectors if not legal, e.g.
+  // i32 (vecreduce_add (zext nxv32i8 %op to nxv32i32))
+  // ->
+  // i32 (add
+  //   (i32 vecreduce_add (zext nxv16i8 %op.lo to nxv16i32)),
+  //   (i32 vecreduce_add (zext nxv16i8 %op.hi to nxv16i32)))
+  if (TLI.getTypeAction(*DAG.getContext(), VecOpVT) ==
+      TargetLowering::TypeSplitVector) {
+    SDValue Lo, Hi;
+    std::tie(Lo, Hi) = DAG.SplitVector(VecOp, DL);
+    unsigned ExtOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
+    EVT HalfVT = N->getOperand(0).getValueType().getHalfNumVectorElementsVT(
+        *DAG.getContext());
+    Lo = DAG.getNode(ISD::VECREDUCE_ADD, DL, N->getValueType(0),
+                     DAG.getNode(ExtOpc, DL, HalfVT, Lo));
+    Hi = DAG.getNode(ISD::VECREDUCE_ADD, DL, N->getValueType(0),
+                     DAG.getNode(ExtOpc, DL, HalfVT, Hi));
+    return DAG.getNode(ISD::ADD, DL, N->getValueType(0), Lo, Hi);
   }
 
-  EVT ElemType = N->getValueType(0);
-  SmallVector<SDValue, 2> Results;
-  if (!IsScalableType &&
-      !TLI.useSVEForFixedLengthVectorVT(
-          ResultValues[0].getValueType(),
-          /*OverrideNEON=*/Subtarget.useSVEForFixedLengthVectors(
-              ResultValues[0].getValueType())))
+  if (!TLI.isTypeLegal(VecOpVT))
     return SDValue();
 
-  for (SDValue Reg : ResultValues) {
-    EVT RdxVT = Reg->getValueType(0);
-    SDValue Pg = getPredicateForVector(DAG, DL, RdxVT);
-    if (!IsScalableType) {
-      EVT ContainerVT = getContainerForFixedLengthVector(DAG, RdxVT);
-      Reg = convertToScalableVector(DAG, ContainerVT, Reg);
-    }
-    SDValue Res =
-        DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i64,
-                    DAG.getConstant(IsSigned ? Intrinsic::aarch64_sve_saddv
-                                             : Intrinsic::aarch64_sve_uaddv,
-                                    DL, MVT::i64),
-                    Pg, Reg);
-    if (ElemType != MVT::i64)
-      Res = DAG.getAnyExtOrTrunc(Res, DL, ElemType);
-    Results.push_back(Res);
-  }
-  SDValue ToAdd = Results[0];
-  for (unsigned I = 1; I < ResultValues.size(); ++I)
-    ToAdd = DAG.getNode(ISD::ADD, DL, ElemType, ToAdd, Results[I]);
-  return ToAdd;
+  if (VecOpVT.isFixedLengthVector() &&
+      !TLI.useSVEForFixedLengthVectorVT(VecOpVT, !Subtarget.isNeonAvailable()))
+    return SDValue();
+
+  // The input type is legal so map VECREDUCE_ADD to UADDV/SADDV, e.g.
+  // i32 (vecreduce_add (zext nxv16i8 %op to nxv16i32))
+  // ->
+  // i32 (UADDV nxv16i8:%op)
+  EVT ElemType = N->getValueType(0);
+  SDValue Pg = getPredicateForVector(DAG, DL, VecOpVT);
+  if (VecOpVT.isFixedLengthVector()) {
+    EVT ContainerVT = getContainerForFixedLengthVector(DAG, VecOpVT);
+    VecOp = convertToScalableVector(DAG, ContainerVT, VecOp);
+  }
+  SDValue Res =
+      DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i64,
+                  DAG.getConstant(IsSigned ? Intrinsic::aarch64_sve_saddv
+                                           : Intrinsic::aarch64_sve_uaddv,
+                                  DL, MVT::i64),
+                  Pg, VecOp);
+  if (ElemType != MVT::i64)
+    Res = DAG.getAnyExtOrTrunc(Res, DL, ElemType);
+
+  return Res;
 }
 
 // Turn a v8i8/v16i8 extended vecreduce into a udot/sdot and vecreduce
diff --git a/llvm/test/CodeGen/AArch64/double_reduct.ll b/llvm/test/CodeGen/AArch64/double_reduct.ll
index b10114bc0ffa7..cf5e15da0b173 100644
--- a/llvm/test/CodeGen/AArch64/double_reduct.ll
+++ b/llvm/test/CodeGen/AArch64/double_reduct.ll
@@ -145,11 +145,10 @@ define i16 @add_ext_i16(<16 x i8> %a, <16 x i8> %b) {
 define i16 @add_ext_v32i16(<32 x i8> %a, <16 x i8> %b) {
 ; CHECK-LABEL: add_ext_v32i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    uaddl2 v3.8h, v0.16b, v1.16b
-; CHECK-NEXT:    uaddl v0.8h, v0.8b, v1.8b
-; CHECK-NEXT:    add v0.8h, v0.8h, v3.8h
-; CHECK-NEXT:    uadalp v0.8h, v2.16b
-; CHECK-NEXT:    addv h0, v0.8h
+; CHECK-NEXT:    uaddlp v1.8h, v1.16b
+; CHECK-NEXT:    uadalp v1.8h, v0.16b
+; CHECK-NEXT:    uadalp v1.8h, v2.16b
+; CHECK-NEXT:    addv h0, v1.8h
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
   %ae = zext <32 x i8> %a to <32 x i16>
diff --git a/llvm/test/CodeGen/AArch64/sve-int-reduce.ll b/llvm/test/CodeGen/AArch64/sve-int-reduce.ll
index 7a7a0d353ecff..c8dd719aa03c6 100644
--- a/llvm/test/CodeGen/AArch64/sve-int-reduce.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-reduce.ll
@@ -220,17 +220,17 @@ define i32 @uaddv_nxv16i16_nxv16i32(<vscale x 32 x i16> %a) {
 ; CHECK-LABEL: uaddv_nxv16i16_nxv16i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    uaddv d3, p0, z3.h
+; CHECK-NEXT:    uaddv d2, p0, z2.h
 ; CHECK-NEXT:    uaddv d1, p0, z1.h
 ; CHECK-NEXT:    uaddv d0, p0, z0.h
-; CHECK-NEXT:    uaddv d2, p0, z2.h
-; CHECK-NEXT:    uaddv d3, p0, z3.h
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    fmov w9, s0
-; CHECK-NEXT:    add w8, w9, w8
+; CHECK-NEXT:    fmov w8, s3
 ; CHECK-NEXT:    fmov w9, s2
-; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    fmov w9, s3
-; CHECK-NEXT:    add w0, w8, w9
+; CHECK-NEXT:    fmov w10, s1
+; CHECK-NEXT:    fmov w11, s0
+; CHECK-NEXT:    add w8, w9, w8
+; CHECK-NEXT:    add w9, w11, w10
+; CHECK-NEXT:    add w0, w9, w8
 ; CHECK-NEXT:    ret
   %1 = zext <vscale x 32 x i16> %a to <vscale x 32 x i32>
   %2 = call i32 @llvm.vector.reduce.add.nxv32i32(<vscale x 32 x i32> %1)
@@ -254,21 +254,21 @@ define i32 @uaddv_nxv32i16_nxv32i32(ptr %a) {
 ; CHECK-LABEL: uaddv_nxv32i16_nxv32i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, #1, mul vl]
-; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0]
-; CHECK-NEXT:    ld1h { z2.h }, p0/z, [x0, #2, mul vl]
-; CHECK-NEXT:    ld1h { z3.h }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT:    ld1h { z2.h }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT:    ld1h { z3.h }, p0/z, [x0]
 ; CHECK-NEXT:    uaddv d0, p0, z0.h
 ; CHECK-NEXT:    uaddv d1, p0, z1.h
 ; CHECK-NEXT:    uaddv d2, p0, z2.h
 ; CHECK-NEXT:    uaddv d3, p0, z3.h
 ; CHECK-NEXT:    fmov w8, s0
 ; CHECK-NEXT:    fmov w9, s1
+; CHECK-NEXT:    fmov w10, s2
+; CHECK-NEXT:    fmov w11, s3
 ; CHECK-NEXT:    add w8, w9, w8
-; CHECK-NEXT:    fmov w9, s2
-; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    fmov w9, s3
-; CHECK-NEXT:    add w0, w8, w9
+; CHECK-NEXT:    add w9, w11, w10
+; CHECK-NEXT:    add w0, w9, w8
 ; CHECK-NEXT:    ret
   %1 = load <vscale x 32 x i16>, ptr %a, align 16
   %2 = zext <vscale x 32 x i16> %1 to <vscale x 32 x i32>
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reductions.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reductions.ll
index 9d4355c152c65..608b3bdeac75a 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reductions.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reductions.ll
@@ -9,14 +9,14 @@ define i32 @reduce_uadd_v16i8(<32 x i8> %a) #0 {
 ; NO_STREAMING-LABEL: reduce_uadd_v16i8:
 ; NO_STREAMING:       // %bb.0:
 ; NO_STREAMING-NEXT:    ushll2 v2.8h, v1.16b, #0
-; NO_STREAMING-NEXT:    ushll2 v3.8h, v0.16b, #0
 ; NO_STREAMING-NEXT:    ushll v1.8h, v1.8b, #0
+; NO_STREAMING-NEXT:    ushll2 v3.8h, v0.16b, #0
 ; NO_STREAMING-NEXT:    ushll v0.8h, v0.8b, #0
-; NO_STREAMING-NEXT:    uaddl2 v4.4s, v3.8h, v2.8h
-; NO_STREAMING-NEXT:    uaddl v2.4s, v3.4h, v2.4h
-; NO_STREAMING-NEXT:    uaddl2 v5.4s, v0.8h, v1.8h
-; NO_STREAMING-NEXT:    uaddl v0.4s, v0.4h, v1.4h
-; NO_STREAMING-NEXT:    add v1.4s, v5.4s, v4.4s
+; NO_STREAMING-NEXT:    uaddl2 v4.4s, v1.8h, v2.8h
+; NO_STREAMING-NEXT:    uaddl v1.4s, v1.4h, v2.4h
+; NO_STREAMING-NEXT:    uaddl2 v2.4s, v0.8h, v3.8h
+; NO_STREAMING-NEXT:    uaddl v0.4s, v0.4h, v3.4h
+; NO_STREAMING-NEXT:    add v1.4s, v1.4s, v4.4s
 ; NO_STREAMING-NEXT:    add v0.4s, v0.4s, v2.4s
 ; NO_STREAMING-NEXT:    add v0.4s, v0.4s, v1.4s
 ; NO_STREAMING-NEXT:    addv s0, v0.4s
@@ -55,14 +55,14 @@ define i32 @reduce_sadd_v16i8(<32 x i8> %a) #0 {
 ; NO_STREAMING-LABEL: reduce_sadd_v16i8:
 ; NO_STREAMING:       // %bb.0:
 ; NO_STREAMING-NEXT:    sshll2 v2.8h, v1.16b, #0
-; NO_STREAMING-NEXT:    sshll2 v3.8h, v0.16b, #0
 ; NO_STREAMING-NEXT:    sshll v1.8h, v1.8b, #0
+; NO_STREAMING-NEXT:    sshll2 v3.8h, v0.16b, #0
 ; NO_STREAMING-NEXT:    sshll v0.8h, v0.8b, #0
-; NO_STREAMING-NEXT:    saddl2 v4.4s, v3.8h, v2.8h
-; NO_STREAMING-NEXT:    saddl v2.4s, v3.4h, v2.4h
-; NO_STREAMING-NEXT:    saddl2 v5.4s, v0.8h, v1.8h
-; NO_STREAMING-NEXT:    saddl v0.4s, v0.4h, v1.4h
-; NO_STREAMING-NEXT:    add v1.4s, v5.4s, v4.4s
+; NO_STREAMING-NEXT:    saddl2 v4.4s, v1.8h, v2.8h
+; NO_STREAMING-NEXT:    saddl v1.4s, v1.4h, v2.4h
+; NO_STREAMING-NEXT:    saddl2 v2.4s, v0.8h, v3.8h
+; NO_STREAMING-NEXT:    saddl v0.4s, v0.4h, v3.4h
+; NO_STREAMING-NEXT:    add v1.4s, v1.4s, v4.4s
 ; NO_STREAMING-NEXT:    add v0.4s, v0.4s, v2.4s
 ; NO_STREAMING-NEXT:    add v0.4s, v0.4s, v1.4s
 ; NO_STREAMING-NEXT:    addv s0, v0.4s
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-add.ll b/llvm/test/CodeGen/AArch64/vecreduce-add.ll
index c81fd26a77525..27e786eb1ced1 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-add.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-add.ll
@@ -1968,10 +1968,9 @@ define i32 @test_udot_v16i8(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-SD-BASE:       // %bb.0: // %entry
 ; CHECK-SD-BASE-NEXT:    umull2 v2.8h, v1.16b, v0.16b
 ; CHECK-SD-BASE-NEXT:    umull v0.8h, v1.8b, v0.8b
-; CHECK-SD-BASE-NEXT:    uaddl2 v1.4s, v0.8h, v2.8h
-; CHECK-SD-BASE-NEXT:    uaddl v0.4s, v0.4h, v2.4h
-; CHECK-SD-BASE-NEXT:    add v0.4s, v0.4s, v1.4s
-; CHECK-SD-BASE-NEXT:    addv s0, v0.4s
+; CHECK-SD-BASE-NEXT:    uaddlp v1.4s, v2.8h
+; CHECK-SD-BASE-NEXT:    uadalp v1.4s, v0.8h
+; CHECK-SD-BASE-NEXT:    addv s0, v1.4s
 ; CHECK-SD-BASE-NEXT:    fmov w0, s0
 ; CHECK-SD-BASE-NEXT:    ret
 ;
@@ -2296,10 +2295,9 @@ define i32 @test_sdot_v16i8(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-SD-BASE:       // %bb.0: // %entry
 ; CHECK-SD-BASE-NEXT:    smull2 v2.8h, v1.16b, v0.16b
 ; CHECK-SD-BASE-NEXT:    smull v0.8h, v1.8b, v0.8b
-; CHECK-SD-BASE-NEXT:    saddl2 v1.4s, v0.8h, v2.8h
-; CHECK-SD-BASE-NEXT:    saddl v0.4s, v0.4h, v2.4h
-; CHECK-SD-BASE-NEXT:    add v0.4s, v0.4s, v1.4s
-; CHECK-SD-BASE-NEXT:    addv s0, v0.4s
+; CHECK-SD-BASE-NEXT:    saddlp v1.4s, v2.8h
+; CHECK-SD-BASE-NEXT:    sadalp v1.4s, v0.8h
+; CHECK-SD-BASE-NEXT:    addv s0, v1.4s
 ; CHECK-SD-BASE-NEXT:    fmov w0, s0
 ; CHECK-SD-BASE-NEXT:    ret
 ;
@@ -3868,10 +3866,9 @@ entry:
 define i16 @add_v32i8_v32i16_zext(<32 x i8> %x) {
 ; CHECK-SD-LABEL: add_v32i8_v32i16_zext:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    uaddl2 v2.8h, v0.16b, v1.16b
-; CHECK-SD-NEXT:    uaddl v0.8h, v0.8b, v1.8b
-; CHECK-SD-NEXT:    add v0.8h, v0.8h, v2.8h
-; CHECK-SD-NEXT:    addv h0, v0.8h
+; CHECK-SD-NEXT:    uaddlp v1.8h, v1.16b
+; CHECK-SD-NEXT:    uadalp v1.8h, v0.16b
+; CHECK-SD-NEXT:    addv h0, v1.8h
 ; CHECK-SD-NEXT:    fmov w0, s0
 ; CHECK-SD-NEXT:    ret
 ;
@@ -3994,10 +3991,9 @@ entry:
 define i16 @add_v32i8_v32i16_sext(<32 x i8> %x) {
 ; CHECK-SD-LABEL: add_v32i8_v32i16_sext:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    saddl2 v2.8h, v0.16b, v1.16b
-; CHECK-SD-NEXT:    saddl v0.8h, v0.8b, v1.8b
-; CHECK-SD-NEXT:    add v0.8h, v0.8h, v2.8h
-; CHECK-SD-NEXT:    addv h0, v0.8h
+; CHECK-SD-NEXT:    saddlp v1.8h, v1.16b
+; CHECK-SD-NEXT:    sadalp v1.8h, v0.16b
+; CHECK-SD-NEXT:    addv h0, v1.8h
 ; CHECK-SD-NEXT:    fmov w0, s0
 ; CHECK-SD-NEXT:    ret
 ;
@@ -4238,14 +4234,14 @@ define i32 @add_v32i8_v32i32_zext(<32 x i8> %x) {
 ; CHECK-SD-BASE-LABEL: add_v32i8_v32i32_zext:
 ; CHECK-SD-BASE:       // %bb.0: // %entry
 ; CHECK-SD-BASE-NEXT:    ushll2 v2.8h, v1.16b, #0
-; CHECK-SD-BASE-NEXT:    ushll2 v3.8h, v0.16b, #0
 ; CHECK-SD-BASE-NEXT:    ushll v1.8h, v1.8b, #0
+; CHECK-SD-BASE-NEXT:    ushll2 v3.8h, v0.16b, #0
 ; CHECK-SD-BASE-NEXT:    ushll v0.8h, v0.8b, #0
-; CHECK-SD-BASE-NEXT:    uaddl2 v4.4s, v3.8h, v2.8h
-; CHECK-SD-BASE-NEXT:    uaddl v2.4s, v3.4h, v2.4h
-; CHECK-SD-BASE-NEXT:    uaddl2 v5.4s, v0.8h, v1.8h
-; CHECK-SD-BASE-NEXT:    uaddl v0.4s, v0.4h, v1.4h
-; CHECK-SD-BASE-NEXT:    add v1.4s, v5.4s, v4.4s
+; CHECK-SD-BASE-NEXT:    uaddl2 v4.4s, v1.8h, v2.8h
+; CHECK-SD-BASE-NEXT:    uaddl v1.4s, v1.4h, v2.4h
+; CHECK-SD-BASE-NEXT:    uaddl2 v2.4s, v0.8h, v3.8h
+; CHECK-SD-BASE-NEXT:    uaddl v0.4s, v0.4h, v3.4h
+; CHECK-SD-BASE-NEXT:    add v1.4s, v1.4s, v4.4s
 ; CHECK-SD-BASE-NEXT:    add v0.4s, v0.4s, v2.4s
 ; CHECK-SD-BASE-NEXT:    add v0.4s, v0.4s, v1.4s
 ; CHECK-SD-BASE-NEXT:    addv s0, v0.4s
@@ -4511,14 +4507,14 @@ define i32 @add_v32i8_v32i32_sext(<32 x i8> %x) {
 ; CHECK-SD-BASE-LABEL: add_v32i8_v32i32_sext:
 ; CHECK-SD-BASE:       // %bb.0: // %entry
 ; CHECK-SD-BASE-NEXT:    sshll2 v2.8h, v1.16b, #0
-; CHECK-SD-BASE-NEXT:    sshll2 v3.8h, v0.16b, #0
 ; CHECK-SD-BASE-NEXT:    sshll v1.8h, v1.8b, #0
+; CHECK-SD-BASE-NEXT:    sshll2 v3.8h, v0.16b, #0
 ; CHECK-SD-BASE-NEXT:    sshll v0.8h, v0.8b, #0
-; CHECK-SD-BASE-NEXT:    saddl2 v4.4s, v3.8h, v2.8h
-; CHECK-SD-BASE-NEXT:    saddl v2.4s, v3.4h, v2.4h
-; CHECK-SD-BASE-NEXT:    saddl2 v5.4s, v0.8h, v1.8h
-; CHECK-SD-BASE-NEXT:    saddl v0.4s, v0.4h, v1.4h
-; CHECK-SD-BASE-NEXT:    add v1.4s, v5.4s, v4.4s
+; CHECK-SD-BASE-NEXT:    saddl2 v4.4s, v1.8h, v2.8h
+; CHECK-SD-BASE-NEXT:    saddl v1.4s, v1.4h, v2.4h
+; CHECK-SD-BASE-NEXT:    saddl2 v2.4s, v0.8h, v3.8h
+; CHECK-SD-BASE-NEXT:    saddl v0.4s, v0.4h, v3.4h
+; CHECK-SD-BASE-NEXT:    add v1.4s, v1.4s, v4.4s
 ; CHECK-SD-BASE-NEXT:    add v0.4s, v0.4s, v2.4s
 ; CHECK-SD-BASE-NEXT:    add v0.4s, v0.4s, v1.4s
 ; CHECK-SD-BASE-NEXT:    addv s0, v0.4s


