[llvm] [AArch64] Convert concat(uhadd(a,b), uhadd(c,d)) to uhadd(concat(a,c), concat(b,d)) (PR #80674)

Rin Dobrescu via llvm-commits llvm-commits at lists.llvm.org
Mon Feb 5 07:20:34 PST 2024


https://github.com/Rin18 updated https://github.com/llvm/llvm-project/pull/80674

>From 674ef3de6c619baa1a3a2d1a923fbd710cceb8c5 Mon Sep 17 00:00:00 2001
From: Rin Dobrescu <rin.dobrescu at arm.com>
Date: Thu, 25 Jan 2024 12:08:20 +0000
Subject: [PATCH 1/9] Precommit test and fix wrong input in another test.

---
 llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll  |  40 ++++--
 .../AArch64/concat-vector-add-combine.ll      | 132 ++++++++++++++++++
 2 files changed, 159 insertions(+), 13 deletions(-)
 create mode 100644 llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll

diff --git a/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll b/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll
index 24cce9a2b26b5..5b34bbb0120be 100644
--- a/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll
@@ -58,18 +58,32 @@ define <16 x i8> @lower_trunc_16xi8(i16 %a, i16 %b, i16 %c, i16 %d, i16 %e, i16
 ; CHECK-LABEL: lower_trunc_16xi8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov s0, w0
-; CHECK-NEXT:    add x8, sp, #56
-; CHECK-NEXT:    ld1r { v1.8h }, [x8]
+; CHECK-NEXT:    ldr h1, [sp]
+; CHECK-NEXT:    add x8, sp, #8
+; CHECK-NEXT:    ld1 { v1.h }[1], [x8]
+; CHECK-NEXT:    add x8, sp, #16
 ; CHECK-NEXT:    mov v0.h[1], w1
-; CHECK-NEXT:    add v3.8h, v1.8h, v1.8h
+; CHECK-NEXT:    ld1 { v1.h }[2], [x8]
+; CHECK-NEXT:    add x8, sp, #24
 ; CHECK-NEXT:    mov v0.h[2], w2
+; CHECK-NEXT:    ld1 { v1.h }[3], [x8]
+; CHECK-NEXT:    add x8, sp, #32
 ; CHECK-NEXT:    mov v0.h[3], w3
+; CHECK-NEXT:    ld1 { v1.h }[4], [x8]
+; CHECK-NEXT:    add x8, sp, #40
+; CHECK-NEXT:    ld1 { v1.h }[5], [x8]
+; CHECK-NEXT:    add x8, sp, #48
 ; CHECK-NEXT:    mov v0.h[4], w4
+; CHECK-NEXT:    ld1 { v1.h }[6], [x8]
+; CHECK-NEXT:    add x8, sp, #56
 ; CHECK-NEXT:    mov v0.h[5], w5
+; CHECK-NEXT:    ld1 { v1.h }[7], [x8]
 ; CHECK-NEXT:    mov v0.h[6], w6
-; CHECK-NEXT:    add v2.8h, v0.8h, v0.8h
+; CHECK-NEXT:    add v2.8h, v1.8h, v1.8h
+; CHECK-NEXT:    mov v0.h[7], w7
+; CHECK-NEXT:    add v3.8h, v0.8h, v0.8h
 ; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
-; CHECK-NEXT:    uzp1 v1.16b, v2.16b, v3.16b
+; CHECK-NEXT:    uzp1 v1.16b, v3.16b, v2.16b
 ; CHECK-NEXT:    eor v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %a1 = insertelement <16 x i16> poison, i16 %a, i16 0
@@ -80,14 +94,14 @@ define <16 x i8> @lower_trunc_16xi8(i16 %a, i16 %b, i16 %c, i16 %d, i16 %e, i16
   %f1 = insertelement <16 x i16> %e1, i16 %f, i16 5
   %g1 = insertelement <16 x i16> %f1, i16 %g, i16 6
   %h1 = insertelement <16 x i16> %g1, i16 %h, i16 7
-  %i1 = insertelement <16 x i16> %f1, i16 %i, i16 8
-  %j1 = insertelement <16 x i16> %g1, i16 %j, i16 9
-  %k1 = insertelement <16 x i16> %f1, i16 %k, i16 10
-  %l1 = insertelement <16 x i16> %g1, i16 %l, i16 11
-  %m1 = insertelement <16 x i16> %f1, i16 %m, i16 12
-  %n1 = insertelement <16 x i16> %g1, i16 %n, i16 13
-  %o1 = insertelement <16 x i16> %f1, i16 %o, i16 14
-  %p1 = insertelement <16 x i16> %g1, i16 %p, i16 15
+  %i1 = insertelement <16 x i16> %h1, i16 %i, i16 8
+  %j1 = insertelement <16 x i16> %i1, i16 %j, i16 9
+  %k1 = insertelement <16 x i16> %j1, i16 %k, i16 10
+  %l1 = insertelement <16 x i16> %k1, i16 %l, i16 11
+  %m1 = insertelement <16 x i16> %l1, i16 %m, i16 12
+  %n1 = insertelement <16 x i16> %m1, i16 %n, i16 13
+  %o1 = insertelement <16 x i16> %n1, i16 %o, i16 14
+  %p1 = insertelement <16 x i16> %o1, i16 %p, i16 15
   %t = trunc <16 x i16> %p1 to <16 x i8>
   %s = add <16 x i16> %p1, %p1
   %t2 = trunc <16 x i16> %s to <16 x i8>
diff --git a/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll b/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll
new file mode 100644
index 0000000000000..8c660dbf6ce01
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll
@@ -0,0 +1,132 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=aarch64 < %s | FileCheck %s
+
+define i16 @combine_add_16xi16(i16 %a, i16 %b, i16 %c, i16 %d, i16 %e, i16 %f, i16 %g, i16 %h, i16 %i, i16 %j, i16 %k, i16 %l, i16 %m, i16 %n, i16 %o, i16 %p) {
+; CHECK-LABEL: combine_add_16xi16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov s0, w0
+; CHECK-NEXT:    ldr h1, [sp]
+; CHECK-NEXT:    add x8, sp, #8
+; CHECK-NEXT:    ld1 { v1.h }[1], [x8]
+; CHECK-NEXT:    add x8, sp, #16
+; CHECK-NEXT:    mov v0.h[1], w1
+; CHECK-NEXT:    ld1 { v1.h }[2], [x8]
+; CHECK-NEXT:    add x8, sp, #24
+; CHECK-NEXT:    mov v0.h[2], w2
+; CHECK-NEXT:    ld1 { v1.h }[3], [x8]
+; CHECK-NEXT:    add x8, sp, #32
+; CHECK-NEXT:    mov v0.h[3], w3
+; CHECK-NEXT:    ld1 { v1.h }[4], [x8]
+; CHECK-NEXT:    add x8, sp, #40
+; CHECK-NEXT:    ld1 { v1.h }[5], [x8]
+; CHECK-NEXT:    add x8, sp, #48
+; CHECK-NEXT:    mov v0.h[4], w4
+; CHECK-NEXT:    ld1 { v1.h }[6], [x8]
+; CHECK-NEXT:    add x8, sp, #56
+; CHECK-NEXT:    mov v0.h[5], w5
+; CHECK-NEXT:    ld1 { v1.h }[7], [x8]
+; CHECK-NEXT:    mov v0.h[6], w6
+; CHECK-NEXT:    xtn v3.8b, v1.8h
+; CHECK-NEXT:    shrn v1.8b, v1.8h, #8
+; CHECK-NEXT:    mov v0.h[7], w7
+; CHECK-NEXT:    uhadd v1.8b, v3.8b, v1.8b
+; CHECK-NEXT:    xtn v2.8b, v0.8h
+; CHECK-NEXT:    shrn v0.8b, v0.8h, #8
+; CHECK-NEXT:    uhadd v0.8b, v2.8b, v0.8b
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    uaddlv h0, v0.16b
+; CHECK-NEXT:    umov w0, v0.h[0]
+; CHECK-NEXT:    ret
+  %a1 = insertelement <16 x i16> poison, i16 %a, i16 0
+  %b1 = insertelement <16 x i16> %a1, i16 %b, i16 1
+  %c1 = insertelement <16 x i16> %b1, i16 %c, i16 2
+  %d1 = insertelement <16 x i16> %c1, i16 %d, i16 3
+  %e1 = insertelement <16 x i16> %d1, i16 %e, i16 4
+  %f1 = insertelement <16 x i16> %e1, i16 %f, i16 5
+  %g1 = insertelement <16 x i16> %f1, i16 %g, i16 6
+  %h1 = insertelement <16 x i16> %g1, i16 %h, i16 7
+  %i1 = insertelement <16 x i16> %h1, i16 %i, i16 8
+  %j1 = insertelement <16 x i16> %i1, i16 %j, i16 9
+  %k1 = insertelement <16 x i16> %j1, i16 %k, i16 10
+  %l1 = insertelement <16 x i16> %k1, i16 %l, i16 11
+  %m1 = insertelement <16 x i16> %l1, i16 %m, i16 12
+  %n1 = insertelement <16 x i16> %m1, i16 %n, i16 13
+  %o1 = insertelement <16 x i16> %n1, i16 %o, i16 14
+  %p1 = insertelement <16 x i16> %o1, i16 %p, i16 15
+  %x = and <16 x i16> %p1, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+  %sh1 = lshr <16 x i16> %p1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  %s = add nuw nsw <16 x i16> %x, %sh1
+  %sh2 = lshr <16 x i16> %s, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %res = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %sh2)
+  ret i16 %res
+}
+
+define i32 @combine_add_8xi32(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h) local_unnamed_addr #0 {
+; CHECK-LABEL: combine_add_8xi32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov s0, w4
+; CHECK-NEXT:    fmov s1, w0
+; CHECK-NEXT:    mov v0.s[1], w5
+; CHECK-NEXT:    mov v1.s[1], w1
+; CHECK-NEXT:    mov v0.s[2], w6
+; CHECK-NEXT:    mov v1.s[2], w2
+; CHECK-NEXT:    mov v0.s[3], w7
+; CHECK-NEXT:    mov v1.s[3], w3
+; CHECK-NEXT:    xtn v2.4h, v1.4s
+; CHECK-NEXT:    xtn v3.4h, v0.4s
+; CHECK-NEXT:    shrn v1.4h, v1.4s, #16
+; CHECK-NEXT:    shrn v0.4h, v0.4s, #16
+; CHECK-NEXT:    uhadd v1.4h, v2.4h, v1.4h
+; CHECK-NEXT:    uhadd v0.4h, v3.4h, v0.4h
+; CHECK-NEXT:    mov v1.d[1], v0.d[0]
+; CHECK-NEXT:    uaddlv s0, v1.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %a1 = insertelement <8 x i32> poison, i32 %a, i32 0
+  %b1 = insertelement <8 x i32> %a1, i32 %b, i32 1
+  %c1 = insertelement <8 x i32> %b1, i32 %c, i32 2
+  %d1 = insertelement <8 x i32> %c1, i32 %d, i32 3
+  %e1 = insertelement <8 x i32> %d1, i32 %e, i32 4
+  %f1 = insertelement <8 x i32> %e1, i32 %f, i32 5
+  %g1 = insertelement <8 x i32> %f1, i32 %g, i32 6
+  %h1 = insertelement <8 x i32> %g1, i32 %h, i32 7
+  %x = and <8 x i32> %h1, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
+  %sh1 = lshr <8 x i32> %h1, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+  %s = add nuw nsw <8 x i32> %x, %sh1
+  %sh2 = lshr <8 x i32> %s, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %res = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %sh2)
+  ret i32 %res
+}
+
+define i64 @combine_add_4xi64(i64 %a, i64 %b, i64 %c, i64 %d) local_unnamed_addr #0 {
+; CHECK-LABEL: combine_add_4xi64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x2
+; CHECK-NEXT:    fmov d1, x0
+; CHECK-NEXT:    mov v0.d[1], x3
+; CHECK-NEXT:    mov v1.d[1], x1
+; CHECK-NEXT:    xtn v2.2s, v1.2d
+; CHECK-NEXT:    xtn v3.2s, v0.2d
+; CHECK-NEXT:    shrn v1.2s, v1.2d, #32
+; CHECK-NEXT:    shrn v0.2s, v0.2d, #32
+; CHECK-NEXT:    uhadd v1.2s, v2.2s, v1.2s
+; CHECK-NEXT:    uhadd v0.2s, v3.2s, v0.2s
+; CHECK-NEXT:    mov v1.d[1], v0.d[0]
+; CHECK-NEXT:    uaddlv d0, v1.4s
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %a1 = insertelement <4 x i64> poison, i64 %a, i64 0
+  %b1 = insertelement <4 x i64> %a1, i64 %b, i64 1
+  %c1 = insertelement <4 x i64> %b1, i64 %c, i64 2
+  %d1 = insertelement <4 x i64> %c1, i64 %d, i64 3
+  %x = and <4 x i64> %d1, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+  %sh1 = lshr <4 x i64> %d1, <i64 32, i64 32, i64 32, i64 32>
+  %s = add nuw nsw <4 x i64> %x, %sh1
+  %sh2 = lshr <4 x i64> %s, <i64 1, i64 1, i64 1, i64 1>
+  %res = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %sh2)
+  ret i64 %res
+}
+
+declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>)
+declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
+declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>)
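
A note on the idiom the new tests exercise (not part of the patch): per lane, masking off the low half, shifting down the high half, adding with nuw/nsw and then shifting right by one is exactly an unsigned halving add, which is why the backend can select uhadd here. A minimal scalar C model of that equivalence, assuming i16 lanes split into bytes:

#include <assert.h>
#include <stdint.h>

/* Hypothetical per-lane model of the pattern in the tests above: splitting
   a u16 lane into its two bytes and computing ((lo + hi) >> 1) in a wider
   type is an unsigned halving add (ISD::AVGFLOORU / AArch64 uhadd), since
   the 9-bit sum cannot wrap. */
static uint8_t uhadd_u8(uint8_t a, uint8_t b) {
  return (uint8_t)(((uint16_t)a + (uint16_t)b) >> 1);
}

int main(void) {
  for (uint32_t x = 0; x <= 0xffff; ++x) {
    uint8_t lo = (uint8_t)(x & 0xff);
    uint8_t hi = (uint8_t)(x >> 8);
    assert(uhadd_u8(lo, hi) == (uint8_t)(((x & 0xff) + (x >> 8)) >> 1));
  }
  return 0;
}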

>From 8505b7565ef1358bfb8d2aad62d341bbc59cb66a Mon Sep 17 00:00:00 2001
From: Rin Dobrescu <rin.dobrescu at arm.com>
Date: Thu, 25 Jan 2024 12:27:22 +0000
Subject: [PATCH 2/9] [AArch64] Convert concat(uhadd(a,b), uhadd(c,d)) to
 uhadd(concat(a,c), concat(b,d))

---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 13 +++++
 llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll  | 53 -------------------
 .../AArch64/concat-vector-add-combine.ll      | 34 ++++--------
 3 files changed, 24 insertions(+), 76 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 2cb7770ee21e4..7b4a9d7e67299 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -18291,6 +18291,19 @@ static SDValue performConcatVectorsCombine(SDNode *N,
     return true;
   };
 
+  // concat(uhadd(a, b), uhadd(c, d)) -> uhadd(concat(a, c), concat(b, d))
+  if (N0Opc == ISD::AVGFLOORU && N1Opc == ISD::AVGFLOORU) {
+    SDValue N00 = N0->getOperand(0);
+    SDValue N01 = N0->getOperand(1);
+    SDValue N10 = N1->getOperand(0);
+    SDValue N11 = N1->getOperand(1);
+    EVT N00VT = N00.getValueType();
+    EVT PairVT = N00VT.getDoubleNumVectorElementsVT(*DAG.getContext());
+    SDValue Concat0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, PairVT, N00, N10);
+    SDValue Concat1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, PairVT, N01, N11);
+    return DAG.getNode(ISD::AVGFLOORU, dl, PairVT, Concat0, Concat1);
+  }
+
   // concat(rshrn(x), rshrn(y)) -> rshrn(concat(x, y))
   if (N->getNumOperands() == 2 && IsRSHRN(N0) &&
       ((IsRSHRN(N1) &&
diff --git a/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll b/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll
index 5b34bbb0120be..c4de177176e33 100644
--- a/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll
@@ -1,59 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -mtriple=aarch64 < %s | FileCheck %s
 
-define i32 @lower_lshr(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d, <4 x i32> %e, <4 x i32> %f, <4 x i32> %g, <4 x i32> %h) {
-; CHECK-LABEL: lower_lshr:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    addv s0, v0.4s
-; CHECK-NEXT:    addv s1, v1.4s
-; CHECK-NEXT:    addv s4, v4.4s
-; CHECK-NEXT:    addv s5, v5.4s
-; CHECK-NEXT:    addv s2, v2.4s
-; CHECK-NEXT:    addv s6, v6.4s
-; CHECK-NEXT:    mov v0.s[1], v1.s[0]
-; CHECK-NEXT:    addv s1, v3.4s
-; CHECK-NEXT:    addv s3, v7.4s
-; CHECK-NEXT:    mov v4.s[1], v5.s[0]
-; CHECK-NEXT:    mov v0.s[2], v2.s[0]
-; CHECK-NEXT:    mov v4.s[2], v6.s[0]
-; CHECK-NEXT:    mov v0.s[3], v1.s[0]
-; CHECK-NEXT:    mov v4.s[3], v3.s[0]
-; CHECK-NEXT:    xtn v1.4h, v0.4s
-; CHECK-NEXT:    shrn v0.4h, v0.4s, #16
-; CHECK-NEXT:    xtn v2.4h, v4.4s
-; CHECK-NEXT:    shrn v3.4h, v4.4s, #16
-; CHECK-NEXT:    uhadd v0.4h, v1.4h, v0.4h
-; CHECK-NEXT:    uhadd v1.4h, v2.4h, v3.4h
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
-; CHECK-NEXT:    uaddlv s0, v0.8h
-; CHECK-NEXT:    fmov w0, s0
-; CHECK-NEXT:    ret
-  %l87  = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a)
-  %l174 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %b)
-  %l257 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %c)
-  %l340 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %d)
-  %l427 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %e)
-  %l514 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %f)
-  %l597 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %g)
-  %l680 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %h)
-  %l681 = insertelement <8 x i32> poison, i32 %l87, i32 0
-  %l682 = insertelement <8 x i32> %l681, i32 %l174, i32 1
-  %l683 = insertelement <8 x i32> %l682, i32 %l257, i32 2
-  %l684 = insertelement <8 x i32> %l683, i32 %l340, i32 3
-  %l685 = insertelement <8 x i32> %l684, i32 %l427, i32 4
-  %l686 = insertelement <8 x i32> %l685, i32 %l514, i32 5
-  %l687 = insertelement <8 x i32> %l686, i32 %l597, i32 6
-  %l688 = insertelement <8 x i32> %l687, i32 %l680, i32 7
-  %l689 = and <8 x i32> %l688, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
-  %l690 = lshr <8 x i32> %l688, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
-  %l691 = add nuw nsw <8 x i32> %l689, %l690
-  %l692 = lshr <8 x i32> %l691, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
-  %l693 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %l692)
-  ret i32 %l693
-}
-declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
-declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
-
 define <16 x i8> @lower_trunc_16xi8(i16 %a, i16 %b, i16 %c, i16 %d, i16 %e, i16 %f, i16 %g, i16 %h, i16 %i, i16 %j, i16 %k, i16 %l, i16 %m, i16 %n, i16 %o, i16 %p) {
 ; CHECK-LABEL: lower_trunc_16xi8:
 ; CHECK:       // %bb.0:
diff --git a/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll b/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll
index 8c660dbf6ce01..58d610764f4a9 100644
--- a/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll
+++ b/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll
@@ -26,14 +26,10 @@ define i16 @combine_add_16xi16(i16 %a, i16 %b, i16 %c, i16 %d, i16 %e, i16 %f, i
 ; CHECK-NEXT:    mov v0.h[5], w5
 ; CHECK-NEXT:    ld1 { v1.h }[7], [x8]
 ; CHECK-NEXT:    mov v0.h[6], w6
-; CHECK-NEXT:    xtn v3.8b, v1.8h
-; CHECK-NEXT:    shrn v1.8b, v1.8h, #8
 ; CHECK-NEXT:    mov v0.h[7], w7
-; CHECK-NEXT:    uhadd v1.8b, v3.8b, v1.8b
-; CHECK-NEXT:    xtn v2.8b, v0.8h
-; CHECK-NEXT:    shrn v0.8b, v0.8h, #8
-; CHECK-NEXT:    uhadd v0.8b, v2.8b, v0.8b
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    uzp2 v2.16b, v0.16b, v1.16b
+; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    uhadd v0.16b, v0.16b, v2.16b
 ; CHECK-NEXT:    uaddlv h0, v0.16b
 ; CHECK-NEXT:    umov w0, v0.h[0]
 ; CHECK-NEXT:    ret
@@ -72,14 +68,10 @@ define i32 @combine_add_8xi32(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i3
 ; CHECK-NEXT:    mov v1.s[2], w2
 ; CHECK-NEXT:    mov v0.s[3], w7
 ; CHECK-NEXT:    mov v1.s[3], w3
-; CHECK-NEXT:    xtn v2.4h, v1.4s
-; CHECK-NEXT:    xtn v3.4h, v0.4s
-; CHECK-NEXT:    shrn v1.4h, v1.4s, #16
-; CHECK-NEXT:    shrn v0.4h, v0.4s, #16
-; CHECK-NEXT:    uhadd v1.4h, v2.4h, v1.4h
-; CHECK-NEXT:    uhadd v0.4h, v3.4h, v0.4h
-; CHECK-NEXT:    mov v1.d[1], v0.d[0]
-; CHECK-NEXT:    uaddlv s0, v1.8h
+; CHECK-NEXT:    uzp2 v2.8h, v1.8h, v0.8h
+; CHECK-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NEXT:    uhadd v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    uaddlv s0, v0.8h
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
   %a1 = insertelement <8 x i32> poison, i32 %a, i32 0
@@ -105,14 +97,10 @@ define i64 @combine_add_4xi64(i64 %a, i64 %b, i64 %c, i64 %d) local_unnamed_addr
 ; CHECK-NEXT:    fmov d1, x0
 ; CHECK-NEXT:    mov v0.d[1], x3
 ; CHECK-NEXT:    mov v1.d[1], x1
-; CHECK-NEXT:    xtn v2.2s, v1.2d
-; CHECK-NEXT:    xtn v3.2s, v0.2d
-; CHECK-NEXT:    shrn v1.2s, v1.2d, #32
-; CHECK-NEXT:    shrn v0.2s, v0.2d, #32
-; CHECK-NEXT:    uhadd v1.2s, v2.2s, v1.2s
-; CHECK-NEXT:    uhadd v0.2s, v3.2s, v0.2s
-; CHECK-NEXT:    mov v1.d[1], v0.d[0]
-; CHECK-NEXT:    uaddlv d0, v1.4s
+; CHECK-NEXT:    uzp2 v2.4s, v1.4s, v0.4s
+; CHECK-NEXT:    uzp1 v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    uhadd v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    uaddlv d0, v0.4s
 ; CHECK-NEXT:    fmov x0, d0
 ; CHECK-NEXT:    ret
   %a1 = insertelement <4 x i64> poison, i64 %a, i64 0
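
For reference, the rewrite is sound because a lanewise operation commutes with vector concatenation: halving-add two narrow pairs and concatenate the results, or concatenate first and do one wide halving add, and the lanes come out identical. A small C sketch of that equivalence (a model only, not the DAG combine itself; the lane counts and values are arbitrary):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Model of concat(uhadd(a,b), uhadd(c,d)) -> uhadd(concat(a,c), concat(b,d))
   on two 4-lane halves. */
enum { H = 4, W = 2 * H };

static void uhadd(const uint8_t *a, const uint8_t *b, uint8_t *out, int n) {
  for (int i = 0; i < n; ++i)
    out[i] = (uint8_t)(((uint16_t)a[i] + (uint16_t)b[i]) >> 1);
}

static void concat(const uint8_t *lo, const uint8_t *hi, uint8_t *out) {
  memcpy(out, lo, H);
  memcpy(out + H, hi, H);
}

int main(void) {
  uint8_t a[H] = {1, 255, 7, 0}, b[H] = {2, 255, 8, 1};
  uint8_t c[H] = {9, 128, 3, 250}, d[H] = {4, 127, 3, 251};

  /* Before: two narrow uhadds, then a concat. */
  uint8_t ab[H], cd[H], before[W];
  uhadd(a, b, ab, H);
  uhadd(c, d, cd, H);
  concat(ab, cd, before);

  /* After: two concats, then one wide uhadd. */
  uint8_t ac[W], bd[W], after[W];
  concat(a, c, ac);
  concat(b, d, bd);
  uhadd(ac, bd, after, W);

  assert(memcmp(before, after, W) == 0);
  return 0;
}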

>From 5770b0942cd6a634397f29b00a1bbbcdc75664f8 Mon Sep 17 00:00:00 2001
From: Rin Dobrescu <rin.dobrescu at arm.com>
Date: Fri, 26 Jan 2024 13:51:02 +0000
Subject: [PATCH 3/9] Address comments

---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 46 +++++++++----------
 1 file changed, 22 insertions(+), 24 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 7b4a9d7e67299..4d44b86cb0980 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -18218,17 +18218,7 @@ static SDValue performConcatVectorsCombine(SDNode *N,
   if (DCI.isBeforeLegalizeOps())
     return SDValue();
 
-  // Optimise concat_vectors of two [us]avgceils or [us]avgfloors that use
-  // extracted subvectors from the same original vectors. Combine these into a
-  // single avg that operates on the two original vectors.
-  // avgceil is the target independant name for rhadd, avgfloor is a hadd.
-  // Example:
-  //  (concat_vectors (v8i8 (avgceils (extract_subvector (v16i8 OpA, <0>),
-  //                                   extract_subvector (v16i8 OpB, <0>))),
-  //                  (v8i8 (avgceils (extract_subvector (v16i8 OpA, <8>),
-  //                                   extract_subvector (v16i8 OpB, <8>)))))
-  // ->
-  //  (v16i8(avgceils(v16i8 OpA, v16i8 OpB)))
+  // Optimise concat_vectors of two [us]avgceils or [us]avgfloors.
   if (N->getNumOperands() == 2 && N0Opc == N1Opc &&
       (N0Opc == ISD::AVGCEILU || N0Opc == ISD::AVGCEILS ||
        N0Opc == ISD::AVGFLOORU || N0Opc == ISD::AVGFLOORS)) {
@@ -18240,6 +18230,16 @@ static SDValue performConcatVectorsCombine(SDNode *N,
     EVT N00VT = N00.getValueType();
     EVT N10VT = N10.getValueType();
 
+    // For extracted subvectors from the same original vectors, combine these
+    // into a single avg that operates on the two original vectors. 
+    // avgceil is the target independent name for rhadd, avgfloor is a hadd. 
+    // Example:
+    //  (concat_vectors (v8i8 (avgceils (extract_subvector (v16i8 OpA, <0>),
+    //                                   extract_subvector (v16i8 OpB, <0>))),
+    //                  (v8i8 (avgceils (extract_subvector (v16i8 OpA, <8>),
+    //                                   extract_subvector (v16i8 OpB, <8>)))))
+    // ->
+    //  (v16i8(avgceils(v16i8 OpA, v16i8 OpB)))
     if (N00->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
         N01->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
         N10->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
@@ -18263,6 +18263,17 @@ static SDValue performConcatVectorsCombine(SDNode *N,
           return DAG.getNode(N0Opc, dl, VT, N00Source, N01Source);
       }
     }
+
+    // For a concat of two [u]avgfloors with a 128-bit destination size, combine
+    // into an avg of two concats of the source vectors.
+    // eg: concat(uhadd(a,b), uhadd(c, d)) -> uhadd(concat(a, c), concat(b, d))
+    if (N0Opc == ISD::AVGFLOORU && N0Opc == N1Opc && VT.is128BitVector() &&
+        N0->hasOneUse() && N1->hasOneUse()) {
+      EVT PairVT = N00VT.getDoubleNumVectorElementsVT(*DAG.getContext());
+      SDValue Concat0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, PairVT, N00, N10);
+      SDValue Concat1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, PairVT, N01, N11);
+      return DAG.getNode(ISD::AVGFLOORU, dl, PairVT, Concat0, Concat1);
+    }
   }
 
   auto IsRSHRN = [](SDValue Shr) {
@@ -18291,19 +18302,6 @@ static SDValue performConcatVectorsCombine(SDNode *N,
     return true;
   };
 
-  // concat(uhadd(a, b), uhadd(c, d)) -> uhadd(concat(a, c), concat(b, d))
-  if (N0Opc == ISD::AVGFLOORU && N1Opc == ISD::AVGFLOORU) {
-    SDValue N00 = N0->getOperand(0);
-    SDValue N01 = N0->getOperand(1);
-    SDValue N10 = N1->getOperand(0);
-    SDValue N11 = N1->getOperand(1);
-    EVT N00VT = N00.getValueType();
-    EVT PairVT = N00VT.getDoubleNumVectorElementsVT(*DAG.getContext());
-    SDValue Concat0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, PairVT, N00, N10);
-    SDValue Concat1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, PairVT, N01, N11);
-    return DAG.getNode(ISD::AVGFLOORU, dl, PairVT, Concat0, Concat1);
-  }
-
   // concat(rshrn(x), rshrn(y)) -> rshrn(concat(x, y))
   if (N->getNumOperands() == 2 && IsRSHRN(N0) &&
       ((IsRSHRN(N1) &&
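
The pre-existing fold that this revision moves goes in the other direction: when the averaged operands are the low and high subvector extracts of the same wide vectors, the concat of the two narrow averages is just one wide average. A C model of that identity for the rounding variant (avgceil, i.e. urhadd), with made-up lane values:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Model of concat(avgceil(lo(A), lo(B)), avgceil(hi(A), hi(B))) == avgceil(A, B).
   avgceil rounds up: (a + b + 1) >> 1, computed in a wider type. */
enum { H = 4, W = 2 * H };

static uint8_t avgceil_u8(uint8_t a, uint8_t b) {
  return (uint8_t)(((uint16_t)a + (uint16_t)b + 1) >> 1);
}

int main(void) {
  uint8_t A[W] = {0, 1, 2, 255, 254, 9, 100, 101};
  uint8_t B[W] = {255, 1, 3, 255, 0, 8, 101, 100};
  uint8_t narrow[W], wide[W];

  for (int i = 0; i < H; ++i) {
    narrow[i] = avgceil_u8(A[i], B[i]);             /* avg of the low halves  */
    narrow[H + i] = avgceil_u8(A[H + i], B[H + i]); /* avg of the high halves */
  }
  for (int i = 0; i < W; ++i)
    wide[i] = avgceil_u8(A[i], B[i]);               /* one wide avg */

  assert(memcmp(narrow, wide, W) == 0);
  return 0;
}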

>From 68f73b91b56f66e008fa26862f96ae21c4c7d00d Mon Sep 17 00:00:00 2001
From: Rin Dobrescu <rin.dobrescu at arm.com>
Date: Fri, 26 Jan 2024 14:05:08 +0000
Subject: [PATCH 4/9] Remove check for same opcode, as it is already done above

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 4d44b86cb0980..abd3243bd7704 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -18231,8 +18231,8 @@ static SDValue performConcatVectorsCombine(SDNode *N,
     EVT N10VT = N10.getValueType();
 
     // For extracted subvectors from the same original vectors, combine these
-    // into a single avg that operates on the two original vectors. 
-    // avgceil is the target independent name for rhadd, avgfloor is a hadd. 
+    // into a single avg that operates on the two original vectors.
+    // avgceil is the target independent name for rhadd, avgfloor is a hadd.
     // Example:
     //  (concat_vectors (v8i8 (avgceils (extract_subvector (v16i8 OpA, <0>),
     //                                   extract_subvector (v16i8 OpB, <0>))),
@@ -18267,7 +18267,7 @@ static SDValue performConcatVectorsCombine(SDNode *N,
     // For a concat of two [u]avgfloors with a 128-bit destination size, combine
     // into an avg of two concats of the source vectors.
     // eg: concat(uhadd(a,b), uhadd(c, d)) -> uhadd(concat(a, c), concat(b, d))
-    if (N0Opc == ISD::AVGFLOORU && N0Opc == N1Opc && VT.is128BitVector() &&
+    if (N0Opc == ISD::AVGFLOORU && VT.is128BitVector() &&
         N0->hasOneUse() && N1->hasOneUse()) {
       EVT PairVT = N00VT.getDoubleNumVectorElementsVT(*DAG.getContext());
       SDValue Concat0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, PairVT, N00, N10);

>From f5e9dfe7e027ccd5ec62960dc3938a00cac6d084 Mon Sep 17 00:00:00 2001
From: Rin Dobrescu <rin.dobrescu at arm.com>
Date: Fri, 26 Jan 2024 14:13:10 +0000
Subject: [PATCH 5/9] Run git-clang-format

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index abd3243bd7704..758a4752b53da 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -18267,8 +18267,8 @@ static SDValue performConcatVectorsCombine(SDNode *N,
     // For a concat of two [u]avgfloors with a 128-bit destination size, combine
     // into an avg of two concats of the source vectors.
     // eg: concat(uhadd(a,b), uhadd(c, d)) -> uhadd(concat(a, c), concat(b, d))
-    if (N0Opc == ISD::AVGFLOORU && VT.is128BitVector() &&
-        N0->hasOneUse() && N1->hasOneUse()) {
+    if (N0Opc == ISD::AVGFLOORU && VT.is128BitVector() && N0->hasOneUse() &&
+        N1->hasOneUse()) {
       EVT PairVT = N00VT.getDoubleNumVectorElementsVT(*DAG.getContext());
       SDValue Concat0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, PairVT, N00, N10);
       SDValue Concat1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, PairVT, N01, N11);

>From fd65b128c4d19b9dff71be375bafaa4fb668f1e9 Mon Sep 17 00:00:00 2001
From: Rin Dobrescu <rin.dobrescu at arm.com>
Date: Fri, 26 Jan 2024 16:55:04 +0000
Subject: [PATCH 6/9] Address review comments

---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 55 +++----------------
 1 file changed, 9 insertions(+), 46 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 758a4752b53da..48460283d0712 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -18218,8 +18218,11 @@ static SDValue performConcatVectorsCombine(SDNode *N,
   if (DCI.isBeforeLegalizeOps())
     return SDValue();
 
-  // Optimise concat_vectors of two [us]avgceils or [us]avgfloors.
-  if (N->getNumOperands() == 2 && N0Opc == N1Opc &&
+  // Optimise concat_vectors of two [us]avgceils or [us]avgfloors with a 128-bit
+  // destination size into an avg of two concats of the source
+  // vectors. eg: concat(uhadd(a,b), uhadd(c, d)) -> uhadd(concat(a, c),
+  // concat(b, d))
+  if (N->getNumOperands() == 2 && N0Opc == N1Opc && VT.is128BitVector() &&
       (N0Opc == ISD::AVGCEILU || N0Opc == ISD::AVGCEILS ||
        N0Opc == ISD::AVGFLOORU || N0Opc == ISD::AVGFLOORS)) {
     SDValue N00 = N0->getOperand(0);
@@ -18230,50 +18233,10 @@ static SDValue performConcatVectorsCombine(SDNode *N,
     EVT N00VT = N00.getValueType();
     EVT N10VT = N10.getValueType();
 
-    // For extracted subvectors from the same original vectors, combine these
-    // into a single avg that operates on the two original vectors.
-    // avgceil is the target independent name for rhadd, avgfloor is a hadd.
-    // Example:
-    //  (concat_vectors (v8i8 (avgceils (extract_subvector (v16i8 OpA, <0>),
-    //                                   extract_subvector (v16i8 OpB, <0>))),
-    //                  (v8i8 (avgceils (extract_subvector (v16i8 OpA, <8>),
-    //                                   extract_subvector (v16i8 OpB, <8>)))))
-    // ->
-    //  (v16i8(avgceils(v16i8 OpA, v16i8 OpB)))
-    if (N00->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
-        N01->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
-        N10->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
-        N11->getOpcode() == ISD::EXTRACT_SUBVECTOR && N00VT == N10VT) {
-      SDValue N00Source = N00->getOperand(0);
-      SDValue N01Source = N01->getOperand(0);
-      SDValue N10Source = N10->getOperand(0);
-      SDValue N11Source = N11->getOperand(0);
-
-      if (N00Source == N10Source && N01Source == N11Source &&
-          N00Source.getValueType() == VT && N01Source.getValueType() == VT) {
-        assert(N0.getValueType() == N1.getValueType());
-
-        uint64_t N00Index = N00.getConstantOperandVal(1);
-        uint64_t N01Index = N01.getConstantOperandVal(1);
-        uint64_t N10Index = N10.getConstantOperandVal(1);
-        uint64_t N11Index = N11.getConstantOperandVal(1);
-
-        if (N00Index == N01Index && N10Index == N11Index && N00Index == 0 &&
-            N10Index == N00VT.getVectorNumElements())
-          return DAG.getNode(N0Opc, dl, VT, N00Source, N01Source);
-      }
-    }
-
-    // For a concat of two [u]avgfloors with a 128-bit destination size, combine
-    // into an avg of two concats of the source vectors.
-    // eg: concat(uhadd(a,b), uhadd(c, d)) -> uhadd(concat(a, c), concat(b, d))
-    if (N0Opc == ISD::AVGFLOORU && VT.is128BitVector() && N0->hasOneUse() &&
-        N1->hasOneUse()) {
-      EVT PairVT = N00VT.getDoubleNumVectorElementsVT(*DAG.getContext());
-      SDValue Concat0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, PairVT, N00, N10);
-      SDValue Concat1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, PairVT, N01, N11);
-      return DAG.getNode(ISD::AVGFLOORU, dl, PairVT, Concat0, Concat1);
-    }
+    EVT PairVT = N00VT.getDoubleNumVectorElementsVT(*DAG.getContext());
+    SDValue Concat0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, N00, N10);
+    SDValue Concat1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, N01, N11);
+    return DAG.getNode(N0Opc, dl, PairVT, Concat0, Concat1);
   }
 
   auto IsRSHRN = [](SDValue Shr) {
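
With this revision the combine fires for all four averaging opcodes rather than only AVGFLOORU. For anyone unfamiliar with the nodes, my reading of their per-lane semantics as a hedged C model (it assumes arithmetic right shift for the signed cases, which holds for clang and gcc on AArch64):

#include <assert.h>
#include <stdint.h>

/* Per-lane semantics of the four averaging nodes, computed in a wider type
   so the add cannot wrap (i8 lanes shown):
     AVGFLOORU: (a + b) >> 1      unsigned  (AArch64 uhadd)
     AVGFLOORS: (a + b) >> 1      signed    (shadd)
     AVGCEILU:  (a + b + 1) >> 1  unsigned  (urhadd)
     AVGCEILS:  (a + b + 1) >> 1  signed    (srhadd) */
static uint8_t avgflooru(uint8_t a, uint8_t b) {
  return (uint8_t)(((uint16_t)a + b) >> 1);
}
static int8_t avgfloors(int8_t a, int8_t b) {
  return (int8_t)(((int16_t)a + b) >> 1); /* signed >> assumed arithmetic */
}
static uint8_t avgceilu(uint8_t a, uint8_t b) {
  return (uint8_t)(((uint16_t)a + b + 1) >> 1);
}
static int8_t avgceils(int8_t a, int8_t b) {
  return (int8_t)(((int16_t)a + b + 1) >> 1);
}

int main(void) {
  assert(avgflooru(254, 255) == 254);
  assert(avgceilu(254, 255) == 255);
  assert(avgfloors(-128, 127) == -1); /* floors toward minus infinity */
  assert(avgceils(-128, 127) == 0);
  return 0;
}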

>From a3c9d8611e288b0a5aed0487dcd2cc0ff49aa920 Mon Sep 17 00:00:00 2001
From: Rin Dobrescu <rin.dobrescu at arm.com>
Date: Tue, 30 Jan 2024 15:17:13 +0000
Subject: [PATCH 7/9] Remove unused variables

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 48460283d0712..4fbe06f9b00bb 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -18230,13 +18230,9 @@ static SDValue performConcatVectorsCombine(SDNode *N,
     SDValue N10 = N1->getOperand(0);
     SDValue N11 = N1->getOperand(1);
 
-    EVT N00VT = N00.getValueType();
-    EVT N10VT = N10.getValueType();
-
-    EVT PairVT = N00VT.getDoubleNumVectorElementsVT(*DAG.getContext());
     SDValue Concat0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, N00, N10);
     SDValue Concat1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, N01, N11);
-    return DAG.getNode(N0Opc, dl, PairVT, Concat0, Concat1);
+    return DAG.getNode(N0Opc, dl, VT, Concat0, Concat1);
   }
 
   auto IsRSHRN = [](SDValue Shr) {

>From b68cc0666eaed3f0ba05d24623fda9d63ca15450 Mon Sep 17 00:00:00 2001
From: Rin Dobrescu <rin.dobrescu at arm.com>
Date: Mon, 5 Feb 2024 12:20:07 +0000
Subject: [PATCH 8/9] [AArch64] Convert concat(uhadd(a,b), uhadd(c,d)) to
 uhadd(concat(a,c), concat(b,d))

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 4fbe06f9b00bb..7a01294a439c7 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -18224,15 +18224,18 @@ static SDValue performConcatVectorsCombine(SDNode *N,
   // concat(b, d))
   if (N->getNumOperands() == 2 && N0Opc == N1Opc && VT.is128BitVector() &&
       (N0Opc == ISD::AVGCEILU || N0Opc == ISD::AVGCEILS ||
-       N0Opc == ISD::AVGFLOORU || N0Opc == ISD::AVGFLOORS)) {
+       N0Opc == ISD::AVGFLOORU || N0Opc == ISD::AVGFLOORS) &&
+      N0->hasOneUse() && N1->hasOneUse()) {
     SDValue N00 = N0->getOperand(0);
     SDValue N01 = N0->getOperand(1);
     SDValue N10 = N1->getOperand(0);
     SDValue N11 = N1->getOperand(1);
 
-    SDValue Concat0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, N00, N10);
-    SDValue Concat1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, N01, N11);
-    return DAG.getNode(N0Opc, dl, VT, Concat0, Concat1);
+    if (!N00.isUndef() && !N01.isUndef() && !N10.isUndef() && !N11.isUndef()) {
+      SDValue Concat0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, N00, N10);
+      SDValue Concat1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, N01, N11);
+      return DAG.getNode(N0Opc, dl, VT, Concat0, Concat1);
+    }
   }
 
   auto IsRSHRN = [](SDValue Shr) {

>From ba6b028e40d288c79f0ccda4943fd4468f3b19f3 Mon Sep 17 00:00:00 2001
From: Rin Dobrescu <rin.dobrescu at arm.com>
Date: Mon, 5 Feb 2024 14:33:36 +0000
Subject: [PATCH 9/9] Add negative test to show we don't optimise the undef case

---
 .../AArch64/concat-vector-add-combine.ll      | 32 +++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll b/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll
index 58d610764f4a9..4822db03705cf 100644
--- a/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll
+++ b/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll
@@ -90,6 +90,38 @@ define i32 @combine_add_8xi32(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i3
   ret i32 %res
 }
 
+define i32 @combine_undef_add_8xi32(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h) local_unnamed_addr #0 {
+; CHECK-LABEL: combine_undef_add_8xi32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov s1, w0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-NEXT:    mov v1.s[1], w1
+; CHECK-NEXT:    uhadd v0.4h, v0.4h, v0.4h
+; CHECK-NEXT:    mov v1.s[2], w2
+; CHECK-NEXT:    mov v1.s[3], w3
+; CHECK-NEXT:    xtn v2.4h, v1.4s
+; CHECK-NEXT:    shrn v1.4h, v1.4s, #16
+; CHECK-NEXT:    uhadd v1.4h, v2.4h, v1.4h
+; CHECK-NEXT:    mov v1.d[1], v0.d[0]
+; CHECK-NEXT:    uaddlv s0, v1.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %a1 = insertelement <8 x i32> poison, i32 %a, i32 0
+  %b1 = insertelement <8 x i32> %a1, i32 %b, i32 1
+  %c1 = insertelement <8 x i32> %b1, i32 %c, i32 2
+  %d1 = insertelement <8 x i32> %c1, i32 %d, i32 3
+  %e1 = insertelement <8 x i32> %d1, i32 undef, i32 4
+  %f1 = insertelement <8 x i32> %e1, i32 undef, i32 5
+  %g1 = insertelement <8 x i32> %f1, i32 undef, i32 6
+  %h1 = insertelement <8 x i32> %g1, i32 undef, i32 7
+  %x = and <8 x i32> %h1, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
+  %sh1 = lshr <8 x i32> %h1, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+  %s = add nuw nsw <8 x i32> %x, %sh1
+  %sh2 = lshr <8 x i32> %s, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %res = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %sh2)
+  ret i32 %res
+}
+
 define i64 @combine_add_4xi64(i64 %a, i64 %b, i64 %c, i64 %d) local_unnamed_addr #0 {
 ; CHECK-LABEL: combine_add_4xi64:
 ; CHECK:       // %bb.0:


