[llvm] [AArch64] Convert concat(uhadd(a,b), uhadd(c,d)) to uhadd(concat(a,c), concat(b,d)) (PR #79464)

Rin Dobrescu via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 26 06:08:05 PST 2024


https://github.com/Rin18 updated https://github.com/llvm/llvm-project/pull/79464

>From 4686886d0d74c0e782715ee17eaa2a61a7051a93 Mon Sep 17 00:00:00 2001
From: Rin Dobrescu <rin.dobrescu at arm.com>
Date: Thu, 25 Jan 2024 12:08:20 +0000
Subject: [PATCH 1/4] Precommit test and fix wrong input in other test.

---
 llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll  |  40 ++++--
 .../AArch64/concat-vector-add-combine.ll      | 132 ++++++++++++++++++
 2 files changed, 159 insertions(+), 13 deletions(-)
 create mode 100644 llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll

diff --git a/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll b/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll
index 24cce9a2b26b589..5b34bbb0120beda 100644
--- a/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll
@@ -58,18 +58,32 @@ define <16 x i8> @lower_trunc_16xi8(i16 %a, i16 %b, i16 %c, i16 %d, i16 %e, i16
 ; CHECK-LABEL: lower_trunc_16xi8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov s0, w0
-; CHECK-NEXT:    add x8, sp, #56
-; CHECK-NEXT:    ld1r { v1.8h }, [x8]
+; CHECK-NEXT:    ldr h1, [sp]
+; CHECK-NEXT:    add x8, sp, #8
+; CHECK-NEXT:    ld1 { v1.h }[1], [x8]
+; CHECK-NEXT:    add x8, sp, #16
 ; CHECK-NEXT:    mov v0.h[1], w1
-; CHECK-NEXT:    add v3.8h, v1.8h, v1.8h
+; CHECK-NEXT:    ld1 { v1.h }[2], [x8]
+; CHECK-NEXT:    add x8, sp, #24
 ; CHECK-NEXT:    mov v0.h[2], w2
+; CHECK-NEXT:    ld1 { v1.h }[3], [x8]
+; CHECK-NEXT:    add x8, sp, #32
 ; CHECK-NEXT:    mov v0.h[3], w3
+; CHECK-NEXT:    ld1 { v1.h }[4], [x8]
+; CHECK-NEXT:    add x8, sp, #40
+; CHECK-NEXT:    ld1 { v1.h }[5], [x8]
+; CHECK-NEXT:    add x8, sp, #48
 ; CHECK-NEXT:    mov v0.h[4], w4
+; CHECK-NEXT:    ld1 { v1.h }[6], [x8]
+; CHECK-NEXT:    add x8, sp, #56
 ; CHECK-NEXT:    mov v0.h[5], w5
+; CHECK-NEXT:    ld1 { v1.h }[7], [x8]
 ; CHECK-NEXT:    mov v0.h[6], w6
-; CHECK-NEXT:    add v2.8h, v0.8h, v0.8h
+; CHECK-NEXT:    add v2.8h, v1.8h, v1.8h
+; CHECK-NEXT:    mov v0.h[7], w7
+; CHECK-NEXT:    add v3.8h, v0.8h, v0.8h
 ; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
-; CHECK-NEXT:    uzp1 v1.16b, v2.16b, v3.16b
+; CHECK-NEXT:    uzp1 v1.16b, v3.16b, v2.16b
 ; CHECK-NEXT:    eor v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %a1 = insertelement <16 x i16> poison, i16 %a, i16 0
@@ -80,14 +94,14 @@ define <16 x i8> @lower_trunc_16xi8(i16 %a, i16 %b, i16 %c, i16 %d, i16 %e, i16
   %f1 = insertelement <16 x i16> %e1, i16 %f, i16 5
   %g1 = insertelement <16 x i16> %f1, i16 %g, i16 6
   %h1 = insertelement <16 x i16> %g1, i16 %h, i16 7
-  %i1 = insertelement <16 x i16> %f1, i16 %i, i16 8
-  %j1 = insertelement <16 x i16> %g1, i16 %j, i16 9
-  %k1 = insertelement <16 x i16> %f1, i16 %k, i16 10
-  %l1 = insertelement <16 x i16> %g1, i16 %l, i16 11
-  %m1 = insertelement <16 x i16> %f1, i16 %m, i16 12
-  %n1 = insertelement <16 x i16> %g1, i16 %n, i16 13
-  %o1 = insertelement <16 x i16> %f1, i16 %o, i16 14
-  %p1 = insertelement <16 x i16> %g1, i16 %p, i16 15
+  %i1 = insertelement <16 x i16> %h1, i16 %i, i16 8
+  %j1 = insertelement <16 x i16> %i1, i16 %j, i16 9
+  %k1 = insertelement <16 x i16> %j1, i16 %k, i16 10
+  %l1 = insertelement <16 x i16> %k1, i16 %l, i16 11
+  %m1 = insertelement <16 x i16> %l1, i16 %m, i16 12
+  %n1 = insertelement <16 x i16> %m1, i16 %n, i16 13
+  %o1 = insertelement <16 x i16> %n1, i16 %o, i16 14
+  %p1 = insertelement <16 x i16> %o1, i16 %p, i16 15
   %t = trunc <16 x i16> %p1 to <16 x i8>
   %s = add <16 x i16> %p1, %p1
   %t2 = trunc <16 x i16> %s to <16 x i8>
diff --git a/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll b/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll
new file mode 100644
index 000000000000000..8c660dbf6ce01c9
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll
@@ -0,0 +1,132 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=aarch64 < %s | FileCheck %s
+
+define i16 @combine_add_16xi16(i16 %a, i16 %b, i16 %c, i16 %d, i16 %e, i16 %f, i16 %g, i16 %h, i16 %i, i16 %j, i16 %k, i16 %l, i16 %m, i16 %n, i16 %o, i16 %p) {
+; CHECK-LABEL: combine_add_16xi16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov s0, w0
+; CHECK-NEXT:    ldr h1, [sp]
+; CHECK-NEXT:    add x8, sp, #8
+; CHECK-NEXT:    ld1 { v1.h }[1], [x8]
+; CHECK-NEXT:    add x8, sp, #16
+; CHECK-NEXT:    mov v0.h[1], w1
+; CHECK-NEXT:    ld1 { v1.h }[2], [x8]
+; CHECK-NEXT:    add x8, sp, #24
+; CHECK-NEXT:    mov v0.h[2], w2
+; CHECK-NEXT:    ld1 { v1.h }[3], [x8]
+; CHECK-NEXT:    add x8, sp, #32
+; CHECK-NEXT:    mov v0.h[3], w3
+; CHECK-NEXT:    ld1 { v1.h }[4], [x8]
+; CHECK-NEXT:    add x8, sp, #40
+; CHECK-NEXT:    ld1 { v1.h }[5], [x8]
+; CHECK-NEXT:    add x8, sp, #48
+; CHECK-NEXT:    mov v0.h[4], w4
+; CHECK-NEXT:    ld1 { v1.h }[6], [x8]
+; CHECK-NEXT:    add x8, sp, #56
+; CHECK-NEXT:    mov v0.h[5], w5
+; CHECK-NEXT:    ld1 { v1.h }[7], [x8]
+; CHECK-NEXT:    mov v0.h[6], w6
+; CHECK-NEXT:    xtn v3.8b, v1.8h
+; CHECK-NEXT:    shrn v1.8b, v1.8h, #8
+; CHECK-NEXT:    mov v0.h[7], w7
+; CHECK-NEXT:    uhadd v1.8b, v3.8b, v1.8b
+; CHECK-NEXT:    xtn v2.8b, v0.8h
+; CHECK-NEXT:    shrn v0.8b, v0.8h, #8
+; CHECK-NEXT:    uhadd v0.8b, v2.8b, v0.8b
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    uaddlv h0, v0.16b
+; CHECK-NEXT:    umov w0, v0.h[0]
+; CHECK-NEXT:    ret
+  %a1 = insertelement <16 x i16> poison, i16 %a, i16 0
+  %b1 = insertelement <16 x i16> %a1, i16 %b, i16 1
+  %c1 = insertelement <16 x i16> %b1, i16 %c, i16 2
+  %d1 = insertelement <16 x i16> %c1, i16 %d, i16 3
+  %e1 = insertelement <16 x i16> %d1, i16 %e, i16 4
+  %f1 = insertelement <16 x i16> %e1, i16 %f, i16 5
+  %g1 = insertelement <16 x i16> %f1, i16 %g, i16 6
+  %h1 = insertelement <16 x i16> %g1, i16 %h, i16 7
+  %i1 = insertelement <16 x i16> %h1, i16 %i, i16 8
+  %j1 = insertelement <16 x i16> %i1, i16 %j, i16 9
+  %k1 = insertelement <16 x i16> %j1, i16 %k, i16 10
+  %l1 = insertelement <16 x i16> %k1, i16 %l, i16 11
+  %m1 = insertelement <16 x i16> %l1, i16 %m, i16 12
+  %n1 = insertelement <16 x i16> %m1, i16 %n, i16 13
+  %o1 = insertelement <16 x i16> %n1, i16 %o, i16 14
+  %p1 = insertelement <16 x i16> %o1, i16 %p, i16 15
+  %x = and <16 x i16> %p1, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+  %sh1 = lshr <16 x i16> %p1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  %s = add nuw nsw <16 x i16> %x, %sh1
+  %sh2 = lshr <16 x i16> %s, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %res = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %sh2)
+  ret i16 %res
+}
+
+define i32 @combine_add_8xi32(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h) local_unnamed_addr #0 {
+; CHECK-LABEL: combine_add_8xi32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov s0, w4
+; CHECK-NEXT:    fmov s1, w0
+; CHECK-NEXT:    mov v0.s[1], w5
+; CHECK-NEXT:    mov v1.s[1], w1
+; CHECK-NEXT:    mov v0.s[2], w6
+; CHECK-NEXT:    mov v1.s[2], w2
+; CHECK-NEXT:    mov v0.s[3], w7
+; CHECK-NEXT:    mov v1.s[3], w3
+; CHECK-NEXT:    xtn v2.4h, v1.4s
+; CHECK-NEXT:    xtn v3.4h, v0.4s
+; CHECK-NEXT:    shrn v1.4h, v1.4s, #16
+; CHECK-NEXT:    shrn v0.4h, v0.4s, #16
+; CHECK-NEXT:    uhadd v1.4h, v2.4h, v1.4h
+; CHECK-NEXT:    uhadd v0.4h, v3.4h, v0.4h
+; CHECK-NEXT:    mov v1.d[1], v0.d[0]
+; CHECK-NEXT:    uaddlv s0, v1.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %a1 = insertelement <8 x i32> poison, i32 %a, i32 0
+  %b1 = insertelement <8 x i32> %a1, i32 %b, i32 1
+  %c1 = insertelement <8 x i32> %b1, i32 %c, i32 2
+  %d1 = insertelement <8 x i32> %c1, i32 %d, i32 3
+  %e1 = insertelement <8 x i32> %d1, i32 %e, i32 4
+  %f1 = insertelement <8 x i32> %e1, i32 %f, i32 5
+  %g1 = insertelement <8 x i32> %f1, i32 %g, i32 6
+  %h1 = insertelement <8 x i32> %g1, i32 %h, i32 7
+  %x = and <8 x i32> %h1, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
+  %sh1 = lshr <8 x i32> %h1, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+  %s = add nuw nsw <8 x i32> %x, %sh1
+  %sh2 = lshr <8 x i32> %s, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %res = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %sh2)
+  ret i32 %res
+}
+
+define i64 @combine_add_4xi64(i64 %a, i64 %b, i64 %c, i64 %d) local_unnamed_addr #0 {
+; CHECK-LABEL: combine_add_4xi64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x2
+; CHECK-NEXT:    fmov d1, x0
+; CHECK-NEXT:    mov v0.d[1], x3
+; CHECK-NEXT:    mov v1.d[1], x1
+; CHECK-NEXT:    xtn v2.2s, v1.2d
+; CHECK-NEXT:    xtn v3.2s, v0.2d
+; CHECK-NEXT:    shrn v1.2s, v1.2d, #32
+; CHECK-NEXT:    shrn v0.2s, v0.2d, #32
+; CHECK-NEXT:    uhadd v1.2s, v2.2s, v1.2s
+; CHECK-NEXT:    uhadd v0.2s, v3.2s, v0.2s
+; CHECK-NEXT:    mov v1.d[1], v0.d[0]
+; CHECK-NEXT:    uaddlv d0, v1.4s
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %a1 = insertelement <4 x i64> poison, i64 %a, i64 0
+  %b1 = insertelement <4 x i64> %a1, i64 %b, i64 1
+  %c1 = insertelement <4 x i64> %b1, i64 %c, i64 2
+  %d1 = insertelement <4 x i64> %c1, i64 %d, i64 3
+  %x = and <4 x i64> %d1, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+  %sh1 = lshr <4 x i64> %d1, <i64 32, i64 32, i64 32, i64 32>
+  %s = add nuw nsw <4 x i64> %x, %sh1
+  %sh2 = lshr <4 x i64> %s, <i64 1, i64 1, i64 1, i64 1>
+  %res = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %sh2)
+  ret i64 %res
+}
+
+declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>)
+declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
+declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>)

>From b0a2a4aefcf6484c63201fb498ad9915c127e292 Mon Sep 17 00:00:00 2001
From: Rin Dobrescu <rin.dobrescu at arm.com>
Date: Thu, 25 Jan 2024 12:27:22 +0000
Subject: [PATCH 2/4] [AArch64] Convert concat(uhadd(a,b), uhadd(c,d)) to
 uhadd(concat(a,c), concat(b,d))

---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 13 +++++
 llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll  | 53 -------------------
 .../AArch64/concat-vector-add-combine.ll      | 34 ++++--------
 3 files changed, 24 insertions(+), 76 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 23d37d67864a52f..5215b96640b67b5 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -18200,6 +18200,19 @@ static SDValue performConcatVectorsCombine(SDNode *N,
     return true;
   };
 
+  // concat(uhadd(a, b), uhadd(c, d)) -> uhadd(concat(a, c), concat(b, d))
+  if (N0Opc == ISD::AVGFLOORU && N1Opc == ISD::AVGFLOORU) {
+    SDValue N00 = N0->getOperand(0);
+    SDValue N01 = N0->getOperand(1);
+    SDValue N10 = N1->getOperand(0);
+    SDValue N11 = N1->getOperand(1);
+    EVT N00VT = N00.getValueType();
+    EVT PairVT = N00VT.getDoubleNumVectorElementsVT(*DAG.getContext());
+    SDValue Concat0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, PairVT, N00, N10);
+    SDValue Concat1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, PairVT, N01, N11);
+    return DAG.getNode(ISD::AVGFLOORU, dl, PairVT, Concat0, Concat1);
+  }
+
   // concat(rshrn(x), rshrn(y)) -> rshrn(concat(x, y))
   if (N->getNumOperands() == 2 && IsRSHRN(N0) &&
       ((IsRSHRN(N1) &&
diff --git a/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll b/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll
index 5b34bbb0120beda..c4de177176e330b 100644
--- a/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll
@@ -1,59 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -mtriple=aarch64 < %s | FileCheck %s
 
-define i32 @lower_lshr(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d, <4 x i32> %e, <4 x i32> %f, <4 x i32> %g, <4 x i32> %h) {
-; CHECK-LABEL: lower_lshr:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    addv s0, v0.4s
-; CHECK-NEXT:    addv s1, v1.4s
-; CHECK-NEXT:    addv s4, v4.4s
-; CHECK-NEXT:    addv s5, v5.4s
-; CHECK-NEXT:    addv s2, v2.4s
-; CHECK-NEXT:    addv s6, v6.4s
-; CHECK-NEXT:    mov v0.s[1], v1.s[0]
-; CHECK-NEXT:    addv s1, v3.4s
-; CHECK-NEXT:    addv s3, v7.4s
-; CHECK-NEXT:    mov v4.s[1], v5.s[0]
-; CHECK-NEXT:    mov v0.s[2], v2.s[0]
-; CHECK-NEXT:    mov v4.s[2], v6.s[0]
-; CHECK-NEXT:    mov v0.s[3], v1.s[0]
-; CHECK-NEXT:    mov v4.s[3], v3.s[0]
-; CHECK-NEXT:    xtn v1.4h, v0.4s
-; CHECK-NEXT:    shrn v0.4h, v0.4s, #16
-; CHECK-NEXT:    xtn v2.4h, v4.4s
-; CHECK-NEXT:    shrn v3.4h, v4.4s, #16
-; CHECK-NEXT:    uhadd v0.4h, v1.4h, v0.4h
-; CHECK-NEXT:    uhadd v1.4h, v2.4h, v3.4h
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
-; CHECK-NEXT:    uaddlv s0, v0.8h
-; CHECK-NEXT:    fmov w0, s0
-; CHECK-NEXT:    ret
-  %l87  = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a)
-  %l174 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %b)
-  %l257 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %c)
-  %l340 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %d)
-  %l427 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %e)
-  %l514 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %f)
-  %l597 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %g)
-  %l680 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %h)
-  %l681 = insertelement <8 x i32> poison, i32 %l87, i32 0
-  %l682 = insertelement <8 x i32> %l681, i32 %l174, i32 1
-  %l683 = insertelement <8 x i32> %l682, i32 %l257, i32 2
-  %l684 = insertelement <8 x i32> %l683, i32 %l340, i32 3
-  %l685 = insertelement <8 x i32> %l684, i32 %l427, i32 4
-  %l686 = insertelement <8 x i32> %l685, i32 %l514, i32 5
-  %l687 = insertelement <8 x i32> %l686, i32 %l597, i32 6
-  %l688 = insertelement <8 x i32> %l687, i32 %l680, i32 7
-  %l689 = and <8 x i32> %l688, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
-  %l690 = lshr <8 x i32> %l688, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
-  %l691 = add nuw nsw <8 x i32> %l689, %l690
-  %l692 = lshr <8 x i32> %l691, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
-  %l693 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %l692)
-  ret i32 %l693
-}
-declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
-declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
-
 define <16 x i8> @lower_trunc_16xi8(i16 %a, i16 %b, i16 %c, i16 %d, i16 %e, i16 %f, i16 %g, i16 %h, i16 %i, i16 %j, i16 %k, i16 %l, i16 %m, i16 %n, i16 %o, i16 %p) {
 ; CHECK-LABEL: lower_trunc_16xi8:
 ; CHECK:       // %bb.0:
diff --git a/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll b/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll
index 8c660dbf6ce01c9..58d610764f4a97d 100644
--- a/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll
+++ b/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll
@@ -26,14 +26,10 @@ define i16 @combine_add_16xi16(i16 %a, i16 %b, i16 %c, i16 %d, i16 %e, i16 %f, i
 ; CHECK-NEXT:    mov v0.h[5], w5
 ; CHECK-NEXT:    ld1 { v1.h }[7], [x8]
 ; CHECK-NEXT:    mov v0.h[6], w6
-; CHECK-NEXT:    xtn v3.8b, v1.8h
-; CHECK-NEXT:    shrn v1.8b, v1.8h, #8
 ; CHECK-NEXT:    mov v0.h[7], w7
-; CHECK-NEXT:    uhadd v1.8b, v3.8b, v1.8b
-; CHECK-NEXT:    xtn v2.8b, v0.8h
-; CHECK-NEXT:    shrn v0.8b, v0.8h, #8
-; CHECK-NEXT:    uhadd v0.8b, v2.8b, v0.8b
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    uzp2 v2.16b, v0.16b, v1.16b
+; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    uhadd v0.16b, v0.16b, v2.16b
 ; CHECK-NEXT:    uaddlv h0, v0.16b
 ; CHECK-NEXT:    umov w0, v0.h[0]
 ; CHECK-NEXT:    ret
@@ -72,14 +68,10 @@ define i32 @combine_add_8xi32(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i3
 ; CHECK-NEXT:    mov v1.s[2], w2
 ; CHECK-NEXT:    mov v0.s[3], w7
 ; CHECK-NEXT:    mov v1.s[3], w3
-; CHECK-NEXT:    xtn v2.4h, v1.4s
-; CHECK-NEXT:    xtn v3.4h, v0.4s
-; CHECK-NEXT:    shrn v1.4h, v1.4s, #16
-; CHECK-NEXT:    shrn v0.4h, v0.4s, #16
-; CHECK-NEXT:    uhadd v1.4h, v2.4h, v1.4h
-; CHECK-NEXT:    uhadd v0.4h, v3.4h, v0.4h
-; CHECK-NEXT:    mov v1.d[1], v0.d[0]
-; CHECK-NEXT:    uaddlv s0, v1.8h
+; CHECK-NEXT:    uzp2 v2.8h, v1.8h, v0.8h
+; CHECK-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NEXT:    uhadd v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    uaddlv s0, v0.8h
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
   %a1 = insertelement <8 x i32> poison, i32 %a, i32 0
@@ -105,14 +97,10 @@ define i64 @combine_add_4xi64(i64 %a, i64 %b, i64 %c, i64 %d) local_unnamed_addr
 ; CHECK-NEXT:    fmov d1, x0
 ; CHECK-NEXT:    mov v0.d[1], x3
 ; CHECK-NEXT:    mov v1.d[1], x1
-; CHECK-NEXT:    xtn v2.2s, v1.2d
-; CHECK-NEXT:    xtn v3.2s, v0.2d
-; CHECK-NEXT:    shrn v1.2s, v1.2d, #32
-; CHECK-NEXT:    shrn v0.2s, v0.2d, #32
-; CHECK-NEXT:    uhadd v1.2s, v2.2s, v1.2s
-; CHECK-NEXT:    uhadd v0.2s, v3.2s, v0.2s
-; CHECK-NEXT:    mov v1.d[1], v0.d[0]
-; CHECK-NEXT:    uaddlv d0, v1.4s
+; CHECK-NEXT:    uzp2 v2.4s, v1.4s, v0.4s
+; CHECK-NEXT:    uzp1 v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    uhadd v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    uaddlv d0, v0.4s
 ; CHECK-NEXT:    fmov x0, d0
 ; CHECK-NEXT:    ret
   %a1 = insertelement <4 x i64> poison, i64 %a, i64 0

>From 8c3b070b72e20b05410a43705bedace9fe5fc481 Mon Sep 17 00:00:00 2001
From: Rin Dobrescu <rin.dobrescu at arm.com>
Date: Fri, 26 Jan 2024 13:51:02 +0000
Subject: [PATCH 3/4] Address comments

---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 46 +++++++++----------
 1 file changed, 22 insertions(+), 24 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 5215b96640b67b5..dbdea9d3c59d4d7 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -18127,17 +18127,7 @@ static SDValue performConcatVectorsCombine(SDNode *N,
   if (DCI.isBeforeLegalizeOps())
     return SDValue();
 
-  // Optimise concat_vectors of two [us]avgceils or [us]avgfloors that use
-  // extracted subvectors from the same original vectors. Combine these into a
-  // single avg that operates on the two original vectors.
-  // avgceil is the target independant name for rhadd, avgfloor is a hadd.
-  // Example:
-  //  (concat_vectors (v8i8 (avgceils (extract_subvector (v16i8 OpA, <0>),
-  //                                   extract_subvector (v16i8 OpB, <0>))),
-  //                  (v8i8 (avgceils (extract_subvector (v16i8 OpA, <8>),
-  //                                   extract_subvector (v16i8 OpB, <8>)))))
-  // ->
-  //  (v16i8(avgceils(v16i8 OpA, v16i8 OpB)))
+  // Optimise concat_vectors of two [us]avgceils or [us]avgfloors.
   if (N->getNumOperands() == 2 && N0Opc == N1Opc &&
       (N0Opc == ISD::AVGCEILU || N0Opc == ISD::AVGCEILS ||
        N0Opc == ISD::AVGFLOORU || N0Opc == ISD::AVGFLOORS)) {
@@ -18149,6 +18139,16 @@ static SDValue performConcatVectorsCombine(SDNode *N,
     EVT N00VT = N00.getValueType();
     EVT N10VT = N10.getValueType();
 
+    // For extracted subvectors from the same original vectors, combine these
+    // into a single avg that operates on the two original vectors. 
+    // avgceil is the target independant name for rhadd, avgfloor is a hadd. 
+    // Example:
+    //  (concat_vectors (v8i8 (avgceils (extract_subvector (v16i8 OpA, <0>),
+    //                                   extract_subvector (v16i8 OpB, <0>))),
+    //                  (v8i8 (avgceils (extract_subvector (v16i8 OpA, <8>),
+    //                                   extract_subvector (v16i8 OpB, <8>)))))
+    // ->
+    //  (v16i8(avgceils(v16i8 OpA, v16i8 OpB)))
     if (N00->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
         N01->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
         N10->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
@@ -18172,6 +18172,17 @@ static SDValue performConcatVectorsCombine(SDNode *N,
           return DAG.getNode(N0Opc, dl, VT, N00Source, N01Source);
       }
     }
+
+    // For a concat of two [u]avgfloors with a 128-bit destination size, combine
+    // into an avg of two concats of the source vectors.
+    // eg: concat(uhadd(a,b), uhadd(c, d)) -> uhadd(concat(a, c), concat(b, d))
+    if (N0Opc == ISD::AVGFLOORU && N0Opc == N1Opc && VT.is128BitVector() &&
+        N0->hasOneUse() && N1->hasOneUse()) {
+      EVT PairVT = N00VT.getDoubleNumVectorElementsVT(*DAG.getContext());
+      SDValue Concat0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, PairVT, N00, N10);
+      SDValue Concat1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, PairVT, N01, N11);
+      return DAG.getNode(ISD::AVGFLOORU, dl, PairVT, Concat0, Concat1);
+    }
   }
 
   auto IsRSHRN = [](SDValue Shr) {
@@ -18200,19 +18211,6 @@ static SDValue performConcatVectorsCombine(SDNode *N,
     return true;
   };
 
-  // concat(uhadd(a, b), uhadd(c, d)) -> uhadd(concat(a, c), concat(b, d))
-  if (N0Opc == ISD::AVGFLOORU && N1Opc == ISD::AVGFLOORU) {
-    SDValue N00 = N0->getOperand(0);
-    SDValue N01 = N0->getOperand(1);
-    SDValue N10 = N1->getOperand(0);
-    SDValue N11 = N1->getOperand(1);
-    EVT N00VT = N00.getValueType();
-    EVT PairVT = N00VT.getDoubleNumVectorElementsVT(*DAG.getContext());
-    SDValue Concat0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, PairVT, N00, N10);
-    SDValue Concat1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, PairVT, N01, N11);
-    return DAG.getNode(ISD::AVGFLOORU, dl, PairVT, Concat0, Concat1);
-  }
-
   // concat(rshrn(x), rshrn(y)) -> rshrn(concat(x, y))
   if (N->getNumOperands() == 2 && IsRSHRN(N0) &&
       ((IsRSHRN(N1) &&

>From 3f7db9eb578dc502bf5931820d49303788573560 Mon Sep 17 00:00:00 2001
From: Rin Dobrescu <rin.dobrescu at arm.com>
Date: Fri, 26 Jan 2024 14:05:08 +0000
Subject: [PATCH 4/4] Remove check for same Opcode as it is already done above

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index dbdea9d3c59d4d7..dc04cc868d2c7a1 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -18140,8 +18140,8 @@ static SDValue performConcatVectorsCombine(SDNode *N,
     EVT N10VT = N10.getValueType();
 
     // For extracted subvectors from the same original vectors, combine these
-    // into a single avg that operates on the two original vectors. 
-    // avgceil is the target independant name for rhadd, avgfloor is a hadd. 
+    // into a single avg that operates on the two original vectors.
+    // avgceil is the target independent name for rhadd, avgfloor is a hadd.
     // Example:
     //  (concat_vectors (v8i8 (avgceils (extract_subvector (v16i8 OpA, <0>),
     //                                   extract_subvector (v16i8 OpB, <0>))),
@@ -18176,7 +18176,7 @@ static SDValue performConcatVectorsCombine(SDNode *N,
     // For a concat of two [u]avgfloors with a 128-bit destination size, combine
    // into an avg of two concats of the source vectors.
     // eg: concat(uhadd(a,b), uhadd(c, d)) -> uhadd(concat(a, c), concat(b, d))
-    if (N0Opc == ISD::AVGFLOORU && N0Opc == N1Opc && VT.is128BitVector() &&
+    if (N0Opc == ISD::AVGFLOORU && VT.is128BitVector() &&
         N0->hasOneUse() && N1->hasOneUse()) {
       EVT PairVT = N00VT.getDoubleNumVectorElementsVT(*DAG.getContext());
       SDValue Concat0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, PairVT, N00, N10);



More information about the llvm-commits mailing list