[llvm] 2b9330e - Add baseline tests for D147404

Jun Zhang via llvm-commits llvm-commits at lists.llvm.org
Mon Apr 3 01:58:06 PDT 2023


Author: Jun Zhang
Date: 2023-04-03T16:57:50+08:00
New Revision: 2b9330e41f5405328e28d4b34bb69461fbe227b5

URL: https://github.com/llvm/llvm-project/commit/2b9330e41f5405328e28d4b34bb69461fbe227b5
DIFF: https://github.com/llvm/llvm-project/commit/2b9330e41f5405328e28d4b34bb69461fbe227b5.diff

LOG: Add baseline tests for D147404

Differential Revision: https://reviews.llvm.org/D147409

Signed-off-by: Jun Zhang <jun at junz.org>
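
Context: every test added below calls a NEON halving-add intrinsic with
both operands equal. shadd(x, x) computes (x + x) >> 1 = x, and the
rounding forms compute (x + x + 1) >> 1, which is also x, so the
follow-up D147404 can presumably fold hadd(x, x) and rhadd(x, x) down to
x; these baseline tests pin the current codegen so that fold shows up as
a plain CHECK-line diff. A minimal sketch of the pattern being baselined
(hypothetical function name; the fold itself is an assumption about
D147404):

  ; Baseline today: this lowers to "shadd v0.8h, v0.8h, v0.8h".
  define <8 x i16> @shadd_self(<8 x i16> %x) {
    %r = call <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16> %x, <8 x i16> %x)
    ret <8 x i16> %r
  }
  declare <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16>, <8 x i16>)
  ; If D147404 folds hadd(x, x) -> x as assumed, only the "ret" remains.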

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/hadd-combine.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/hadd-combine.ll b/llvm/test/CodeGen/AArch64/hadd-combine.ll
index 17d914b10430..62ccf2e8d99f 100644
--- a/llvm/test/CodeGen/AArch64/hadd-combine.ll
+++ b/llvm/test/CodeGen/AArch64/hadd-combine.ll
@@ -234,7 +234,7 @@ define <8 x i16> @hadds_const_both() {
 define <8 x i16> @hadds_const_bothhigh() {
 ; CHECK-LABEL: hadds_const_bothhigh:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #32766
+; CHECK-NEXT:    mov w8, #32766 // =0x7ffe
 ; CHECK-NEXT:    dup v0.8h, w8
 ; CHECK-NEXT:    ret
   %ext1 = sext <8 x i16> <i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766> to <8 x i32>
@@ -313,7 +313,7 @@ define <8 x i16> @hadds_i_const_both() {
 define <8 x i16> @hadds_i_const_bothhigh() {
 ; CHECK-LABEL: hadds_i_const_bothhigh:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #32766
+; CHECK-NEXT:    mov w8, #32766 // =0x7ffe
 ; CHECK-NEXT:    dup v0.8h, w8
 ; CHECK-NEXT:    ret
   %result = call <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16> <i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766>, <8 x i16> <i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767>)
@@ -667,7 +667,244 @@ define <8 x i16> @rhadds_i_undef(<8 x i16> %t, <8 x i16> %src1) {
   ret <8 x i16> %result
 }
 
-declare <8 x i16> @llvm.aarch64.neon.uhadd.v8i16(<8 x i16>, <8 x i16>)
+
+define <8 x i8> @shadd_v8i8(<8 x i8> %x) {
+; CHECK-LABEL: shadd_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    shadd v0.8b, v0.8b, v0.8b
+; CHECK-NEXT:    ret
+  %r = tail call <8 x i8> @llvm.aarch64.neon.shadd.v8i8(<8 x i8> %x, <8 x i8> %x)
+  ret <8 x i8> %r
+}
+
+define <4 x i16> @shadd_v4i16(<4 x i16> %x) {
+; CHECK-LABEL: shadd_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    shadd v0.4h, v0.4h, v0.4h
+; CHECK-NEXT:    ret
+  %r = tail call <4 x i16> @llvm.aarch64.neon.shadd.v4i16(<4 x i16> %x, <4 x i16> %x)
+  ret <4 x i16> %r
+}
+
+define <2 x i32> @shadd_v2i32(<2 x i32> %x) {
+; CHECK-LABEL: shadd_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    shadd v0.2s, v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %r = tail call <2 x i32> @llvm.aarch64.neon.shadd.v2i32(<2 x i32> %x, <2 x i32> %x)
+  ret <2 x i32> %r
+}
+
+define <16 x i8> @shadd_v16i8(<16 x i8> %x) {
+; CHECK-LABEL: shadd_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    shadd v0.16b, v0.16b, v0.16b
+; CHECK-NEXT:    ret
+  %r = tail call <16 x i8> @llvm.aarch64.neon.shadd.v16i8(<16 x i8> %x, <16 x i8> %x)
+  ret <16 x i8> %r
+}
+
+define <8 x i16> @shadd_v8i16(<8 x i16> %x) {
+; CHECK-LABEL: shadd_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    shadd v0.8h, v0.8h, v0.8h
+; CHECK-NEXT:    ret
+  %r = tail call <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16> %x, <8 x i16> %x)
+  ret <8 x i16> %r
+}
+
+define <4 x i32> @shadd_v4i32(<4 x i32> %x) {
+; CHECK-LABEL: shadd_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    shadd v0.4s, v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %r = tail call <4 x i32> @llvm.aarch64.neon.shadd.v4i32(<4 x i32> %x, <4 x i32> %x)
+  ret <4 x i32> %r
+}
+
+define <8 x i8> @uhadd_v8i8(<8 x i8> %x) {
+; CHECK-LABEL: uhadd_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uhadd v0.8b, v0.8b, v0.8b
+; CHECK-NEXT:    ret
+  %r = tail call <8 x i8> @llvm.aarch64.neon.uhadd.v8i8(<8 x i8> %x, <8 x i8> %x)
+  ret <8 x i8> %r
+}
+
+define <4 x i16> @uhadd_v4i16(<4 x i16> %x) {
+; CHECK-LABEL: uhadd_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uhadd v0.4h, v0.4h, v0.4h
+; CHECK-NEXT:    ret
+  %r = tail call <4 x i16> @llvm.aarch64.neon.uhadd.v4i16(<4 x i16> %x, <4 x i16> %x)
+  ret <4 x i16> %r
+}
+
+define <2 x i32> @uhadd_v2i32(<2 x i32> %x) {
+; CHECK-LABEL: uhadd_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uhadd v0.2s, v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %r = tail call <2 x i32> @llvm.aarch64.neon.uhadd.v2i32(<2 x i32> %x, <2 x i32> %x)
+  ret <2 x i32> %r
+}
+
+define <16 x i8> @uhadd_v16i8(<16 x i8> %x) {
+; CHECK-LABEL: uhadd_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uhadd v0.16b, v0.16b, v0.16b
+; CHECK-NEXT:    ret
+  %r = tail call <16 x i8> @llvm.aarch64.neon.uhadd.v16i8(<16 x i8> %x, <16 x i8> %x)
+  ret <16 x i8> %r
+}
+
+define <8 x i16> @uhadd_v8i16(<8 x i16> %x) {
+; CHECK-LABEL: uhadd_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uhadd v0.8h, v0.8h, v0.8h
+; CHECK-NEXT:    ret
+  %r = tail call <8 x i16> @llvm.aarch64.neon.uhadd.v8i16(<8 x i16> %x, <8 x i16> %x)
+  ret <8 x i16> %r
+}
+
+define <4 x i32> @uhadd_v4i32(<4 x i32> %x) {
+; CHECK-LABEL: uhadd_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uhadd v0.4s, v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %r = tail call <4 x i32> @llvm.aarch64.neon.uhadd.v4i32(<4 x i32> %x, <4 x i32> %x)
+  ret <4 x i32> %r
+}
+define <8 x i8> @srhadd_v8i8(<8 x i8> %x) {
+; CHECK-LABEL: srhadd_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    srhadd v0.8b, v0.8b, v0.8b
+; CHECK-NEXT:    ret
+  %r = tail call <8 x i8> @llvm.aarch64.neon.srhadd.v8i8(<8 x i8> %x, <8 x i8> %x)
+  ret <8 x i8> %r
+}
+
+define <4 x i16> @srhadd_v4i16(<4 x i16> %x) {
+; CHECK-LABEL: srhadd_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    srhadd v0.4h, v0.4h, v0.4h
+; CHECK-NEXT:    ret
+  %r = tail call <4 x i16> @llvm.aarch64.neon.srhadd.v4i16(<4 x i16> %x, <4 x i16> %x)
+  ret <4 x i16> %r
+}
+
+define <2 x i32> @srhadd_v2i32(<2 x i32> %x) {
+; CHECK-LABEL: srhadd_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    srhadd v0.2s, v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %r = tail call <2 x i32> @llvm.aarch64.neon.srhadd.v2i32(<2 x i32> %x, <2 x i32> %x)
+  ret <2 x i32> %r
+}
+
+define <16 x i8> @srhadd_v16i8(<16 x i8> %x) {
+; CHECK-LABEL: srhadd_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    srhadd v0.16b, v0.16b, v0.16b
+; CHECK-NEXT:    ret
+  %r = tail call <16 x i8> @llvm.aarch64.neon.srhadd.v16i8(<16 x i8> %x, <16 x i8> %x)
+  ret <16 x i8> %r
+}
+
+define <8 x i16> @srhadd_v8i16(<8 x i16> %x) {
+; CHECK-LABEL: srhadd_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    srhadd v0.8h, v0.8h, v0.8h
+; CHECK-NEXT:    ret
+  %r = tail call <8 x i16> @llvm.aarch64.neon.srhadd.v8i16(<8 x i16> %x, <8 x i16> %x)
+  ret <8 x i16> %r
+}
+
+define <4 x i32> @srhadd_v4i32(<4 x i32> %x) {
+; CHECK-LABEL: srhadd_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    srhadd v0.4s, v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %r = tail call <4 x i32> @llvm.aarch64.neon.srhadd.v4i32(<4 x i32> %x, <4 x i32> %x)
+  ret <4 x i32> %r
+}
+
+define <8 x i8> @urhadd_v8i8(<8 x i8> %x) {
+; CHECK-LABEL: urhadd_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    urhadd v0.8b, v0.8b, v0.8b
+; CHECK-NEXT:    ret
+  %r = tail call <8 x i8> @llvm.aarch64.neon.urhadd.v8i8(<8 x i8> %x, <8 x i8> %x)
+  ret <8 x i8> %r
+}
+
+define <4 x i16> @urhadd_v4i16(<4 x i16> %x) {
+; CHECK-LABEL: urhadd_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    urhadd v0.4h, v0.4h, v0.4h
+; CHECK-NEXT:    ret
+  %r = tail call <4 x i16> @llvm.aarch64.neon.urhadd.v4i16(<4 x i16> %x, <4 x i16> %x)
+  ret <4 x i16> %r
+}
+
+define <2 x i32> @urhadd_v2i32(<2 x i32> %x) {
+; CHECK-LABEL: urhadd_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    urhadd v0.2s, v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %r = tail call <2 x i32> @llvm.aarch64.neon.urhadd.v2i32(<2 x i32> %x, <2 x i32> %x)
+  ret <2 x i32> %r
+}
+
+define <16 x i8> @urhadd_v16i8(<16 x i8> %x) {
+; CHECK-LABEL: urhadd_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    urhadd v0.16b, v0.16b, v0.16b
+; CHECK-NEXT:    ret
+  %r = tail call <16 x i8> @llvm.aarch64.neon.urhadd.v16i8(<16 x i8> %x, <16 x i8> %x)
+  ret <16 x i8> %r
+}
+
+define <8 x i16> @urhadd_v8i16(<8 x i16> %x) {
+; CHECK-LABEL: urhadd_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    urhadd v0.8h, v0.8h, v0.8h
+; CHECK-NEXT:    ret
+  %r = tail call <8 x i16> @llvm.aarch64.neon.urhadd.v8i16(<8 x i16> %x, <8 x i16> %x)
+  ret <8 x i16> %r
+}
+
+define <4 x i32> @urhadd_v4i32(<4 x i32> %x) {
+; CHECK-LABEL: urhadd_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    urhadd v0.4s, v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %r = tail call <4 x i32> @llvm.aarch64.neon.urhadd.v4i32(<4 x i32> %x, <4 x i32> %x)
+  ret <4 x i32> %r
+}
+
+declare <8 x i8> @llvm.aarch64.neon.shadd.v8i8(<8 x i8>, <8 x i8>)
+declare <4 x i16> @llvm.aarch64.neon.shadd.v4i16(<4 x i16>, <4 x i16>)
+declare <2 x i32> @llvm.aarch64.neon.shadd.v2i32(<2 x i32>, <2 x i32>)
+declare <8 x i8> @llvm.aarch64.neon.uhadd.v8i8(<8 x i8>, <8 x i8>)
+declare <4 x i16> @llvm.aarch64.neon.uhadd.v4i16(<4 x i16>, <4 x i16>)
+declare <2 x i32> @llvm.aarch64.neon.uhadd.v2i32(<2 x i32>, <2 x i32>)
+declare <16 x i8> @llvm.aarch64.neon.shadd.v16i8(<16 x i8>, <16 x i8>)
 declare <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16>, <8 x i16>)
-declare <8 x i16> @llvm.aarch64.neon.urhadd.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.aarch64.neon.shadd.v4i32(<4 x i32>, <4 x i32>)
+declare <16 x i8> @llvm.aarch64.neon.uhadd.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.aarch64.neon.uhadd.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.aarch64.neon.uhadd.v4i32(<4 x i32>, <4 x i32>)
+
+declare <8 x i8> @llvm.aarch64.neon.srhadd.v8i8(<8 x i8>, <8 x i8>)
+declare <4 x i16> @llvm.aarch64.neon.srhadd.v4i16(<4 x i16>, <4 x i16>)
+declare <2 x i32> @llvm.aarch64.neon.srhadd.v2i32(<2 x i32>, <2 x i32>)
+declare <8 x i8> @llvm.aarch64.neon.urhadd.v8i8(<8 x i8>, <8 x i8>)
+declare <4 x i16> @llvm.aarch64.neon.urhadd.v4i16(<4 x i16>, <4 x i16>)
+declare <2 x i32> @llvm.aarch64.neon.urhadd.v2i32(<2 x i32>, <2 x i32>)
+declare <16 x i8> @llvm.aarch64.neon.srhadd.v16i8(<16 x i8>, <16 x i8>)
 declare <8 x i16> @llvm.aarch64.neon.srhadd.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.aarch64.neon.srhadd.v4i32(<4 x i32>, <4 x i32>)
+declare <16 x i8> @llvm.aarch64.neon.urhadd.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.aarch64.neon.urhadd.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.aarch64.neon.urhadd.v4i32(<4 x i32>, <4 x i32>)
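
These CHECK lines match the output format of
llvm/utils/update_llc_test_checks.py (note the "// %bb.0:" markers), so
the two "mov w8, #32766" hunks above are presumably just regeneration
churn from llc now annotating immediates with "// =0x7ffe". Assuming a
built llc is on PATH, the checks can be regenerated with:

    llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/AArch64/hadd-combine.ll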