[llvm] a6ad593 - [AArch64] Add more addv tests

Vinay Madhusudan via llvm-commits llvm-commits at lists.llvm.org
Wed Oct 14 03:00:28 PDT 2020


Author: Vinay Madhusudan
Date: 2020-10-14T15:29:44+05:30
New Revision: a6ad5930d5f51a14b6828cfb4bd661c9e7e6e83e

URL: https://github.com/llvm/llvm-project/commit/a6ad5930d5f51a14b6828cfb4bd661c9e7e6e83e
DIFF: https://github.com/llvm/llvm-project/commit/a6ad5930d5f51a14b6828cfb4bd661c9e7e6e83e.diff

LOG: [AArch64] Add more addv tests

Differential Revision: https://reviews.llvm.org/D89365
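
The new coverage extends the existing add_B/add_H/add_S/add_D reductions with v8i8 and v4i16 intrinsic declarations and four addv_combine_* functions, each summing the results of two same-width vector reductions. The CHECK lines are autogenerated, so they can be refreshed after any codegen change; a typical invocation (assuming a built llc is on PATH, otherwise pass --llc-binary) is:

    llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/AArch64/aarch64-addv.ll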

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/aarch64-addv.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/aarch64-addv.ll b/llvm/test/CodeGen/AArch64/aarch64-addv.ll
index c037a35ba379..ed3e998be0b8 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-addv.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-addv.ll
@@ -1,39 +1,57 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64-eabi -aarch64-neon-syntax=generic | FileCheck %s
 
 ; Function Attrs: nounwind readnone
 declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>)
 declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
 declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>)
+declare i8 @llvm.vector.reduce.add.v8i8(<8 x i8>)
 declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>)
 
 define i8 @add_B(<16 x i8>* %arr)  {
-; CHECK-LABEL: add_B
-; CHECK: addv {{b[0-9]+}}, {{v[0-9]+}}.16b
+; CHECK-LABEL: add_B:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    addv b0, v0.16b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %bin.rdx = load <16 x i8>, <16 x i8>* %arr
   %r = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %bin.rdx)
   ret i8 %r
 }
 
 define i16 @add_H(<8 x i16>* %arr)  {
-; CHECK-LABEL: add_H
-; CHECK: addv {{h[0-9]+}}, {{v[0-9]+}}.8h
+; CHECK-LABEL: add_H:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    addv h0, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %bin.rdx = load <8 x i16>, <8 x i16>* %arr
   %r = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %bin.rdx)
   ret i16 %r
 }
 
 define i32 @add_S( <4 x i32>* %arr)  {
-; CHECK-LABEL: add_S
-; CHECK: addv {{s[0-9]+}}, {{v[0-9]+}}.4s
+; CHECK-LABEL: add_S:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    addv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %bin.rdx = load <4 x i32>, <4 x i32>* %arr
   %r = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %bin.rdx)
   ret i32 %r
 }
 
 define i64 @add_D(<2 x i64>* %arr)  {
-; CHECK-LABEL: add_D
-; CHECK-NOT: addv
-; CHECK: addp {{d[0-9]+}}, {{v[0-9]+}}.2d
+; CHECK-LABEL: add_D:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    addp d0, v0.2d
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
   %bin.rdx = load <2 x i64>, <2 x i64>* %arr
   %r = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %bin.rdx)
   ret i64 %r
@@ -42,8 +60,16 @@ define i64 @add_D(<2 x i64>* %arr)  {
 declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
 
 define i32 @oversized_ADDV_256(i8* noalias nocapture readonly %arg1, i8* noalias nocapture readonly %arg2) {
-; CHECK-LABEL: oversized_ADDV_256
-; CHECK: addv {{s[0-9]+}}, {{v[0-9]+}}.4s
+; CHECK-LABEL: oversized_ADDV_256:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    uabdl v0.8h, v0.8b, v1.8b
+; CHECK-NEXT:    ushll v1.4s, v0.4h, #0
+; CHECK-NEXT:    uaddw2 v0.4s, v1.4s, v0.8h
+; CHECK-NEXT:    addv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
 entry:
   %0 = bitcast i8* %arg1 to <8 x i8>*
   %1 = load <8 x i8>, <8 x i8>* %0, align 1
@@ -62,9 +88,81 @@ entry:
 declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
 
 define i32 @oversized_ADDV_512(<16 x i32>* %arr)  {
-; CHECK-LABEL: oversized_ADDV_512
-; CHECK: addv {{s[0-9]+}}, {{v[0-9]+}}.4s
+; CHECK-LABEL: oversized_ADDV_512:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0, #32]
+; CHECK-NEXT:    ldp q3, q2, [x0]
+; CHECK-NEXT:    add v1.4s, v2.4s, v1.4s
+; CHECK-NEXT:    add v0.4s, v3.4s, v0.4s
+; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    addv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %bin.rdx = load <16 x i32>, <16 x i32>* %arr
   %r = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %bin.rdx)
   ret i32 %r
 }
+
+define i8 @addv_combine_i8(<8 x i8> %a1, <8 x i8> %a2) {
+; CHECK-LABEL: addv_combine_i8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    addv b0, v0.8b
+; CHECK-NEXT:    addv b1, v1.8b
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    fmov w9, s1
+; CHECK-NEXT:    add w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %rdx.1 = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %a1)
+  %rdx.2 = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %a2)
+  %r = add i8 %rdx.1, %rdx.2
+  ret i8 %r
+}
+
+define i16 @addv_combine_i16(<4 x i16> %a1, <4 x i16> %a2) {
+; CHECK-LABEL: addv_combine_i16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    addv h0, v0.4h
+; CHECK-NEXT:    addv h1, v1.4h
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    fmov w9, s1
+; CHECK-NEXT:    add w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %rdx.1 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %a1)
+  %rdx.2 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %a2)
+  %r = add i16 %rdx.1, %rdx.2
+  ret i16 %r
+}
+
+define i32 @addv_combine_i32(<4 x i32> %a1, <4 x i32> %a2) {
+; CHECK-LABEL: addv_combine_i32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    addv s0, v0.4s
+; CHECK-NEXT:    addv s1, v1.4s
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    fmov w9, s1
+; CHECK-NEXT:    add w0, w8, w9
+; CHECK-NEXT:    ret
+entry:
+  %rdx.1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a1)
+  %rdx.2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a2)
+  %r = add i32 %rdx.1, %rdx.2
+  ret i32 %r
+}
+
+define i64 @addv_combine_i64(<2 x i64> %a1, <2 x i64> %a2) {
+; CHECK-LABEL: addv_combine_i64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    addp d0, v0.2d
+; CHECK-NEXT:    addp d1, v1.2d
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    fmov x9, d1
+; CHECK-NEXT:    add x0, x8, x9
+; CHECK-NEXT:    ret
+entry:
+  %rdx.1 = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %a1)
+  %rdx.2 = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %a2)
+  %r = add i64 %rdx.1, %rdx.2
+  ret i64 %r
+}
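
The addv_combine_* tests record the current lowering: each reduction becomes its own addv (or addp for v2i64), followed by a scalar add. Because an integer vector.reduce.add is just a wrapping sum of lanes, reduce(a) + reduce(b) equals reduce(a + b), so a later combine could fold each pair into a single reduction. A minimal IR sketch of that folded form (the function name is illustrative, not part of this patch):

    declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)

    define i32 @addv_combine_i32_folded(<4 x i32> %a1, <4 x i32> %a2) {
    entry:
      ; Add the vectors lane-wise first, then reduce once; this would be
      ; expected to lower to a single add v0.4s / addv s0 / fmov w0 sequence
      ; instead of two addv instructions plus a scalar add.
      %sum = add <4 x i32> %a1, %a2
      %r = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %sum)
      ret i32 %r
    }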
