[llvm] 78b75b4 - [AArch64] Add TableGen patterns to generate uaddlv

Jingu Kang via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 18 09:43:54 PDT 2021


Author: Jingu Kang
Date: 2021-06-18T17:23:26+01:00
New Revision: 78b75b452b08e4ce3ad468e426e6e4fa1c92f7bd

URL: https://github.com/llvm/llvm-project/commit/78b75b452b08e4ce3ad468e426e6e4fa1c92f7bd
DIFF: https://github.com/llvm/llvm-project/commit/78b75b452b08e4ce3ad468e426e6e4fa1c92f7bd.diff

LOG: [AArch64] Add TableGen patterns to generate uaddlv

uaddv(uaddlp(x)) ==> uaddlv(x)
addp(uaddlp(x))  ==> uaddlv(x)

Differential Revision: https://reviews.llvm.org/D104236
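
For illustration, here is a minimal IR sketch of the shape these patterns match
(the function name is made up; the intrinsic and reduction declarations are the
same ones used by the new neon-uaddlv.ll test below). Before this change the
reduction selected to a uaddlp followed by an addv; with the new patterns it
selects to a single "uaddlv s0, v0.8h":

declare <4 x i32> @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16>)
declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)

define i32 @reduce_example(<8 x i16> %v) {
  ; pairwise widening add: <8 x i16> -> <4 x i32>
  %widened = call <4 x i32> @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16> %v)
  ; horizontal add of the widened lanes
  %sum = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %widened)
  ret i32 %sum
}

The result can be checked with the same RUN line as the new test:
llc -mtriple aarch64-none-linux-gnu < example.ll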

Added: 
    llvm/test/CodeGen/AArch64/neon-uaddlv.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64InstrInfo.td
    llvm/test/CodeGen/AArch64/arm64-vabs.ll
    llvm/test/CodeGen/AArch64/neon-sad.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index c303d87c838b..301f1ed69638 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -5653,6 +5653,25 @@ defm FMAXV   : SIMDFPAcrossLanes<0b01111, 0, "fmaxv", int_aarch64_neon_fmaxv>;
 defm FMINNMV : SIMDFPAcrossLanes<0b01100, 1, "fminnmv", int_aarch64_neon_fminnmv>;
 defm FMINV   : SIMDFPAcrossLanes<0b01111, 1, "fminv", int_aarch64_neon_fminv>;
 
+// Patterns for uaddv(uaddlp(x)) ==> uaddlv
+def : Pat<(i32 (vector_extract (v8i16 (insert_subvector undef,
+            (v4i16 (AArch64uaddv (v4i16 (AArch64uaddlp (v8i8 V64:$op))))),
+            (i64 0))), (i64 0))),
+          (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
+           (UADDLVv4i16v V64:$op), ssub), ssub)>;
+def : Pat<(i32 (vector_extract (v8i16 (AArch64uaddv (v8i16 (AArch64uaddlp
+           (v16i8 V128:$op))))), (i64 0))),
+          (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
+           (UADDLVv16i8v V128:$op), hsub), ssub)>;
+def : Pat<(v4i32 (AArch64uaddv (v4i32 (AArch64uaddlp (v8i16 V128:$op))))),
+          (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), (UADDLVv8i16v V128:$op), ssub)>;
+
+// Patterns for addp(uaddlp(x)) ==> uaddlv
+def : Pat<(v2i32 (AArch64uaddv (v2i32 (AArch64uaddlp (v4i16 V64:$op))))),
+          (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)), (UADDLVv4i16v V64:$op), ssub)>;
+def : Pat<(v2i64 (AArch64uaddv (v2i64 (AArch64uaddlp (v4i32 V128:$op))))),
+          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (UADDLVv4i32v V128:$op), dsub)>;
+
 // Patterns for across-vector intrinsics, that have a node equivalent, that
 // returns a vector (with only the low lane defined) instead of a scalar.
 // In effect, opNode is the same as (scalar_to_vector (IntNode)).

diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
index 4d792bcec045..24ddb890085a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
@@ -220,8 +220,7 @@ define i32 @uabd16b_rdx_i32(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uabdl.8h v2, v0, v1
 ; CHECK-NEXT:    uabal2.8h v2, v0, v1
-; CHECK-NEXT:    uaddlp.4s v0, v2
-; CHECK-NEXT:    addv.4s s0, v0
+; CHECK-NEXT:    uaddlv.8h s0, v2
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
   %aext = zext <16 x i8> %a to <16 x i32>
@@ -239,8 +238,7 @@ define i32 @sabd16b_rdx_i32(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sabdl.8h v2, v0, v1
 ; CHECK-NEXT:    sabal2.8h v2, v0, v1
-; CHECK-NEXT:    uaddlp.4s v0, v2
-; CHECK-NEXT:    addv.4s s0, v0
+; CHECK-NEXT:    uaddlv.8h s0, v2
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
   %aext = sext <16 x i8> %a to <16 x i32>

diff --git a/llvm/test/CodeGen/AArch64/neon-sad.ll b/llvm/test/CodeGen/AArch64/neon-sad.ll
index cfd9712efdc3..d0466c615c8c 100644
--- a/llvm/test/CodeGen/AArch64/neon-sad.ll
+++ b/llvm/test/CodeGen/AArch64/neon-sad.ll
@@ -11,8 +11,7 @@ define i32 @test_sad_v16i8_zext(i8* nocapture readonly %a, i8* nocapture readonl
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    uabdl v2.8h, v1.8b, v0.8b
 ; CHECK-NEXT:    uabal2 v2.8h, v1.16b, v0.16b
-; CHECK-NEXT:    uaddlp v0.4s, v2.8h
-; CHECK-NEXT:    addv s0, v0.4s
+; CHECK-NEXT:    uaddlv s0, v2.8h
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
 entry:
@@ -35,8 +34,7 @@ define i32 @test_sad_v16i8_sext(i8* nocapture readonly %a, i8* nocapture readonl
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    sabdl v2.8h, v1.8b, v0.8b
 ; CHECK-NEXT:    sabal2 v2.8h, v1.16b, v0.16b
-; CHECK-NEXT:    uaddlp v0.4s, v2.8h
-; CHECK-NEXT:    addv s0, v0.4s
+; CHECK-NEXT:    uaddlv s0, v2.8h
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/AArch64/neon-uaddlv.ll b/llvm/test/CodeGen/AArch64/neon-uaddlv.ll
new file mode 100644
index 000000000000..3bc55f49f27e
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/neon-uaddlv.ll
@@ -0,0 +1,79 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple aarch64-none-linux-gnu < %s | FileCheck %s
+
+declare <4 x i16>  @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8>) nounwind readnone
+declare <8 x i16>  @llvm.aarch64.neon.uaddlp.v8i16.v16i8(<16 x i8>) nounwind readnone
+declare <4 x i32>  @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16>) nounwind readnone
+declare <2 x i64>  @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32>) nounwind readnone
+declare <2 x i32>  @llvm.aarch64.neon.uaddlp.v2i32.v4i16(<4 x i16>) nounwind readnone
+
+declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>) nounwind readnone
+declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>) nounwind readnone
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) nounwind readnone
+declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>) nounwind readnone
+declare i32 @llvm.vector.reduce.add.v2i32(<2 x i32>) nounwind readnone
+
+define i16 @uaddlv4h_from_v8i8(<8 x i8>* %A) nounwind {
+; CHECK-LABEL: uaddlv4h_from_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    uaddlv s0, v0.4h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i8>, <8 x i8>* %A
+  %tmp3 = call <4 x i16> @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8> %tmp1)
+  %tmp5 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %tmp3)
+  ret i16 %tmp5
+}
+
+define i16 @uaddlv16b_from_v16i8(<16 x i8>* %A) nounwind {
+; CHECK-LABEL: uaddlv16b_from_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    uaddlv h0, v0.16b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %tmp1 = load <16 x i8>, <16 x i8>* %A
+  %tmp3 = call <8 x i16> @llvm.aarch64.neon.uaddlp.v8i16.v16i8(<16 x i8> %tmp1)
+  %tmp5 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %tmp3)
+  ret i16 %tmp5
+}
+
+define i32 @uaddlv8h_from_v8i16(<8 x i16>* %A) nounwind {
+; CHECK-LABEL: uaddlv8h_from_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    uaddlv s0, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %tmp1 = load <8 x i16>, <8 x i16>* %A
+  %tmp3 = call <4 x i32> @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16> %tmp1)
+  %tmp5 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp3)
+  ret i32 %tmp5
+}
+
+define i64 @uaddlv4s_from_v4i32(<4 x i32>* %A) nounwind {
+; CHECK-LABEL: uaddlv4s_from_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    uaddlv d0, v0.4s
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i32>, <4 x i32>* %A
+  %tmp3 = call <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32> %tmp1)
+  %tmp5 = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %tmp3)
+  ret i64 %tmp5
+}
+
+define i32 @uaddlv4h_from_v4i16(<4 x i16>* %A) nounwind {
+; CHECK-LABEL: uaddlv4h_from_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    uaddlv s0, v0.4h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp3 = call <2 x i32> @llvm.aarch64.neon.uaddlp.v2i32.v4i16(<4 x i16> %tmp1)
+  %tmp5 = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %tmp3)
+  ret i32 %tmp5
+}


        


More information about the llvm-commits mailing list