[llvm] 25ed2ab - [SVE] Add isel patterns for SABA/UABA.

Paul Walker via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 22 05:12:32 PST 2022


Author: Paul Walker
Date: 2022-02-22T13:09:57Z
New Revision: 25ed2ab3418b09f253e3f3d16b2bfc8b90121f65

URL: https://github.com/llvm/llvm-project/commit/25ed2ab3418b09f253e3f3d16b2bfc8b90121f65
DIFF: https://github.com/llvm/llvm-project/commit/25ed2ab3418b09f253e3f3d16b2bfc8b90121f65.diff

LOG: [SVE] Add isel patterns for SABA/UABA.

Differential Revision: https://reviews.llvm.org/D119830
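
For context: the new AArch64saba and AArch64uaba PatFrags match either the existing ACLE intrinsic or a generic add of an all-active predicated absolute-difference node, so plain IR of the shape exercised by sve-aba.ll below now selects to a single SABA/UABA instruction. As a rough, illustrative C sketch (not part of this patch; function names and flags are made up, and the exact output depends on the vectorizer), loops like the following are expected to vectorize into that shape when built for SVE2, e.g. with -O3 -march=armv8-a+sve2:

  /* Illustrative only: scalar loops whose SVE2-vectorized form matches the
   * ext -> sub -> abs -> trunc -> add chains tested in sve-aba.ll. */
  #include <stdint.h>
  #include <stdlib.h>

  void saba_accumulate(int8_t *restrict acc, const int8_t *b,
                       const int8_t *c, size_t n) {
    for (size_t i = 0; i != n; ++i)
      acc[i] += (int8_t)abs((int)b[i] - (int)c[i]);   /* sext, sub, abs, trunc, add */
  }

  void uaba_accumulate(uint8_t *restrict acc, const uint8_t *b,
                       const uint8_t *c, size_t n) {
    for (size_t i = 0; i != n; ++i) {
      int d = (int)b[i] - (int)c[i];                  /* zext, sub */
      acc[i] += (uint8_t)(d < 0 ? -d : d);            /* abs, trunc, add */
    }
  }

The signed loop mirrors the sext chain in saba_b and the unsigned loop the zext chain in uaba_b from the new test.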

Added: 
    llvm/test/CodeGen/AArch64/sve-aba.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 2901527a07d36..0bd75e29e7ba5 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -244,6 +244,14 @@ def AArch64fmul_m1 : EitherVSelectOrPassthruPatFrags<int_aarch64_sve_fmul, AArch
 def AArch64fadd_m1 : EitherVSelectOrPassthruPatFrags<int_aarch64_sve_fadd, AArch64fadd_p>;
 def AArch64fsub_m1 : EitherVSelectOrPassthruPatFrags<int_aarch64_sve_fsub, AArch64fsub_p>;
 
+def AArch64saba : PatFrags<(ops node:$op1, node:$op2, node:$op3),
+                           [(int_aarch64_sve_saba node:$op1, node:$op2, node:$op3),
+                            (add node:$op1, (AArch64sabd_p (SVEAllActive), node:$op2, node:$op3))]>;
+
+def AArch64uaba : PatFrags<(ops node:$op1, node:$op2, node:$op3),
+                           [(int_aarch64_sve_uaba node:$op1, node:$op2, node:$op3),
+                            (add node:$op1, (AArch64uabd_p (SVEAllActive), node:$op2, node:$op3))]>;
+
 def SDT_AArch64FCVT : SDTypeProfile<1, 3, [
   SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisVec<3>,
   SDTCVecEltisVT<1,i1>
@@ -2970,8 +2978,8 @@ let Predicates = [HasSVE2orStreamingSVE] in {
   defm SQCADD_ZZI : sve2_int_cadd<0b1, "sqcadd", int_aarch64_sve_sqcadd_x>;
 
   // SVE2 integer absolute difference and accumulate
-  defm SABA_ZZZ : sve2_int_absdiff_accum<0b0, "saba", int_aarch64_sve_saba>;
-  defm UABA_ZZZ : sve2_int_absdiff_accum<0b1, "uaba", int_aarch64_sve_uaba>;
+  defm SABA_ZZZ : sve2_int_absdiff_accum<0b0, "saba", AArch64saba>;
+  defm UABA_ZZZ : sve2_int_absdiff_accum<0b1, "uaba", AArch64uaba>;
 
   // SVE2 integer absolute difference and accumulate long
   defm SABALB_ZZZ : sve2_int_absdiff_accum_long<0b00, "sabalb", int_aarch64_sve_sabalb>;

diff --git a/llvm/test/CodeGen/AArch64/sve-aba.ll b/llvm/test/CodeGen/AArch64/sve-aba.ll
new file mode 100644
index 0000000000000..cf7da62bbcf00
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-aba.ll
@@ -0,0 +1,277 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;
+; SABA
+;
+
+define <vscale x 16 x i8> @saba_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) #0 {
+; CHECK-LABEL: saba_b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    saba z0.b, z1.b, z2.b
+; CHECK-NEXT:    ret
+  %b.sext = sext <vscale x 16 x i8> %b to <vscale x 16 x i16>
+  %c.sext = sext <vscale x 16 x i8> %c to <vscale x 16 x i16>
+  %sub = sub <vscale x 16 x i16> %b.sext, %c.sext
+  %abs = call <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16> %sub, i1 true)
+  %trunc = trunc <vscale x 16 x i16> %abs to <vscale x 16 x i8>
+  %add = add <vscale x 16 x i8> %a, %trunc
+  ret <vscale x 16 x i8> %add
+}
+
+define <vscale x 16 x i8> @saba_b_promoted_ops(<vscale x 16 x i8> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c) #0 {
+; CHECK-LABEL: saba_b_promoted_ops:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z1.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z2.b, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    saba z0.b, z1.b, z2.b
+; CHECK-NEXT:    ret
+  %b.sext = sext <vscale x 16 x i1> %b to <vscale x 16 x i8>
+  %c.sext = sext <vscale x 16 x i1> %c to <vscale x 16 x i8>
+  %sub = sub <vscale x 16 x i8> %b.sext, %c.sext
+  %abs = call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> %sub, i1 true)
+  %add = add <vscale x 16 x i8> %a, %abs
+  ret <vscale x 16 x i8> %add
+}
+
+define <vscale x 8 x i16> @saba_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) #0 {
+; CHECK-LABEL: saba_h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    saba z0.h, z1.h, z2.h
+; CHECK-NEXT:    ret
+  %b.sext = sext <vscale x 8 x i16> %b to <vscale x 8 x i32>
+  %c.sext = sext <vscale x 8 x i16> %c to <vscale x 8 x i32>
+  %sub = sub <vscale x 8 x i32> %b.sext, %c.sext
+  %abs = call <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32> %sub, i1 true)
+  %trunc = trunc <vscale x 8 x i32> %abs to <vscale x 8 x i16>
+  %add = add <vscale x 8 x i16> %a, %trunc
+  ret <vscale x 8 x i16> %add
+}
+
+define <vscale x 8 x i16> @saba_h_promoted_ops(<vscale x 8 x i16> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c) #0 {
+; CHECK-LABEL: saba_h_promoted_ops:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    sxtb z1.h, p0/m, z1.h
+; CHECK-NEXT:    sxtb z2.h, p0/m, z2.h
+; CHECK-NEXT:    saba z0.h, z1.h, z2.h
+; CHECK-NEXT:    ret
+  %b.sext = sext <vscale x 8 x i8> %b to <vscale x 8 x i16>
+  %c.sext = sext <vscale x 8 x i8> %c to <vscale x 8 x i16>
+  %sub = sub <vscale x 8 x i16> %b.sext, %c.sext
+  %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true)
+  %add = add <vscale x 8 x i16> %a, %abs
+  ret <vscale x 8 x i16> %add
+}
+
+define <vscale x 4 x i32> @saba_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) #0 {
+; CHECK-LABEL: saba_s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    saba z0.s, z1.s, z2.s
+; CHECK-NEXT:    ret
+  %b.sext = sext <vscale x 4 x i32> %b to <vscale x 4 x i64>
+  %c.sext = sext <vscale x 4 x i32> %c to <vscale x 4 x i64>
+  %sub = sub <vscale x 4 x i64> %b.sext, %c.sext
+  %abs = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %sub, i1 true)
+  %trunc = trunc <vscale x 4 x i64> %abs to <vscale x 4 x i32>
+  %add = add <vscale x 4 x i32> %a, %trunc
+  ret <vscale x 4 x i32> %add
+}
+
+define <vscale x 4 x i32> @saba_s_promoted_ops(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c) #0 {
+; CHECK-LABEL: saba_s_promoted_ops:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    sxth z1.s, p0/m, z1.s
+; CHECK-NEXT:    sxth z2.s, p0/m, z2.s
+; CHECK-NEXT:    saba z0.s, z1.s, z2.s
+; CHECK-NEXT:    ret
+  %b.sext = sext <vscale x 4 x i16> %b to <vscale x 4 x i32>
+  %c.sext = sext <vscale x 4 x i16> %c to <vscale x 4 x i32>
+  %sub = sub <vscale x 4 x i32> %b.sext, %c.sext
+  %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true)
+  %add = add <vscale x 4 x i32> %a, %abs
+  ret <vscale x 4 x i32> %add
+}
+
+define <vscale x 2 x i64> @saba_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) #0 {
+; CHECK-LABEL: saba_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    saba z0.d, z1.d, z2.d
+; CHECK-NEXT:    ret
+  %b.sext = sext <vscale x 2 x i64> %b to <vscale x 2 x i128>
+  %c.sext = sext <vscale x 2 x i64> %c to <vscale x 2 x i128>
+  %sub = sub <vscale x 2 x i128> %b.sext, %c.sext
+  %abs = call <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128> %sub, i1 true)
+  %trunc = trunc <vscale x 2 x i128> %abs to <vscale x 2 x i64>
+  %add = add <vscale x 2 x i64> %a, %trunc
+  ret <vscale x 2 x i64> %add
+}
+
+define <vscale x 2 x i64> @saba_d_promoted_ops(<vscale x 2 x i64> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c) #0 {
+; CHECK-LABEL: saba_d_promoted_ops:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    sxtw z1.d, p0/m, z1.d
+; CHECK-NEXT:    sxtw z2.d, p0/m, z2.d
+; CHECK-NEXT:    saba z0.d, z1.d, z2.d
+; CHECK-NEXT:    ret
+  %b.sext = sext <vscale x 2 x i32> %b to <vscale x 2 x i64>
+  %c.sext = sext <vscale x 2 x i32> %c to <vscale x 2 x i64>
+  %sub = sub <vscale x 2 x i64> %b.sext, %c.sext
+  %abs = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %sub, i1 true)
+  %add = add <vscale x 2 x i64> %a, %abs
+  ret <vscale x 2 x i64> %add
+}
+
+;
+; UABA
+;
+
+define <vscale x 16 x i8> @uaba_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) #0 {
+; CHECK-LABEL: uaba_b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uaba z0.b, z1.b, z2.b
+; CHECK-NEXT:    ret
+  %b.zext = zext <vscale x 16 x i8> %b to <vscale x 16 x i16>
+  %c.zext = zext <vscale x 16 x i8> %c to <vscale x 16 x i16>
+  %sub = sub <vscale x 16 x i16> %b.zext, %c.zext
+  %abs = call <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16> %sub, i1 true)
+  %trunc = trunc <vscale x 16 x i16> %abs to <vscale x 16 x i8>
+  %add = add <vscale x 16 x i8> %a, %trunc
+  ret <vscale x 16 x i8> %add
+}
+
+define <vscale x 16 x i8> @uaba_b_promoted_ops(<vscale x 16 x i8> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c) #0 {
+; CHECK-LABEL: uaba_b_promoted_ops:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z1.b, p0/z, #1 // =0x1
+; CHECK-NEXT:    mov z2.b, p1/z, #1 // =0x1
+; CHECK-NEXT:    uaba z0.b, z1.b, z2.b
+; CHECK-NEXT:    ret
+  %b.zext = zext <vscale x 16 x i1> %b to <vscale x 16 x i8>
+  %c.zext = zext <vscale x 16 x i1> %c to <vscale x 16 x i8>
+  %sub = sub <vscale x 16 x i8> %b.zext, %c.zext
+  %abs = call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> %sub, i1 true)
+  %add = add <vscale x 16 x i8> %a, %abs
+  ret <vscale x 16 x i8> %add
+}
+
+define <vscale x 8 x i16> @uaba_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) #0 {
+; CHECK-LABEL: uaba_h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uaba z0.h, z1.h, z2.h
+; CHECK-NEXT:    ret
+  %b.zext = zext <vscale x 8 x i16> %b to <vscale x 8 x i32>
+  %c.zext = zext <vscale x 8 x i16> %c to <vscale x 8 x i32>
+  %sub = sub <vscale x 8 x i32> %b.zext, %c.zext
+  %abs = call <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32> %sub, i1 true)
+  %trunc = trunc <vscale x 8 x i32> %abs to <vscale x 8 x i16>
+  %add = add <vscale x 8 x i16> %a, %trunc
+  ret <vscale x 8 x i16> %add
+}
+
+define <vscale x 8 x i16> @uaba_h_promoted_ops(<vscale x 8 x i16> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c) #0 {
+; CHECK-LABEL: uaba_h_promoted_ops:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and z1.h, z1.h, #0xff
+; CHECK-NEXT:    and z2.h, z2.h, #0xff
+; CHECK-NEXT:    uaba z0.h, z1.h, z2.h
+; CHECK-NEXT:    ret
+  %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i16>
+  %c.zext = zext <vscale x 8 x i8> %c to <vscale x 8 x i16>
+  %sub = sub <vscale x 8 x i16> %b.zext, %c.zext
+  %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true)
+  %add = add <vscale x 8 x i16> %a, %abs
+  ret <vscale x 8 x i16> %add
+}
+
+define <vscale x 4 x i32> @uaba_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) #0 {
+; CHECK-LABEL: uaba_s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uaba z0.s, z1.s, z2.s
+; CHECK-NEXT:    ret
+  %b.zext = zext <vscale x 4 x i32> %b to <vscale x 4 x i64>
+  %c.zext = zext <vscale x 4 x i32> %c to <vscale x 4 x i64>
+  %sub = sub <vscale x 4 x i64> %b.zext, %c.zext
+  %abs = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %sub, i1 true)
+  %trunc = trunc <vscale x 4 x i64> %abs to <vscale x 4 x i32>
+  %add = add <vscale x 4 x i32> %a, %trunc
+  ret <vscale x 4 x i32> %add
+}
+
+define <vscale x 4 x i32> @uaba_s_promoted_ops(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c) #0 {
+; CHECK-LABEL: uaba_s_promoted_ops:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and z1.s, z1.s, #0xffff
+; CHECK-NEXT:    and z2.s, z2.s, #0xffff
+; CHECK-NEXT:    uaba z0.s, z1.s, z2.s
+; CHECK-NEXT:    ret
+  %b.zext = zext <vscale x 4 x i16> %b to <vscale x 4 x i32>
+  %c.zext = zext <vscale x 4 x i16> %c to <vscale x 4 x i32>
+  %sub = sub <vscale x 4 x i32> %b.zext, %c.zext
+  %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true)
+  %add = add <vscale x 4 x i32> %a, %abs
+  ret <vscale x 4 x i32> %add
+}
+
+define <vscale x 2 x i64> @uaba_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) #0 {
+; CHECK-LABEL: uaba_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uaba z0.d, z1.d, z2.d
+; CHECK-NEXT:    ret
+  %b.zext = zext <vscale x 2 x i64> %b to <vscale x 2 x i128>
+  %c.zext = zext <vscale x 2 x i64> %c to <vscale x 2 x i128>
+  %sub = sub <vscale x 2 x i128> %b.zext, %c.zext
+  %abs = call <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128> %sub, i1 true)
+  %trunc = trunc <vscale x 2 x i128> %abs to <vscale x 2 x i64>
+  %add = add <vscale x 2 x i64> %a, %trunc
+  ret <vscale x 2 x i64> %add
+}
+
+define <vscale x 2 x i64> @uaba_d_promoted_ops(<vscale x 2 x i64> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c) #0 {
+; CHECK-LABEL: uaba_d_promoted_ops:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and z1.d, z1.d, #0xffffffff
+; CHECK-NEXT:    and z2.d, z2.d, #0xffffffff
+; CHECK-NEXT:    uaba z0.d, z1.d, z2.d
+; CHECK-NEXT:    ret
+  %b.zext = zext <vscale x 2 x i32> %b to <vscale x 2 x i64>
+  %c.zext = zext <vscale x 2 x i32> %c to <vscale x 2 x i64>
+  %sub = sub <vscale x 2 x i64> %b.zext, %c.zext
+  %abs = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %sub, i1 true)
+  %add = add <vscale x 2 x i64> %a, %abs
+  ret <vscale x 2 x i64> %add
+}
+
+; A variant of uaba_s but with the add operands switched.
+define <vscale x 4 x i32> @uaba_s_commutative(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) #0 {
+; CHECK-LABEL: uaba_s_commutative:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uaba z0.s, z1.s, z2.s
+; CHECK-NEXT:    ret
+  %b.zext = zext <vscale x 4 x i32> %b to <vscale x 4 x i64>
+  %c.zext = zext <vscale x 4 x i32> %c to <vscale x 4 x i64>
+  %sub = sub <vscale x 4 x i64> %b.zext, %c.zext
+  %abs = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %sub, i1 true)
+  %trunc = trunc <vscale x 4 x i64> %abs to <vscale x 4 x i32>
+  %add = add <vscale x 4 x i32> %trunc, %a
+  ret <vscale x 4 x i32> %add
+}
+
+declare <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8>, i1)
+
+declare <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16>, i1)
+declare <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16>, i1)
+
+declare <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32>, i1)
+declare <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32>, i1)
+
+declare <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64>, i1)
+declare <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64>, i1)
+
+declare <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128>, i1)
+
+attributes #0 = { "target-features"="+neon,+sve,+sve2" }