[llvm] fc8d033 - [AArch64][SVE] Add addsub carry long intrinsics

Danilo Carvalho Grael via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 12 07:42:28 PST 2020


Author: Danilo Carvalho Grael
Date: 2020-02-12T10:49:10-05:00
New Revision: fc8d033e966305ebed3ffcd7c633d6e9ebef10ae

URL: https://github.com/llvm/llvm-project/commit/fc8d033e966305ebed3ffcd7c633d6e9ebef10ae
DIFF: https://github.com/llvm/llvm-project/commit/fc8d033e966305ebed3ffcd7c633d6e9ebef10ae.diff

LOG: [AArch64][SVE] Add addsub carry long intrinsics

Summary:
Add intrinsics for the following instructions:
- adclb, adclt, sbclb, sbclt
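
The intrinsics take three vector operands and map one-to-one onto the
corresponding SVE2 add/subtract long with carry instructions. As a minimal
IR sketch (mirroring the test added below), the 32-bit adclb form is
expected to select to a single adclb instruction:

  declare <vscale x 4 x i32> @llvm.aarch64.sve.adclb.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)

  define <vscale x 4 x i32> @example(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
    ; Expected codegen: adclb z0.s, z1.s, z2.s
    %res = call <vscale x 4 x i32> @llvm.aarch64.sve.adclb.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
    ret <vscale x 4 x i32> %res
  }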

Reviewers: kmclaughlin, c-rhodes, sdesmalen, efriedma, rengolin

Reviewed By: kmclaughlin

Subscribers: tschuett, kristof.beyls, hiraditya, rkruppe, psnobl, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D74328

Added: 
    llvm/test/CodeGen/AArch64/sve2-int-addsub-long.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsAArch64.td
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
    llvm/lib/Target/AArch64/SVEInstrFormats.td

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 0e73cc4c65d3..fbc808ca642c 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1931,5 +1931,10 @@ def int_aarch64_sve_sqdmlslt    : SVE2_3VectorArg_Long_Intrinsic;
 def int_aarch64_sve_sqdmlalbt   : SVE2_3VectorArg_Long_Intrinsic;
 def int_aarch64_sve_sqdmlslbt   : SVE2_3VectorArg_Long_Intrinsic;
 
+// SVE2 ADDSUB Long Unpredicated.
+def int_aarch64_sve_adclb       : AdvSIMD_3VectorArg_Intrinsic;
+def int_aarch64_sve_adclt       : AdvSIMD_3VectorArg_Intrinsic;
+def int_aarch64_sve_sbclb       : AdvSIMD_3VectorArg_Intrinsic;
+def int_aarch64_sve_sbclt       : AdvSIMD_3VectorArg_Intrinsic;
 
 }

diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 0778c4aed2ca..a986348dff32 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1619,10 +1619,10 @@ let Predicates = [HasSVE2] in {
   defm UABALT_ZZZ : sve2_int_absdiff_accum_long<0b11, "uabalt", int_aarch64_sve_uabalt>;
 
   // SVE2 integer add/subtract long with carry
-  defm ADCLB_ZZZ : sve2_int_addsub_long_carry<0b00, "adclb">;
-  defm ADCLT_ZZZ : sve2_int_addsub_long_carry<0b01, "adclt">;
-  defm SBCLB_ZZZ : sve2_int_addsub_long_carry<0b10, "sbclb">;
-  defm SBCLT_ZZZ : sve2_int_addsub_long_carry<0b11, "sbclt">;
+  defm ADCLB_ZZZ : sve2_int_addsub_long_carry<0b00, "adclb", int_aarch64_sve_adclb>;
+  defm ADCLT_ZZZ : sve2_int_addsub_long_carry<0b01, "adclt", int_aarch64_sve_adclt>;
+  defm SBCLB_ZZZ : sve2_int_addsub_long_carry<0b10, "sbclb", int_aarch64_sve_sbclb>;
+  defm SBCLT_ZZZ : sve2_int_addsub_long_carry<0b11, "sbclt", int_aarch64_sve_sbclt>;
 
   // SVE2 bitwise shift right narrow (bottom)
   defm SQSHRUNB_ZZI  : sve2_int_bin_shift_imm_right_narrow_bottom<0b000, "sqshrunb",  int_aarch64_sve_sqshrunb>;

diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index 4aaffeaf0756..2df9f890baa1 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -3189,11 +3189,14 @@ multiclass sve2_int_absdiff_accum_long<bits<2> opc, string asm,
   def : SVE_3_Op_Pat<nxv2i64, op, nxv2i64, nxv4i32, nxv4i32, !cast<Instruction>(NAME # _D)>;
 }
 
-multiclass sve2_int_addsub_long_carry<bits<2> opc, string asm> {
+multiclass sve2_int_addsub_long_carry<bits<2> opc, string asm, SDPatternOperator op> {
   def _S : sve2_int_absdiff_accum<{ opc{1}, 0b0 }, { 0b010, opc{0} }, asm,
                                   ZPR32, ZPR32>;
   def _D : sve2_int_absdiff_accum<{ opc{1}, 0b1 }, { 0b010, opc{0} }, asm,
                                   ZPR64, ZPR64>;
+
+  def : SVE_3_Op_Pat<nxv4i32, op, nxv4i32, nxv4i32, nxv4i32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_3_Op_Pat<nxv2i64, op, nxv2i64, nxv2i64, nxv2i64, !cast<Instruction>(NAME # _D)>;
 }
 
 //===----------------------------------------------------------------------===//

diff --git a/llvm/test/CodeGen/AArch64/sve2-int-addsub-long.ll b/llvm/test/CodeGen/AArch64/sve2-int-addsub-long.ll
new file mode 100644
index 000000000000..ff96a2f9776d
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve2-int-addsub-long.ll
@@ -0,0 +1,102 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s
+
+;
+; ADCLB (vector, long, unpredicated)
+;
+define <vscale x 4 x i32> @adclb_i32(<vscale x 4 x i32> %a,
+                                     <vscale x 4 x i32> %b,
+                                     <vscale x 4 x i32> %c) {
+; CHECK-LABEL: adclb_i32
+; CHECK: adclb z0.s, z1.s, z2.s
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.adclb.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @adclb_i64(<vscale x 2 x i64> %a,
+                                     <vscale x 2 x i64> %b,
+                                     <vscale x 2 x i64> %c) {
+; CHECK-LABEL: adclb_i64
+; CHECK: adclb z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.adclb.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c)
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; ADCLT (vector, long, unpredicated)
+;
+define <vscale x 4 x i32> @adclt_i32(<vscale x 4 x i32> %a,
+                                     <vscale x 4 x i32> %b,
+                                     <vscale x 4 x i32> %c) {
+; CHECK-LABEL: adclt_i32
+; CHECK: adclt z0.s, z1.s, z2.s
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.adclt.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @adclt_i64(<vscale x 2 x i64> %a,
+                                     <vscale x 2 x i64> %b,
+                                     <vscale x 2 x i64> %c) {
+; CHECK-LABEL: adclt_i64
+; CHECK: adclt z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.adclt.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c)
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; SBCLB (vector, long, unpredicated)
+;
+define <vscale x 4 x i32> @sbclb_i32(<vscale x 4 x i32> %a,
+                                     <vscale x 4 x i32> %b,
+                                     <vscale x 4 x i32> %c) {
+; CHECK-LABEL: sbclb_i32
+; CHECK: sbclb z0.s, z1.s, z2.s
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sbclb.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @sbclb_i64(<vscale x 2 x i64> %a,
+                                     <vscale x 2 x i64> %b,
+                                     <vscale x 2 x i64> %c) {
+; CHECK-LABEL: sbclb_i64
+; CHECK: sbclb z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sbclb.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c)
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; SBCLT (vector, long, unpredicated)
+;
+define <vscale x 4 x i32> @sbclt_i32(<vscale x 4 x i32> %a,
+                                     <vscale x 4 x i32> %b,
+                                     <vscale x 4 x i32> %c) {
+; CHECK-LABEL: sbclt_i32
+; CHECK: sbclt z0.s, z1.s, z2.s
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sbclt.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @sbclt_i64(<vscale x 2 x i64> %a,
+                                     <vscale x 2 x i64> %b,
+                                     <vscale x 2 x i64> %c) {
+; CHECK-LABEL: sbclt_i64
+; CHECK: sbclt z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sbclt.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c)
+  ret <vscale x 2 x i64> %res
+}
+
+declare <vscale x 4 x i32> @llvm.aarch64.sve.adclb.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.adclb.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.adclt.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.adclt.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sbclb.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sbclb.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sbclt.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sbclt.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>)
