[llvm] 44a4f5f - [AArch64][SVE] Add SVE2 mla unpredicated intrinsics.
Danilo Carvalho Grael via llvm-commits
llvm-commits at lists.llvm.org
Fri Jan 31 08:42:56 PST 2020
Author: Danilo Carvalho Grael
Date: 2020-01-31T11:39:12-05:00
New Revision: 44a4f5fc6a5a28a599ec255ac33846113073cf07
URL: https://github.com/llvm/llvm-project/commit/44a4f5fc6a5a28a599ec255ac33846113073cf07
DIFF: https://github.com/llvm/llvm-project/commit/44a4f5fc6a5a28a599ec255ac33846113073cf07.diff
LOG: [AArch64][SVE] Add SVE2 mla unpredicated intrinsics.
Summary:
Add intrinsics for the unpredicated SVE2 MLA instructions (an IR usage sketch follows the list):
- smlalb, smlalt, umlalb, umlalt, smlslb, smlslt, umlslb, umlslt
- sqdmlalb, sqdmlalt, sqdmlslb, sqdmlslt
- sqdmlalbt, sqdmlslbt
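As a quick illustration (mirroring the tests added in this patch), each
intrinsic takes a wide accumulator and two half-width multiplicands; for
example, smlalb multiplies the even-numbered (bottom) signed elements,
widens the products, and adds them to the accumulator. Value names such
as %acc below are placeholders:

  ; smlalb: signed widening multiply-add using the bottom elements.
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.smlalb.nxv8i16(
             <vscale x 8 x i16> %acc, ; wide accumulator
             <vscale x 16 x i8> %b,   ; narrow multiplicand
             <vscale x 16 x i8> %c)   ; narrow multiplicand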
Reviewers: efriedma, sdesmalen, cameron.mcinally, c-rhodes, rengolin, kmclaughlin
Subscribers: tschuett, kristof.beyls, hiraditya, rkruppe, psnobl, llvm-commits, amehsan
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D73746
Added:
llvm/test/CodeGen/AArch64/sve2-mla-unpredicated.ll
Modified:
llvm/include/llvm/IR/IntrinsicsAArch64.td
llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
llvm/lib/Target/AArch64/SVEInstrFormats.td
Removed:
################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 0e51e2990fe4..9dfbabb42168 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1816,6 +1816,7 @@ def int_aarch64_sve_sqshrunt : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
def int_aarch64_sve_sqrshrunb : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
def int_aarch64_sve_sqrshrunt : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
+// SVE2 MLA LANE.
def int_aarch64_sve_smlalb_lane : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_smlalt_lane : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_umlalb_lane : SVE2_3VectorArg_Indexed_Intrinsic;
@@ -1829,4 +1830,22 @@ def int_aarch64_sve_sqdmlalt_lane : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_sqdmlslb_lane : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_sqdmlslt_lane : SVE2_3VectorArg_Indexed_Intrinsic;
+// SVE2 MLA Unpredicated.
+def int_aarch64_sve_smlalb : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_smlalt : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_umlalb : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_umlalt : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_smlslb : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_smlslt : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_umlslb : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_umlslt : SVE2_3VectorArg_Long_Intrinsic;
+
+def int_aarch64_sve_sqdmlalb : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_sqdmlalt : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_sqdmlslb : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_sqdmlslt : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_sqdmlalbt : SVE2_3VectorArg_Long_Intrinsic;
+def int_aarch64_sve_sqdmlslbt : SVE2_3VectorArg_Long_Intrinsic;
+
+
}
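For context, SVE2_3VectorArg_Long_Intrinsic (defined earlier in
IntrinsicsAArch64.td) describes a wide result/accumulator plus two
half-width element sources, with no memory side effects. A rough sketch
of its shape (see the file itself for the authoritative definition):

  // Sketch only: wide anyvector result, matching accumulator, and two
  // sources whose elements are half the width of the result's.
  class SVE2_3VectorArg_Long_Intrinsic
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>,
                   LLVMSubdivide2VectorType<0>,
                   LLVMSubdivide2VectorType<0>],
                  [IntrNoMem]>;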
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 13b063585a44..af5b03106c3f 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1477,14 +1477,14 @@ let Predicates = [HasSVE2] in {
defm UMLSLT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1111, "umlslt", int_aarch64_sve_umlslt_lane>;
// SVE2 integer multiply-add long (vectors, unpredicated)
- defm SMLALB_ZZZ : sve2_int_mla_long<0b10000, "smlalb">;
- defm SMLALT_ZZZ : sve2_int_mla_long<0b10001, "smlalt">;
- defm UMLALB_ZZZ : sve2_int_mla_long<0b10010, "umlalb">;
- defm UMLALT_ZZZ : sve2_int_mla_long<0b10011, "umlalt">;
- defm SMLSLB_ZZZ : sve2_int_mla_long<0b10100, "smlslb">;
- defm SMLSLT_ZZZ : sve2_int_mla_long<0b10101, "smlslt">;
- defm UMLSLB_ZZZ : sve2_int_mla_long<0b10110, "umlslb">;
- defm UMLSLT_ZZZ : sve2_int_mla_long<0b10111, "umlslt">;
+ defm SMLALB_ZZZ : sve2_int_mla_long<0b10000, "smlalb", int_aarch64_sve_smlalb>;
+ defm SMLALT_ZZZ : sve2_int_mla_long<0b10001, "smlalt", int_aarch64_sve_smlalt>;
+ defm UMLALB_ZZZ : sve2_int_mla_long<0b10010, "umlalb", int_aarch64_sve_umlalb>;
+ defm UMLALT_ZZZ : sve2_int_mla_long<0b10011, "umlalt", int_aarch64_sve_umlalt>;
+ defm SMLSLB_ZZZ : sve2_int_mla_long<0b10100, "smlslb", int_aarch64_sve_smlslb>;
+ defm SMLSLT_ZZZ : sve2_int_mla_long<0b10101, "smlslt", int_aarch64_sve_smlslt>;
+ defm UMLSLB_ZZZ : sve2_int_mla_long<0b10110, "umlslb", int_aarch64_sve_umlslb>;
+ defm UMLSLT_ZZZ : sve2_int_mla_long<0b10111, "umlslt", int_aarch64_sve_umlslt>;
// SVE2 saturating multiply-add long (indexed)
defm SQDMLALB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b0100, "sqdmlalb", int_aarch64_sve_sqdmlalb_lane>;
@@ -1493,14 +1493,14 @@ let Predicates = [HasSVE2] in {
defm SQDMLSLT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b0111, "sqdmlslt", int_aarch64_sve_sqdmlslt_lane>;
// SVE2 saturating multiply-add long (vectors, unpredicated)
- defm SQDMLALB_ZZZ : sve2_int_mla_long<0b11000, "sqdmlalb">;
- defm SQDMLALT_ZZZ : sve2_int_mla_long<0b11001, "sqdmlalt">;
- defm SQDMLSLB_ZZZ : sve2_int_mla_long<0b11010, "sqdmlslb">;
- defm SQDMLSLT_ZZZ : sve2_int_mla_long<0b11011, "sqdmlslt">;
+ defm SQDMLALB_ZZZ : sve2_int_mla_long<0b11000, "sqdmlalb", int_aarch64_sve_sqdmlalb>;
+ defm SQDMLALT_ZZZ : sve2_int_mla_long<0b11001, "sqdmlalt", int_aarch64_sve_sqdmlalt>;
+ defm SQDMLSLB_ZZZ : sve2_int_mla_long<0b11010, "sqdmlslb", int_aarch64_sve_sqdmlslb>;
+ defm SQDMLSLT_ZZZ : sve2_int_mla_long<0b11011, "sqdmlslt", int_aarch64_sve_sqdmlslt>;
// SVE2 saturating multiply-add interleaved long
- defm SQDMLALBT_ZZZ : sve2_int_mla_long<0b00010, "sqdmlalbt">;
- defm SQDMLSLBT_ZZZ : sve2_int_mla_long<0b00011, "sqdmlslbt">;
+ defm SQDMLALBT_ZZZ : sve2_int_mla_long<0b00010, "sqdmlalbt", int_aarch64_sve_sqdmlalbt>;
+ defm SQDMLSLBT_ZZZ : sve2_int_mla_long<0b00011, "sqdmlslbt", int_aarch64_sve_sqdmlslbt>;
// SVE2 integer halving add/subtract (predicated)
defm SHADD_ZPmZ : sve2_int_arith_pred<0b100000, "shadd", int_aarch64_sve_shadd>;
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index a3043917196c..85c9421aa32d 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -2352,10 +2352,14 @@ multiclass sve2_int_mla<bit S, string asm, SDPatternOperator op> {
def : SVE_3_Op_Pat<nxv2i64, op, nxv2i64, nxv2i64, nxv2i64, !cast<Instruction>(NAME # _D)>;
}
-multiclass sve2_int_mla_long<bits<5> opc, string asm> {
+multiclass sve2_int_mla_long<bits<5> opc, string asm, SDPatternOperator op> {
def _H : sve2_int_mla<0b01, opc, asm, ZPR16, ZPR8>;
def _S : sve2_int_mla<0b10, opc, asm, ZPR32, ZPR16>;
def _D : sve2_int_mla<0b11, opc, asm, ZPR64, ZPR32>;
+
+ def : SVE_3_Op_Pat<nxv8i16, op, nxv8i16, nxv16i8, nxv16i8, !cast<Instruction>(NAME # _H)>;
+ def : SVE_3_Op_Pat<nxv4i32, op, nxv4i32, nxv8i16, nxv8i16, !cast<Instruction>(NAME # _S)>;
+ def : SVE_3_Op_Pat<nxv2i64, op, nxv2i64, nxv4i32, nxv4i32, !cast<Instruction>(NAME # _D)>;
}
//===----------------------------------------------------------------------===//
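For reference, SVE_3_Op_Pat (defined near the top of SVEInstrFormats.td)
is a simple one-to-one selection pattern, so the three new instances
above map each intrinsic call directly onto the matching _H/_S/_D
instruction. Roughly (see the file for the authoritative definition):

  // Sketch only: match (op a, b, c) at the given types and select the
  // corresponding instruction with the same three operands.
  class SVE_3_Op_Pat<ValueType vtd, SDPatternOperator op, ValueType vt1,
                     ValueType vt2, ValueType vt3, Instruction inst>
      : Pat<(vtd (op vt1:$Op1, vt2:$Op2, vt3:$Op3)),
            (inst $Op1, $Op2, $Op3)>;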
diff --git a/llvm/test/CodeGen/AArch64/sve2-mla-unpredicated.ll b/llvm/test/CodeGen/AArch64/sve2-mla-unpredicated.ll
new file mode 100644
index 000000000000..1f8a136fb9f2
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve2-mla-unpredicated.ll
@@ -0,0 +1,590 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s
+
+;
+; SMLALB
+;
+define <vscale x 8 x i16> @smlalb_i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c) {
+; CHECK-LABEL: smlalb_i16
+; CHECK: smlalb z0.h, z1.b, z2.b
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.smlalb.nxv8i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c)
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @smlalb_i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c) {
+; CHECK-LABEL: smlalb_i32
+; CHECK: smlalb z0.s, z1.h, z2.h
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlalb.nxv4i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c)
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @smlalb_i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c) {
+; CHECK-LABEL: smlalb_i64
+; CHECK: smlalb z0.d, z1.s, z2.s
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlalb.nxv2i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c)
+ ret <vscale x 2 x i64> %res
+}
+
+;
+; SMLALT
+;
+define <vscale x 8 x i16> @smlalt_i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c) {
+; CHECK-LABEL: smlalt_i16
+; CHECK: smlalt z0.h, z1.b, z2.b
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.smlalt.nxv8i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c)
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @smlalt_i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c) {
+; CHECK-LABEL: smlalt_i32
+; CHECK: smlalt z0.s, z1.h, z2.h
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlalt.nxv4i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c)
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @smlalt_i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c) {
+; CHECK-LABEL: smlalt_i64
+; CHECK: smlalt z0.d, z1.s, z2.s
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlalt.nxv2i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c)
+ ret <vscale x 2 x i64> %res
+}
+
+;
+; UMLALB
+;
+define <vscale x 8 x i16> @umlalb_i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c) {
+; CHECK-LABEL: umlalb_i16
+; CHECK: umlalb z0.h, z1.b, z2.b
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.umlalb.nxv8i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c)
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @umlalb_i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c) {
+; CHECK-LABEL: umlalb_i32
+; CHECK: umlalb z0.s, z1.h, z2.h
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlalb.nxv4i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c)
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @umlalb_i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c) {
+; CHECK-LABEL: umlalb_i64
+; CHECK: umlalb z0.d, z1.s, z2.s
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlalb.nxv2i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c)
+ ret <vscale x 2 x i64> %res
+}
+
+;
+; UMLALT
+;
+define <vscale x 8 x i16> @umlalt_i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c) {
+; CHECK-LABEL: umlalt_i16
+; CHECK: umlalt z0.h, z1.b, z2.b
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.umlalt.nxv8i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c)
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @umlalt_i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c) {
+; CHECK-LABEL: umlalt_i32
+; CHECK: umlalt z0.s, z1.h, z2.h
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlalt.nxv4i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c)
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @umlalt_i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c) {
+; CHECK-LABEL: umlalt_i64
+; CHECK: umlalt z0.d, z1.s, z2.s
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlalt.nxv2i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c)
+ ret <vscale x 2 x i64> %res
+}
+
+;
+; SMLSLB
+;
+define <vscale x 8 x i16> @smlslb_i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c) {
+; CHECK-LABEL: smlslb_i16
+; CHECK: smlslb z0.h, z1.b, z2.b
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.smlslb.nxv8i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c)
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @smlslb_i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c) {
+; CHECK-LABEL: smlslb_i32
+; CHECK: smlslb z0.s, z1.h, z2.h
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslb.nxv4i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c)
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @smlslb_i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c) {
+; CHECK-LABEL: smlslb_i64
+; CHECK: smlslb z0.d, z1.s, z2.s
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslb.nxv2i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c)
+ ret <vscale x 2 x i64> %res
+}
+
+;
+; SMLSLT
+;
+define <vscale x 8 x i16> @smlslt_i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c) {
+; CHECK-LABEL: smlslt_i16
+; CHECK: smlslt z0.h, z1.b, z2.b
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.smlslt.nxv8i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c)
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @smlslt_i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c) {
+; CHECK-LABEL: smlslt_i32
+; CHECK: smlslt z0.s, z1.h, z2.h
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslt.nxv4i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c)
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @smlslt_i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c) {
+; CHECK-LABEL: smlslt_i64
+; CHECK: smlslt z0.d, z1.s, z2.s
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslt.nxv2i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c)
+ ret <vscale x 2 x i64> %res
+}
+
+;
+; UMLSLB
+;
+define <vscale x 8 x i16> @umlslb_i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c) {
+; CHECK-LABEL: umlslb_i16
+; CHECK: umlslb z0.h, z1.b, z2.b
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.umlslb.nxv8i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c)
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @umlslb_i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c) {
+; CHECK-LABEL: umlslb_i32
+; CHECK: umlslb z0.s, z1.h, z2.h
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslb.nxv4i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c)
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @umlslb_i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c) {
+; CHECK-LABEL: umlslb_i64
+; CHECK: umlslb z0.d, z1.s, z2.s
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslb.nxv2i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c)
+ ret <vscale x 2 x i64> %res
+}
+
+;
+; UMLSLT
+;
+define <vscale x 8 x i16> @umlslt_i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c) {
+; CHECK-LABEL: umlslt_i16
+; CHECK: umlslt z0.h, z1.b, z2.b
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.umlslt.nxv8i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c)
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @umlslt_i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c) {
+; CHECK-LABEL: umlslt_i32
+; CHECK: umlslt z0.s, z1.h, z2.h
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslt.nxv4i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c)
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @umlslt_i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c) {
+; CHECK-LABEL: umlslt_i64
+; CHECK: umlslt z0.d, z1.s, z2.s
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslt.nxv2i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c)
+ ret <vscale x 2 x i64> %res
+}
+
+;
+; SQDMLALB
+;
+define <vscale x 8 x i16> @sqdmlalb_i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c) {
+; CHECK-LABEL: sqdmlalb_i16
+; CHECK: sqdmlalb z0.h, z1.b, z2.b
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlalb.nxv8i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c)
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @sqdmlalb_i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c) {
+; CHECK-LABEL: sqdmlalb_i32
+; CHECK: sqdmlalb z0.s, z1.h, z2.h
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalb.nxv4i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c)
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @sqdmlalb_i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c) {
+; CHECK-LABEL: sqdmlalb_i64
+; CHECK: sqdmlalb z0.d, z1.s, z2.s
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalb.nxv2i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c)
+ ret <vscale x 2 x i64> %res
+}
+
+;
+; SQDMLALT
+;
+define <vscale x 8 x i16> @sqdmlalt_i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c) {
+; CHECK-LABEL: sqdmlalt_i16
+; CHECK: sqdmlalt z0.h, z1.b, z2.b
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlalt.nxv8i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c)
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @sqdmlalt_i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c) {
+; CHECK-LABEL: sqdmlalt_i32
+; CHECK: sqdmlalt z0.s, z1.h, z2.h
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalt.nxv4i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c)
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @sqdmlalt_i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c) {
+; CHECK-LABEL: sqdmlalt_i64
+; CHECK: sqdmlalt z0.d, z1.s, z2.s
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalt.nxv2i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c)
+ ret <vscale x 2 x i64> %res
+}
+
+;
+; SQDMLSLB
+;
+define <vscale x 8 x i16> @sqdmlslb_i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c) {
+; CHECK-LABEL: sqdmlslb_i16
+; CHECK: sqdmlslb z0.h, z1.b, z2.b
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlslb.nxv8i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c)
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @sqdmlslb_i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c) {
+; CHECK-LABEL: sqdmlslb_i32
+; CHECK: sqdmlslb z0.s, z1.h, z2.h
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslb.nxv4i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c)
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @sqdmlslb_i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c) {
+; CHECK-LABEL: sqdmlslb_i64
+; CHECK: sqdmlslb z0.d, z1.s, z2.s
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslb.nxv2i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c)
+ ret <vscale x 2 x i64> %res
+}
+
+;
+; SQDMLSLT
+;
+define <vscale x 8 x i16> @sqdmlslt_i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c) {
+; CHECK-LABEL: sqdmlslt_i16
+; CHECK: sqdmlslt z0.h, z1.b, z2.b
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlslt.nxv8i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c)
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @sqdmlslt_i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c) {
+; CHECK-LABEL: sqdmlslt_i32
+; CHECK: sqdmlslt z0.s, z1.h, z2.h
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslt.nxv4i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c)
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @sqdmlslt_i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c) {
+; CHECK-LABEL: sqdmlslt_i64
+; CHECK: sqdmlslt z0.d, z1.s, z2.s
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslt.nxv2i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c)
+ ret <vscale x 2 x i64> %res
+}
+
+;
+; SQDMLALBT
+;
+define <vscale x 8 x i16> @sqdmlalbt_i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c) {
+; CHECK-LABEL: sqdmlalbt_i16
+; CHECK: sqdmlalbt z0.h, z1.b, z2.b
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlalbt.nxv8i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c)
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @sqdmlalbt_i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c) {
+; CHECK-LABEL: sqdmlalbt_i32
+; CHECK: sqdmlalbt z0.s, z1.h, z2.h
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalbt.nxv4i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c)
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @sqdmlalbt_i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c) {
+; CHECK-LABEL: sqdmlalbt_i64
+; CHECK: sqdmlalbt z0.d, z1.s, z2.s
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalbt.nxv2i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c)
+ ret <vscale x 2 x i64> %res
+}
+
+;
+; SQDMLSLBT
+;
+define <vscale x 8 x i16> @sqdmlslbt_i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c) {
+; CHECK-LABEL: sqdmlslbt_i16
+; CHECK: sqdmlslbt z0.h, z1.b, z2.b
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlslbt.nxv8i16(<vscale x 8 x i16> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i8> %c)
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @sqdmlslbt_i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c) {
+; CHECK-LABEL: sqdmlslbt_i32
+; CHECK: sqdmlslbt z0.s, z1.h, z2.h
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslbt.nxv4i32(<vscale x 4 x i32> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i16> %c)
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @sqdmlslbt_i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c) {
+; CHECK-LABEL: sqdmlslbt_i64
+; CHECK: sqdmlslbt z0.d, z1.s, z2.s
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslbt.nxv2i64(<vscale x 2 x i64> %a,
+ <vscale x 4 x i32> %b,
+ <vscale x 4 x i32> %c)
+ ret <vscale x 2 x i64> %res
+}
+
+declare <vscale x 8 x i16> @llvm.aarch64.sve.smlalb.nxv8i16(<vscale x 8 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.smlalb.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.smlalb.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.smlalt.nxv8i16(<vscale x 8 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.smlalt.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.smlalt.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.umlalb.nxv8i16(<vscale x 8 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.umlalb.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.umlalb.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.umlalt.nxv8i16(<vscale x 8 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.umlalt.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.umlalt.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.smlslb.nxv8i16(<vscale x 8 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.smlslb.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.smlslb.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.smlslt.nxv8i16(<vscale x 8 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.smlslt.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.smlslt.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.umlslb.nxv8i16(<vscale x 8 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.umlslb.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.umlslb.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.umlslt.nxv8i16(<vscale x 8 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.umlslt.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.umlslt.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlalb.nxv8i16(<vscale x 8 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalb.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalb.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlalt.nxv8i16(<vscale x 8 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalt.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalt.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlslb.nxv8i16(<vscale x 8 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslb.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslb.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlslt.nxv8i16(<vscale x 8 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslt.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslt.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlalbt.nxv8i16(<vscale x 8 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalbt.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalbt.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlslbt.nxv8i16(<vscale x 8 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslbt.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslbt.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>)