[llvm] 0610637 - [AArch64][SVE] Add remaining SVE2 mla indexed intrinsics.

Danilo Carvalho Grael via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 30 10:26:23 PST 2020


Author: Danilo Carvalho Grael
Date: 2020-01-30T13:32:11-05:00
New Revision: 0610637aac9cf2dedbb92da2d52dfbfd9b6331d2

URL: https://github.com/llvm/llvm-project/commit/0610637aac9cf2dedbb92da2d52dfbfd9b6331d2
DIFF: https://github.com/llvm/llvm-project/commit/0610637aac9cf2dedbb92da2d52dfbfd9b6331d2.diff

LOG: [AArch64][SVE] Add remaining SVE2 mla indexed intrinsics.

Summary:
Add remaining SVE2 mla indexed intrinsics:
- sqdmlalb, sqdmlalt, sqdmlslb, sqdmlslt

Add the _lane suffix and switch the immediate type to i32 for all mla indexed intrinsics, to align with the ACLE builtin definitions.
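
For illustration, a minimal LLVM IR sketch of one renamed intrinsic (mirroring the new tests below; the wrapper function name is hypothetical), showing the _lane suffix and the i32 lane immediate:

  ; illustrative caller only; the intrinsic signature matches the test declarations
  define <vscale x 4 x i32> @example_sqdmlalb(<vscale x 4 x i32> %a,
                                              <vscale x 8 x i16> %b,
                                              <vscale x 8 x i16> %c) {
    %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalb.lane.nxv4i32(<vscale x 4 x i32> %a,
                                                                           <vscale x 8 x i16> %b,
                                                                           <vscale x 8 x i16> %c,
                                                                           i32 1)
    ret <vscale x 4 x i32> %res
  }
  declare <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalb.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)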

Reviewers: efriedma, sdesmalen, cameron.mcinally, c-rhodes, rengolin, kmclaughlin

Subscribers: tschuett, kristof.beyls, hiraditya, rkruppe, arphaman, psnobl, llvm-commits, amehsan

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D73633

Added: 
    

Modified: 
    llvm/include/llvm/IR/IntrinsicsAArch64.td
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
    llvm/lib/Target/AArch64/SVEInstrFormats.td
    llvm/test/CodeGen/AArch64/sve2-mla-indexed.ll

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 6fbcfe8b6690..91a9b2454e61 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1100,7 +1100,7 @@ let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
                 [LLVMMatchType<0>,
                  LLVMSubdivide2VectorType<0>,
                  LLVMSubdivide2VectorType<0>,
-                 llvm_i64_ty],
+                 llvm_i32_ty],
                 [IntrNoMem, ImmArg<3>]>;
 
   // NOTE: There is no relationship between these intrinsics beyond an attempt
@@ -1791,13 +1791,17 @@ def int_aarch64_sve_sqshrunt  : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
 def int_aarch64_sve_sqrshrunb : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
 def int_aarch64_sve_sqrshrunt : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
 
-def int_aarch64_sve_smlalb    : SVE2_3VectorArg_Indexed_Intrinsic;
-def int_aarch64_sve_smlalt    : SVE2_3VectorArg_Indexed_Intrinsic;
-def int_aarch64_sve_umlalb    : SVE2_3VectorArg_Indexed_Intrinsic;
-def int_aarch64_sve_umlalt    : SVE2_3VectorArg_Indexed_Intrinsic;
-def int_aarch64_sve_smlslb    : SVE2_3VectorArg_Indexed_Intrinsic;
-def int_aarch64_sve_smlslt    : SVE2_3VectorArg_Indexed_Intrinsic;
-def int_aarch64_sve_umlslb    : SVE2_3VectorArg_Indexed_Intrinsic;
-def int_aarch64_sve_umlslt    : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_smlalb_lane   : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_smlalt_lane   : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_umlalb_lane   : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_umlalt_lane   : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_smlslb_lane   : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_smlslt_lane   : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_umlslb_lane   : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_umlslt_lane   : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_sqdmlalb_lane : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_sqdmlalt_lane : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_sqdmlslb_lane : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_sqdmlslt_lane : SVE2_3VectorArg_Indexed_Intrinsic;
 
 }

diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 924313f774a3..c2e607a11536 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1467,14 +1467,14 @@ let Predicates = [HasSVE2] in {
   defm SQDMULLT_ZZZI : sve2_int_mul_long_by_indexed_elem<0b101, "sqdmullt">;
 
   // SVE2 integer multiply-add long (indexed)
-  defm SMLALB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1000, "smlalb", int_aarch64_sve_smlalb>;
-  defm SMLALT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1001, "smlalt", int_aarch64_sve_smlalt>;
-  defm UMLALB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1010, "umlalb", int_aarch64_sve_umlalb>;
-  defm UMLALT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1011, "umlalt", int_aarch64_sve_umlalt>;
-  defm SMLSLB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1100, "smlslb", int_aarch64_sve_smlslb>;
-  defm SMLSLT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1101, "smlslt", int_aarch64_sve_smlslt>;
-  defm UMLSLB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1110, "umlslb", int_aarch64_sve_umlslb>;
-  defm UMLSLT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1111, "umlslt", int_aarch64_sve_umlslt>;
+  defm SMLALB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1000, "smlalb", int_aarch64_sve_smlalb_lane>;
+  defm SMLALT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1001, "smlalt", int_aarch64_sve_smlalt_lane>;
+  defm UMLALB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1010, "umlalb", int_aarch64_sve_umlalb_lane>;
+  defm UMLALT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1011, "umlalt", int_aarch64_sve_umlalt_lane>;
+  defm SMLSLB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1100, "smlslb", int_aarch64_sve_smlslb_lane>;
+  defm SMLSLT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1101, "smlslt", int_aarch64_sve_smlslt_lane>;
+  defm UMLSLB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1110, "umlslb", int_aarch64_sve_umlslb_lane>;
+  defm UMLSLT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1111, "umlslt", int_aarch64_sve_umlslt_lane>;
 
   // SVE2 integer multiply-add long (vectors, unpredicated)
   defm SMLALB_ZZZ : sve2_int_mla_long<0b10000, "smlalb">;
@@ -1487,10 +1487,10 @@ let Predicates = [HasSVE2] in {
   defm UMLSLT_ZZZ : sve2_int_mla_long<0b10111, "umlslt">;
 
   // SVE2 saturating multiply-add long (indexed)
-  defm SQDMLALB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b0100, "sqdmlalb", null_frag>;
-  defm SQDMLALT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b0101, "sqdmlalt", null_frag>;
-  defm SQDMLSLB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b0110, "sqdmlslb", null_frag>;
-  defm SQDMLSLT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b0111, "sqdmlslt", null_frag>;
+  defm SQDMLALB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b0100, "sqdmlalb", int_aarch64_sve_sqdmlalb_lane>;
+  defm SQDMLALT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b0101, "sqdmlalt", int_aarch64_sve_sqdmlalt_lane>;
+  defm SQDMLSLB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b0110, "sqdmlslb", int_aarch64_sve_sqdmlslb_lane>;
+  defm SQDMLSLT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b0111, "sqdmlslt", int_aarch64_sve_sqdmlslt_lane>;
 
   // SVE2 saturating multiply-add long (vectors, unpredicated)
   defm SQDMLALB_ZZZ : sve2_int_mla_long<0b11000, "sqdmlalb">;

diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index 97cb9f71c3b0..76ef6042a88a 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -2414,7 +2414,7 @@ multiclass sve2_int_mla_by_indexed_elem<bits<2> opc, bit S, string asm,
 
 multiclass sve2_int_mla_long_by_indexed_elem<bits<4> opc, string asm, SDPatternOperator op> {
   def _S : sve2_int_mla_by_indexed_elem<0b10, { opc{3}, 0b0, opc{2-1}, ?, opc{0} },
-                                        asm, ZPR32, ZPR16, ZPR3b16, VectorIndexH> {
+                                        asm, ZPR32, ZPR16, ZPR3b16, VectorIndexH32b> {
     bits<3> Zm;
     bits<3> iop;
     let Inst{20-19} = iop{2-1};
@@ -2422,7 +2422,7 @@ multiclass sve2_int_mla_long_by_indexed_elem<bits<4> opc, string asm, SDPatternO
     let Inst{11} = iop{0};
   }
   def _D : sve2_int_mla_by_indexed_elem<0b11, { opc{3}, 0b0, opc{2-1}, ?, opc{0} },
-                                        asm, ZPR64, ZPR32, ZPR4b32, VectorIndexS> {
+                                        asm, ZPR64, ZPR32, ZPR4b32, VectorIndexS32b> {
     bits<4> Zm;
     bits<2> iop;
     let Inst{20} = iop{1};
@@ -2430,8 +2430,8 @@ multiclass sve2_int_mla_long_by_indexed_elem<bits<4> opc, string asm, SDPatternO
     let Inst{11} = iop{0};
   }
 
-  def : SVE_4_Op_Imm_Pat<nxv4i32, op, nxv4i32, nxv8i16, nxv8i16, i64, VectorIndexH_timm, !cast<Instruction>(NAME # _S)>;
-  def : SVE_4_Op_Imm_Pat<nxv2i64, op, nxv2i64, nxv4i32, nxv4i32, i64, VectorIndexS_timm, !cast<Instruction>(NAME # _D)>;
+  def : SVE_4_Op_Imm_Pat<nxv4i32, op, nxv4i32, nxv8i16, nxv8i16, i32, VectorIndexH32b_timm, !cast<Instruction>(NAME # _S)>;
+  def : SVE_4_Op_Imm_Pat<nxv2i64, op, nxv2i64, nxv4i32, nxv4i32, i32, VectorIndexS32b_timm, !cast<Instruction>(NAME # _D)>;
 }
 
 //===----------------------------------------------------------------------===//

diff --git a/llvm/test/CodeGen/AArch64/sve2-mla-indexed.ll b/llvm/test/CodeGen/AArch64/sve2-mla-indexed.ll
index ded415598c5a..5a98dc8806c5 100644
--- a/llvm/test/CodeGen/AArch64/sve2-mla-indexed.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-mla-indexed.ll
@@ -9,10 +9,10 @@ define <vscale x 4 x i32> @smlalb_i32(<vscale x 4 x i32> %a,
 ; CHECK-LABEL: smlalb_i32
 ; CHECK: smlalb z0.s, z1.h, z2.h[1]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlalb.nxv4i32(<vscale x 4 x i32> %a,
-                                                                  <vscale x 8 x i16> %b,
-                                                                  <vscale x 8 x i16> %c,
-                                                                  i64 1)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlalb.lane.nxv4i32(<vscale x 4 x i32> %a,
+                                                                       <vscale x 8 x i16> %b,
+                                                                       <vscale x 8 x i16> %c,
+                                                                       i32 1)
   ret <vscale x 4 x i32> %res
 }
 
@@ -22,10 +22,10 @@ define <vscale x 4 x i32> @smlalb_i32_2(<vscale x 4 x i32> %a,
 ; CHECK-LABEL: smlalb_i32_2
 ; CHECK: smlalb z0.s, z1.h, z2.h[7]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlalb.nxv4i32(<vscale x 4 x i32> %a,
-                                                                  <vscale x 8 x i16> %b,
-                                                                  <vscale x 8 x i16> %c,
-                                                                  i64 7)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlalb.lane.nxv4i32(<vscale x 4 x i32> %a,
+                                                                       <vscale x 8 x i16> %b,
+                                                                       <vscale x 8 x i16> %c,
+                                                                       i32 7)
   ret <vscale x 4 x i32> %res
 }
 
@@ -35,10 +35,10 @@ define <vscale x 2 x i64> @smlalb_i64(<vscale x 2 x i64> %a,
 ; CHECK-LABEL: smlalb_i64
 ; CHECK: smlalb z0.d, z1.s, z2.s[0]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlalb.nxv2i64(<vscale x 2 x i64> %a,
-                                                                  <vscale x 4 x i32> %b,
-                                                                  <vscale x 4 x i32> %c,
-                                                                  i64 0)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlalb.lane.nxv2i64(<vscale x 2 x i64> %a,
+                                                                       <vscale x 4 x i32> %b,
+                                                                       <vscale x 4 x i32> %c,
+                                                                       i32 0)
   ret <vscale x 2 x i64> %res
 }
 
@@ -48,10 +48,10 @@ define <vscale x 2 x i64> @smlalb_i64_2(<vscale x 2 x i64> %a,
 ; CHECK-LABEL: smlalb_i64_2
 ; CHECK: smlalb z0.d, z1.s, z2.s[3]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlalb.nxv2i64(<vscale x 2 x i64> %a,
-                                                                  <vscale x 4 x i32> %b,
-                                                                  <vscale x 4 x i32> %c,
-                                                                  i64 3)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlalb.lane.nxv2i64(<vscale x 2 x i64> %a,
+                                                                       <vscale x 4 x i32> %b,
+                                                                       <vscale x 4 x i32> %c,
+                                                                       i32 3)
   ret <vscale x 2 x i64> %res
 }
 
@@ -64,10 +64,10 @@ define <vscale x 4 x i32> @smlalt_i32(<vscale x 4 x i32> %a,
 ; CHECK-LABEL: smlalt_i32
 ; CHECK: smlalt z0.s, z1.h, z2.h[1]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlalt.nxv4i32(<vscale x 4 x i32> %a,
-                                                                  <vscale x 8 x i16> %b,
-                                                                  <vscale x 8 x i16> %c,
-                                                                  i64 1)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlalt.lane.nxv4i32(<vscale x 4 x i32> %a,
+                                                                       <vscale x 8 x i16> %b,
+                                                                       <vscale x 8 x i16> %c,
+                                                                       i32 1)
   ret <vscale x 4 x i32> %res
 }
 
@@ -77,10 +77,10 @@ define <vscale x 4 x i32> @smlalt_i32_2(<vscale x 4 x i32> %a,
 ; CHECK-LABEL: smlalt_i32_2
 ; CHECK: smlalt z0.s, z1.h, z2.h[7]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlalt.nxv4i32(<vscale x 4 x i32> %a,
-                                                                  <vscale x 8 x i16> %b,
-                                                                  <vscale x 8 x i16> %c,
-                                                                  i64 7)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlalt.lane.nxv4i32(<vscale x 4 x i32> %a,
+                                                                       <vscale x 8 x i16> %b,
+                                                                       <vscale x 8 x i16> %c,
+                                                                       i32 7)
   ret <vscale x 4 x i32> %res
 }
 
@@ -90,10 +90,10 @@ define <vscale x 2 x i64> @smlalt_i64(<vscale x 2 x i64> %a,
 ; CHECK-LABEL: smlalt_i64
 ; CHECK: smlalt z0.d, z1.s, z2.s[0]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlalt.nxv2i64(<vscale x 2 x i64> %a,
-                                                                  <vscale x 4 x i32> %b,
-                                                                  <vscale x 4 x i32> %c,
-                                                                  i64 0)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlalt.lane.nxv2i64(<vscale x 2 x i64> %a,
+                                                                       <vscale x 4 x i32> %b,
+                                                                       <vscale x 4 x i32> %c,
+                                                                       i32 0)
   ret <vscale x 2 x i64> %res
 }
 
@@ -103,10 +103,10 @@ define <vscale x 2 x i64> @smlalt_i64_2(<vscale x 2 x i64> %a,
 ; CHECK-LABEL: smlalt_i64_2
 ; CHECK: smlalt z0.d, z1.s, z2.s[3]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlalt.nxv2i64(<vscale x 2 x i64> %a,
-                                                                  <vscale x 4 x i32> %b,
-                                                                  <vscale x 4 x i32> %c,
-                                                                  i64 3)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlalt.lane.nxv2i64(<vscale x 2 x i64> %a,
+                                                                       <vscale x 4 x i32> %b,
+                                                                       <vscale x 4 x i32> %c,
+                                                                       i32 3)
   ret <vscale x 2 x i64> %res
 }
 
@@ -119,10 +119,10 @@ define <vscale x 4 x i32> @umlalb_i32(<vscale x 4 x i32> %a,
 ; CHECK-LABEL: umlalb_i32
 ; CHECK: umlalb z0.s, z1.h, z2.h[1]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlalb.nxv4i32(<vscale x 4 x i32> %a,
-                                                                  <vscale x 8 x i16> %b,
-                                                                  <vscale x 8 x i16> %c,
-                                                                  i64 1)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlalb.lane.nxv4i32(<vscale x 4 x i32> %a,
+                                                                       <vscale x 8 x i16> %b,
+                                                                       <vscale x 8 x i16> %c,
+                                                                       i32 1)
   ret <vscale x 4 x i32> %res
 }
 
@@ -132,10 +132,10 @@ define <vscale x 4 x i32> @umlalb_i32_2(<vscale x 4 x i32> %a,
 ; CHECK-LABEL: umlalb_i32_2
 ; CHECK: umlalb z0.s, z1.h, z2.h[7]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlalb.nxv4i32(<vscale x 4 x i32> %a,
-                                                                  <vscale x 8 x i16> %b,
-                                                                  <vscale x 8 x i16> %c,
-                                                                  i64 7)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlalb.lane.nxv4i32(<vscale x 4 x i32> %a,
+                                                                       <vscale x 8 x i16> %b,
+                                                                       <vscale x 8 x i16> %c,
+                                                                       i32 7)
   ret <vscale x 4 x i32> %res
 }
 
@@ -145,10 +145,10 @@ define <vscale x 2 x i64> @umlalb_i64(<vscale x 2 x i64> %a,
 ; CHECK-LABEL: umlalb_i64
 ; CHECK: umlalb z0.d, z1.s, z2.s[0]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlalb.nxv2i64(<vscale x 2 x i64> %a,
-                                                                  <vscale x 4 x i32> %b,
-                                                                  <vscale x 4 x i32> %c,
-                                                                  i64 0)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlalb.lane.nxv2i64(<vscale x 2 x i64> %a,
+                                                                       <vscale x 4 x i32> %b,
+                                                                       <vscale x 4 x i32> %c,
+                                                                       i32 0)
   ret <vscale x 2 x i64> %res
 }
 
@@ -158,10 +158,10 @@ define <vscale x 2 x i64> @umlalb_i64_2(<vscale x 2 x i64> %a,
 ; CHECK-LABEL: umlalb_i64_2
 ; CHECK: umlalb z0.d, z1.s, z2.s[3]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlalb.nxv2i64(<vscale x 2 x i64> %a,
-                                                                  <vscale x 4 x i32> %b,
-                                                                  <vscale x 4 x i32> %c,
-                                                                  i64 3)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlalb.lane.nxv2i64(<vscale x 2 x i64> %a,
+                                                                       <vscale x 4 x i32> %b,
+                                                                       <vscale x 4 x i32> %c,
+                                                                       i32 3)
   ret <vscale x 2 x i64> %res
 }
 
@@ -174,10 +174,10 @@ define <vscale x 4 x i32> @umlalt_i32(<vscale x 4 x i32> %a,
 ; CHECK-LABEL: umlalt_i32
 ; CHECK: umlalt z0.s, z1.h, z2.h[1]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlalt.nxv4i32(<vscale x 4 x i32> %a,
-                                                                  <vscale x 8 x i16> %b,
-                                                                  <vscale x 8 x i16> %c,
-                                                                  i64 1)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlalt.lane.nxv4i32(<vscale x 4 x i32> %a,
+                                                                       <vscale x 8 x i16> %b,
+                                                                       <vscale x 8 x i16> %c,
+                                                                       i32 1)
   ret <vscale x 4 x i32> %res
 }
 
@@ -187,10 +187,10 @@ define <vscale x 4 x i32> @umlalt_i32_2(<vscale x 4 x i32> %a,
 ; CHECK-LABEL: umlalt_i32_2
 ; CHECK: umlalt z0.s, z1.h, z2.h[7]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlalt.nxv4i32(<vscale x 4 x i32> %a,
-                                                                  <vscale x 8 x i16> %b,
-                                                                  <vscale x 8 x i16> %c,
-                                                                  i64 7)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlalt.lane.nxv4i32(<vscale x 4 x i32> %a,
+                                                                       <vscale x 8 x i16> %b,
+                                                                       <vscale x 8 x i16> %c,
+                                                                       i32 7)
   ret <vscale x 4 x i32> %res
 }
 
@@ -200,10 +200,10 @@ define <vscale x 2 x i64> @umlalt_i64(<vscale x 2 x i64> %a,
 ; CHECK-LABEL: umlalt_i64
 ; CHECK: umlalt z0.d, z1.s, z2.s[0]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlalt.nxv2i64(<vscale x 2 x i64> %a,
-                                                                  <vscale x 4 x i32> %b,
-                                                                  <vscale x 4 x i32> %c,
-                                                                  i64 0)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlalt.lane.nxv2i64(<vscale x 2 x i64> %a,
+                                                                       <vscale x 4 x i32> %b,
+                                                                       <vscale x 4 x i32> %c,
+                                                                       i32 0)
   ret <vscale x 2 x i64> %res
 }
 
@@ -213,10 +213,10 @@ define <vscale x 2 x i64> @umlalt_i64_2(<vscale x 2 x i64> %a,
 ; CHECK-LABEL: umlalt_i64_2
 ; CHECK: umlalt z0.d, z1.s, z2.s[3]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlalt.nxv2i64(<vscale x 2 x i64> %a,
-                                                                  <vscale x 4 x i32> %b,
-                                                                  <vscale x 4 x i32> %c,
-                                                                  i64 3)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlalt.lane.nxv2i64(<vscale x 2 x i64> %a,
+                                                                       <vscale x 4 x i32> %b,
+                                                                       <vscale x 4 x i32> %c,
+                                                                       i32 3)
   ret <vscale x 2 x i64> %res
 }
 
@@ -229,10 +229,10 @@ define <vscale x 4 x i32> @smlslb_i32(<vscale x 4 x i32> %a,
 ; CHECK-LABEL: smlslb_i32
 ; CHECK: smlslb z0.s, z1.h, z2.h[1]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslb.nxv4i32(<vscale x 4 x i32> %a,
-                                                                  <vscale x 8 x i16> %b,
-                                                                  <vscale x 8 x i16> %c,
-                                                                  i64 1)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslb.lane.nxv4i32(<vscale x 4 x i32> %a,
+                                                                       <vscale x 8 x i16> %b,
+                                                                       <vscale x 8 x i16> %c,
+                                                                       i32 1)
   ret <vscale x 4 x i32> %res
 }
 
@@ -242,10 +242,10 @@ define <vscale x 4 x i32> @smlslb_i32_2(<vscale x 4 x i32> %a,
 ; CHECK-LABEL: smlslb_i32_2
 ; CHECK: smlslb z0.s, z1.h, z2.h[7]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslb.nxv4i32(<vscale x 4 x i32> %a,
-                                                                  <vscale x 8 x i16> %b,
-                                                                  <vscale x 8 x i16> %c,
-                                                                  i64 7)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslb.lane.nxv4i32(<vscale x 4 x i32> %a,
+                                                                       <vscale x 8 x i16> %b,
+                                                                       <vscale x 8 x i16> %c,
+                                                                       i32 7)
   ret <vscale x 4 x i32> %res
 }
 
@@ -255,10 +255,10 @@ define <vscale x 2 x i64> @smlslb_i64(<vscale x 2 x i64> %a,
 ; CHECK-LABEL: smlslb_i64
 ; CHECK: smlslb z0.d, z1.s, z2.s[0]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslb.nxv2i64(<vscale x 2 x i64> %a,
-                                                                  <vscale x 4 x i32> %b,
-                                                                  <vscale x 4 x i32> %c,
-                                                                  i64 0)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslb.lane.nxv2i64(<vscale x 2 x i64> %a,
+                                                                       <vscale x 4 x i32> %b,
+                                                                       <vscale x 4 x i32> %c,
+                                                                       i32 0)
   ret <vscale x 2 x i64> %res
 }
 
@@ -268,10 +268,10 @@ define <vscale x 2 x i64> @smlslb_i64_2(<vscale x 2 x i64> %a,
 ; CHECK-LABEL: smlslb_i64_2
 ; CHECK: smlslb z0.d, z1.s, z2.s[3]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslb.nxv2i64(<vscale x 2 x i64> %a,
-                                                                  <vscale x 4 x i32> %b,
-                                                                  <vscale x 4 x i32> %c,
-                                                                  i64 3)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslb.lane.nxv2i64(<vscale x 2 x i64> %a,
+                                                                       <vscale x 4 x i32> %b,
+                                                                       <vscale x 4 x i32> %c,
+                                                                       i32 3)
   ret <vscale x 2 x i64> %res
 }
 
@@ -284,10 +284,10 @@ define <vscale x 4 x i32> @smlslt_i32(<vscale x 4 x i32> %a,
 ; CHECK-LABEL: smlslt_i32
 ; CHECK: smlslt z0.s, z1.h, z2.h[1]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslt.nxv4i32(<vscale x 4 x i32> %a,
-                                                                  <vscale x 8 x i16> %b,
-                                                                  <vscale x 8 x i16> %c,
-                                                                  i64 1)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslt.lane.nxv4i32(<vscale x 4 x i32> %a,
+                                                                       <vscale x 8 x i16> %b,
+                                                                       <vscale x 8 x i16> %c,
+                                                                       i32 1)
   ret <vscale x 4 x i32> %res
 }
 
@@ -297,10 +297,10 @@ define <vscale x 4 x i32> @smlslt_i32_2(<vscale x 4 x i32> %a,
 ; CHECK-LABEL: smlslt_i32_2
 ; CHECK: smlslt z0.s, z1.h, z2.h[7]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslt.nxv4i32(<vscale x 4 x i32> %a,
-                                                                  <vscale x 8 x i16> %b,
-                                                                  <vscale x 8 x i16> %c,
-                                                                  i64 7)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslt.lane.nxv4i32(<vscale x 4 x i32> %a,
+                                                                       <vscale x 8 x i16> %b,
+                                                                       <vscale x 8 x i16> %c,
+                                                                       i32 7)
   ret <vscale x 4 x i32> %res
 }
 
@@ -310,10 +310,10 @@ define <vscale x 2 x i64> @smlslt_i64(<vscale x 2 x i64> %a,
 ; CHECK-LABEL: smlslt_i64
 ; CHECK: smlslt z0.d, z1.s, z2.s[0]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslt.nxv2i64(<vscale x 2 x i64> %a,
-                                                                  <vscale x 4 x i32> %b,
-                                                                  <vscale x 4 x i32> %c,
-                                                                  i64 0)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslt.lane.nxv2i64(<vscale x 2 x i64> %a,
+                                                                       <vscale x 4 x i32> %b,
+                                                                       <vscale x 4 x i32> %c,
+                                                                       i32 0)
   ret <vscale x 2 x i64> %res
 }
 
@@ -323,10 +323,10 @@ define <vscale x 2 x i64> @smlslt_i64_2(<vscale x 2 x i64> %a,
 ; CHECK-LABEL: smlslt_i64_2
 ; CHECK: smlslt z0.d, z1.s, z2.s[3]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslt.nxv2i64(<vscale x 2 x i64> %a,
-                                                                  <vscale x 4 x i32> %b,
-                                                                  <vscale x 4 x i32> %c,
-                                                                  i64 3)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslt.lane.nxv2i64(<vscale x 2 x i64> %a,
+                                                                       <vscale x 4 x i32> %b,
+                                                                       <vscale x 4 x i32> %c,
+                                                                       i32 3)
   ret <vscale x 2 x i64> %res
 }
 
@@ -339,10 +339,10 @@ define <vscale x 4 x i32> @umlslb_i32(<vscale x 4 x i32> %a,
 ; CHECK-LABEL: umlslb_i32
 ; CHECK: umlslb z0.s, z1.h, z2.h[1]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslb.nxv4i32(<vscale x 4 x i32> %a,
-                                                                  <vscale x 8 x i16> %b,
-                                                                  <vscale x 8 x i16> %c,
-                                                                  i64 1)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslb.lane.nxv4i32(<vscale x 4 x i32> %a,
+                                                                       <vscale x 8 x i16> %b,
+                                                                       <vscale x 8 x i16> %c,
+                                                                       i32 1)
   ret <vscale x 4 x i32> %res
 }
 
@@ -352,10 +352,10 @@ define <vscale x 4 x i32> @umlslb_i32_2(<vscale x 4 x i32> %a,
 ; CHECK-LABEL: umlslb_i32_2
 ; CHECK: umlslb z0.s, z1.h, z2.h[7]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslb.nxv4i32(<vscale x 4 x i32> %a,
-                                                                  <vscale x 8 x i16> %b,
-                                                                  <vscale x 8 x i16> %c,
-                                                                  i64 7)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslb.lane.nxv4i32(<vscale x 4 x i32> %a,
+                                                                       <vscale x 8 x i16> %b,
+                                                                       <vscale x 8 x i16> %c,
+                                                                       i32 7)
   ret <vscale x 4 x i32> %res
 }
 
@@ -365,10 +365,10 @@ define <vscale x 2 x i64> @umlslb_i64(<vscale x 2 x i64> %a,
 ; CHECK-LABEL: umlslb_i64
 ; CHECK: umlslb z0.d, z1.s, z2.s[0]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslb.nxv2i64(<vscale x 2 x i64> %a,
-                                                                  <vscale x 4 x i32> %b,
-                                                                  <vscale x 4 x i32> %c,
-                                                                  i64 0)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslb.lane.nxv2i64(<vscale x 2 x i64> %a,
+                                                                       <vscale x 4 x i32> %b,
+                                                                       <vscale x 4 x i32> %c,
+                                                                       i32 0)
   ret <vscale x 2 x i64> %res
 }
 
@@ -378,10 +378,10 @@ define <vscale x 2 x i64> @umlslb_i64_2(<vscale x 2 x i64> %a,
 ; CHECK-LABEL: umlslb_i64_2
 ; CHECK: umlslb z0.d, z1.s, z2.s[3]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslb.nxv2i64(<vscale x 2 x i64> %a,
-                                                                  <vscale x 4 x i32> %b,
-                                                                  <vscale x 4 x i32> %c,
-                                                                  i64 3)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslb.lane.nxv2i64(<vscale x 2 x i64> %a,
+                                                                       <vscale x 4 x i32> %b,
+                                                                       <vscale x 4 x i32> %c,
+                                                                       i32 3)
   ret <vscale x 2 x i64> %res
 }
 
@@ -394,10 +394,10 @@ define <vscale x 4 x i32> @umlslt_i32(<vscale x 4 x i32> %a,
 ; CHECK-LABEL: umlslt_i32
 ; CHECK: umlslt z0.s, z1.h, z2.h[1]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslt.nxv4i32(<vscale x 4 x i32> %a,
-                                                                  <vscale x 8 x i16> %b,
-                                                                  <vscale x 8 x i16> %c,
-                                                                  i64 1)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslt.lane.nxv4i32(<vscale x 4 x i32> %a,
+                                                                       <vscale x 8 x i16> %b,
+                                                                       <vscale x 8 x i16> %c,
+                                                                       i32 1)
   ret <vscale x 4 x i32> %res
 }
 
@@ -407,10 +407,10 @@ define <vscale x 4 x i32> @umlslt_i32_2(<vscale x 4 x i32> %a,
 ; CHECK-LABEL: umlslt_i32_2
 ; CHECK: umlslt z0.s, z1.h, z2.h[7]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslt.nxv4i32(<vscale x 4 x i32> %a,
-                                                                  <vscale x 8 x i16> %b,
-                                                                  <vscale x 8 x i16> %c,
-                                                                  i64 7)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslt.lane.nxv4i32(<vscale x 4 x i32> %a,
+                                                                       <vscale x 8 x i16> %b,
+                                                                       <vscale x 8 x i16> %c,
+                                                                       i32 7)
   ret <vscale x 4 x i32> %res
 }
 
@@ -420,10 +420,10 @@ define <vscale x 2 x i64> @umlslt_i64(<vscale x 2 x i64> %a,
 ; CHECK-LABEL: umlslt_i64
 ; CHECK: umlslt z0.d, z1.s, z2.s[0]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslt.nxv2i64(<vscale x 2 x i64> %a,
-                                                                  <vscale x 4 x i32> %b,
-                                                                  <vscale x 4 x i32> %c,
-                                                                  i64 0)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslt.lane.nxv2i64(<vscale x 2 x i64> %a,
+                                                                       <vscale x 4 x i32> %b,
+                                                                       <vscale x 4 x i32> %c,
+                                                                       i32 0)
   ret <vscale x 2 x i64> %res
 }
 
@@ -433,26 +433,254 @@ define <vscale x 2 x i64> @umlslt_i64_2(<vscale x 2 x i64> %a,
 ; CHECK-LABEL: umlslt_i64_2
 ; CHECK: umlslt z0.d, z1.s, z2.s[3]
 ; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslt.nxv2i64(<vscale x 2 x i64> %a,
-                                                                  <vscale x 4 x i32> %b,
-                                                                  <vscale x 4 x i32> %c,
-                                                                  i64 3)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslt.lane.nxv2i64(<vscale x 2 x i64> %a,
+                                                                       <vscale x 4 x i32> %b,
+                                                                       <vscale x 4 x i32> %c,
+                                                                       i32 3)
   ret <vscale x 2 x i64> %res
 }
 
-declare <vscale x 4 x i32> @llvm.aarch64.sve.smlalb.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.smlalb.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.smlalt.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.smlalt.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.umlalb.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.umlalb.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.umlalt.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.umlalt.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.smlslb.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.smlslb.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.smlslt.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.smlslt.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.umlslb.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.umlslb.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.umlslt.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.umlslt.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64)
+;
+; SQDMLALB
+;
+define <vscale x 4 x i32> @sqdmlalb_i32(<vscale x 4 x i32> %a,
+                                        <vscale x 8 x i16> %b,
+                                        <vscale x 8 x i16> %c) {
+; CHECK-LABEL: sqdmlalb_i32
+; CHECK: sqdmlalb z0.s, z1.h, z2.h[1]
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalb.lane.nxv4i32(<vscale x 4 x i32> %a,
+                                                                         <vscale x 8 x i16> %b,
+                                                                         <vscale x 8 x i16> %c,
+                                                                         i32 1)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @sqdmlalb_i32_2(<vscale x 4 x i32> %a,
+                                          <vscale x 8 x i16> %b,
+                                          <vscale x 8 x i16> %c) {
+; CHECK-LABEL: sqdmlalb_i32_2
+; CHECK: sqdmlalb z0.s, z1.h, z2.h[7]
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalb.lane.nxv4i32(<vscale x 4 x i32> %a,
+                                                                         <vscale x 8 x i16> %b,
+                                                                         <vscale x 8 x i16> %c,
+                                                                         i32 7)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @sqdmlalb_i64(<vscale x 2 x i64> %a,
+                                        <vscale x 4 x i32> %b,
+                                        <vscale x 4 x i32> %c) {
+; CHECK-LABEL: sqdmlalb_i64
+; CHECK: sqdmlalb z0.d, z1.s, z2.s[0]
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalb.lane.nxv2i64(<vscale x 2 x i64> %a,
+                                                                         <vscale x 4 x i32> %b,
+                                                                         <vscale x 4 x i32> %c,
+                                                                         i32 0)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @sqdmlalb_i64_2(<vscale x 2 x i64> %a,
+                                          <vscale x 4 x i32> %b,
+                                          <vscale x 4 x i32> %c) {
+; CHECK-LABEL: sqdmlalb_i64_2
+; CHECK: sqdmlalb z0.d, z1.s, z2.s[3]
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalb.lane.nxv2i64(<vscale x 2 x i64> %a,
+                                                                         <vscale x 4 x i32> %b,
+                                                                         <vscale x 4 x i32> %c,
+                                                                         i32 3)
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; SQDMLALT
+;
+define <vscale x 4 x i32> @sqdmlalt_i32(<vscale x 4 x i32> %a,
+                                        <vscale x 8 x i16> %b,
+                                        <vscale x 8 x i16> %c) {
+; CHECK-LABEL: sqdmlalt_i32
+; CHECK: sqdmlalt z0.s, z1.h, z2.h[1]
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalt.lane.nxv4i32(<vscale x 4 x i32> %a,
+                                                                         <vscale x 8 x i16> %b,
+                                                                         <vscale x 8 x i16> %c,
+                                                                         i32 1)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @sqdmlalt_i32_2(<vscale x 4 x i32> %a,
+                                          <vscale x 8 x i16> %b,
+                                          <vscale x 8 x i16> %c) {
+; CHECK-LABEL: sqdmlalt_i32_2
+; CHECK: sqdmlalt z0.s, z1.h, z2.h[7]
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalt.lane.nxv4i32(<vscale x 4 x i32> %a,
+                                                                         <vscale x 8 x i16> %b,
+                                                                         <vscale x 8 x i16> %c,
+                                                                         i32 7)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @sqdmlalt_i64(<vscale x 2 x i64> %a,
+                                        <vscale x 4 x i32> %b,
+                                        <vscale x 4 x i32> %c) {
+; CHECK-LABEL: sqdmlalt_i64
+; CHECK: sqdmlalt z0.d, z1.s, z2.s[0]
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalt.lane.nxv2i64(<vscale x 2 x i64> %a,
+                                                                         <vscale x 4 x i32> %b,
+                                                                         <vscale x 4 x i32> %c,
+                                                                         i32 0)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @sqdmlalt_i64_2(<vscale x 2 x i64> %a,
+                                          <vscale x 4 x i32> %b,
+                                          <vscale x 4 x i32> %c) {
+; CHECK-LABEL: sqdmlalt_i64_2
+; CHECK: sqdmlalt z0.d, z1.s, z2.s[3]
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalt.lane.nxv2i64(<vscale x 2 x i64> %a,
+                                                                         <vscale x 4 x i32> %b,
+                                                                         <vscale x 4 x i32> %c,
+                                                                         i32 3)
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; SQDMLSLB
+;
+define <vscale x 4 x i32> @sqdmlslb_i32(<vscale x 4 x i32> %a,
+                                        <vscale x 8 x i16> %b,
+                                        <vscale x 8 x i16> %c) {
+; CHECK-LABEL: sqdmlslb_i32
+; CHECK: sqdmlslb z0.s, z1.h, z2.h[1]
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslb.lane.nxv4i32(<vscale x 4 x i32> %a,
+                                                                         <vscale x 8 x i16> %b,
+                                                                         <vscale x 8 x i16> %c,
+                                                                         i32 1)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @sqdmlslb_i32_2(<vscale x 4 x i32> %a,
+                                          <vscale x 8 x i16> %b,
+                                          <vscale x 8 x i16> %c) {
+; CHECK-LABEL: sqdmlslb_i32_2
+; CHECK: sqdmlslb z0.s, z1.h, z2.h[7]
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslb.lane.nxv4i32(<vscale x 4 x i32> %a,
+                                                                         <vscale x 8 x i16> %b,
+                                                                         <vscale x 8 x i16> %c,
+                                                                         i32 7)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @sqdmlslb_i64(<vscale x 2 x i64> %a,
+                                        <vscale x 4 x i32> %b,
+                                        <vscale x 4 x i32> %c) {
+; CHECK-LABEL: sqdmlslb_i64
+; CHECK: sqdmlslb z0.d, z1.s, z2.s[0]
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslb.lane.nxv2i64(<vscale x 2 x i64> %a,
+                                                                         <vscale x 4 x i32> %b,
+                                                                         <vscale x 4 x i32> %c,
+                                                                         i32 0)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @sqdmlslb_i64_2(<vscale x 2 x i64> %a,
+                                          <vscale x 4 x i32> %b,
+                                          <vscale x 4 x i32> %c) {
+; CHECK-LABEL: sqdmlslb_i64_2
+; CHECK: sqdmlslb z0.d, z1.s, z2.s[3]
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslb.lane.nxv2i64(<vscale x 2 x i64> %a,
+                                                                         <vscale x 4 x i32> %b,
+                                                                         <vscale x 4 x i32> %c,
+                                                                         i32 3)
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; SQDMLSLT
+;
+define <vscale x 4 x i32> @sqdmlslt_i32(<vscale x 4 x i32> %a,
+                                        <vscale x 8 x i16> %b,
+                                        <vscale x 8 x i16> %c) {
+; CHECK-LABEL: sqdmlslt_i32
+; CHECK: sqdmlslt z0.s, z1.h, z2.h[1]
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslt.lane.nxv4i32(<vscale x 4 x i32> %a,
+                                                                         <vscale x 8 x i16> %b,
+                                                                         <vscale x 8 x i16> %c,
+                                                                         i32 1)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @sqdmlslt_i32_2(<vscale x 4 x i32> %a,
+                                          <vscale x 8 x i16> %b,
+                                          <vscale x 8 x i16> %c) {
+; CHECK-LABEL: sqdmlslt_i32_2
+; CHECK: sqdmlslt z0.s, z1.h, z2.h[7]
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslt.lane.nxv4i32(<vscale x 4 x i32> %a,
+                                                                         <vscale x 8 x i16> %b,
+                                                                         <vscale x 8 x i16> %c,
+                                                                         i32 7)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @sqdmlslt_i64(<vscale x 2 x i64> %a,
+                                        <vscale x 4 x i32> %b,
+                                        <vscale x 4 x i32> %c) {
+; CHECK-LABEL: sqdmlslt_i64
+; CHECK: sqdmlslt z0.d, z1.s, z2.s[0]
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslt.lane.nxv2i64(<vscale x 2 x i64> %a,
+                                                                         <vscale x 4 x i32> %b,
+                                                                         <vscale x 4 x i32> %c,
+                                                                         i32 0)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @sqdmlslt_i64_2(<vscale x 2 x i64> %a,
+                                          <vscale x 4 x i32> %b,
+                                          <vscale x 4 x i32> %c) {
+; CHECK-LABEL: sqdmlslt_i64_2
+; CHECK: sqdmlslt z0.d, z1.s, z2.s[3]
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslt.lane.nxv2i64(<vscale x 2 x i64> %a,
+                                                                         <vscale x 4 x i32> %b,
+                                                                         <vscale x 4 x i32> %c,
+                                                                         i32 3)
+  ret <vscale x 2 x i64> %res
+}
+
+declare <vscale x 4 x i32> @llvm.aarch64.sve.smlalb.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.smlalb.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.smlalt.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.smlalt.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.umlalb.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.umlalb.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.umlalt.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.umlalt.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.smlslb.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.smlslb.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.smlslt.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.smlslt.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.umlslb.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.umlslb.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.umlslt.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.umlslt.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalb.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalb.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalt.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalt.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslb.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslb.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslt.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslt.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
