[llvm] a582a05 - [AArch64][SME2] Add Multi-vector add/sub, storing into ZA intrinsic
Caroline Concatto via llvm-commits
llvm-commits@lists.llvm.org
Tue Jan 24 02:50:29 PST 2023
Author: Caroline Concatto
Date: 2023-01-24T10:49:51Z
New Revision: a582a05929104b851616ac36a5741607ca6fa03c
URL: https://github.com/llvm/llvm-project/commit/a582a05929104b851616ac36a5741607ca6fa03c
DIFF: https://github.com/llvm/llvm-project/commit/a582a05929104b851616ac36a5741607ca6fa03c.diff
LOG: [AArch64][SME2] Add Multi-vector add/sub, storing into ZA intrinsic
Add the following intrinsics:
ADD single & multi
SUB single & multi
NOTE: These intrinsics are still in development and are subject to future changes.
Reviewed By: david-arm
Differential Revision: https://reviews.llvm.org/D142114
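For illustration, here is a minimal sketch (not part of the patch) of how one of the new intrinsics is called from LLVM IR. The function name and argument values are hypothetical; the intrinsic name and signature follow the tests added below.

  define void @example_add_write_single_za_vg1x2(i32 %slice, <vscale x 4 x i32> %zn0,
                                                 <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zm) {
    ; Computes { %zn0, %zn1 } + %zm element-wise and writes the result into the
    ; ZA vector group (vgx2 grouping) selected by %slice.
    call void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv4i32(i32 %slice,
                <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
                <vscale x 4 x i32> %zm)
    ret void
  }

  declare void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)

A slice index plus a small immediate offset is also supported, as exercised by the %slice.7 cases in the new tests.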
Added:
llvm/test/CodeGen/AArch64/sme2-intrinsics-add.ll
llvm/test/CodeGen/AArch64/sme2-intrinsics-sub.ll
Modified:
llvm/include/llvm/IR/IntrinsicsAArch64.td
llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
Removed:
################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index e365f2a07142..b1f85563195f 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -2770,6 +2770,19 @@ let TargetPrefix = "aarch64" in {
LLVMMatchType<0>, llvm_i32_ty],
[ImmArg<ArgIndex<6>>]>;
+ class SME2_ZA_Write_VG2_Intrinsic
+ : DefaultAttrsIntrinsic<[],
+ [llvm_i32_ty,
+ llvm_anyvector_ty, LLVMMatchType<0>],
+ []>;
+
+ class SME2_ZA_Write_VG4_Intrinsic
+ : DefaultAttrsIntrinsic<[],
+ [llvm_i32_ty,
+ llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>],
+ []>;
+
class SME2_CVT_VG2_SINGLE_Intrinsic
: DefaultAttrsIntrinsic<[LLVMSubdivide2VectorType<0>],
[llvm_anyvector_ty, LLVMMatchType<0>],
@@ -2899,4 +2912,20 @@ let TargetPrefix = "aarch64" in {
def int_aarch64_sve_sqcvtn_x4 : SME2_CVT_VG4_SINGLE_Intrinsic;
def int_aarch64_sve_uqcvtn_x4 : SME2_CVT_VG4_SINGLE_Intrinsic;
def int_aarch64_sve_sqcvtun_x4 : SME2_CVT_VG4_SINGLE_Intrinsic;
+
+ //
+ // Multi-Single add/sub
+ //
+ def int_aarch64_sme_add_write_single_za_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic;
+ def int_aarch64_sme_sub_write_single_za_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic;
+ def int_aarch64_sme_add_write_single_za_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic;
+ def int_aarch64_sme_sub_write_single_za_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic;
+
+ //
+ // Multi-Multi add/sub
+ //
+ def int_aarch64_sme_add_write_za_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Multi_Intrinsic;
+ def int_aarch64_sme_sub_write_za_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Multi_Intrinsic;
+ def int_aarch64_sme_add_write_za_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Multi_Intrinsic;
+ def int_aarch64_sme_sub_write_za_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Multi_Intrinsic;
}
diff --git a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
index a213d65cf789..d0c5bfe72566 100644
--- a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
@@ -247,18 +247,18 @@ def : Pat<(i64 (AArch64ObscureCopy (i64 GPR64:$idx))),
// SME2 Instructions
//===----------------------------------------------------------------------===//
let Predicates = [HasSME2] in {
-defm ADD_VG2_M2ZZ_S : sme2_dot_mla_add_sub_array_vg24_single<"add", 0b0011010, MatrixOp32, ZZ_s, ZPR4b32>;
-defm ADD_VG4_M4ZZ_S : sme2_dot_mla_add_sub_array_vg24_single<"add", 0b0111010, MatrixOp32, ZZZZ_s, ZPR4b32>;
-defm ADD_VG2_M2Z2Z_S : sme2_dot_mla_add_sub_array_vg2_multi<"add", 0b011010, MatrixOp32, ZZ_s_mul_r, nxv4i32, null_frag>;
-defm ADD_VG4_M4Z4Z_S : sme2_dot_mla_add_sub_array_vg4_multi<"add", 0b011010, MatrixOp32, ZZZZ_s_mul_r, nxv4i32, null_frag>;
+defm ADD_VG2_M2ZZ_S : sme2_dot_mla_add_sub_array_vg2_single<"add", 0b0011010, MatrixOp32, ZZ_s, ZPR4b32, nxv4i32, int_aarch64_sme_add_write_single_za_vg1x2>;
+defm ADD_VG4_M4ZZ_S : sme2_dot_mla_add_sub_array_vg4_single<"add", 0b0111010, MatrixOp32, ZZZZ_s, ZPR4b32, nxv4i32, int_aarch64_sme_add_write_single_za_vg1x4>;
+defm ADD_VG2_M2Z2Z_S : sme2_dot_mla_add_sub_array_vg2_multi<"add", 0b011010, MatrixOp32, ZZ_s_mul_r, nxv4i32, int_aarch64_sme_add_write_za_vg1x2>;
+defm ADD_VG4_M4Z4Z_S : sme2_dot_mla_add_sub_array_vg4_multi<"add", 0b011010, MatrixOp32, ZZZZ_s_mul_r, nxv4i32, int_aarch64_sme_add_write_za_vg1x4>;
defm ADD_VG2_2ZZ : sme2_int_sve_destructive_vector_vg2_single<"add", 0b0110000>;
defm ADD_VG4_4ZZ : sme2_int_sve_destructive_vector_vg4_single<"add", 0b0110000>;
-defm SUB_VG2_M2ZZ_S : sme2_dot_mla_add_sub_array_vg24_single<"sub", 0b0011011, MatrixOp32, ZZ_s, ZPR4b32>;
-defm SUB_VG4_M4ZZ_S : sme2_dot_mla_add_sub_array_vg24_single<"sub", 0b0111011, MatrixOp32, ZZZZ_s, ZPR4b32>;
-defm SUB_VG2_M2Z2Z_S : sme2_dot_mla_add_sub_array_vg2_multi<"sub", 0b011011, MatrixOp32, ZZ_s_mul_r, nxv4i32, null_frag>;
-defm SUB_VG4_M4Z4Z_S : sme2_dot_mla_add_sub_array_vg4_multi<"sub", 0b011011, MatrixOp32, ZZZZ_s_mul_r, nxv4i32, null_frag>;
+defm SUB_VG2_M2ZZ_S : sme2_dot_mla_add_sub_array_vg2_single<"sub", 0b0011011, MatrixOp32, ZZ_s, ZPR4b32, nxv4i32, int_aarch64_sme_sub_write_single_za_vg1x2>;
+defm SUB_VG4_M4ZZ_S : sme2_dot_mla_add_sub_array_vg4_single<"sub", 0b0111011, MatrixOp32, ZZZZ_s, ZPR4b32, nxv4i32, int_aarch64_sme_sub_write_single_za_vg1x4>;
+defm SUB_VG2_M2Z2Z_S : sme2_dot_mla_add_sub_array_vg2_multi<"sub", 0b011011, MatrixOp32, ZZ_s_mul_r, nxv4i32, int_aarch64_sme_sub_write_za_vg1x2>;
+defm SUB_VG4_M4Z4Z_S : sme2_dot_mla_add_sub_array_vg4_multi<"sub", 0b011011, MatrixOp32, ZZZZ_s_mul_r, nxv4i32, int_aarch64_sme_sub_write_za_vg1x4>;
defm FMLA_VG2_M2ZZ_S : sme2_dot_mla_add_sub_array_vg2_single<"fmla", 0b0011000, MatrixOp32, ZZ_s, ZPR4b32, nxv4f32, int_aarch64_sme_fmla_single_vg1x2>;
defm FMLA_VG4_M4ZZ_S : sme2_dot_mla_add_sub_array_vg4_single<"fmla", 0b0111000, MatrixOp32, ZZZZ_s, ZPR4b32, nxv4f32, int_aarch64_sme_fmla_single_vg1x4>;
@@ -705,15 +705,15 @@ defm STNT1D_VG4_M4ZPXI : sme2_st_vector_vg4_multi_scalar_immediate<0b11, 0b1, ZZ
}
let Predicates = [HasSME2, HasSMEI16I64] in {
-defm ADD_VG2_M2ZZ_D : sme2_dot_mla_add_sub_array_vg24_single<"add", 0b1011010, MatrixOp64, ZZ_d, ZPR4b64>;
-defm ADD_VG4_M4ZZ_D : sme2_dot_mla_add_sub_array_vg24_single<"add", 0b1111010, MatrixOp64, ZZZZ_d, ZPR4b64>;
-defm ADD_VG2_M2Z2Z_D : sme2_dot_mla_add_sub_array_vg2_multi<"add", 0b111010, MatrixOp64, ZZ_d_mul_r, nxv2i64, null_frag>;
-defm ADD_VG4_M4Z4Z_D : sme2_dot_mla_add_sub_array_vg4_multi<"add", 0b111010, MatrixOp64, ZZZZ_d_mul_r, nxv2i64, null_frag>;
-
-defm SUB_VG2_M2ZZ_D : sme2_dot_mla_add_sub_array_vg24_single<"sub", 0b1011011, MatrixOp64, ZZ_d, ZPR4b64>;
-defm SUB_VG4_M4ZZ_D : sme2_dot_mla_add_sub_array_vg24_single<"sub", 0b1111011, MatrixOp64, ZZZZ_d, ZPR4b64>;
-defm SUB_VG2_M2Z2Z_D : sme2_dot_mla_add_sub_array_vg2_multi<"sub", 0b111011, MatrixOp64, ZZ_d_mul_r, nxv2i64, null_frag>;
-defm SUB_VG4_M4Z4Z_D : sme2_dot_mla_add_sub_array_vg4_multi<"sub", 0b111011, MatrixOp64, ZZZZ_d_mul_r, nxv2i64, null_frag>;
+defm ADD_VG2_M2ZZ_D : sme2_dot_mla_add_sub_array_vg2_single<"add", 0b1011010, MatrixOp64, ZZ_d, ZPR4b64, nxv2i64, int_aarch64_sme_add_write_single_za_vg1x2>;
+defm ADD_VG4_M4ZZ_D : sme2_dot_mla_add_sub_array_vg4_single<"add", 0b1111010, MatrixOp64, ZZZZ_d, ZPR4b64, nxv2i64, int_aarch64_sme_add_write_single_za_vg1x4>;
+defm ADD_VG2_M2Z2Z_D : sme2_dot_mla_add_sub_array_vg2_multi<"add", 0b111010, MatrixOp64, ZZ_d_mul_r, nxv2i64, int_aarch64_sme_add_write_za_vg1x2>;
+defm ADD_VG4_M4Z4Z_D : sme2_dot_mla_add_sub_array_vg4_multi<"add", 0b111010, MatrixOp64, ZZZZ_d_mul_r, nxv2i64, int_aarch64_sme_add_write_za_vg1x4>;
+
+defm SUB_VG2_M2ZZ_D : sme2_dot_mla_add_sub_array_vg2_single<"sub", 0b1011011, MatrixOp64, ZZ_d, ZPR4b64, nxv2i64, int_aarch64_sme_sub_write_single_za_vg1x2>;
+defm SUB_VG4_M4ZZ_D : sme2_dot_mla_add_sub_array_vg4_single<"sub", 0b1111011, MatrixOp64, ZZZZ_d, ZPR4b64, nxv2i64, int_aarch64_sme_sub_write_single_za_vg1x4>;
+defm SUB_VG2_M2Z2Z_D : sme2_dot_mla_add_sub_array_vg2_multi<"sub", 0b111011, MatrixOp64, ZZ_d_mul_r, nxv2i64, int_aarch64_sme_sub_write_za_vg1x2>;
+defm SUB_VG4_M4Z4Z_D : sme2_dot_mla_add_sub_array_vg4_multi<"sub", 0b111011, MatrixOp64, ZZZZ_d_mul_r, nxv2i64, int_aarch64_sme_sub_write_za_vg1x4>;
defm ADD_VG2_M2Z_D : sme2_multivec_accum_add_sub_vg2<"add", 0b1010, MatrixOp64, ZZ_d_mul_r>;
defm ADD_VG4_M4Z_D : sme2_multivec_accum_add_sub_vg4<"add", 0b1010, MatrixOp64, ZZZZ_d_mul_r>;
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-add.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-add.ll
new file mode 100644
index 000000000000..18a49ae29735
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-add.ll
@@ -0,0 +1,226 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -mattr=+sme-i16i64 -mattr=+sme-f64f64 -verify-machineinstrs < %s | FileCheck %s
+
+;
+; ADD Multi-Single x2
+;
+
+define void @multi_vector_add_write_single_za_vg1x2_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zm) {
+; CHECK-LABEL: multi_vector_add_write_single_za_vg1x2_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, w0
+; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
+; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
+; CHECK-NEXT: add za.s[w8, 0, vgx2], { z0.s, z1.s }, z2.s
+; CHECK-NEXT: add za.s[w8, 7, vgx2], { z0.s, z1.s }, z2.s
+; CHECK-NEXT: ret
+ call void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv4i32(i32 %slice,
+ <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+ <vscale x 4 x i32> %zm)
+ %slice.7 = add i32 %slice, 7
+ call void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv4i32(i32 %slice.7,
+ <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+ <vscale x 4 x i32> %zm)
+ ret void
+}
+
+define void @multi_vector_add_write_single_za_vg1x2_i64(i32 %slice, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zm) {
+; CHECK-LABEL: multi_vector_add_write_single_za_vg1x2_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, w0
+; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
+; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
+; CHECK-NEXT: add za.d[w8, 0, vgx2], { z0.d, z1.d }, z2.d
+; CHECK-NEXT: add za.d[w8, 7, vgx2], { z0.d, z1.d }, z2.d
+; CHECK-NEXT: ret
+ call void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv2i64(i32 %slice,
+ <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+ <vscale x 2 x i64> %zm)
+ %slice.7 = add i32 %slice, 7
+ call void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv2i64(i32 %slice.7,
+ <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+ <vscale x 2 x i64> %zm)
+ ret void
+}
+
+;
+; ADD Multi-Single x4
+;
+
+define void @multi_vector_add_write_single_za_vg1x4_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+; CHECK-LABEL: multi_vector_add_write_single_za_vg1x4_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: mov w8, w0
+; CHECK-NEXT: // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: add za.s[w8, 0, vgx4], { z0.s - z3.s }, z4.s
+; CHECK-NEXT: add za.s[w8, 7, vgx4], { z0.s - z3.s }, z4.s
+; CHECK-NEXT: ret
+ <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+ <vscale x 4 x i32> %zm) {
+ call void @llvm.aarch64.sme.add.write.single.za.vg1x4.nxv4i32(i32 %slice,
+ <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+ <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+ <vscale x 4 x i32> %zm)
+ %slice.7 = add i32 %slice, 7
+ call void @llvm.aarch64.sme.add.write.single.za.vg1x4.nxv4i32(i32 %slice.7,
+ <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+ <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+ <vscale x 4 x i32> %zm)
+ ret void
+}
+
+define void @multi_vector_add_write_single_za_vg1x4_i64(i32 %slice,
+; CHECK-LABEL: multi_vector_add_write_single_za_vg1x4_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: mov w8, w0
+; CHECK-NEXT: // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: add za.d[w8, 0, vgx4], { z0.d - z3.d }, z4.d
+; CHECK-NEXT: add za.d[w8, 7, vgx4], { z0.d - z3.d }, z4.d
+; CHECK-NEXT: ret
+ <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+ <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+ <vscale x 2 x i64> %zm) {
+ call void @llvm.aarch64.sme.add.write.single.za.vg1x4.nxv2i64(i32 %slice,
+ <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+ <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+ <vscale x 2 x i64> %zm)
+ %slice.7 = add i32 %slice, 7
+ call void @llvm.aarch64.sme.add.write.single.za.vg1x4.nxv2i64(i32 %slice.7,
+ <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+ <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+ <vscale x 2 x i64> %zm)
+ ret void
+}
+
+;
+; ADD Multi-Multi x2
+;
+
+define void @multi_vector_add_write_za_vg1x2_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+; CHECK-LABEL: multi_vector_add_write_za_vg1x2_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, w0
+; CHECK-NEXT: // kill: def $z3 killed $z3 killed $z2_z3 def $z2_z3
+; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
+; CHECK-NEXT: // kill: def $z2 killed $z2 killed $z2_z3 def $z2_z3
+; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
+; CHECK-NEXT: add za.s[w8, 0, vgx2], { z0.s, z1.s }, { z2.s, z3.s }
+; CHECK-NEXT: add za.s[w8, 7, vgx2], { z0.s, z1.s }, { z2.s, z3.s }
+; CHECK-NEXT: ret
+ <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2) {
+ call void @llvm.aarch64.sme.add.write.za.vg1x2.nxv4i32(i32 %slice,
+ <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+ <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2)
+ %slice.7 = add i32 %slice, 7
+ call void @llvm.aarch64.sme.add.write.za.vg1x2.nxv4i32(i32 %slice.7,
+ <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+ <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2)
+ ret void
+}
+
+
+define void @multi_vector_add_write_za_vg1x2_i64(i32 %slice, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+; CHECK-LABEL: multi_vector_add_write_za_vg1x2_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, w0
+; CHECK-NEXT: // kill: def $z3 killed $z3 killed $z2_z3 def $z2_z3
+; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
+; CHECK-NEXT: // kill: def $z2 killed $z2 killed $z2_z3 def $z2_z3
+; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
+; CHECK-NEXT: add za.d[w8, 0, vgx2], { z0.d, z1.d }, { z2.d, z3.d }
+; CHECK-NEXT: add za.d[w8, 7, vgx2], { z0.d, z1.d }, { z2.d, z3.d }
+; CHECK-NEXT: ret
+ <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2) {
+ call void @llvm.aarch64.sme.add.write.za.vg1x2.nxv2i64(i32 %slice,
+ <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+ <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2)
+ %slice.7 = add i32 %slice, 7
+ call void @llvm.aarch64.sme.add.write.za.vg1x2.nxv2i64(i32 %slice.7,
+ <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+ <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2)
+ ret void
+}
+
+
+;
+; ADD Multi-Multi x4
+;
+
+define void @multi_vector_add_write_za_vg1x4_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+; CHECK-LABEL: multi_vector_add_write_za_vg1x4_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $z7 killed $z7 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT: // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: mov w8, w0
+; CHECK-NEXT: // kill: def $z6 killed $z6 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT: // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: // kill: def $z5 killed $z5 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: // kill: def $z4 killed $z4 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: add za.s[w8, 0, vgx4], { z0.s - z3.s }, { z4.s - z7.s }
+; CHECK-NEXT: add za.s[w8, 7, vgx4], { z0.s - z3.s }, { z4.s - z7.s }
+; CHECK-NEXT: ret
+ <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+ <vscale x 4 x i32> %zm0, <vscale x 4 x i32> %zm1,
+ <vscale x 4 x i32> %zm2, <vscale x 4 x i32> %zm3) {
+ call void @llvm.aarch64.sme.add.write.za.vg1x4.nxv4i32(i32 %slice,
+ <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+ <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+ <vscale x 4 x i32> %zm0, <vscale x 4 x i32> %zm1,
+ <vscale x 4 x i32> %zm2, <vscale x 4 x i32> %zm3)
+ %slice.7 = add i32 %slice, 7
+ call void @llvm.aarch64.sme.add.write.za.vg1x4.nxv4i32(i32 %slice.7,
+ <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+ <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+ <vscale x 4 x i32> %zm0, <vscale x 4 x i32> %zm1,
+ <vscale x 4 x i32> %zm2, <vscale x 4 x i32> %zm3)
+ ret void
+}
+
+define void @multi_vector_add_write_za_vg1x4_i64(i32 %slice, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+; CHECK-LABEL: multi_vector_add_write_za_vg1x4_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $z7 killed $z7 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT: // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: mov w8, w0
+; CHECK-NEXT: // kill: def $z6 killed $z6 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT: // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: // kill: def $z5 killed $z5 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: // kill: def $z4 killed $z4 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: add za.d[w8, 0, vgx4], { z0.d - z3.d }, { z4.d - z7.d }
+; CHECK-NEXT: add za.d[w8, 7, vgx4], { z0.d - z3.d }, { z4.d - z7.d }
+; CHECK-NEXT: ret
+ <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+ <vscale x 2 x i64> %zm0, <vscale x 2 x i64> %zm1,
+ <vscale x 2 x i64> %zm2, <vscale x 2 x i64> %zm3) {
+ call void @llvm.aarch64.sme.add.write.za.vg1x4.nxv2i64(i32 %slice,
+ <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+ <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+ <vscale x 2 x i64> %zm0, <vscale x 2 x i64> %zm1,
+ <vscale x 2 x i64> %zm2, <vscale x 2 x i64> %zm3)
+ %slice.7 = add i32 %slice, 7
+ call void @llvm.aarch64.sme.add.write.za.vg1x4.nxv2i64(i32 %slice.7,
+ <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+ <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+ <vscale x 2 x i64> %zm0, <vscale x 2 x i64> %zm1,
+ <vscale x 2 x i64> %zm2, <vscale x 2 x i64> %zm3)
+ ret void
+}
+
+declare void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sme.add.write.single.za.vg1x4.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sme.add.write.single.za.vg1x4.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sme.add.write.za.vg1x2.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sme.add.write.za.vg1x2.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sme.add.write.za.vg1x4.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sme.add.write.za.vg1x4.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-sub.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-sub.ll
new file mode 100644
index 000000000000..4a36ea79861c
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-sub.ll
@@ -0,0 +1,228 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -mattr=+sme-i16i64 -mattr=+sme-f64f64 -verify-machineinstrs < %s | FileCheck %s
+
+;
+; SUB Multi-Single x2
+;
+
+define void @multi_vector_sub_write_single_za_vg1x2_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zm) {
+; CHECK-LABEL: multi_vector_sub_write_single_za_vg1x2_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, w0
+; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
+; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
+; CHECK-NEXT: sub za.s[w8, 0, vgx2], { z0.s, z1.s }, z2.s
+; CHECK-NEXT: sub za.s[w8, 7, vgx2], { z0.s, z1.s }, z2.s
+; CHECK-NEXT: ret
+ call void @llvm.aarch64.sme.sub.write.single.za.vg1x2.nxv4i32(i32 %slice,
+ <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+ <vscale x 4 x i32> %zm)
+ %slice.7 = add i32 %slice, 7
+ call void @llvm.aarch64.sme.sub.write.single.za.vg1x2.nxv4i32(i32 %slice.7,
+ <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+ <vscale x 4 x i32> %zm)
+ ret void
+}
+
+define void @multi_vector_sub_write_single_za_vg1x2_i64(i32 %slice, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zm) {
+; CHECK-LABEL: multi_vector_sub_write_single_za_vg1x2_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, w0
+; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
+; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
+; CHECK-NEXT: sub za.d[w8, 0, vgx2], { z0.d, z1.d }, z2.d
+; CHECK-NEXT: sub za.d[w8, 7, vgx2], { z0.d, z1.d }, z2.d
+; CHECK-NEXT: ret
+ call void @llvm.aarch64.sme.sub.write.single.za.vg1x2.nxv2i64(i32 %slice,
+ <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+ <vscale x 2 x i64> %zm)
+ %slice.7 = add i32 %slice, 7
+ call void @llvm.aarch64.sme.sub.write.single.za.vg1x2.nxv2i64(i32 %slice.7,
+ <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+ <vscale x 2 x i64> %zm)
+ ret void
+}
+
+;
+; SUB Multi-Single x4
+;
+
+define void @multi_vector_sub_write_single_za_vg1x4_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+; CHECK-LABEL: multi_vector_sub_write_single_za_vg1x4_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: mov w8, w0
+; CHECK-NEXT: // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: sub za.s[w8, 0, vgx4], { z0.s - z3.s }, z4.s
+; CHECK-NEXT: sub za.s[w8, 7, vgx4], { z0.s - z3.s }, z4.s
+; CHECK-NEXT: ret
+ <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+ <vscale x 4 x i32> %zm) {
+ call void @llvm.aarch64.sme.sub.write.single.za.vg1x4.nxv4i32(i32 %slice,
+ <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+ <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+ <vscale x 4 x i32> %zm)
+ %slice.7 = add i32 %slice, 7
+ call void @llvm.aarch64.sme.sub.write.single.za.vg1x4.nxv4i32(i32 %slice.7,
+ <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+ <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+ <vscale x 4 x i32> %zm)
+ ret void
+}
+
+define void @multi_vector_sub_write_single_za_vg1x4_i64(i32 %slice,
+; CHECK-LABEL: multi_vector_sub_write_single_za_vg1x4_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: mov w8, w0
+; CHECK-NEXT: // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: sub za.d[w8, 0, vgx4], { z0.d - z3.d }, z4.d
+; CHECK-NEXT: sub za.d[w8, 7, vgx4], { z0.d - z3.d }, z4.d
+; CHECK-NEXT: ret
+ <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+ <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+ <vscale x 2 x i64> %zm) {
+ call void @llvm.aarch64.sme.sub.write.single.za.vg1x4.nxv2i64(i32 %slice,
+ <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+ <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+ <vscale x 2 x i64> %zm)
+ %slice.7 = add i32 %slice, 7
+ call void @llvm.aarch64.sme.sub.write.single.za.vg1x4.nxv2i64(i32 %slice.7,
+ <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+ <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+ <vscale x 2 x i64> %zm)
+ ret void
+}
+
+;
+; SUB Multi-Multi x2
+;
+
+define void @multi_vector_sub_write_za_vg1x2_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+; CHECK-LABEL: multi_vector_sub_write_za_vg1x2_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, w0
+; CHECK-NEXT: // kill: def $z3 killed $z3 killed $z2_z3 def $z2_z3
+; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
+; CHECK-NEXT: // kill: def $z2 killed $z2 killed $z2_z3 def $z2_z3
+; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
+; CHECK-NEXT: sub za.s[w8, 0, vgx2], { z0.s, z1.s }, { z2.s, z3.s }
+; CHECK-NEXT: sub za.s[w8, 7, vgx2], { z0.s, z1.s }, { z2.s, z3.s }
+; CHECK-NEXT: ret
+ <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2) {
+ call void @llvm.aarch64.sme.sub.write.za.vg1x2.nxv4i32(i32 %slice,
+ <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+ <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2)
+ %slice.7 = add i32 %slice, 7
+ call void @llvm.aarch64.sme.sub.write.za.vg1x2.nxv4i32(i32 %slice.7,
+ <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+ <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2)
+ ret void
+}
+
+
+define void @multi_vector_sub_write_za_vg1x2_i64(i32 %slice, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+; CHECK-LABEL: multi_vector_sub_write_za_vg1x2_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, w0
+; CHECK-NEXT: // kill: def $z3 killed $z3 killed $z2_z3 def $z2_z3
+; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
+; CHECK-NEXT: // kill: def $z2 killed $z2 killed $z2_z3 def $z2_z3
+; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
+; CHECK-NEXT: sub za.d[w8, 0, vgx2], { z0.d, z1.d }, { z2.d, z3.d }
+; CHECK-NEXT: sub za.d[w8, 7, vgx2], { z0.d, z1.d }, { z2.d, z3.d }
+; CHECK-NEXT: ret
+ <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2) {
+ call void @llvm.aarch64.sme.sub.write.za.vg1x2.nxv2i64(i32 %slice,
+ <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+ <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2)
+ %slice.7 = add i32 %slice, 7
+ call void @llvm.aarch64.sme.sub.write.za.vg1x2.nxv2i64(i32 %slice.7,
+ <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+ <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2)
+ ret void
+}
+
+
+;
+; SUB Multi-Multi x4
+;
+
+define void @multi_vector_sub_write_za_vg1x4_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+; CHECK-LABEL: multi_vector_sub_write_za_vg1x4_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $z7 killed $z7 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT: // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: mov w8, w0
+; CHECK-NEXT: // kill: def $z6 killed $z6 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT: // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: // kill: def $z5 killed $z5 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: // kill: def $z4 killed $z4 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: sub za.s[w8, 0, vgx4], { z0.s - z3.s }, { z4.s - z7.s }
+; CHECK-NEXT: sub za.s[w8, 7, vgx4], { z0.s - z3.s }, { z4.s - z7.s }
+; CHECK-NEXT: ret
+ <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+ <vscale x 4 x i32> %zm0, <vscale x 4 x i32> %zm1,
+ <vscale x 4 x i32> %zm2, <vscale x 4 x i32> %zm3) {
+ call void @llvm.aarch64.sme.sub.write.za.vg1x4.nxv4i32(i32 %slice,
+ <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+ <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+ <vscale x 4 x i32> %zm0, <vscale x 4 x i32> %zm1,
+ <vscale x 4 x i32> %zm2, <vscale x 4 x i32> %zm3)
+ %slice.7 = add i32 %slice, 7
+ call void @llvm.aarch64.sme.sub.write.za.vg1x4.nxv4i32(i32 %slice.7,
+ <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+ <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+ <vscale x 4 x i32> %zm0, <vscale x 4 x i32> %zm1,
+ <vscale x 4 x i32> %zm2, <vscale x 4 x i32> %zm3)
+ ret void
+}
+
+define void @multi_vector_sub_write_za_vg1x4_i64(i32 %slice, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+; CHECK-LABEL: multi_vector_sub_write_za_vg1x4_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: // kill: def $z7 killed $z7 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT: // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: mov w8, w0
+; CHECK-NEXT: // kill: def $z6 killed $z6 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT: // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: // kill: def $z5 killed $z5 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: // kill: def $z4 killed $z4 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT: sub za.d[w8, 0, vgx4], { z0.d - z3.d }, { z4.d - z7.d }
+; CHECK-NEXT: sub za.d[w8, 7, vgx4], { z0.d - z3.d }, { z4.d - z7.d }
+; CHECK-NEXT: ret
+ <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+ <vscale x 2 x i64> %zm0, <vscale x 2 x i64> %zm1,
+ <vscale x 2 x i64> %zm2, <vscale x 2 x i64> %zm3) {
+ call void @llvm.aarch64.sme.sub.write.za.vg1x4.nxv2i64(i32 %slice,
+ <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+ <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+ <vscale x 2 x i64> %zm0, <vscale x 2 x i64> %zm1,
+ <vscale x 2 x i64> %zm2, <vscale x 2 x i64> %zm3)
+ %slice.7 = add i32 %slice, 7
+ call void @llvm.aarch64.sme.sub.write.za.vg1x4.nxv2i64(i32 %slice.7,
+ <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+ <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+ <vscale x 2 x i64> %zm0, <vscale x 2 x i64> %zm1,
+ <vscale x 2 x i64> %zm2, <vscale x 2 x i64> %zm3)
+ ret void
+}
+
+declare void @llvm.aarch64.sme.sub.write.single.za.vg1x2.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sme.sub.write.single.za.vg1x2.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sme.sub.write.single.za.vg1x4.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>,
+<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sme.sub.write.single.za.vg1x4.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>,
+<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sme.sub.write.za.vg1x2.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sme.sub.write.za.vg1x2.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sme.sub.write.za.vg1x4.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sme.sub.write.za.vg1x4.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)