[clang] [llvm] [Clang][LLVM] Implement single-single vectors MOP4{A/S} (PR #127797)

Virginia Cangelosi via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 19 05:55:41 PST 2025


https://github.com/virginia-cangelosi created https://github.com/llvm/llvm-project/pull/127797

Implement the single-vector ("1x1") forms of all {BF/F/S/U/SU/US}MOP4{A/S} instructions in Clang and LLVM, following the ACLE proposal in https://github.com/ARM-software/acle/pull/381/files
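
For illustration, here is a minimal, hedged usage sketch of the new C intrinsics (names and signatures as defined in this patch; the first argument is an immediate tile index in the range 0-3):

#include <arm_sme.h>

// Accumulate the outer product of two signed 8-bit quarter-tile vectors
// into ZA tile 0, then subtract another product from tile 1.
void example(svint8_t zn, svint8_t zm) __arm_streaming __arm_inout("za") {
  svmop4a_1x1_za32_s8_s8(0, zn, zm);  // MOP4A: multiply-accumulate
  svmop4s_1x1_za32_s8_s8(1, zn, zm);  // MOP4S: multiply-subtract
}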

From 87b7d61f23b8aef863d37dcb137603b866ab8c77 Mon Sep 17 00:00:00 2001
From: Virginia Cangelosi <virginia.cangelosi at arm.com>
Date: Tue, 18 Feb 2025 11:02:07 +0000
Subject: [PATCH 1/2] [Clang][LLVM] Implement single-single vectors MOP4{A/S}

---
 clang/include/clang/Basic/arm_sme.td          |  54 ++
 .../sme2-intrinsics/acle_sme2_mop4_1x1.c      | 465 ++++++++++++++++++
 llvm/include/llvm/IR/IntrinsicsAArch64.td     |  51 +-
 .../lib/Target/AArch64/AArch64SMEInstrInfo.td |  68 +--
 llvm/lib/Target/AArch64/SMEInstrFormats.td    |  93 +++-
 .../AArch64/sme2-intrinsics-mop4a_1x1.ll      | 247 ++++++++++
 6 files changed, 903 insertions(+), 75 deletions(-)
 create mode 100644 clang/test/CodeGen/AArch64/sme2-intrinsics/acle_sme2_mop4_1x1.c
 create mode 100644 llvm/test/CodeGen/AArch64/sme2-intrinsics-mop4a_1x1.ll

diff --git a/clang/include/clang/Basic/arm_sme.td b/clang/include/clang/Basic/arm_sme.td
index 288a8c04c217f..2af29ad6699b6 100644
--- a/clang/include/clang/Basic/arm_sme.td
+++ b/clang/include/clang/Basic/arm_sme.td
@@ -376,6 +376,19 @@ let SMETargetGuard = "sme2" in {
 // Outer product and accumulate/subtract
 //
 
+multiclass MOP4SingleSingle<string name, string n, string t, string i, string wide> {
+  def NAME : Inst<"svmop4" # name # "_1x1_" # n # "[_{d}_{d}]", "vidd", t, MergeNone, i # wide # "_1x1", [IsInOutZA, IsStreaming], [ImmCheck<0, ImmCheck0_3>]>;
+}
+
+multiclass MOP4MixedSignsSingleSingle<string n_suffix1, string n_suffix2, string za, string t> {
+  def NAME : SInst<"sv" # n_suffix2 # "_1x1_" # za # "[_{2}_{3}]",
+                              "vid" # !cond(!eq(n_suffix1, "su") : "u", true: "x"),
+                              !cond(!eq(n_suffix1, "su") : "", true: "U") # t,
+                              MergeNone, "aarch64_sme_" # n_suffix2 # "_wide_1x1",
+                              [IsStreaming, IsInOutZA],
+                              [ImmCheck<0, ImmCheck0_3>]>;
+}
+
 let SMETargetGuard = "sme2" in {
   def SVSMOPA  : Inst<"svmopa_za32[_{d}]_m", "viPPdd", "s", MergeNone, "aarch64_sme_smopa_za32", [IsInOutZA, IsStreaming], [ImmCheck<0, ImmCheck0_3>]>;
   def SVUSMOPA : Inst<"svmopa_za32[_{d}]_m", "viPPdd", "Us", MergeNone, "aarch64_sme_umopa_za32", [IsInOutZA, IsStreaming], [ImmCheck<0, ImmCheck0_3>]>;
@@ -387,6 +400,29 @@ let SMETargetGuard = "sme2" in {
 
   def SVBMOPS : Inst<"svbmops_za32[_{d}]_m", "viPPdd", "iUi", MergeNone, "aarch64_sme_bmops_za32", [IsInOutZA, IsStreaming], [ImmCheck<0, ImmCheck0_3>]>;
 
+  defm SVSMOP4A_MZZ_HtoS  : MOP4SingleSingle<"a", "za32", "s", "aarch64_sme_mop4a", "_wide">;
+  defm SVSMOP4S_MZZ_HtoS  : MOP4SingleSingle<"s", "za32", "s", "aarch64_sme_mop4s", "_wide">;
+  defm SVSMOP4A_MZZ_BToS  : MOP4SingleSingle<"a", "za32", "c", "aarch64_sme_mop4a", "_wide">;
+  defm SVSMOP4S_MZZ_BToS  : MOP4SingleSingle<"s", "za32", "c", "aarch64_sme_mop4s", "_wide">;
+
+  defm SVUMOP4A_MZZ_HtoS  : MOP4SingleSingle<"a", "za32", "Us", "aarch64_sme_mop4a", "_wide">;
+  defm SVUMOP4S_MZZ_HtoS  : MOP4SingleSingle<"s", "za32", "Us", "aarch64_sme_mop4s", "_wide">;
+  defm SVUMOP4A_MZZ_BToS  : MOP4SingleSingle<"a", "za32", "Uc", "aarch64_sme_mop4a", "_wide">;
+  defm SVUMOP4S_MZZ_BToS  : MOP4SingleSingle<"s", "za32", "Uc", "aarch64_sme_mop4s", "_wide">;
+
+  defm SVFMOP4A_MZZ_HtoS  : MOP4SingleSingle<"a", "za32", "h", "aarch64_sme_mop4a", "_wide">;
+  defm SVFMOP4S_MZZ_HtoS  : MOP4SingleSingle<"s", "za32", "h", "aarch64_sme_mop4s", "_wide">;
+  defm SVFMOP4A_MZZ_S     : MOP4SingleSingle<"a", "za32", "f", "aarch64_sme_mop4a", "">;
+  defm SVFMOP4S_MZZ_S     : MOP4SingleSingle<"s", "za32", "f", "aarch64_sme_mop4s", "">;
+
+  defm SVBMOP4A_MZZ_S     : MOP4SingleSingle<"a", "za32", "b", "aarch64_sme_mop4a", "_wide">;
+  defm SVBMOP4S_MZZ_S     : MOP4SingleSingle<"s", "za32", "b", "aarch64_sme_mop4s", "_wide">;
+
+  defm SVSUMOP4A_MZZ_BtoS : MOP4MixedSignsSingleSingle<"su", "mop4a", "za32", "c">;
+  defm SVUSMOP4A_MZZ_BtoS : MOP4MixedSignsSingleSingle<"us", "mop4a", "za32", "c">;
+  defm SVSUMOP4S_MZZ_BtoS : MOP4MixedSignsSingleSingle<"su", "mop4s", "za32", "c">;
+  defm SVUSMOP4S_MZZ_BtoS : MOP4MixedSignsSingleSingle<"us", "mop4s", "za32", "c">;
+
   // VERTICAL DOT-PRODUCT
   def SVVDOT_LANE_ZA32_VG1x2_S : Inst<"svvdot_lane_za32[_{d}]_vg1x2", "vm2di", "s", MergeNone, "aarch64_sme_svdot_lane_za32_vg1x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_3>]>;
   def SVVDOT_LANE_ZA32_VG1x4_S : Inst<"svvdot_lane_za32[_{d}]_vg1x4", "vm4di", "c", MergeNone, "aarch64_sme_svdot_lane_za32_vg1x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_3>]>;
@@ -437,6 +473,15 @@ let SMETargetGuard = "sme2" in {
 }
 
 let SMETargetGuard = "sme2,sme-i16i64" in {
+  defm SVSMOP4A_MZZ_HtoD  : MOP4SingleSingle<"a", "za64", "s", "aarch64_sme_mop4a", "_wide">;
+  defm SVSMOP4S_MZZ_HtoD  : MOP4SingleSingle<"s", "za64", "s", "aarch64_sme_mop4s", "_wide">;
+  defm SVUMOP4A_MZZ_HtoD  : MOP4SingleSingle<"a", "za64", "Us", "aarch64_sme_mop4a", "_wide">;
+  defm SVUMOP4S_MZZ_HtoD  : MOP4SingleSingle<"s", "za64", "Us", "aarch64_sme_mop4s", "_wide">;
+  defm SVSUMOP4A_MZZ_HtoD : MOP4MixedSignsSingleSingle<"su", "mop4a", "za64", "s">;
+  defm SVUSMOP4A_MZZ_HtoD : MOP4MixedSignsSingleSingle<"us", "mop4a", "za64", "s">;
+  defm SVSUMOP4S_MZZ_HtoD : MOP4MixedSignsSingleSingle<"su", "mop4s", "za64", "s">;
+  defm SVUSMOP4S_MZZ_HtoD : MOP4MixedSignsSingleSingle<"us", "mop4s", "za64", "s">;
+
   def SVVDOT_LANE_ZA64_VG1x4_S : Inst<"svvdot_lane_za64[_{d}]_vg1x4", "vm4di", "s", MergeNone, "aarch64_sme_svdot_lane_za64_vg1x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_1>]>;
   def SVVDOT_LANE_ZA64_VG1x4_U : Inst<"svvdot_lane_za64[_{d}]_vg1x4", "vm4di", "Us", MergeNone, "aarch64_sme_uvdot_lane_za64_vg1x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_1>]>;
 
@@ -473,6 +518,9 @@ let SMETargetGuard = "sme2" in {
 }
 
 let SMETargetGuard = "sme2,sme-f64f64" in {
+  defm SVFMOP4A_MZZ_D : MOP4SingleSingle<"a", "za64", "d", "aarch64_sme_mop4a", "">;
+  defm SVFMOP4S_MZZ_D : MOP4SingleSingle<"s", "za64", "d", "aarch64_sme_mop4s", "">;
+
   def SVMLA_MULTI_VG1x2_F64 : Inst<"svmla_za64[_{d}]_vg1x2", "vm22", "d", MergeNone, "aarch64_sme_fmla_vg1x2", [IsStreaming, IsInOutZA], []>;
   def SVMLA_MULTI_VG1x4_F64 : Inst<"svmla_za64[_{d}]_vg1x4", "vm44", "d", MergeNone, "aarch64_sme_fmla_vg1x4", [IsStreaming, IsInOutZA], []>;
   def SVMLS_MULTI_VG1x2_F64 : Inst<"svmls_za64[_{d}]_vg1x2", "vm22", "d", MergeNone, "aarch64_sme_fmls_vg1x2", [IsStreaming, IsInOutZA], []>;
@@ -490,6 +538,9 @@ let SMETargetGuard = "sme2,sme-f64f64" in {
 }
 
 let SMETargetGuard = "sme-f16f16" in {
+  defm SVFMOP4A_MZZ_H : MOP4SingleSingle<"a", "za16", "h", "aarch64_sme_mop4a", "">;
+  defm SVFMOP4S_MZZ_H : MOP4SingleSingle<"s", "za16", "h", "aarch64_sme_mop4s", "">;
+
   def SVMLA_MULTI_VG1x2_F16 : Inst<"svmla_za16[_f16]_vg1x2", "vm22", "h", MergeNone, "aarch64_sme_fmla_vg1x2", [IsStreaming, IsInOutZA], []>;
   def SVMLA_MULTI_VG1x4_F16 : Inst<"svmla_za16[_f16]_vg1x4", "vm44", "h", MergeNone, "aarch64_sme_fmla_vg1x4", [IsStreaming, IsInOutZA], []>;
   def SVMLS_MULTI_VG1x2_F16 : Inst<"svmls_za16[_f16]_vg1x2", "vm22", "h", MergeNone, "aarch64_sme_fmls_vg1x2", [IsStreaming, IsInOutZA], []>;
@@ -507,6 +558,9 @@ let SMETargetGuard = "sme-f16f16" in {
 }
 
 let SMETargetGuard = "sme-b16b16" in {
+  defm SVBMOP4A_MZZ_H : MOP4SingleSingle<"a", "za16", "b", "aarch64_sme_mop4a", "">;
+  defm SVBMOP4S_MZZ_H : MOP4SingleSingle<"s", "za16", "b", "aarch64_sme_mop4s", "">;
+
   def SVMLA_MULTI_VG1x2_BF16 : Inst<"svmla_za16[_bf16]_vg1x2", "vm22", "b", MergeNone, "aarch64_sme_fmla_vg1x2", [IsStreaming, IsInOutZA], []>;
   def SVMLA_MULTI_VG1x4_BF16 : Inst<"svmla_za16[_bf16]_vg1x4", "vm44", "b", MergeNone, "aarch64_sme_fmla_vg1x4", [IsStreaming, IsInOutZA], []>;
   def SVMLS_MULTI_VG1x2_BF16 : Inst<"svmls_za16[_bf16]_vg1x2", "vm22", "b", MergeNone, "aarch64_sme_fmls_vg1x2", [IsStreaming, IsInOutZA], []>;
diff --git a/clang/test/CodeGen/AArch64/sme2-intrinsics/acle_sme2_mop4_1x1.c b/clang/test/CodeGen/AArch64/sme2-intrinsics/acle_sme2_mop4_1x1.c
new file mode 100644
index 0000000000000..37238053009fd
--- /dev/null
+++ b/clang/test/CodeGen/AArch64/sme2-intrinsics/acle_sme2_mop4_1x1.c
@@ -0,0 +1,465 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+
+// REQUIRES: aarch64-registered-target
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme-mop4 -target-feature +sme-f16f16 -target-feature +sme-i16i64 -target-feature +sme-b16b16 -target-feature +sme-f64f64 -target-feature +sme -target-feature +sme2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme-mop4 -target-feature +sme-f16f16 -target-feature +sme-i16i64 -target-feature +sme-b16b16 -target-feature +sme-f64f64 -target-feature +sme -target-feature +sme2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +bf16 -target-feature +sme-mop4 -target-feature +sme-f16f16 -target-feature +sme-i16i64 -target-feature +sme-b16b16 -target-feature +sme-f64f64 -target-feature +sme -target-feature +sme2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +bf16 -target-feature +sme-mop4 -target-feature +sme-f16f16 -target-feature +sme-i16i64 -target-feature +sme-b16b16 -target-feature +sme-f64f64 -target-feature +sme -target-feature +sme2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -triple aarch64 -target-feature +bf16 -target-feature +sme-mop4 -target-feature +sme-f16f16 -target-feature +sme-i16i64 -target-feature +sme-b16b16 -target-feature +sme-f64f64 -target-feature +sme -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+
+
+#include <arm_sme.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+#define SME_ACLE_FUNC(A1,A2_UNUSED,A3) A1##A3
+#else
+#define SME_ACLE_FUNC(A1,A2,A3) A1##A2##A3
+#endif
+
+// CHECK-LABEL: @test_svmop4a_1x1_za32_s8_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv16i8(i32 3, <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z27test_svmop4a_1x1_za32_s8_s8u10__SVInt8_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv16i8(i32 3, <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4a_1x1_za32_s8_s8(svint8_t zn, svint8_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4a_1x1_za32,_s8_s8,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4s_1x1_za32_s8_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv16i8(i32 3, <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z27test_svmop4s_1x1_za32_s8_s8u10__SVInt8_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv16i8(i32 3, <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4s_1x1_za32_s8_s8(svint8_t zn, svint8_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4s_1x1_za32,_s8_s8,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4a_1x1_za32_u8_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv16i8(i32 3, <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z27test_svmop4a_1x1_za32_u8_u8u11__SVUint8_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv16i8(i32 3, <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4a_1x1_za32_u8_u8(svuint8_t zn, svuint8_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4a_1x1_za32,_u8_u8,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4s_1x1_za32_u8_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv16i8(i32 3, <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z27test_svmop4s_1x1_za32_u8_u8u11__SVUint8_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv16i8(i32 3, <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4s_1x1_za32_u8_u8(svuint8_t zn, svuint8_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4s_1x1_za32,_u8_u8,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4a_1x1_za32_s16_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv8i16(i32 3, <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z29test_svmop4a_1x1_za32_s16_s16u11__SVInt16_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv8i16(i32 3, <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4a_1x1_za32_s16_s16(svint16_t zn, svint16_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4a_1x1_za32,_s16_s16,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4s_1x1_za32_s16_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv8i16(i32 3, <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z29test_svmop4s_1x1_za32_s16_s16u11__SVInt16_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv8i16(i32 3, <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4s_1x1_za32_s16_s16(svint16_t zn, svint16_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4s_1x1_za32,_s16_s16,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4a_1x1_za32_u16_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv8i16(i32 3, <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z29test_svmop4a_1x1_za32_u16_u16u12__SVUint16_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv8i16(i32 3, <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4a_1x1_za32_u16_u16(svuint16_t zn, svuint16_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4a_1x1_za32,_u16_u16,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4s_1x1_za32_u16_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv8i16(i32 3, <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z29test_svmop4s_1x1_za32_u16_u16u12__SVUint16_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv8i16(i32 3, <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4s_1x1_za32_u16_u16(svuint16_t zn, svuint16_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4s_1x1_za32,_u16_u16,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4a_1x1_za32_f16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv8f16(i32 3, <vscale x 8 x half> [[ZN:%.*]], <vscale x 8 x half> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z29test_svmop4a_1x1_za32_f16_f16u13__SVFloat16_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv8f16(i32 3, <vscale x 8 x half> [[ZN:%.*]], <vscale x 8 x half> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4a_1x1_za32_f16_f16(svfloat16_t zn, svfloat16_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4a_1x1_za32,_f16_f16,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4s_1x1_za32_f16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv8f16(i32 3, <vscale x 8 x half> [[ZN:%.*]], <vscale x 8 x half> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z29test_svmop4s_1x1_za32_f16_f16u13__SVFloat16_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv8f16(i32 3, <vscale x 8 x half> [[ZN:%.*]], <vscale x 8 x half> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4s_1x1_za32_f16_f16(svfloat16_t zn, svfloat16_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4s_1x1_za32,_f16_f16,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4a_1x1_za32_bf16_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv8bf16(i32 3, <vscale x 8 x bfloat> [[ZN:%.*]], <vscale x 8 x bfloat> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z31test_svmop4a_1x1_za32_bf16_bf16u14__SVBfloat16_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv8bf16(i32 3, <vscale x 8 x bfloat> [[ZN:%.*]], <vscale x 8 x bfloat> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4a_1x1_za32_bf16_bf16(svbfloat16_t zn, svbfloat16_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4a_1x1_za32,_bf16_bf16,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4s_1x1_za32_bf16_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv8bf16(i32 3, <vscale x 8 x bfloat> [[ZN:%.*]], <vscale x 8 x bfloat> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z31test_svmop4s_1x1_za32_bf16_bf16u14__SVBfloat16_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv8bf16(i32 3, <vscale x 8 x bfloat> [[ZN:%.*]], <vscale x 8 x bfloat> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4s_1x1_za32_bf16_bf16(svbfloat16_t zn, svbfloat16_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4s_1x1_za32,_bf16_bf16,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4a_1x1_za64_s16_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv8i16(i32 3, <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z29test_svmop4a_1x1_za64_s16_s16u11__SVInt16_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv8i16(i32 3, <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4a_1x1_za64_s16_s16(svint16_t zn, svint16_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4a_1x1_za64,_s16_s16,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4s_1x1_za64_s16_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv8i16(i32 3, <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z29test_svmop4s_1x1_za64_s16_s16u11__SVInt16_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv8i16(i32 3, <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4s_1x1_za64_s16_s16(svint16_t zn, svint16_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4s_1x1_za64,_s16_s16,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4a_1x1_za64_u16_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv8i16(i32 3, <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z29test_svmop4a_1x1_za64_u16_u16u12__SVUint16_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv8i16(i32 3, <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4a_1x1_za64_u16_u16(svuint16_t zn, svuint16_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4a_1x1_za64,_u16_u16,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4s_1x1_za64_u16_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv8i16(i32 3, <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z29test_svmop4s_1x1_za64_u16_u16u12__SVUint16_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv8i16(i32 3, <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4s_1x1_za64_u16_u16(svuint16_t zn, svuint16_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4s_1x1_za64,_u16_u16,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4a_1x1_za64_s16_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv8i16(i32 3, <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z29test_svmop4a_1x1_za64_s16_u16u11__SVInt16_tu12__SVUint16_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv8i16(i32 3, <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4a_1x1_za64_s16_u16(svint16_t zn, svuint16_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4a_1x1_za64,_s16_u16,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4s_1x1_za64_s16_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv8i16(i32 3, <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z29test_svmop4s_1x1_za64_s16_u16u11__SVInt16_tu12__SVUint16_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv8i16(i32 3, <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4s_1x1_za64_s16_u16(svint16_t zn, svuint16_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4s_1x1_za64,_s16_u16,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4a_1x1_za64_u16_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv8i16(i32 3, <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z29test_svmop4a_1x1_za64_u16_s16u12__SVUint16_tu11__SVInt16_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv8i16(i32 3, <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4a_1x1_za64_u16_s16(svuint16_t zn, svint16_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4a_1x1_za64,_u16_s16,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4s_1x1_za64_u16_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv8i16(i32 3, <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z29test_svmop4s_1x1_za64_u16_s16u12__SVUint16_tu11__SVInt16_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv8i16(i32 3, <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4s_1x1_za64_u16_s16(svuint16_t zn, svint16_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4s_1x1_za64,_u16_s16,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4a_1x1_za32_s8_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv16i8(i32 3, <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z27test_svmop4a_1x1_za32_s8_u8u10__SVInt8_tu11__SVUint8_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv16i8(i32 3, <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4a_1x1_za32_s8_u8(svint8_t zn, svuint8_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4a_1x1_za32,_s8_u8,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4s_1x1_za32_s8_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv16i8(i32 3, <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z27test_svmop4s_1x1_za32_s8_u8u10__SVInt8_tu11__SVUint8_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv16i8(i32 3, <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4s_1x1_za32_s8_u8(svint8_t zn, svuint8_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4s_1x1_za32,_s8_u8,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4a_1x1_za32_u8_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv16i8(i32 3, <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z27test_svmop4a_1x1_za32_u8_s8u11__SVUint8_tu10__SVInt8_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv16i8(i32 3, <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4a_1x1_za32_u8_s8(svuint8_t zn, svint8_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4a_1x1_za32,_u8_s8,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4s_1x1_za32_u8_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv16i8(i32 3, <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z27test_svmop4s_1x1_za32_u8_s8u11__SVUint8_tu10__SVInt8_t(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv16i8(i32 3, <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4s_1x1_za32_u8_s8(svuint8_t zn, svint8_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4s_1x1_za32,_u8_s8,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4a_1x1_za16_f16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.1x1.nxv8f16(i32 3, <vscale x 8 x half> [[ZN:%.*]], <vscale x 8 x half> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z29test_svmop4a_1x1_za16_f16_f16u13__SVFloat16_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.1x1.nxv8f16(i32 3, <vscale x 8 x half> [[ZN:%.*]], <vscale x 8 x half> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4a_1x1_za16_f16_f16(svfloat16_t zn, svfloat16_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4a_1x1_za16,_f16_f16,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4s_1x1_za16_f16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.1x1.nxv8f16(i32 3, <vscale x 8 x half> [[ZN:%.*]], <vscale x 8 x half> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z29test_svmop4s_1x1_za16_f16_f16u13__SVFloat16_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.1x1.nxv8f16(i32 3, <vscale x 8 x half> [[ZN:%.*]], <vscale x 8 x half> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4s_1x1_za16_f16_f16(svfloat16_t zn, svfloat16_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4s_1x1_za16,_f16_f16,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4a_1x1_za32_f32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.1x1.nxv4f32(i32 3, <vscale x 4 x float> [[ZN:%.*]], <vscale x 4 x float> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z29test_svmop4a_1x1_za32_f32_f32u13__SVFloat32_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.1x1.nxv4f32(i32 3, <vscale x 4 x float> [[ZN:%.*]], <vscale x 4 x float> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4a_1x1_za32_f32_f32(svfloat32_t zn, svfloat32_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4a_1x1_za32,_f32_f32,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4s_1x1_za32_f32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.1x1.nxv4f32(i32 3, <vscale x 4 x float> [[ZN:%.*]], <vscale x 4 x float> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z29test_svmop4s_1x1_za32_f32_f32u13__SVFloat32_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.1x1.nxv4f32(i32 3, <vscale x 4 x float> [[ZN:%.*]], <vscale x 4 x float> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4s_1x1_za32_f32_f32(svfloat32_t zn, svfloat32_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4s_1x1_za32,_f32_f32,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4a_1x1_za64_f64_f64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.1x1.nxv2f64(i32 3, <vscale x 2 x double> [[ZN:%.*]], <vscale x 2 x double> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z29test_svmop4a_1x1_za64_f64_f64u13__SVFloat64_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.1x1.nxv2f64(i32 3, <vscale x 2 x double> [[ZN:%.*]], <vscale x 2 x double> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4a_1x1_za64_f64_f64(svfloat64_t zn, svfloat64_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4a_1x1_za64,_f64_f64,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4s_1x1_za64_f64_f64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.1x1.nxv2f64(i32 3, <vscale x 2 x double> [[ZN:%.*]], <vscale x 2 x double> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z29test_svmop4s_1x1_za64_f64_f64u13__SVFloat64_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.1x1.nxv2f64(i32 3, <vscale x 2 x double> [[ZN:%.*]], <vscale x 2 x double> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4s_1x1_za64_f64_f64(svfloat64_t zn, svfloat64_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4s_1x1_za64,_f64_f64,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4a_1x1_za16_bf16_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.1x1.nxv8bf16(i32 3, <vscale x 8 x bfloat> [[ZN:%.*]], <vscale x 8 x bfloat> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z31test_svmop4a_1x1_za16_bf16_bf16u14__SVBfloat16_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4a.1x1.nxv8bf16(i32 3, <vscale x 8 x bfloat> [[ZN:%.*]], <vscale x 8 x bfloat> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4a_1x1_za16_bf16_bf16(svbfloat16_t zn, svbfloat16_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4a_1x1_za16,_bf16_bf16,)(3, zn, zm);
+}
+
+// CHECK-LABEL: @test_svmop4s_1x1_za16_bf16_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.1x1.nxv8bf16(i32 3, <vscale x 8 x bfloat> [[ZN:%.*]], <vscale x 8 x bfloat> [[ZM:%.*]])
+// CHECK-NEXT:    ret void
+//
+// CPP-CHECK-LABEL: @_Z31test_svmop4s_1x1_za16_bf16_bf16u14__SVBfloat16_tS_(
+// CPP-CHECK-NEXT:  entry:
+// CPP-CHECK-NEXT:    tail call void @llvm.aarch64.sme.mop4s.1x1.nxv8bf16(i32 3, <vscale x 8 x bfloat> [[ZN:%.*]], <vscale x 8 x bfloat> [[ZM:%.*]])
+// CPP-CHECK-NEXT:    ret void
+//
+void test_svmop4s_1x1_za16_bf16_bf16(svbfloat16_t zn, svbfloat16_t zm) __arm_streaming __arm_inout("za") {
+  SME_ACLE_FUNC(svmop4s_1x1_za16,_bf16_bf16,)(3, zn, zm);
+}
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 6dfc3c8f2a393..0714602a2f09b 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1497,7 +1497,7 @@ let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
                  LLVMSubdivide2VectorType<0>,
                  llvm_i32_ty],
                 [IntrNoMem, ImmArg<ArgIndex<3>>]>;
-  
+
   class SVE2_1VectorArgIndexed_Intrinsic
     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                 [LLVMMatchType<0>,
@@ -1512,7 +1512,7 @@ let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
                  llvm_i32_ty,
                  llvm_i32_ty],
                 [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
-  
+
   class SVE2_1VectorArg_Pred_Intrinsic
     : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                             [llvm_anyvector_ty],
@@ -1522,7 +1522,7 @@ let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
     : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                             [llvm_anyvector_ty, llvm_i32_ty],
                             [IntrNoMem, ImmArg<ArgIndex<1>>]>;
-  
+
   class SVE2_Pred_1VectorArgIndexed_Intrinsic
     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                             [LLVMMatchType<0>,
@@ -3064,6 +3064,17 @@ let TargetPrefix = "aarch64" in {
   def int_aarch64_sme_usmopa_wide : SME_OuterProduct_Intrinsic;
   def int_aarch64_sme_usmops_wide : SME_OuterProduct_Intrinsic;
 
+  class SME_OuterProduct_QuarterTile
+      : DefaultAttrsIntrinsic<[],
+          [llvm_i32_ty,
+           llvm_anyvector_ty,
+           LLVMMatchType<0>], [ImmArg<ArgIndex<0>>]>;
+
+  def int_aarch64_sme_mop4a_wide_1x1 : SME_OuterProduct_QuarterTile;
+  def int_aarch64_sme_mop4s_wide_1x1 : SME_OuterProduct_QuarterTile;
+  def int_aarch64_sme_mop4a_1x1 : SME_OuterProduct_QuarterTile;
+  def int_aarch64_sme_mop4s_1x1 : SME_OuterProduct_QuarterTile;
+
   class SME_AddVectorToTile_Intrinsic
       : DefaultAttrsIntrinsic<[],
           [llvm_i32_ty,
@@ -3319,11 +3330,11 @@ let TargetPrefix = "aarch64" in {
     : DefaultAttrsIntrinsic<[llvm_nxv8bf16_ty],
                             [llvm_nxv4f32_ty, llvm_nxv4f32_ty],
                             [IntrNoMem]>;
-  
+
   class SME2_CVT_WIDENING_VG2_Intrinsic
     : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
                             [LLVMSubdivide2VectorType<0>], [IntrNoMem]>;
-  
+
 
   class SME2_CVT_VG4_SINGLE_Intrinsic
     : DefaultAttrsIntrinsic<[LLVMSubdivide4VectorType<0>],
@@ -3564,7 +3575,7 @@ let TargetPrefix = "aarch64" in {
   foreach vg = ["vg1x2", "vg1x4", "vg2x1", "vg2x2", "vg2x4", "vg4x1", "vg4x2", "vg4x4"] in {
     def int_aarch64_sme_zero_za64_ # vg : DefaultAttrsIntrinsic<[], [llvm_i32_ty],  [IntrNoMem, IntrHasSideEffects]>;
   }
-  
+
   // Multi-vector signed saturating doubling multiply high
 
   def int_aarch64_sve_sqdmulh_single_vgx2 : SME2_VG2_Multi_Single_Intrinsic;
@@ -3634,7 +3645,7 @@ let TargetPrefix = "aarch64" in {
   //
   //Multi-vector floating-point convert from half-precision to deinterleaved single-precision.
   //
-  
+
   def int_aarch64_sve_fcvtl_widen_x2  : SME2_CVT_WIDENING_VG2_Intrinsic;
 
   //
@@ -3826,7 +3837,7 @@ let TargetPrefix = "aarch64" in {
   def int_aarch64_sme_luti4_lane_zt
     : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_i32_ty, llvm_nxv16i8_ty, llvm_i32_ty],
                             [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>, IntrReadMem]>;
-                            
+
   // Lookup table expand two registers
   //
   def int_aarch64_sme_luti2_lane_zt_x2
@@ -3835,7 +3846,7 @@ let TargetPrefix = "aarch64" in {
   def int_aarch64_sme_luti4_lane_zt_x2
     : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>], [llvm_i32_ty, llvm_nxv16i8_ty, llvm_i32_ty],
                             [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>, IntrReadMem]>;
-                            
+
   //
   // Lookup table expand four registers
   //
@@ -3853,7 +3864,7 @@ let TargetPrefix = "aarch64" in {
                             [llvm_i32_ty, llvm_nxv16i8_ty, llvm_nxv16i8_ty],
                             [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects]>;
 
-  
+
   //
   // Register scaling
   //
@@ -3901,7 +3912,7 @@ def int_aarch64_sve_extq : AdvSIMD_2VectorArgIndexed_Intrinsic;
 //
 // SVE2.1 - Move predicate to/from vector
 //
-def int_aarch64_sve_pmov_to_pred_lane : SVE2_1VectorArgIndexed_Pred_Intrinsic; 
+def int_aarch64_sve_pmov_to_pred_lane : SVE2_1VectorArgIndexed_Pred_Intrinsic;
 
 def int_aarch64_sve_pmov_to_pred_lane_zero : SVE2_1VectorArg_Pred_Intrinsic;
 
@@ -3943,10 +3954,10 @@ let TargetPrefix = "aarch64" in {
     : DefaultAttrsIntrinsic<[llvm_nxv16i8_ty],
                             [llvm_anyvector_ty, LLVMMatchType<0>],
                             [IntrReadMem, IntrInaccessibleMemOnly]>;
-  
+
   def int_aarch64_sve_fp8_cvtn  : SVE2_FP8_Narrow_Cvt;
   def int_aarch64_sve_fp8_cvtnb : SVE2_FP8_Narrow_Cvt;
-  
+
   def int_aarch64_sve_fp8_cvtnt
     : DefaultAttrsIntrinsic<[llvm_nxv16i8_ty],
                             [llvm_nxv16i8_ty, llvm_anyvector_ty, LLVMMatchType<0>],
@@ -3958,32 +3969,32 @@ let TargetPrefix = "aarch64" in {
                             [LLVMMatchType<0>,
                              llvm_nxv16i8_ty, llvm_nxv16i8_ty],
                             [IntrReadMem, IntrInaccessibleMemOnly]>;
-  
+
   class SVE2_FP8_FMLA_FDOT_Lane
     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                             [LLVMMatchType<0>,
                              llvm_nxv16i8_ty, llvm_nxv16i8_ty, llvm_i32_ty],
                             [IntrReadMem, IntrInaccessibleMemOnly, ImmArg<ArgIndex<3>>]>;
-  
+
   def int_aarch64_sve_fp8_fdot      : SVE2_FP8_FMLA_FDOT;
   def int_aarch64_sve_fp8_fdot_lane : SVE2_FP8_FMLA_FDOT_Lane;
 
   // Fused multiply-add
   def int_aarch64_sve_fp8_fmlalb      : SVE2_FP8_FMLA_FDOT;
   def int_aarch64_sve_fp8_fmlalb_lane : SVE2_FP8_FMLA_FDOT_Lane;
-  
+
   def int_aarch64_sve_fp8_fmlalt      : SVE2_FP8_FMLA_FDOT;
   def int_aarch64_sve_fp8_fmlalt_lane : SVE2_FP8_FMLA_FDOT_Lane;
-  
+
   def int_aarch64_sve_fp8_fmlallbb      : SVE2_FP8_FMLA_FDOT;
   def int_aarch64_sve_fp8_fmlallbb_lane : SVE2_FP8_FMLA_FDOT_Lane;
-  
+
   def int_aarch64_sve_fp8_fmlallbt      : SVE2_FP8_FMLA_FDOT;
   def int_aarch64_sve_fp8_fmlallbt_lane : SVE2_FP8_FMLA_FDOT_Lane;
-  
+
   def int_aarch64_sve_fp8_fmlalltb      : SVE2_FP8_FMLA_FDOT;
   def int_aarch64_sve_fp8_fmlalltb_lane : SVE2_FP8_FMLA_FDOT_Lane;
-  
+
   def int_aarch64_sve_fp8_fmlalltt      : SVE2_FP8_FMLA_FDOT;
   def int_aarch64_sve_fp8_fmlalltt_lane : SVE2_FP8_FMLA_FDOT_Lane;
 
diff --git a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
index d2aa86f388db2..0673394d4daa9 100644
--- a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
@@ -148,30 +148,30 @@ defm USMOPS_MPPZZ_D : sme_int_outer_product_i64<0b101, "usmops", int_aarch64_sme
 }
 
 let Predicates = [HasSME_MOP4] in {
-  defm SMOP4A  : sme_quarter_outer_product_i8_i32<0b0, 0b0, 0b0, "smop4a">;
-  defm SMOP4S  : sme_quarter_outer_product_i8_i32<0b0, 0b0, 0b1, "smop4s">;
-  defm SUMOP4A : sme_quarter_outer_product_i8_i32<0b0, 0b1, 0b0, "sumop4a">;
-  defm SUMOP4S : sme_quarter_outer_product_i8_i32<0b0, 0b1, 0b1, "sumop4s">;
-  defm USMOP4A : sme_quarter_outer_product_i8_i32<0b1, 0b0, 0b0, "usmop4a">;
-  defm USMOP4S : sme_quarter_outer_product_i8_i32<0b1, 0b0, 0b1, "usmop4s">;
-  defm UMOP4A  : sme_quarter_outer_product_i8_i32<0b1, 0b1, 0b0, "umop4a">;
-  defm UMOP4S  : sme_quarter_outer_product_i8_i32<0b1, 0b1, 0b1, "umop4s">;
-
-  defm SMOP4A : sme_quarter_outer_product_i16_i32<0b0, 0b0, "smop4a">;
-  defm SMOP4S : sme_quarter_outer_product_i16_i32<0b0, 0b1, "smop4s">;
-  defm UMOP4A : sme_quarter_outer_product_i16_i32<0b1, 0b0, "umop4a">;
-  defm UMOP4S : sme_quarter_outer_product_i16_i32<0b1, 0b1, "umop4s">;
+  defm SMOP4A  : sme_quarter_outer_product_i8_i32<0b0, 0b0, 0b0, "smop4a", int_aarch64_sme_mop4a_wide_1x1>;
+  defm SMOP4S  : sme_quarter_outer_product_i8_i32<0b0, 0b0, 0b1, "smop4s", int_aarch64_sme_mop4s_wide_1x1>;
+  defm SUMOP4A : sme_quarter_outer_product_i8_i32<0b0, 0b1, 0b0, "sumop4a", int_aarch64_sme_mop4a_wide_1x1>;
+  defm SUMOP4S : sme_quarter_outer_product_i8_i32<0b0, 0b1, 0b1, "sumop4s", int_aarch64_sme_mop4s_wide_1x1>;
+  defm USMOP4A : sme_quarter_outer_product_i8_i32<0b1, 0b0, 0b0, "usmop4a", int_aarch64_sme_mop4a_wide_1x1>;
+  defm USMOP4S : sme_quarter_outer_product_i8_i32<0b1, 0b0, 0b1, "usmop4s", int_aarch64_sme_mop4s_wide_1x1>;
+  defm UMOP4A  : sme_quarter_outer_product_i8_i32<0b1, 0b1, 0b0, "umop4a", int_aarch64_sme_mop4a_wide_1x1>;
+  defm UMOP4S  : sme_quarter_outer_product_i8_i32<0b1, 0b1, 0b1, "umop4s", int_aarch64_sme_mop4s_wide_1x1>;
+
+  defm SMOP4A : sme_quarter_outer_product_i16_i32<0b0, 0b0, "smop4a", int_aarch64_sme_mop4a_wide_1x1>;
+  defm SMOP4S : sme_quarter_outer_product_i16_i32<0b0, 0b1, "smop4s", int_aarch64_sme_mop4s_wide_1x1>;
+  defm UMOP4A : sme_quarter_outer_product_i16_i32<0b1, 0b0, "umop4a", int_aarch64_sme_mop4a_wide_1x1>;
+  defm UMOP4S : sme_quarter_outer_product_i16_i32<0b1, 0b1, "umop4s", int_aarch64_sme_mop4s_wide_1x1>;
 }
 
 let Predicates = [HasSME_MOP4, HasSMEI16I64] in {
-  defm SMOP4A : sme_quarter_outer_product_i64<0b0, 0b0, 0b0, "smop4a">;
-  defm SMOP4S : sme_quarter_outer_product_i64<0b0, 0b0, 0b1, "smop4s">;
-  defm SUMOP4A : sme_quarter_outer_product_i64<0b0, 0b1, 0b0, "sumop4a">;
-  defm SUMOP4S : sme_quarter_outer_product_i64<0b0, 0b1, 0b1, "sumop4s">;
-  defm UMOP4A : sme_quarter_outer_product_i64<0b1, 0b1, 0b0, "umop4a">;
-  defm UMOP4S : sme_quarter_outer_product_i64<0b1, 0b1, 0b1, "umop4s">;
-  defm USMOP4A : sme_quarter_outer_product_i64<0b1, 0b0, 0b0, "usmop4a">;
-  defm USMOP4S : sme_quarter_outer_product_i64<0b1, 0b0, 0b1, "usmop4s">;
+  defm SMOP4A : sme_quarter_outer_product_i64<0b0, 0b0, 0b0, "smop4a", int_aarch64_sme_mop4a_wide_1x1>;
+  defm SMOP4S : sme_quarter_outer_product_i64<0b0, 0b0, 0b1, "smop4s", int_aarch64_sme_mop4s_wide_1x1>;
+  defm SUMOP4A : sme_quarter_outer_product_i64<0b0, 0b1, 0b0, "sumop4a", int_aarch64_sme_mop4a_wide_1x1>;
+  defm SUMOP4S : sme_quarter_outer_product_i64<0b0, 0b1, 0b1, "sumop4s", int_aarch64_sme_mop4s_wide_1x1>;
+  defm UMOP4A : sme_quarter_outer_product_i64<0b1, 0b1, 0b0, "umop4a", int_aarch64_sme_mop4a_wide_1x1>;
+  defm UMOP4S : sme_quarter_outer_product_i64<0b1, 0b1, 0b1, "umop4s", int_aarch64_sme_mop4s_wide_1x1>;
+  defm USMOP4A : sme_quarter_outer_product_i64<0b1, 0b0, 0b0, "usmop4a", int_aarch64_sme_mop4a_wide_1x1>;
+  defm USMOP4S : sme_quarter_outer_product_i64<0b1, 0b0, 0b1, "usmop4s", int_aarch64_sme_mop4s_wide_1x1>;
 }
 
 let Predicates = [HasSME_TMOP] in {
@@ -1054,14 +1054,14 @@ let Predicates = [HasSME2, HasSVEBFSCALE] in {
 }
 
 let Predicates = [HasSME_MOP4] in {
-  defm BFMOP4A : sme2_bfmop4as_widening<0, "bfmop4a">;
-  defm BFMOP4S : sme2_bfmop4as_widening<1, "bfmop4s">;
+  defm BFMOP4A : sme2_bfmop4as_widening<0, "bfmop4a", int_aarch64_sme_mop4a_wide_1x1>;
+  defm BFMOP4S : sme2_bfmop4as_widening<1, "bfmop4s", int_aarch64_sme_mop4s_wide_1x1>;
 
-  defm FMOP4A : sme2_fmop4as_fp16_fp32_widening<0, "fmop4a">;
-  defm FMOP4S : sme2_fmop4as_fp16_fp32_widening<1, "fmop4s">;
+  defm FMOP4A : sme2_fmop4as_fp16_fp32_widening<0, "fmop4a", int_aarch64_sme_mop4a_wide_1x1>;
+  defm FMOP4S : sme2_fmop4as_fp16_fp32_widening<1, "fmop4s", int_aarch64_sme_mop4s_wide_1x1>;
 
-  defm FMOP4A : sme2_fmop4as_fp32_non_widening<0, "fmop4a">;
-  defm FMOP4S : sme2_fmop4as_fp32_non_widening<1, "fmop4s">;
+  defm FMOP4A : sme2_fmop4as_fp32_non_widening<0, "fmop4a", int_aarch64_sme_mop4a_1x1>;
+  defm FMOP4S : sme2_fmop4as_fp32_non_widening<1, "fmop4s", int_aarch64_sme_mop4s_1x1>;
 }
 
 let Predicates = [HasSME_TMOP] in {
@@ -1084,7 +1084,7 @@ let Predicates = [HasSME_TMOP, HasSMEB16B16] in {
 
 let Predicates = [HasSME_TMOP, HasSMEF8F32], Uses = [FPMR, FPCR] in {
   def FTMOPA_M2ZZZI_BtoS : sme_tmopa_32b<0b01000, ZZ_b_mul_r, ZPR8, "ftmopa">;
-} 
+}
 
 let Predicates = [HasSME_TMOP, HasSMEF8F16], Uses = [FPMR, FPCR] in {
   def FTMOPA_M2ZZZI_BtoH : sme_tmopa_16b<0b01001, ZZ_b_mul_r, ZPR8, "ftmopa">;
@@ -1099,8 +1099,8 @@ let Predicates = [HasSME_TMOP, HasSMEF16F16] in {
 }
 
 let Predicates = [HasSME_MOP4, HasSMEF16F16] in {
-  defm FMOP4A : sme2_fmop4as_fp16_non_widening<0, "fmop4a">;
-  defm FMOP4S : sme2_fmop4as_fp16_non_widening<1, "fmop4s">;
+  defm FMOP4A : sme2_fmop4as_fp16_non_widening<0, "fmop4a", int_aarch64_sme_mop4a_1x1>;
+  defm FMOP4S : sme2_fmop4as_fp16_non_widening<1, "fmop4s", int_aarch64_sme_mop4s_1x1>;
 }
 
 let Predicates = [HasSME2, HasSVEBFSCALE] in {
@@ -1115,11 +1115,11 @@ let Predicates = [HasSME_MOP4, HasSMEF8F32] in {
 }
 
 let Predicates = [HasSME_MOP4, HasSMEB16B16] in {
-  defm BFMOP4A : sme2_bfmop4as_non_widening<0, "bfmop4a">;
-  defm BFMOP4S : sme2_bfmop4as_non_widening<1, "bfmop4s">;
+  defm BFMOP4A : sme2_bfmop4as_non_widening<0, "bfmop4a", int_aarch64_sme_mop4a_1x1>;
+  defm BFMOP4S : sme2_bfmop4as_non_widening<1, "bfmop4s", int_aarch64_sme_mop4s_1x1>;
 }
 
 let Predicates = [HasSME_MOP4, HasSMEF64F64] in {
-  defm FMOP4A : sme2_fmop4as_fp64_non_widening<0, "fmop4a">;
-  defm FMOP4S : sme2_fmop4as_fp64_non_widening<1, "fmop4s">;
+  defm FMOP4A : sme2_fmop4as_fp64_non_widening<0, "fmop4a", int_aarch64_sme_mop4a_1x1>;
+  defm FMOP4S : sme2_fmop4as_fp64_non_widening<1, "fmop4s", int_aarch64_sme_mop4s_1x1>;
 }
diff --git a/llvm/lib/Target/AArch64/SMEInstrFormats.td b/llvm/lib/Target/AArch64/SMEInstrFormats.td
index 4f6a413ba5e5c..5a3d12e9f7b8b 100644
--- a/llvm/lib/Target/AArch64/SMEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SMEInstrFormats.td
@@ -104,6 +104,15 @@ class sme_outer_product_pseudo<ZPRRegOp zpr_ty, SMEMatrixTypeEnum za_flag>
   let usesCustomInserter = 1;
 }
 
+class sme2_quarter_tile_outer_product_pseudo<ZPRRegOp zn_ty, ZPRRegOp zm_ty, SMEMatrixTypeEnum za_flag>
+    : Pseudo<(outs), (ins i32imm:$tile,
+                          zn_ty:$zn, zm_ty:$zm), []>,
+      Sched<[]> {
+  // Translated to the actual instructions in AArch64ISelLowering.cpp
+  let SMEMatrixType = za_flag;
+  let usesCustomInserter = 1;
+}
+
 class sme2_za_array_2op_multi_single_pseudo<string name, Operand index_ty, RegisterOperand multi_vector_ty,
                                             ZPRRegOp zpr_ty, SMEMatrixTypeEnum za_flag>
     : SMEPseudo2Instr<name, 0>,
@@ -251,12 +260,15 @@ class SME2_Tile_VG4_Multi_Pat<string name, SDPatternOperator intrinsic, Operand
 
 class SME2_Zero_Matrix_Pat<string name, SDPatternOperator intrinsic, Operand offset_ty, ComplexPattern tileslice>
     : Pat<(intrinsic (i32 (tileslice MatrixIndexGPR32Op8_11:$base, offset_ty:$offset))),
-    (!cast<Instruction>(name) $base, $offset)>; 
+    (!cast<Instruction>(name) $base, $offset)>;
 
 class SME2_Tile_Movaz_Pat<string name, SDPatternOperator intrinsic, ValueType out_vt, Operand tile_imm, Operand index_ty, ComplexPattern tileslice>
     : Pat<(out_vt (intrinsic tile_imm:$tile, (i32 (tileslice MatrixIndexGPR32Op12_15:$base, index_ty:$offset)))),
           (!cast<Instruction>(name # _PSEUDO) $tile, $base, $offset)>;
 
+class SME2_ZA_Tile_TwoVec_Pat<string name, SDPatternOperator intrinsic, Operand imm_ty, ValueType vt>
+    : Pat<(intrinsic imm_ty:$tile, vt:$Zn, vt:$Zm),
+          (!cast<Instruction>(name # _PSEUDO) $tile, $Zn, $Zm)>;
 
 //===----------------------------------------------------------------------===//
 // SME pattern match helpers.
@@ -600,9 +612,14 @@ class sme_quarter_outer_product_i16_i32<bit u0, bit N, bit M, bit subtr, Registe
   let Constraints = "$ZAda = $_ZAda";
 }
 
-multiclass sme_quarter_outer_product_i8_i32<bit zn_u, bit zm_u, bit subtr, string mnemonic>{
+multiclass sme_quarter_outer_product_i8_i32<bit zn_u, bit zm_u, bit subtr, string mnemonic, SDPatternOperator op>{
   def _MZZ_BToS   : sme_quarter_outer_product_i8_i32<{zn_u, 0}, {zm_u, 0}, subtr,
-                                                        ZPR8Mul2_Lo, ZPR8Mul2_Hi, mnemonic>;
+                                                        ZPR8Mul2_Lo, ZPR8Mul2_Hi, mnemonic>, SMEPseudo2Instr<NAME # _MZZ_BToS, 1>;
+
+  def NAME # _MZZ_BToS # _PSEUDO : sme2_quarter_tile_outer_product_pseudo<ZPR8Mul2_Lo, ZPR8Mul2_Hi, SMEMatrixTileS>, SMEPseudo2Instr<NAME # _MZZ_BToS, 0>;
+
+  def : SME2_ZA_Tile_TwoVec_Pat<NAME # _MZZ_BToS, op, timm32_0_3, nxv16i8>;
+
   def _M2ZZ_BToS  : sme_quarter_outer_product_i8_i32<{zn_u, 1}, {zm_u, 0}, subtr,
                                                          ZZ_b_mul_r_Lo, ZPR8Mul2_Hi, mnemonic>;
   def _MZ2Z_BToS  : sme_quarter_outer_product_i8_i32<{zn_u, 0}, {zm_u, 1}, subtr,
@@ -611,9 +628,14 @@ multiclass sme_quarter_outer_product_i8_i32<bit zn_u, bit zm_u, bit subtr, strin
                                                           ZZ_b_mul_r_Lo, ZZ_b_mul_r_Hi, mnemonic>;
 }
 
-multiclass sme_quarter_outer_product_i16_i32<bit unsigned, bit subtr, string mnemonic>{
+multiclass sme_quarter_outer_product_i16_i32<bit unsigned, bit subtr, string mnemonic, SDPatternOperator op>{
   def _MZZ_HToS   : sme_quarter_outer_product_i16_i32<unsigned, 0b0, 0b0, subtr,
-                                                        ZPR16Mul2_Lo, ZPR16Mul2_Hi, mnemonic>;
+                                                        ZPR16Mul2_Lo, ZPR16Mul2_Hi, mnemonic>, SMEPseudo2Instr<NAME # _MZZ_HToS, 1>;
+
+  def NAME # _MZZ_HToS # _PSEUDO : sme2_quarter_tile_outer_product_pseudo<ZPR16Mul2_Lo, ZPR16Mul2_Hi, SMEMatrixTileS>, SMEPseudo2Instr<NAME # _MZZ_HToS, 0>;
+
+  def : SME2_ZA_Tile_TwoVec_Pat<NAME # _MZZ_HToS, op, timm32_0_3, nxv8i16>;
+
   def _M2ZZ_HToS  : sme_quarter_outer_product_i16_i32<unsigned, 0b1, 0b0, subtr,
                                                          ZZ_h_mul_r_Lo, ZPR16Mul2_Hi, mnemonic>;
   def _MZ2Z_HToS  : sme_quarter_outer_product_i16_i32<unsigned, 0b0, 0b1, subtr,
@@ -622,9 +644,14 @@ multiclass sme_quarter_outer_product_i16_i32<bit unsigned, bit subtr, string mne
                                                           ZZ_h_mul_r_Lo, ZZ_h_mul_r_Hi, mnemonic>;
 }
 
-multiclass sme_quarter_outer_product_i64<bit zn_u, bit zm_u, bit subtr, string mnemonic>{
+multiclass sme_quarter_outer_product_i64<bit zn_u, bit zm_u, bit subtr, string mnemonic, SDPatternOperator op>{
   def _MZZ_HtoD   : sme_quarter_outer_product_i64<{zn_u, 0}, {zm_u, 0}, subtr,
-                                                        ZPR16Mul2_Lo, ZPR16Mul2_Hi, mnemonic>;
+                                                        ZPR16Mul2_Lo, ZPR16Mul2_Hi, mnemonic>, SMEPseudo2Instr<NAME # _MZZ_HtoD, 1>;
+
+  def NAME # _MZZ_HtoD # _PSEUDO : sme2_quarter_tile_outer_product_pseudo<ZPR16Mul2_Lo, ZPR16Mul2_Hi, SMEMatrixTileD>, SMEPseudo2Instr<NAME # _MZZ_HtoD, 0>;
+
+  def : SME2_ZA_Tile_TwoVec_Pat<NAME # _MZZ_HtoD, op, timm32_0_3, nxv8i16>;
+
   def _M2ZZ_HtoD  : sme_quarter_outer_product_i64<{zn_u, 1}, {zm_u, 0}, subtr,
                                                          ZZ_h_mul_r_Lo, ZPR16Mul2_Hi, mnemonic>;
   def _MZ2Z_HtoD  : sme_quarter_outer_product_i64<{zn_u, 0}, {zm_u, 1}, subtr,
@@ -2231,7 +2258,7 @@ multiclass sme2_int_mla_long_array_vg2_single<string mnemonic, bits<2> op, SDPat
 multiclass sme2_fp_mla_long_array_vg4_single<string mnemonic, bits<3> op, MatrixOperand matrix_ty,
                                              RegisterOperand multi_vector_ty, ZPRRegOp vector_ty,
                                              ValueType zpr_ty, SDPatternOperator intrinsic, list<Register> uses=[]> {
-  def NAME : sme2_mla_long_array_vg24_single<0b00, 0b1, op{2-1}, op{0}, matrix_ty, multi_vector_ty, 
+  def NAME : sme2_mla_long_array_vg24_single<0b00, 0b1, op{2-1}, op{0}, matrix_ty, multi_vector_ty,
                                              vector_ty, mnemonic, "vgx4">, SMEPseudo2Instr<NAME, 1> {
     let Uses = uses;
   }
@@ -5304,7 +5331,7 @@ multiclass sme2p1_zero_matrix<string mnemonic> {
   def : SME2_Zero_Matrix_Pat<NAME # _4Z_PSEUDO, int_aarch64_sme_zero_za64_vg4x1, uimm1s4range, tileslicerange1s4>;
   def : SME2_Zero_Matrix_Pat<NAME # _VG2_4Z_PSEUDO, int_aarch64_sme_zero_za64_vg4x2, uimm0s4range, tileslicerange0s4>;
   def : SME2_Zero_Matrix_Pat<NAME # _VG4_4Z_PSEUDO, int_aarch64_sme_zero_za64_vg4x4, uimm0s4range, tileslicerange0s4>;
-} 
+}
 
 //===----------------------------------------------------------------------===//
 // SME2.1 lookup table expand two non-contiguous registers
@@ -5470,9 +5497,13 @@ class sme2_bf16_fp32_quarter_tile_outer_product<bit M, bit N, bit S, string mnem
   let Constraints = "$ZAda = $_ZAda";
 }
 
-multiclass sme2_bfmop4as_widening<bit S, string mnemonic> {
+multiclass sme2_bfmop4as_widening<bit S, string mnemonic, SDPatternOperator op> {
   // Single vectors
-  def _MZZ_S : sme2_bf16_fp32_quarter_tile_outer_product<0, 0, S, mnemonic, ZPR16Mul2_Lo, ZPR16Mul2_Hi>;
+  def _MZZ_S : sme2_bf16_fp32_quarter_tile_outer_product<0, 0, S, mnemonic, ZPR16Mul2_Lo, ZPR16Mul2_Hi>, SMEPseudo2Instr<NAME # _MZZ_S, 1>;
+
+  def NAME # _MZZ_S # _PSEUDO : sme2_quarter_tile_outer_product_pseudo<ZPR16Mul2_Lo, ZPR16Mul2_Hi, SMEMatrixTileS>, SMEPseudo2Instr<NAME # _MZZ_S, 0>;
+
+  def : SME2_ZA_Tile_TwoVec_Pat<NAME # _MZZ_S, op, timm32_0_3, nxv8bf16>;
 
   // Multiple and single vectors
   def _M2ZZ_S : sme2_bf16_fp32_quarter_tile_outer_product<0, 1, S, mnemonic, ZZ_h_mul_r_Lo, ZPR16Mul2_Hi>;
@@ -5617,9 +5648,13 @@ class sme2_fp16_quarter_tile_outer_product<bit M, bit N, bit S, string mnemonic,
   let Constraints = "$ZAda = $_ZAda";
 }
 
-multiclass sme2_fmop4as_fp16_non_widening<bit S, string mnemonic> {
+multiclass sme2_fmop4as_fp16_non_widening<bit S, string mnemonic, SDPatternOperator op> {
   // Single vectors
-  def _MZZ_H : sme2_fp16_quarter_tile_outer_product<0, 0, S, mnemonic, ZPR16Mul2_Lo, ZPR16Mul2_Hi>;
+  def _MZZ_H : sme2_fp16_quarter_tile_outer_product<0, 0, S, mnemonic, ZPR16Mul2_Lo, ZPR16Mul2_Hi>, SMEPseudo2Instr<NAME # _MZZ_H, 1>;
+
+  def NAME # _MZZ_H # _PSEUDO : sme2_quarter_tile_outer_product_pseudo<ZPR16Mul2_Lo, ZPR16Mul2_Hi, SMEMatrixTileH>, SMEPseudo2Instr<NAME # _MZZ_H, 0>;
+
+  def : SME2_ZA_Tile_TwoVec_Pat<NAME # _MZZ_H, op, timm32_0_3, nxv8f16>;
 
   // Multiple and single vectors
   def _M2ZZ_H : sme2_fp16_quarter_tile_outer_product<0, 1, S, mnemonic, ZZ_h_mul_r_Lo, ZPR16Mul2_Hi>;
@@ -5689,9 +5724,13 @@ class sme2_bf16_fp16_quarter_tile_outer_product<bit M, bit N, bit S, string mnem
   let Constraints = "$ZAda = $_ZAda";
 }
 
-multiclass sme2_bfmop4as_non_widening<bit S, string mnemonic> {
+multiclass sme2_bfmop4as_non_widening<bit S, string mnemonic, SDPatternOperator op> {
   // Single vectors
-  def _MZZ_H : sme2_bf16_fp16_quarter_tile_outer_product<0, 0, S, mnemonic, ZPR16Mul2_Lo, ZPR16Mul2_Hi>;
+  def _MZZ_H : sme2_bf16_fp16_quarter_tile_outer_product<0, 0, S, mnemonic, ZPR16Mul2_Lo, ZPR16Mul2_Hi>, SMEPseudo2Instr<NAME # _MZZ_H, 1>;
+
+  def NAME # _MZZ_H # _PSEUDO : sme2_quarter_tile_outer_product_pseudo<ZPR16Mul2_Lo, ZPR16Mul2_Hi, SMEMatrixTileH>, SMEPseudo2Instr<NAME # _MZZ_H, 0>;
+
+  def : SME2_ZA_Tile_TwoVec_Pat<NAME # _MZZ_H, op, timm32_0_3, nxv8bf16>;
 
   // Multiple and single vectors
   def _M2ZZ_H : sme2_bf16_fp16_quarter_tile_outer_product<0, 1, S, mnemonic, ZZ_h_mul_r_Lo, ZPR16Mul2_Hi>;
@@ -5726,9 +5765,13 @@ class sme2_fp32_quarter_tile_outer_product<bit M, bit N, bit S, string mnemonic,
   let Constraints = "$ZAda = $_ZAda";
 }
 
-multiclass sme2_fmop4as_fp32_non_widening<bit S, string mnemonic> {
+multiclass sme2_fmop4as_fp32_non_widening<bit S, string mnemonic, SDPatternOperator op> {
   // Single vectors
-  def _MZZ_S : sme2_fp32_quarter_tile_outer_product<0, 0, S, mnemonic, ZPR32Mul2_Lo, ZPR32Mul2_Hi>;
+  def _MZZ_S : sme2_fp32_quarter_tile_outer_product<0, 0, S, mnemonic, ZPR32Mul2_Lo, ZPR32Mul2_Hi>, SMEPseudo2Instr<NAME # _MZZ_S, 1>;
+
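+  // The pseudo mirrors the real fp32 instruction above, so its register
+  // classes are the 32-bit multiple-of-two classes.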
+  def NAME # _MZZ_S # _PSEUDO : sme2_quarter_tile_outer_product_pseudo<ZPR32Mul2_Lo, ZPR32Mul2_Hi, SMEMatrixTileS>, SMEPseudo2Instr<NAME # _MZZ_S, 0>;
+
+  def : SME2_ZA_Tile_TwoVec_Pat<NAME # _MZZ_S, op, timm32_0_3, nxv4f32>;
 
   // Multiple and single vectors
   def _M2ZZ_S : sme2_fp32_quarter_tile_outer_product<0, 1, S, mnemonic, ZZ_s_mul_r_Lo, ZPR32Mul2_Hi>;
@@ -5763,9 +5806,13 @@ class sme2_fp64_quarter_tile_outer_product<bit M, bit N, bit S, string mnemonic,
   let Constraints = "$ZAda = $_ZAda";
 }
 
-multiclass sme2_fmop4as_fp64_non_widening<bit S, string mnemonic> {
+multiclass sme2_fmop4as_fp64_non_widening<bit S, string mnemonic, SDPatternOperator op> {
   // Single vectors
-  def _MZZ_D : sme2_fp64_quarter_tile_outer_product<0, 0, S, mnemonic, ZPR64Mul2_Lo, ZPR64Mul2_Hi>;
+  def _MZZ_D : sme2_fp64_quarter_tile_outer_product<0, 0, S, mnemonic, ZPR64Mul2_Lo, ZPR64Mul2_Hi>, SMEPseudo2Instr<NAME # _MZZ_D, 1>;
+
+  def NAME # _MZZ_D # _PSEUDO : sme2_quarter_tile_outer_product_pseudo<ZPR64Mul2_Lo, ZPR64Mul2_Hi, SMEMatrixTileD>, SMEPseudo2Instr<NAME # _MZZ_D, 0>;
+
+  def : SME2_ZA_Tile_TwoVec_Pat<NAME # _MZZ_D, op, timm32_0_3, nxv2f64>;
 
   // Multiple and single vectors
   def _M2ZZ_D : sme2_fp64_quarter_tile_outer_product<0, 1, S, mnemonic, ZZ_d_mul_r_Lo, ZPR64Mul2_Hi>;
@@ -5800,9 +5847,13 @@ class sme2_fp16_fp32_quarter_tile_outer_product<bit M, bit N, bit S, string mnem
   let Constraints = "$ZAda = $_ZAda";
 }
 
-multiclass sme2_fmop4as_fp16_fp32_widening<bit S, string mnemonic> {
+multiclass sme2_fmop4as_fp16_fp32_widening<bit S, string mnemonic, SDPatternOperator op> {
   // Single vectors
-  def _MZZ_HtoS : sme2_fp16_fp32_quarter_tile_outer_product<0, 0, S, mnemonic, ZPR16Mul2_Lo, ZPR16Mul2_Hi>;
+  def _MZZ_HtoS : sme2_fp16_fp32_quarter_tile_outer_product<0, 0, S, mnemonic, ZPR16Mul2_Lo, ZPR16Mul2_Hi>, SMEPseudo2Instr<NAME # _MZZ_HtoS, 1>;
+
+  def NAME # _MZZ_HtoS # _PSEUDO : sme2_quarter_tile_outer_product_pseudo<ZPR16Mul2_Lo, ZPR16Mul2_Hi, SMEMatrixTileS>, SMEPseudo2Instr<NAME # _MZZ_HtoS, 0>;
+
+  def : SME2_ZA_Tile_TwoVec_Pat<NAME # _MZZ_HtoS, op, timm32_0_3, nxv8f16>;
 
   // Multiple and single vectors
   def _M2ZZ_HtoS : sme2_fp16_fp32_quarter_tile_outer_product<0, 1, S, mnemonic, ZZ_h_mul_r_Lo, ZPR16Mul2_Hi>;
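
(These multiclasses are instantiated from AArch64SMEInstrInfo.td in hunks not
quoted here. Based on the parameters added above and the intrinsic names
exercised by the new test file, an instantiation is expected to look roughly
like the sketch below, with the exact defm names assumed:

  defm FMOP4A : sme2_fmop4as_fp16_fp32_widening<0b0, "fmop4a", int_aarch64_sme_mop4a_wide_1x1>;
  defm FMOP4S : sme2_fmop4as_fp16_fp32_widening<0b1, "fmop4s", int_aarch64_sme_mop4s_wide_1x1>;)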
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-mop4a_1x1.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-mop4a_1x1.ll
new file mode 100644
index 0000000000000..df985675f3070
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-mop4a_1x1.ll
@@ -0,0 +1,247 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -force-streaming -verify-machineinstrs < %s | FileCheck %s
+
+target triple = "aarch64-linux"
+
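+; Note: the single-vector MOP4 forms take Zn from the even-numbered registers
+; in z0-z14 and Zm from the even-numbered registers in z16-z30, so the checks
+; below first copy the second operand into z24.
+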
+; Widening
+define void @mop4a_za32_s8(<vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm) #0 {
+; CHECK-LABEL: mop4a_za32_s8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    smop4a za1.s, z0.b, z24.b
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv16i8(i32 1, <vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm)
+  ret void
+}
+
+define void @mop4s_za32_s8(<vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm) #0 {
+; CHECK-LABEL: mop4s_za32_s8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    smop4s za1.s, z0.b, z24.b
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv16i8(i32 1, <vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm)
+  ret void
+}
+
+define void @mop4a_za32_u8(<vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm) #0 {
+; CHECK-LABEL: mop4a_za32_u8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    smop4a za1.s, z0.b, z24.b
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv16i8(i32 1, <vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm)
+  ret void
+}
+
+define void @mop4s_za32_u8(<vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm) #0 {
+; CHECK-LABEL: mop4s_za32_u8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    smop4s za1.s, z0.b, z24.b
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv16i8(i32 1, <vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm)
+  ret void
+}
+
+define void @mop4a_za32_s16(<vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) #0 {
+; CHECK-LABEL: mop4a_za32_s16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    smop4a za1.s, z0.h, z24.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv8i16(i32 1, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm)
+  ret void
+}
+
+define void @mop4s_za32_s16(<vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) #0 {
+; CHECK-LABEL: mop4s_za32_s16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    smop4s za1.s, z0.h, z24.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv8i16(i32 1, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm)
+  ret void
+}
+
+define void @mop4a_za32_u16(<vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) #0 {
+; CHECK-LABEL: mop4a_za32_u16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    smop4a za1.s, z0.h, z24.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv8i16(i32 1, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm)
+  ret void
+}
+
+define void @mop4s_za32_u16(<vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) #0 {
+; CHECK-LABEL: mop4s_za32_u16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    smop4s za1.s, z0.h, z24.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv8i16(i32 1, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm)
+  ret void
+}
+
+define void @mop4a_za32_f16(<vscale x 8 x half> %zn, <vscale x 8 x half> %zm) #0 {
+; CHECK-LABEL: mop4a_za32_f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    fmop4a za1.s, z0.h, z24.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv8f16(i32 1, <vscale x 8 x half> %zn, <vscale x 8 x half> %zm)
+  ret void
+}
+
+define void @mop4s_za32_f16(<vscale x 8 x half> %zn, <vscale x 8 x half> %zm) #0 {
+; CHECK-LABEL: mop4s_za32_f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    fmop4s za1.s, z0.h, z24.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv8f16(i32 1, <vscale x 8 x half> %zn, <vscale x 8 x half> %zm)
+  ret void
+}
+
+define void @mop4a_za32_bf16(<vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm) #0 {
+; CHECK-LABEL: mop4a_za32_bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    bfmop4a za1.s, z0.h, z24.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv8bf16(i32 1, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm)
+  ret void
+}
+
+define void @mop4s_za32_bf16(<vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm) #0 {
+; CHECK-LABEL: mop4s_za32_bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    bfmop4s za1.s, z0.h, z24.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv8bf16(i32 1, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm)
+  ret void
+}
+
+define void @mop4a_za64_s16(<vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) #0 {
+; CHECK-LABEL: mop4a_za64_s16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    smop4a za1.s, z0.h, z24.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv8i16(i32 1, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm)
+  ret void
+}
+
+define void @mop4s_za64_s16(<vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) #0 {
+; CHECK-LABEL: mop4s_za64_s16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    smop4s za1.s, z0.h, z24.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv8i16(i32 1, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm)
+  ret void
+}
+
+define void @mop4a_za64_u16(<vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) #0 {
+; CHECK-LABEL: mop4a_za64_u16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    smop4a za1.s, z0.h, z24.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.mop4a.wide.1x1.nxv8i16(i32 1, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm)
+  ret void
+}
+
+define void @mop4s_za64_u16(<vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) #0 {
+; CHECK-LABEL: mop4s_za64_u16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    smop4s za1.s, z0.h, z24.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.mop4s.wide.1x1.nxv8i16(i32 1, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm)
+  ret void
+}
+
+; Non-widening
+define void @mop4a_za16_f16(<vscale x 8 x half> %zn, <vscale x 8 x half> %zm) #0 {
+; CHECK-LABEL: mop4a_za16_f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    fmop4a za1.h, z0.h, z24.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.mop4a.1x1.nxv8f16(i32 1, <vscale x 8 x half> %zn, <vscale x 8 x half> %zm)
+  ret void
+}
+
+define void @mop4s_za16_f16(<vscale x 8 x half> %zn, <vscale x 8 x half> %zm) #0 {
+; CHECK-LABEL: mop4s_za16_f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    fmop4s za1.h, z0.h, z24.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.mop4s.1x1.nxv8f16(i32 1, <vscale x 8 x half> %zn, <vscale x 8 x half> %zm)
+  ret void
+}
+
+define void @mop4a_za32_f32(<vscale x 4 x float> %zn, <vscale x 4 x float> %zm) #0 {
+; CHECK-LABEL: mop4a_za32_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    fmop4a za1.s, z0.s, z24.s
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.mop4a.1x1.nxv4f32(i32 1, <vscale x 4 x float> %zn, <vscale x 4 x float> %zm)
+  ret void
+}
+
+define void @mop4s_za32_f32(<vscale x 4 x float> %zn, <vscale x 4 x float> %zm) #0 {
+; CHECK-LABEL: mop4s_za32_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    fmop4s za1.s, z0.s, z24.s
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.mop4s.1x1.nxv4f32(i32 1, <vscale x 4 x float> %zn, <vscale x 4 x float> %zm)
+  ret void
+}
+
+define void @mop4a_za64_f64(<vscale x 2 x double> %zn, <vscale x 2 x double> %zm) #0 {
+; CHECK-LABEL: mop4a_za64_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    fmop4a za1.d, z0.d, z24.d
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.mop4a.1x1.nxv2f64(i32 1, <vscale x 2 x double> %zn, <vscale x 2 x double> %zm)
+  ret void
+}
+
+define void @mop4s_za64_f64(<vscale x 2 x double> %zn, <vscale x 2 x double> %zm) #0 {
+; CHECK-LABEL: mop4s_za64_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    fmop4s za1.d, z0.d, z24.d
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.mop4s.1x1.nxv2f64(i32 1, <vscale x 2 x double> %zn, <vscale x 2 x double> %zm)
+  ret void
+}
+
+define void @mop4a_za16_bf16(<vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm) #0 {
+; CHECK-LABEL: mop4a_za16_bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    bfmop4a za1.h, z0.h, z24.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.mop4a.1x1.nxv8bf16(i32 1, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm)
+  ret void
+}
+
+define void @mop4s_za16_bf16(<vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm) #0 {
+; CHECK-LABEL: mop4s_za16_bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    bfmop4s za1.h, z0.h, z24.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.mop4s.1x1.nxv8bf16(i32 1, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm)
+  ret void
+}
+attributes #0 = { nounwind "target-features"="+sme-i16i64,+sme-f64f64,+sme-b16b16,+sme2p1,+bf16,+sme-f16f16,+sme-mop4" }
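+
+; As exercised above, each 1x1 MOP4 intrinsic takes the ZA tile index as an
+; immediate followed by the two single vectors. A sketch of the matching
+; definition shape in IntrinsicsAArch64.td (class name assumed, not the
+; verbatim definition from this patch):
+;
+;   class SME_QuarterTile_Single_Intrinsic
+;       : DefaultAttrsIntrinsic<[],
+;                               [llvm_i32_ty, llvm_anyvector_ty, LLVMMatchType<0>],
+;                               [ImmArg<ArgIndex<0>>]>;
+;   def int_aarch64_sme_mop4a_wide_1x1 : SME_QuarterTile_Single_Intrinsic;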

>From 228b75729a799d929a114fbebbcad223520c7d77 Mon Sep 17 00:00:00 2001
From: Virginia Cangelosi <virginia.cangelosi at arm.com>
Date: Wed, 19 Feb 2025 11:58:46 +0000
Subject: [PATCH 2/2] Add whitespace back in to simplify patch

---
 llvm/include/llvm/IR/IntrinsicsAArch64.td  | 38 +++++++++++-----------
 llvm/lib/Target/AArch64/SMEInstrFormats.td |  6 ++--
 2 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 0714602a2f09b..24052d8a45d75 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1497,7 +1497,7 @@ let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
                  LLVMSubdivide2VectorType<0>,
                  llvm_i32_ty],
                 [IntrNoMem, ImmArg<ArgIndex<3>>]>;
-
+  
   class SVE2_1VectorArgIndexed_Intrinsic
     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                 [LLVMMatchType<0>,
@@ -1512,7 +1512,7 @@ let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
                  llvm_i32_ty,
                  llvm_i32_ty],
                 [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
-
+  
   class SVE2_1VectorArg_Pred_Intrinsic
     : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                             [llvm_anyvector_ty],
@@ -1522,7 +1522,7 @@ let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
     : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                             [llvm_anyvector_ty, llvm_i32_ty],
                             [IntrNoMem, ImmArg<ArgIndex<1>>]>;
-
+  
   class SVE2_Pred_1VectorArgIndexed_Intrinsic
     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                             [LLVMMatchType<0>,
@@ -3330,11 +3330,11 @@ let TargetPrefix = "aarch64" in {
     : DefaultAttrsIntrinsic<[llvm_nxv8bf16_ty],
                             [llvm_nxv4f32_ty, llvm_nxv4f32_ty],
                             [IntrNoMem]>;
-
+  
   class SME2_CVT_WIDENING_VG2_Intrinsic
     : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
                             [LLVMSubdivide2VectorType<0>], [IntrNoMem]>;
-
+  
 
   class SME2_CVT_VG4_SINGLE_Intrinsic
     : DefaultAttrsIntrinsic<[LLVMSubdivide4VectorType<0>],
@@ -3575,7 +3575,7 @@ let TargetPrefix = "aarch64" in {
   foreach vg = ["vg1x2", "vg1x4", "vg2x1", "vg2x2", "vg2x4", "vg4x1", "vg4x2", "vg4x4"] in {
     def int_aarch64_sme_zero_za64_ # vg : DefaultAttrsIntrinsic<[], [llvm_i32_ty],  [IntrNoMem, IntrHasSideEffects]>;
   }
-
+  
   // Multi-vector signed saturating doubling multiply high
 
   def int_aarch64_sve_sqdmulh_single_vgx2 : SME2_VG2_Multi_Single_Intrinsic;
@@ -3645,7 +3645,7 @@ let TargetPrefix = "aarch64" in {
   //
   //Multi-vector floating-point convert from half-precision to deinterleaved single-precision.
   //
-
+  
   def int_aarch64_sve_fcvtl_widen_x2  : SME2_CVT_WIDENING_VG2_Intrinsic;
 
   //
@@ -3837,7 +3837,7 @@ let TargetPrefix = "aarch64" in {
   def int_aarch64_sme_luti4_lane_zt
     : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_i32_ty, llvm_nxv16i8_ty, llvm_i32_ty],
                             [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>, IntrReadMem]>;
-
+                            
   // Lookup table expand two registers
   //
   def int_aarch64_sme_luti2_lane_zt_x2
@@ -3864,7 +3864,7 @@ let TargetPrefix = "aarch64" in {
                             [llvm_i32_ty, llvm_nxv16i8_ty, llvm_nxv16i8_ty],
                             [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects]>;
 
-
+  
   //
   // Register scaling
   //
@@ -3912,7 +3912,7 @@ def int_aarch64_sve_extq : AdvSIMD_2VectorArgIndexed_Intrinsic;
 //
 // SVE2.1 - Move predicate to/from vector
 //
-def int_aarch64_sve_pmov_to_pred_lane : SVE2_1VectorArgIndexed_Pred_Intrinsic;
+def int_aarch64_sve_pmov_to_pred_lane : SVE2_1VectorArgIndexed_Pred_Intrinsic; 
 
 def int_aarch64_sve_pmov_to_pred_lane_zero : SVE2_1VectorArg_Pred_Intrinsic;
 
@@ -3954,10 +3954,10 @@ let TargetPrefix = "aarch64" in {
     : DefaultAttrsIntrinsic<[llvm_nxv16i8_ty],
                             [llvm_anyvector_ty, LLVMMatchType<0>],
                             [IntrReadMem, IntrInaccessibleMemOnly]>;
-
+  
   def int_aarch64_sve_fp8_cvtn  : SVE2_FP8_Narrow_Cvt;
   def int_aarch64_sve_fp8_cvtnb : SVE2_FP8_Narrow_Cvt;
-
+  
   def int_aarch64_sve_fp8_cvtnt
     : DefaultAttrsIntrinsic<[llvm_nxv16i8_ty],
                             [llvm_nxv16i8_ty, llvm_anyvector_ty, LLVMMatchType<0>],
@@ -3969,32 +3969,32 @@ let TargetPrefix = "aarch64" in {
                             [LLVMMatchType<0>,
                              llvm_nxv16i8_ty, llvm_nxv16i8_ty],
                             [IntrReadMem, IntrInaccessibleMemOnly]>;
-
+  
   class SVE2_FP8_FMLA_FDOT_Lane
     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                             [LLVMMatchType<0>,
                              llvm_nxv16i8_ty, llvm_nxv16i8_ty, llvm_i32_ty],
                             [IntrReadMem, IntrInaccessibleMemOnly, ImmArg<ArgIndex<3>>]>;
-
+  
   def int_aarch64_sve_fp8_fdot      : SVE2_FP8_FMLA_FDOT;
   def int_aarch64_sve_fp8_fdot_lane : SVE2_FP8_FMLA_FDOT_Lane;
 
   // Fused multiply-add
   def int_aarch64_sve_fp8_fmlalb      : SVE2_FP8_FMLA_FDOT;
   def int_aarch64_sve_fp8_fmlalb_lane : SVE2_FP8_FMLA_FDOT_Lane;
-
+  
   def int_aarch64_sve_fp8_fmlalt      : SVE2_FP8_FMLA_FDOT;
   def int_aarch64_sve_fp8_fmlalt_lane : SVE2_FP8_FMLA_FDOT_Lane;
-
+  
   def int_aarch64_sve_fp8_fmlallbb      : SVE2_FP8_FMLA_FDOT;
   def int_aarch64_sve_fp8_fmlallbb_lane : SVE2_FP8_FMLA_FDOT_Lane;
-
+  
   def int_aarch64_sve_fp8_fmlallbt      : SVE2_FP8_FMLA_FDOT;
   def int_aarch64_sve_fp8_fmlallbt_lane : SVE2_FP8_FMLA_FDOT_Lane;
-
+  
   def int_aarch64_sve_fp8_fmlalltb      : SVE2_FP8_FMLA_FDOT;
   def int_aarch64_sve_fp8_fmlalltb_lane : SVE2_FP8_FMLA_FDOT_Lane;
-
+  
   def int_aarch64_sve_fp8_fmlalltt      : SVE2_FP8_FMLA_FDOT;
   def int_aarch64_sve_fp8_fmlalltt_lane : SVE2_FP8_FMLA_FDOT_Lane;
 
diff --git a/llvm/lib/Target/AArch64/SMEInstrFormats.td b/llvm/lib/Target/AArch64/SMEInstrFormats.td
index 5a3d12e9f7b8b..755531505636d 100644
--- a/llvm/lib/Target/AArch64/SMEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SMEInstrFormats.td
@@ -260,7 +260,7 @@ class SME2_Tile_VG4_Multi_Pat<string name, SDPatternOperator intrinsic, Operand
 
 class SME2_Zero_Matrix_Pat<string name, SDPatternOperator intrinsic, Operand offset_ty, ComplexPattern tileslice>
     : Pat<(intrinsic (i32 (tileslice MatrixIndexGPR32Op8_11:$base, offset_ty:$offset))),
-    (!cast<Instruction>(name) $base, $offset)>;
+    (!cast<Instruction>(name) $base, $offset)>; 
 
 class SME2_Tile_Movaz_Pat<string name, SDPatternOperator intrinsic, ValueType out_vt, Operand tile_imm, Operand index_ty, ComplexPattern tileslice>
     : Pat<(out_vt (intrinsic tile_imm:$tile, (i32 (tileslice MatrixIndexGPR32Op12_15:$base, index_ty:$offset)))),
@@ -2258,7 +2258,7 @@ multiclass sme2_int_mla_long_array_vg2_single<string mnemonic, bits<2> op, SDPat
 multiclass sme2_fp_mla_long_array_vg4_single<string mnemonic, bits<3> op, MatrixOperand matrix_ty,
                                              RegisterOperand multi_vector_ty, ZPRRegOp vector_ty,
                                              ValueType zpr_ty, SDPatternOperator intrinsic, list<Register> uses=[]> {
-  def NAME : sme2_mla_long_array_vg24_single<0b00, 0b1, op{2-1}, op{0}, matrix_ty, multi_vector_ty,
+  def NAME : sme2_mla_long_array_vg24_single<0b00, 0b1, op{2-1}, op{0}, matrix_ty, multi_vector_ty, 
                                              vector_ty, mnemonic, "vgx4">, SMEPseudo2Instr<NAME, 1> {
     let Uses = uses;
   }
@@ -5331,7 +5331,7 @@ multiclass sme2p1_zero_matrix<string mnemonic> {
   def : SME2_Zero_Matrix_Pat<NAME # _4Z_PSEUDO, int_aarch64_sme_zero_za64_vg4x1, uimm1s4range, tileslicerange1s4>;
   def : SME2_Zero_Matrix_Pat<NAME # _VG2_4Z_PSEUDO, int_aarch64_sme_zero_za64_vg4x2, uimm0s4range, tileslicerange0s4>;
   def : SME2_Zero_Matrix_Pat<NAME # _VG4_4Z_PSEUDO, int_aarch64_sme_zero_za64_vg4x4, uimm0s4range, tileslicerange0s4>;
-}
+} 
 
 //===----------------------------------------------------------------------===//
 // SME2.1 lookup table expand two non-contiguous registers


