[Mlir-commits] [mlir] [mlir][ArmSME] Support widening outer products (PR #78975)
Cullen Rhodes
llvmlistbot at llvm.org
Tue Jan 23 07:09:05 PST 2024
https://github.com/c-rhodes updated https://github.com/llvm/llvm-project/pull/78975
>From b52e26e780405bd49accff57b1856871463422e1 Mon Sep 17 00:00:00 2001
From: Cullen Rhodes <cullen.rhodes at arm.com>
Date: Tue, 12 Dec 2023 15:03:34 +0000
Subject: [PATCH 1/3] [mlir][ArmSME] Support widening outer products
This patch introduces support for widening outer products. This enables
folding 2 or 4 'arm_sme.outerproduct' operations that are chained via the
accumulator into a single widened operation.
Changes:
- Add 'llvm.aarch64.sme.[us]mop[as].za32' intrinsics for the 2-way variants.
These map to instruction variants added in SME2, which use different
intrinsics from the SME1 widening variants (intrinsics for the latter are
already implemented).
- Mark 'arm_sme.outerproduct' as Pure. This is consistent with
'vector.outerproduct' and enables dead outer products to be removed.
- Adds the following operations:
- fmopa_wide_2way, fmops_wide_2way
- smopa_wide_2way, smops_wide_2way
- umopa_wide_2way, umops_wide_2way
- smopa_wide_4way, smops_wide_4way
- umopa_wide_4way, umops_wide_4way
- sumopa_wide_4way, sumops_wide_4way
- usmopa_wide_4way, usmops_wide_4way
- Implements conversions for the above ops to intrinsics in ArmSMEToLLVM.
- Adds an 'arm-sme-outer-product-widening' pass that folds
'arm_sme.outerproduct' operations into these widened operations.
For a detailed description of these operations see the
'arm_sme.fmopa_wide_2way' and 'arm_sme.smopa_wide_4way' descriptions.
The reason for introducing many operations rather than one is that the
signed/unsigned variants can't be distinguished by their types (e.g., ui16,
si16), since 'arith.extui' and 'arith.extsi' only support signless integers.
A single operation would have to carry this information explicitly, and an
attribute (for example) for the sign doesn't feel right when floating-point
types are also supported, where it wouldn't apply.
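To illustrate (a minimal sketch, not part of this patch): both extensions
below operate on the same signless 'i16' element type, so the signedness is
only recoverable from the extend op itself, not from the operand types:

```mlir
%s = arith.extsi %x : vector<[4]xi16> to vector<[4]xi32>  // signed widening
%u = arith.extui %x : vector<[4]xi16> to vector<[4]xi32>  // unsigned widening
```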
Furthermore, the SME FP8 extensions (FEAT_SME_F8F16, FEAT_SME_F8F32)
introduce FMOPA 2-way (FP8 to FP16) and 4-way (FP8 to FP32) variants but
no subtract variant. Whilst these are not supported in this patch, it
felt simpler to have separate ops for add/subtract given this.
---
.../Dialect/ArmSME/IR/ArmSMEIntrinsicOps.td | 4 +
.../mlir/Dialect/ArmSME/IR/ArmSMEOps.td | 645 ++++++++++++++
.../mlir/Dialect/ArmSME/Transforms/Passes.h | 3 +
.../mlir/Dialect/ArmSME/Transforms/Passes.td | 39 +
.../Dialect/ArmSME/Transforms/Transforms.h | 4 +
mlir/include/mlir/Dialect/ArmSVE/IR/ArmSVE.td | 4 +
.../Conversion/ArmSMEToLLVM/ArmSMEToLLVM.cpp | 82 +-
.../Dialect/ArmSME/Transforms/CMakeLists.txt | 2 +
.../Transforms/OuterProductWidening.cpp | 501 +++++++++++
.../ArmSMEToLLVM/arm-sme-to-llvm.mlir | 272 ++++++
mlir/test/Dialect/ArmSME/cse.mlir | 25 +-
mlir/test/Dialect/ArmSME/invalid.mlir | 66 ++
.../ArmSME/outer-product-widening.mlir | 785 ++++++++++++++++++
mlir/test/Dialect/ArmSME/roundtrip.mlir | 272 ++++++
.../ArmSME/test-outerproduct-f16f16f32.mlir | 100 +++
.../CPU/ArmSME/test-outerproduct-i8i8i32.mlir | 142 ++++
mlir/test/Target/LLVMIR/arm-sme.mlir | 12 +
mlir/test/Target/LLVMIR/arm-sve.mlir | 7 +
18 files changed, 2953 insertions(+), 12 deletions(-)
create mode 100644 mlir/lib/Dialect/ArmSME/Transforms/OuterProductWidening.cpp
create mode 100644 mlir/test/Dialect/ArmSME/outer-product-widening.mlir
create mode 100644 mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f16f16f32.mlir
create mode 100644 mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-i8i8i32.mlir
diff --git a/mlir/include/mlir/Dialect/ArmSME/IR/ArmSMEIntrinsicOps.td b/mlir/include/mlir/Dialect/ArmSME/IR/ArmSMEIntrinsicOps.td
index d85ef963ae5dc46..f051e03efbcda64 100644
--- a/mlir/include/mlir/Dialect/ArmSME/IR/ArmSMEIntrinsicOps.td
+++ b/mlir/include/mlir/Dialect/ArmSME/IR/ArmSMEIntrinsicOps.td
@@ -105,6 +105,10 @@ def LLVM_aarch64_sme_sumopa_wide : ArmSME_IntrMopOverloadedOp<"sumopa.wide">;
def LLVM_aarch64_sme_sumops_wide : ArmSME_IntrMopOverloadedOp<"sumops.wide">;
def LLVM_aarch64_sme_usmopa_wide : ArmSME_IntrMopOverloadedOp<"usmopa.wide">;
def LLVM_aarch64_sme_usmops_wide : ArmSME_IntrMopOverloadedOp<"usmops.wide">;
+def LLVM_aarch64_sme_smopa_za32 : ArmSME_IntrMopOverloadedOp<"smopa.za32">;
+def LLVM_aarch64_sme_umopa_za32 : ArmSME_IntrMopOverloadedOp<"umopa.za32">;
+def LLVM_aarch64_sme_smops_za32 : ArmSME_IntrMopOverloadedOp<"smops.za32">;
+def LLVM_aarch64_sme_umops_za32 : ArmSME_IntrMopOverloadedOp<"umops.za32">;
class ArmSME_IntrLoadStoreOp<string mnemonic>
: ArmSME_IntrOp<mnemonic,
diff --git a/mlir/include/mlir/Dialect/ArmSME/IR/ArmSMEOps.td b/mlir/include/mlir/Dialect/ArmSME/IR/ArmSMEOps.td
index 8a34ad7e52012fe..1365dd38c115ef2 100644
--- a/mlir/include/mlir/Dialect/ArmSME/IR/ArmSMEOps.td
+++ b/mlir/include/mlir/Dialect/ArmSME/IR/ArmSMEOps.td
@@ -736,6 +736,7 @@ class OuterProductResultTileTypeConstraint<string operand> :
def OuterProductOp :
ArmSME_Op<"outerproduct", [
+ Pure,
ArmSMETileOpInterface,
AttrSizedOperandSegments,
AllTypesMatch<["lhs", "rhs"]>,
@@ -814,6 +815,650 @@ let arguments = (ins
}];
}
+class OuterProductWideBase<string mnemonic,
+ list<Type> allowedInputVectorTypes,
+ list<Type> allowedResultVectorTypes,
+ int numOuterProducts> :
+ ArmSME_Op<mnemonic, [
+ Pure,
+ ArmSMETileOpInterface,
+ AttrSizedOperandSegments,
+ AllTypesMatch<["lhs", "rhs"]>,
+ HasMatchingMaskTypeConstraint<"lhs", "lhsMask">,
+ HasMatchingMaskTypeConstraint<"rhs", "rhsMask">,
+ PredOpTrait<
+ "both `lhsMask` and `rhsMask` should be provided or neither",
+ CPred<"bool(getLhsMask()) == bool(getRhsMask())">
+ >,
+ OptionalTypesMatchWith<"result and acc have the same type",
+ "result", "acc", "::llvm::cast<Type>($_self)">,
+  // This trait ensures the element size of the result tile equals the input
+  // element size scaled by the number of fused outer products (e.g., 4-way).
+ PredOpTrait<
+ "tile element size equals lhs element size * " # numOuterProducts,
+ CPred<"getTileType().getElementTypeBitWidth() == "
+ "(getLhsType().getElementTypeBitWidth() * " # numOuterProducts # ")">
+ >,
+ ]> {
+
+ let arguments = (ins
+ AnyTypeOf<allowedInputVectorTypes>:$lhs, AnyVector:$rhs,
+ Optional<AnyVector>:$lhsMask, Optional<AnyVector>:$rhsMask,
+ Optional<AnyVector>:$acc);
+ let results = (outs AnyTypeOf<allowedResultVectorTypes>:$result);
+
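+  // Example of the resulting custom assembly (accumulator and masks are
+  // optional; op and operand names are illustrative):
+  //   %0 = arm_sme.smopa_wide_2way %lhs, %rhs acc(%acc) masks(%m0, %m1)
+  //          : vector<[8]xi16>, vector<[8]xi16> into vector<[4]x[4]xi32>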
+ let assemblyFormat = [{
+ $lhs `,` $rhs
+ oilist(
+ `acc` `` `(` $acc `)`
+ | `masks` `` `(` $lhsMask `,` $rhsMask `)`
+ ) attr-dict `:` type($lhs) `,` type($rhs) `into` type($result)
+ }];
+
+ let extraClassDeclaration = [{
+ VectorType getLhsType() { return llvm::cast<VectorType>(getLhs().getType()); }
+ VectorType getRhsType() { return llvm::cast<VectorType>(getRhs().getType()); }
+ VectorType getResultType() { return llvm::cast<VectorType>(getResult().getType()); }
+ std::optional<arm_sme::ArmSMETileType> getAllocatedTileType() {
+ // The outerproduct op allocates a new tile if no accumulator is passed.
+ if (!getAcc())
+ return arm_sme::getSMETileType(getResultType());
+ return std::nullopt;
+ }
+ VectorType getTileType() {
+ return getResultType();
+ }
+ }];
+}
+
+class OuterProductWide2Way<string mnemonic,
+ list<Type> allowedInputVectorTypes,
+ list<Type> allowedResultVectorTypes>
+ : OuterProductWideBase<mnemonic, allowedInputVectorTypes,
+ allowedResultVectorTypes, /*numOuterProducts=*/2>;
+
+class OuterProductWide4Way<string mnemonic,
+ list<Type> allowedInputVectorTypes,
+ list<Type> allowedResultVectorTypes>
+ : OuterProductWideBase<mnemonic, allowedInputVectorTypes,
+ allowedResultVectorTypes, /*numOuterProducts=*/4>;
+
+def FMopaWide2WayOp
+ : OuterProductWide2Way<"fmopa_wide_2way",
+ [ScalableVectorOfRankAndLengthAndType<[1], [8], [F16, BF16]>],
+ [nxnxv4f32]> {
+ let summary = "Floating-point sum of 2 outer products and accumulate";
+
+ let description = [{
+    This operation represents a sum of 2 widened outer products. It takes two
+    1-D scalable vectors as input and returns a 2-D scalable vector (ZA tile)
+    as the result.
+
+ For example (fp16 to fp32):
+
+ ```mlir
+ %result = arm_sme.fmopa_wide_2way %lhs, %rhs :
+ vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
+ ```
+
+ The `lhs` encodes a matrix of shape SVLSx2 and the `rhs` a matrix of
+ 2xSVLS, where SVLS (spec [1], section B2.1) is the number of 32-bit
+ elements in a vector of SVL bits. To illustrate, below is a breakdown of
+ this operation for SVL=128 (i.e., vscale=1):
+
+ ```
+ LHS RHS
+ [A0 A1 A2 A3 A4 A5 A6 A7] [B0 B1 B2 B3 B4 B5 B6 B7]
+
+ ----------------------------------------------------------------------------
+
+ implicit layout
+
+ [A0 A1] |
+ [A2 A3] | [B0 B2 B4 B6]
+ [A4 A5] | [B1 B3 B5 B7]
+ [A6 A7] |
+
+ ----------------------------------------------------------------------------
+
+ 2 outer products
+
+ Acol0 ⊗ Brow0 | Acol1 ⊗ Brow1
+ ------------- | -------------
+ |
+ [B0 B2 B4 B6] | [B1 B3 B5 B7]
+ |
+ [A0 [A0B0 A0B2 A0B4 A0B6] | [A1 [A1B1 A1B3 A1B5 A1B7]
+ A2 [A2B0 A2B2 A2B4 A2B6] | A3 [A3B1 A3B3 A3B5 A3B7]
+ A4 [A4B0 A4B2 A4B4 A4B6] | A5 [A5B1 A5B3 A5B5 A5B7]
+ A6] [A6B0 A6B2 A6B4 A6B6] | A7] [A7B1 A7B3 A7B5 A7B7]
+ |
+
+ ----------------------------------------------------------------------------
+
+ sum of 2 outer products
+
+ Acol0 ⊗ Brow0 + Acol1 ⊗ Brow1
+
+ [A0B0 + A1B1 A0B2 + A1B3 A0B4 + A1B5 A0B6 + A1B7]
+ [A2B0 + A3B1 A2B2 + A3B3 A2B4 + A3B5 A2B6 + A3B7]
+ [A4B0 + A5B1 A4B2 + A5B3 A4B4 + A5B5 A4B6 + A5B7]
+ [A6B0 + A7B1 A6B2 + A7B3 A6B4 + A7B5 A6B6 + A7B7]
+
+ ----------------------------------------------------------------------------
+ ```
+
+ This operation enables the folding of 2 outer products chained via the
+ accumulator into a single outer product.
+
+ For example:
+
+ ```mlir
+ %a0_ext = arith.extf %a0 : vector<[4]xf16> to vector<[4]xf32>
+ %b0_ext = arith.extf %b0 : vector<[4]xf16> to vector<[4]xf32>
+ %a1_ext = arith.extf %a1 : vector<[4]xf16> to vector<[4]xf32>
+ %b1_ext = arith.extf %b1 : vector<[4]xf16> to vector<[4]xf32>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext : vector<[4]xf32>, vector<[4]xf32>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext acc(%0) : vector<[4]xf32>, vector<[4]xf32>
+ ```
+
+ The 2 outer products in the example above can be fused into a single outer
+ product as follows:
+
+ ```mlir
+ %undef = llvm.mlir.undef : vector<[8]xf16>
+    %a0_ins = vector.scalable.insert %a0, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
+    %a1_ins = vector.scalable.insert %a1, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
+    %a_packed = "arm_sve.intr.zip1"(%a0_ins, %a1_ins) : (vector<[8]xf16>, vector<[8]xf16>) -> vector<[8]xf16>
+    %b0_ins = vector.scalable.insert %b0, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
+    %b1_ins = vector.scalable.insert %b1, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
+ %b_packed = "arm_sve.intr.zip1"(%b0_ins, %b1_ins) : (vector<[8]xf16>, vector<[8]xf16>) -> vector<[8]xf16>
+ %0 = arm_sme.fmopa_wide_2way %a_packed, %b_packed : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
+ ```
+
+ This is implemented in the `-arm-sme-outer-product-widening` pass.
+
+ Example: FP16 to FP32
+ ```mlir
+    %result = arm_sme.fmopa_wide_2way %lhs, %rhs : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
+ ```
+
+ Example: BF16 to FP32
+ ```mlir
+    %result = arm_sme.fmopa_wide_2way %lhs, %rhs : vector<[8]xbf16>, vector<[8]xbf16> into vector<[4]x[4]xf32>
+ ```
+
+ | Spec | Features |
+ | ---- | -------- |
+ | [FMOPA (widening, 2-way, FP16 to FP32)](https://developer.arm.com/documentation/ddi0602/2023-09/SME-Instructions/FMOPA--widening--2-way--FP16-to-FP32---Half-precision-floating-point-sum-of-outer-products-and-accumulate-) | +sme |
+ | [BFMOPA (widening, 2-way, BF16 to FP32)](https://developer.arm.com/documentation/ddi0602/2023-09/SME-Instructions/BFMOPA--widening---BFloat16-sum-of-outer-products-and-accumulate-) | +sme |
+
+ [1] https://developer.arm.com/documentation/ddi0616
+ }];
+}
+
+// TODO: support:
+// - FMOPA 2-way FP8 to FP16
+// - FMOPA 4-way FP16 to FP32
+// once intrinsic support lands in the backend.
+
+def FMopsWide2WayOp
+ : OuterProductWide2Way<"fmops_wide_2way",
+ [ScalableVectorOfRankAndLengthAndType<[1], [8], [F16, BF16]>],
+ [nxnxv4f32]> {
+ let summary = "Floating-point sum of 2 outer products and subtract";
+ let description = [{
+ Equivalent to `fmopa_wide_2way` but outer products are subtracted from
+ destination `result`.
+
+ Example: FP16 to FP32
+ ```mlir
+    %result = arm_sme.fmops_wide_2way %lhs, %rhs : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
+ ```
+
+ Example: BF16 to FP32
+ ```mlir
+    %result = arm_sme.fmops_wide_2way %lhs, %rhs : vector<[8]xbf16>, vector<[8]xbf16> into vector<[4]x[4]xf32>
+    ```
+
+    Refer to
+    [fmopa_wide_2way](#arm_smefmopa_wide_2way-arm_smefmopa_wide_2wayop) for a
+    detailed description of 2-way outer products.
+
+    | Spec | Features |
+    | ---- | -------- |
+    | [FMOPS (widening, 2-way, FP16 to FP32)](https://developer.arm.com/documentation/ddi0602/2023-09/SME-Instructions/FMOPS--widening---Half-precision-floating-point-sum-of-outer-products-and-subtract-) | +sme |
+    | [BFMOPS (widening, 2-way, BF16 to FP32)](https://developer.arm.com/documentation/ddi0602/2023-09/SME-Instructions/BMOPS--Bitwise-exclusive-NOR-population-count-outer-product-and-subtract-) | +sme |
+ }];
+}
+
+def SMopaWide2WayOp
+ : OuterProductWide2Way<"smopa_wide_2way",
+ [ScalableVectorOfRankAndLengthAndType<[1], [8], [I16]>],
+ [nxnxv4i32]> {
+ let summary = "Signed integer sum of 2 outer products and accumulate";
+ let description = [{
+ Example:
+ ```mlir
+    %result = arm_sme.smopa_wide_2way %lhs, %rhs : vector<[8]xi16>, vector<[8]xi16> into vector<[4]x[4]xi32>
+    ```
+
+    Refer to
+    [fmopa_wide_2way](#arm_smefmopa_wide_2way-arm_smefmopa_wide_2wayop) for a
+    detailed description of 2-way outer products.
+
+    | Spec | Features |
+    | ---- | -------- |
+    | [SMOPA (2-way)](https://developer.arm.com/documentation/ddi0602/2023-09/SME-Instructions/SMOPA--2-way---Signed-integer-sum-of-outer-products-and-accumulate-) | +sme2 |
+ }];
+}
+
+def SMopsWide2WayOp
+ : OuterProductWide2Way<"smops_wide_2way",
+ [ScalableVectorOfRankAndLengthAndType<[1], [8], [I16]>],
+ [nxnxv4i32]> {
+ let summary = "Signed integer sum of 2 outer products and subtract";
+ let description = [{
+ Example:
+ ```mlir
+    %result = arm_sme.smops_wide_2way %lhs, %rhs : vector<[8]xi16>, vector<[8]xi16> into vector<[4]x[4]xi32>
+    ```
+
+    Refer to
+    [fmopa_wide_2way](#arm_smefmopa_wide_2way-arm_smefmopa_wide_2wayop) for a
+    detailed description of 2-way outer products.
+
+    | Spec | Features |
+    | ---- | -------- |
+    | [SMOPS (2-way)](https://developer.arm.com/documentation/ddi0602/2023-09/SME-Instructions/SMOPS--2-way---Signed-integer-sum-of-outer-products-and-subtract-) | +sme2 |
+ }];
+}
+
+def UMopaWide2WayOp
+ : OuterProductWide2Way<"umopa_wide_2way",
+ [ScalableVectorOfRankAndLengthAndType<[1], [8], [I16]>],
+ [nxnxv4i32]> {
+  let summary = "Unsigned integer sum of 2 outer products and accumulate";
+ let description = [{
+ Example:
+ ```mlir
+    %result = arm_sme.umopa_wide_2way %lhs, %rhs : vector<[8]xi16>, vector<[8]xi16> into vector<[4]x[4]xi32>
+    ```
+
+    Refer to
+    [fmopa_wide_2way](#arm_smefmopa_wide_2way-arm_smefmopa_wide_2wayop) for a
+    detailed description of 2-way outer products.
+
+    | Spec | Features |
+    | ---- | -------- |
+    | [UMOPA (2-way)](https://developer.arm.com/documentation/ddi0602/2023-09/SME-Instructions/UMOPA--2-way---Unsigned-integer-sum-of-outer-products-and-accumulate-) | +sme2 |
+ }];
+}
+
+def UMopsWide2WayOp
+ : OuterProductWide2Way<"umops_wide_2way",
+ [ScalableVectorOfRankAndLengthAndType<[1], [8], [I16]>],
+ [nxnxv4i32]> {
+  let summary = "Unsigned integer sum of 2 outer products and subtract";
+ let description = [{
+ Example:
+ ```mlir
+    %result = arm_sme.umops_wide_2way %lhs, %rhs : vector<[8]xi16>, vector<[8]xi16> into vector<[4]x[4]xi32>
+    ```
+
+    Refer to
+    [fmopa_wide_2way](#arm_smefmopa_wide_2way-arm_smefmopa_wide_2wayop) for a
+    detailed description of 2-way outer products.
+
+    | Spec | Features |
+    | ---- | -------- |
+    | [UMOPS (2-way)](https://developer.arm.com/documentation/ddi0602/2023-09/SME-Instructions/UMOPS--2-way---Unsigned-integer-sum-of-outer-products-and-subtract-) | +sme2 |
+ }];
+}
+
+def SMopaWide4WayOp
+ : OuterProductWide4Way<"smopa_wide_4way",
+ [ScalableVectorOfRankAndLengthAndType<[1], [16], [I8]>,
+ ScalableVectorOfRankAndLengthAndType<[1], [8], [I16]>],
+ [nxnxv4i32, nxnxv2i64]> {
+ let summary = "Signed integer sum of 4 outer products and accumulate";
+ let description = [{
+    This operation represents a sum of 4 widened outer products. It takes two
+    1-D scalable vectors as input and returns a 2-D scalable vector (ZA tile)
+    as the result.
+
+ For example (i8 to i32):
+
+ ```mlir
+    %result = arm_sme.smopa_wide_4way %lhs, %rhs :
+ vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ ```
+
+ The `lhs` encodes a matrix of shape SVLSx4 and the `rhs` a matrix of
+ 4xSVLS, where SVLS (spec [1], section B2.1) is the number of 32-bit
+ elements in a vector of SVL bits. To illustrate, below is a breakdown of
+ this operation for SVL=128 (i.e., vscale=1):
+
+ ```
+ LHS
+    [A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 A10 A11 A12 A13 A14 A15]
+
+ RHS
+ [B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 B10 B11 B12 B13 B14 B15]
+
+ ----------------------------------------------------------------------------
+
+ implicit layout
+
+ [A0 A1 A2 A3] | [B0 B4 B8 B12]
+ [A4 A5 A6 A7] | [B1 B5 B9 B13]
+ [A8 A9 A10 A11] | [B2 B6 B10 B14]
+ [A12 A13 A14 A15] | [B3 B7 B11 B15]
+
+ ----------------------------------------------------------------------------
+
+ 4 outer products
+
+ Acol0 ⊗ Brow0 | Acol1 ⊗ Brow1
+ ------------- | -------------
+ |
+ [B0 B4 B8 B12] | [B1 B5 B9 B13]
+ |
+ [A0 [ A0B0 A0B4 A0B8 A0B12] | [A1 [ A1B1 A1B5 A1B9 A1B13]
+ A4 [ A4B0 A4B4 A4B8 A4B12] | A5 [ A5B1 A5B5 A5B9 A5B13]
+ A8 [ A8B0 A8B4 A8B8 A8B12] | A9 [ A9B1 A9B5 A9B9 A9B13]
+ A12] [A12B0 A12B4 A12B8 A12B12] | A13] [A13B1 A13B5 A13B9 A13B13]
+ |
+ Acol2 ⊗ Brow2 | Acol3 ⊗ Brow3
+ ------------- | -------------
+ |
+ [B2, B6, B10, B14] | [B3 B7 B11 B15]
+ |
+ [A2 [ A2B2 A2B6 A2B10 A2B14] | [A3 [ A3B3 A3B7 A3B11 A3B15]
+ A6 [ A6B2 A6B6 A6B10 A6B14] | A7 [ A7B3 A7B7 A7B11 A7B15]
+ A10 [A10B2 A10B6 A10B10 A10B14] | A11 [A11B3 A11B7 A11B11 A11B15]
+ A14] [A14B2 A14B6 A14B10 A14B14] | A15] [A15B3 A15B7 A15B11 A15B15]
+ |
+
+ ----------------------------------------------------------------------------
+
+ sum of 4 outer products
+
+ Acol0 ⊗ Brow0 + Acol1 ⊗ Brow1 + Acol2 ⊗ Brow2 + Acol3 ⊗ Brow3
+
+ [ A0B0 + A1B1 + A2B2 + A3B3 ... ... A0B12 + A1B13 + A2B14 + A3B15]
+ [ A4B0 + A5B1 + A6B2 + A7B3 ... ... A4B12 + A5B13 + A6B14 + A7B15]
+ [ A8B0 + A9B1 + A10B2 + A11B3 ... ... A8B12 + A9B13 + A10B14 + A11B15]
+ [A12B0 + A13B1 + A14B2 + A15B3 ... ... A12B12 + A13B13 + A14B14 + A15B15]
+
+ ----------------------------------------------------------------------------
+ ```
+
+ This operation enables the folding of 4 outer products chained via the
+ accumulator into a single outer product.
+
+ For example:
+
+ ```mlir
+ %a0_ext = arith.extsi %a0 : vector<[4]xi8> to vector<[4]xi32>
+ %b0_ext = arith.extsi %b0 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a1_ext = arith.extsi %a1 : vector<[4]xi8> to vector<[4]xi32>
+ %b1_ext = arith.extsi %b1 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a2_ext = arith.extsi %a2 : vector<[4]xi8> to vector<[4]xi32>
+ %b2_ext = arith.extsi %b2 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a3_ext = arith.extsi %a3 : vector<[4]xi8> to vector<[4]xi32>
+ %b3_ext = arith.extsi %b3 : vector<[4]xi8> to vector<[4]xi32>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext : vector<[4]xi32>, vector<[4]xi32>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext acc(%0) : vector<[4]xi32>, vector<[4]xi32>
+ %2 = arm_sme.outerproduct %a2_ext, %b2_ext acc(%1) : vector<[4]xi32>, vector<[4]xi32>
+ %3 = arm_sme.outerproduct %a3_ext, %b3_ext acc(%2) : vector<[4]xi32>, vector<[4]xi32>
+ ```
+
+ The 4 outer products in the example above can be fused into a single outer
+ product as follows:
+
+ ```mlir
+    %undef = llvm.mlir.undef : vector<[16]xi8>
+    %a0_ins = vector.scalable.insert %a0, %undef[0] : vector<[4]xi8> into vector<[16]xi8>
+    %a1_ins = vector.scalable.insert %a1, %undef[0] : vector<[4]xi8> into vector<[16]xi8>
+    %a2_ins = vector.scalable.insert %a2, %undef[0] : vector<[4]xi8> into vector<[16]xi8>
+    %a3_ins = vector.scalable.insert %a3, %undef[0] : vector<[4]xi8> into vector<[16]xi8>
+    %lhs0 = "arm_sve.intr.zip1"(%a0_ins, %a2_ins) : (vector<[16]xi8>, vector<[16]xi8>) -> vector<[16]xi8>
+    %lhs1 = "arm_sve.intr.zip1"(%a1_ins, %a3_ins) : (vector<[16]xi8>, vector<[16]xi8>) -> vector<[16]xi8>
+    %lhs = "arm_sve.intr.zip1"(%lhs0, %lhs1) : (vector<[16]xi8>, vector<[16]xi8>) -> vector<[16]xi8>
+
+    %b0_ins = vector.scalable.insert %b0, %undef[0] : vector<[4]xi8> into vector<[16]xi8>
+    %b1_ins = vector.scalable.insert %b1, %undef[0] : vector<[4]xi8> into vector<[16]xi8>
+    %b2_ins = vector.scalable.insert %b2, %undef[0] : vector<[4]xi8> into vector<[16]xi8>
+    %b3_ins = vector.scalable.insert %b3, %undef[0] : vector<[4]xi8> into vector<[16]xi8>
+    %rhs0 = "arm_sve.intr.zip1"(%b0_ins, %b2_ins) : (vector<[16]xi8>, vector<[16]xi8>) -> vector<[16]xi8>
+    %rhs1 = "arm_sve.intr.zip1"(%b1_ins, %b3_ins) : (vector<[16]xi8>, vector<[16]xi8>) -> vector<[16]xi8>
+    %rhs = "arm_sve.intr.zip1"(%rhs0, %rhs1) : (vector<[16]xi8>, vector<[16]xi8>) -> vector<[16]xi8>
+
+    %0 = arm_sme.smopa_wide_4way %lhs, %rhs : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ ```
+
+ This is implemented in the `-arm-sme-outer-product-widening` pass.
+
+ Example: I8 to I32
+ ```mlir
+    %result = arm_sme.smopa_wide_4way %lhs, %rhs : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ ```
+
+ Example: I16 to I64
+ ```mlir
+    %result = arm_sme.smopa_wide_4way %lhs, %rhs : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+    ```
+
+    | Spec | Features |
+    | ---- | -------- |
+    | [SMOPA (4-way)](https://developer.arm.com/documentation/ddi0602/2023-09/SME-Instructions/SMOPA--4-way---Signed-integer-sum-of-outer-products-and-accumulate-) | +sme (32-bit), +sme-i16i64 (64-bit)|
+
+    [1] https://developer.arm.com/documentation/ddi0616
+ }];
+}
+
+def SMopsWide4WayOp
+ : OuterProductWide4Way<"smops_wide_4way",
+ [ScalableVectorOfRankAndLengthAndType<[1], [16], [I8]>,
+ ScalableVectorOfRankAndLengthAndType<[1], [8], [I16]>],
+ [nxnxv4i32, nxnxv2i64]> {
+ let summary = "Signed integer sum of 4 outer products and subtract";
+ let description = [{
+ Equivalent to `smopa_wide_4way` but outer products are subtracted from
+ destination `result`.
+
+ Example: I8 to I32
+ ```mlir
+    %result = arm_sme.smops_wide_4way %lhs, %rhs : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ ```
+
+ Example: I16 to I64
+ ```mlir
+    %result = arm_sme.smops_wide_4way %lhs, %rhs : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+    ```
+
+    Refer to
+    [smopa_wide_4way](#arm_smesmopa_wide_4way-arm_smesmopa_wide_4wayop) for a
+    detailed description of 4-way outer products.
+
+    | Spec | Features |
+    | ---- | -------- |
+    | [SMOPS (4-way)](https://developer.arm.com/documentation/ddi0602/2023-09/SME-Instructions/SMOPS--4-way---Signed-integer-sum-of-outer-products-and-subtract-) | +sme (32-bit), +sme-i16i64 (64-bit)|
+ }];
+}
+
+def UMopaWide4WayOp
+ : OuterProductWide4Way<"umopa_wide_4way",
+ [ScalableVectorOfRankAndLengthAndType<[1], [16], [I8]>,
+ ScalableVectorOfRankAndLengthAndType<[1], [8], [I16]>],
+ [nxnxv4i32, nxnxv2i64]> {
+ let summary = "Unsigned integer sum of 4 outer products and accumulate";
+ let description = [{
+ Example: I8 to I32
+ ```mlir
+    %result = arm_sme.umopa_wide_4way %lhs, %rhs : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ ```
+
+ Example: I16 to I64
+ ```mlir
+    %result = arm_sme.umopa_wide_4way %lhs, %rhs : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+    ```
+
+    Refer to
+    [smopa_wide_4way](#arm_smesmopa_wide_4way-arm_smesmopa_wide_4wayop) for a
+    detailed description of 4-way outer products.
+
+    | Spec | Features |
+    | ---- | -------- |
+    | [UMOPA (4-way)](https://developer.arm.com/documentation/ddi0602/2023-09/SME-Instructions/UMOPA--4-way---Unsigned-integer-sum-of-outer-products-and-accumulate-) | +sme (32-bit), +sme-i16i64 (64-bit)|
+ }];
+}
+
+def UMopsWide4WayOp
+ : OuterProductWide4Way<"umops_wide_4way",
+ [ScalableVectorOfRankAndLengthAndType<[1], [16], [I8]>,
+ ScalableVectorOfRankAndLengthAndType<[1], [8], [I16]>],
+ [nxnxv4i32, nxnxv2i64]> {
+ let summary = "Unsigned integer sum of 4 outer products and subtract";
+ let description = [{
+ Example: I8 to I32
+ ```mlir
+    %result = arm_sme.umops_wide_4way %lhs, %rhs : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ ```
+
+ Example: I16 to I64
+ ```mlir
+    %result = arm_sme.umops_wide_4way %lhs, %rhs : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+    ```
+
+    Refer to
+    [smopa_wide_4way](#arm_smesmopa_wide_4way-arm_smesmopa_wide_4wayop) for a
+    detailed description of 4-way outer products.
+
+    | Spec | Features |
+    | ---- | -------- |
+    | [UMOPS (4-way)](https://developer.arm.com/documentation/ddi0602/2023-09/SME-Instructions/UMOPS--4-way---Unsigned-integer-sum-of-outer-products-and-subtract-) | +sme (32-bit), +sme-i16i64 (64-bit)|
+ }];
+}
+
+def SuMopaWide4WayOp
+ : OuterProductWide4Way<"sumopa_wide_4way",
+ [ScalableVectorOfRankAndLengthAndType<[1], [16], [I8]>,
+ ScalableVectorOfRankAndLengthAndType<[1], [8], [I16]>],
+ [nxnxv4i32, nxnxv2i64]> {
+ let summary = "Signed by unsigned integer sum of 4 outer products and accumulate";
+ let description = [{
+ Example: I8 to I32
+ ```mlir
+    %result = arm_sme.sumopa_wide_4way %lhs, %rhs : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ ```
+
+ Example: I16 to I64
+ ```mlir
+    %result = arm_sme.sumopa_wide_4way %lhs, %rhs : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+    ```
+
+    Refer to
+    [smopa_wide_4way](#arm_smesmopa_wide_4way-arm_smesmopa_wide_4wayop) for a
+    detailed description of 4-way outer products.
+
+    | Spec | Features |
+    | ---- | -------- |
+    | [SUMOPA (4-way)](https://developer.arm.com/documentation/ddi0602/2023-09/SME-Instructions/SUMOPA--Signed-by-unsigned-integer-sum-of-outer-products-and-accumulate-) | +sme (32-bit), +sme-i16i64 (64-bit)|
+ }];
+}
+
+def SuMopsWide4WayOp
+ : OuterProductWide4Way<"sumops_wide_4way",
+ [ScalableVectorOfRankAndLengthAndType<[1], [16], [I8]>,
+ ScalableVectorOfRankAndLengthAndType<[1], [8], [I16]>],
+ [nxnxv4i32, nxnxv2i64]> {
+ let summary = "Signed by unsigned integer sum of 4 outer products and subtract";
+ let description = [{
+ Example: I8 to I32
+ ```mlir
+    %result = arm_sme.sumops_wide_4way %lhs, %rhs : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ ```
+
+ Example: I16 to I64
+ ```mlir
+    %result = arm_sme.sumops_wide_4way %lhs, %rhs : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+    ```
+
+    Refer to
+    [smopa_wide_4way](#arm_smesmopa_wide_4way-arm_smesmopa_wide_4wayop) for a
+    detailed description of 4-way outer products.
+
+    | Spec | Features |
+    | ---- | -------- |
+    | [SUMOPS (4-way)](https://developer.arm.com/documentation/ddi0602/2023-09/SME-Instructions/SUMOPS--Signed-by-unsigned-integer-sum-of-outer-products-and-subtract-) | +sme (32-bit), +sme-i16i64 (64-bit)|
+ }];
+}
+
+def UsMopaWide4WayOp
+ : OuterProductWide4Way<"usmopa_wide_4way",
+ [ScalableVectorOfRankAndLengthAndType<[1], [16], [I8]>,
+ ScalableVectorOfRankAndLengthAndType<[1], [8], [I16]>],
+ [nxnxv4i32, nxnxv2i64]> {
+ let summary = "Unsigned by signed integer sum of 4 outer products and accumulate";
+ let description = [{
+ Example: I8 to I32
+ ```mlir
+    %result = arm_sme.usmopa_wide_4way %lhs, %rhs : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ ```
+
+ Example: I16 to I64
+ ```mlir
+    %result = arm_sme.usmopa_wide_4way %lhs, %rhs : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+    ```
+
+    Refer to
+    [smopa_wide_4way](#arm_smesmopa_wide_4way-arm_smesmopa_wide_4wayop) for a
+    detailed description of 4-way outer products.
+
+    | Spec | Features |
+    | ---- | -------- |
+    | [USMOPA (4-way)](https://developer.arm.com/documentation/ddi0602/2023-09/SME-Instructions/USMOPA--Unsigned-by-signed-integer-sum-of-outer-products-and-accumulate-) | +sme (32-bit), +sme-i16i64 (64-bit)|
+ }];
+}
+
+def UsMopsWide4WayOp
+ : OuterProductWide4Way<"usmops_wide_4way",
+ [ScalableVectorOfRankAndLengthAndType<[1], [16], [I8]>,
+ ScalableVectorOfRankAndLengthAndType<[1], [8], [I16]>],
+ [nxnxv4i32, nxnxv2i64]> {
+ let summary = "Unsigned by signed integer sum of 4 outer products and subtract";
+ let description = [{
+ Example: I8 to I32
+ ```mlir
+    %result = arm_sme.usmops_wide_4way %lhs, %rhs : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ ```
+
+ Example: I16 to I64
+ ```mlir
+    %result = arm_sme.usmops_wide_4way %lhs, %rhs : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+    ```
+
+    Refer to
+    [smopa_wide_4way](#arm_smesmopa_wide_4way-arm_smesmopa_wide_4wayop) for a
+    detailed description of 4-way outer products.
+
+    | Spec | Features |
+    | ---- | -------- |
+    | [USMOPS (4-way)](https://developer.arm.com/documentation/ddi0602/2023-09/SME-Instructions/USMOPS--Unsigned-by-signed-integer-sum-of-outer-products-and-subtract-) | +sme (32-bit), +sme-i16i64 (64-bit)|
+ }];
+}
+
def StreamingVLOp : ArmSME_Op<"streaming_vl", [Pure]>
{
let summary = "Query the streaming vector length";
diff --git a/mlir/include/mlir/Dialect/ArmSME/Transforms/Passes.h b/mlir/include/mlir/Dialect/ArmSME/Transforms/Passes.h
index aef2959265a7cd7..d3e4fccd628489f 100644
--- a/mlir/include/mlir/Dialect/ArmSME/Transforms/Passes.h
+++ b/mlir/include/mlir/Dialect/ArmSME/Transforms/Passes.h
@@ -32,6 +32,9 @@ std::unique_ptr<Pass> createEnableArmStreamingPass(
/// Pass that allocates tile IDs to ArmSME operations.
std::unique_ptr<Pass> createTileAllocationPass();
+/// Pass that folds 'arm_sme.outerproduct' ops into widening variants.
+std::unique_ptr<Pass> createOuterProductWideningPass();
+
//===----------------------------------------------------------------------===//
// Registration
//===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Dialect/ArmSME/Transforms/Passes.td b/mlir/include/mlir/Dialect/ArmSME/Transforms/Passes.td
index 8d1ba6ed34e805b..cb87a3ba9f04a43 100644
--- a/mlir/include/mlir/Dialect/ArmSME/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/ArmSME/Transforms/Passes.td
@@ -122,4 +122,43 @@ def TileAllocation
let dependentDialects = ["func::FuncDialect"];
}
+def OuterProductWidening
+ : Pass<"arm-sme-outer-product-widening", "mlir::func::FuncOp"> {
+ let summary = "Fold 'arm_sme.outerproduct' operations into widening variants";
+ let description = [{
+ This pass folds 'arm_sme.outerproduct' operations that are chained via the
+ accumulator into 2-way or 4-way ArmSME outer product operations.
+
+ For example:
+ ```mlir
+ %a0_ext = arith.extf %a0 : vector<[4]xf16> to vector<[4]xf32>
+ %b0_ext = arith.extf %b0 : vector<[4]xf16> to vector<[4]xf32>
+ %a1_ext = arith.extf %a1 : vector<[4]xf16> to vector<[4]xf32>
+ %b1_ext = arith.extf %b1 : vector<[4]xf16> to vector<[4]xf32>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext : vector<[4]xf32>, vector<[4]xf32>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext acc(%0) : vector<[4]xf32>, vector<[4]xf32>
+ ```
+
+ Becomes:
+
+ ```mlir
+ %undef = llvm.mlir.undef : vector<[8]xf16>
+    %a0_ins = vector.scalable.insert %a0, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
+    %a1_ins = vector.scalable.insert %a1, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
+    %a_packed = "arm_sve.intr.zip1"(%a0_ins, %a1_ins) : (vector<[8]xf16>, vector<[8]xf16>) -> vector<[8]xf16>
+    %b0_ins = vector.scalable.insert %b0, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
+    %b1_ins = vector.scalable.insert %b1, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
+ %b_packed = "arm_sve.intr.zip1"(%b0_ins, %b1_ins) : (vector<[8]xf16>, vector<[8]xf16>) -> vector<[8]xf16>
+ %0 = arm_sme.fmopa_wide_2way %a_packed, %b_packed : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
+ ```
+
+ For further information on the widening ops see:
+ https://mlir.llvm.org/docs/Dialects/ArmSME/#arm_smefmopa_wide_2way-arm_smefmopa_wide_2wayop
+ https://mlir.llvm.org/docs/Dialects/ArmSME/#arm_smesmopa_wide_4way-arm_smesmopa_wide_4wayop
+ }];
+ let constructor = "mlir::arm_sme::createOuterProductWideningPass()";
+ let dependentDialects = ["func::FuncDialect", "arm_sme::ArmSMEDialect", "arm_sve::ArmSVEDialect", "LLVM::LLVMDialect"];
+}
+
#endif // MLIR_DIALECT_ARMSME_TRANSFORMS_PASSES_TD
diff --git a/mlir/include/mlir/Dialect/ArmSME/Transforms/Transforms.h b/mlir/include/mlir/Dialect/ArmSME/Transforms/Transforms.h
index f622bc0562e9e1a..09e3b4375fa5f98 100644
--- a/mlir/include/mlir/Dialect/ArmSME/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/ArmSME/Transforms/Transforms.h
@@ -15,6 +15,10 @@ class LLVMConversionTarget;
class LLVMTypeConverter;
class RewritePatternSet;
+namespace arm_sme {
+void populateOuterProductWideningPatterns(RewritePatternSet &patterns);
+} // namespace arm_sme
+
} // namespace mlir
#endif // MLIR_DIALECT_ARMSME_TRANSFORMS_H
diff --git a/mlir/include/mlir/Dialect/ArmSVE/IR/ArmSVE.td b/mlir/include/mlir/Dialect/ArmSVE/IR/ArmSVE.td
index e3f3d9e62e8fb39..754413a1ad491ec 100644
--- a/mlir/include/mlir/Dialect/ArmSVE/IR/ArmSVE.td
+++ b/mlir/include/mlir/Dialect/ArmSVE/IR/ArmSVE.td
@@ -410,4 +410,8 @@ def ConvertToSvboolIntrOp :
/*overloadedResults=*/[]>,
Arguments<(ins SVEPredicate:$mask)>;
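+// SVE `zip1` intrinsic, used by the ArmSME outer product widening pass to
+// interleave the inputs of chained 'arm_sme.outerproduct' ops. Generic-form
+// usage (the vector types shown are just one possibility):
+//   "arm_sve.intr.zip1"(%a, %b) : (vector<[8]xf16>, vector<[8]xf16>) -> vector<[8]xf16>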
+def Zip1IntrOp :
+ ArmSVE_IntrBinaryOverloadedOp<"zip1">,
+ Arguments<(ins AnyScalableVector, AnyScalableVector)>;
+
#endif // ARMSVE_OPS
diff --git a/mlir/lib/Conversion/ArmSMEToLLVM/ArmSMEToLLVM.cpp b/mlir/lib/Conversion/ArmSMEToLLVM/ArmSMEToLLVM.cpp
index bbef3b996e40b88..1ead3117de3634b 100644
--- a/mlir/lib/Conversion/ArmSMEToLLVM/ArmSMEToLLVM.cpp
+++ b/mlir/lib/Conversion/ArmSMEToLLVM/ArmSMEToLLVM.cpp
@@ -776,6 +776,49 @@ struct OuterProductOpConversion
}
};
+/// Lower 2-way and 4-way widening outer products to intrinsics.
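+///
+/// For example, 'arm_sme.fmopa_wide_2way' lowers roughly to the following
+/// (the tile ID becomes an attribute and all-active masks are created when
+/// none are provided):
+///
+///   "arm_sme.intr.mopa.wide"(%lhsMask, %rhsMask, %lhs, %rhs) <{tile_id = 0 : i32}>
+///       : (vector<[8]xi1>, vector<[8]xi1>, vector<[8]xf16>, vector<[8]xf16>) -> ()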
+template <class OuterProductWideOp, class OuterProductWideIntrOp>
+struct OuterProductWideOpConversion
+ : public ConvertArmSMEOpToLLVMPattern<OuterProductWideOp> {
+ using ConvertArmSMEOpToLLVMPattern<
+ OuterProductWideOp>::ConvertArmSMEOpToLLVMPattern;
+
+ LogicalResult
+ matchAndRewrite(OuterProductWideOp op,
+ typename OuterProductWideOp::Adaptor adaptor,
+ ConversionPatternRewriter &rewriter) const override {
+ auto tileId = getTileIdOrError(op);
+ if (!tileId)
+ return failure();
+
+ Value acc = op.getAcc();
+ if (!acc)
+      // Initialize the accumulator with zero.
+ acc = op.template createOpAndForwardTileId<arm_sme::ZeroOp>(
+ rewriter, op.getLoc(), op.getResultType());
+
+ Value lhsMask = op.getLhsMask();
+ Value rhsMask = op.getRhsMask();
+ if (!lhsMask || !rhsMask) {
+ auto predTy = op.getLhsType().cloneWith({}, rewriter.getI1Type());
+ Value allActiveMask = rewriter.create<arith::ConstantOp>(
+ op.getLoc(), DenseElementsAttr::get(predTy, true));
+ lhsMask = allActiveMask;
+ rhsMask = allActiveMask;
+ }
+
+ rewriter.create<OuterProductWideIntrOp>(op.getLoc(), tileId, lhsMask,
+ rhsMask, adaptor.getLhs(),
+ adaptor.getRhs());
+
+    // The outer product intrinsics have no result; replace the op with the
+    // accumulator tile to preserve the dataflow.
+ rewriter.replaceOp(op, acc);
+
+ return success();
+ }
+};
+
/// Lower `arm_sme.streaming_vl` to SME CNTS intrinsics.
///
/// Example:
@@ -854,6 +897,13 @@ void mlir::configureArmSMEToLLVMConversionLegality(ConversionTarget &target) {
arm_sme::aarch64_sme_st1q_vert, arm_sme::aarch64_sme_read_horiz,
arm_sme::aarch64_sme_read_vert, arm_sme::aarch64_sme_write_horiz,
arm_sme::aarch64_sme_write_vert, arm_sme::aarch64_sme_mopa,
+ arm_sme::aarch64_sme_mopa_wide, arm_sme::aarch64_sme_mops_wide,
+ arm_sme::aarch64_sme_smopa_wide, arm_sme::aarch64_sme_smops_wide,
+ arm_sme::aarch64_sme_umopa_wide, arm_sme::aarch64_sme_umops_wide,
+ arm_sme::aarch64_sme_smopa_za32, arm_sme::aarch64_sme_smops_za32,
+ arm_sme::aarch64_sme_umopa_za32, arm_sme::aarch64_sme_umops_za32,
+ arm_sme::aarch64_sme_sumopa_wide, arm_sme::aarch64_sme_sumops_wide,
+ arm_sme::aarch64_sme_usmopa_wide, arm_sme::aarch64_sme_usmops_wide,
arm_sme::aarch64_sme_cntsb, arm_sme::aarch64_sme_cntsh,
arm_sme::aarch64_sme_cntsw, arm_sme::aarch64_sme_cntsd>();
target.addLegalDialect<arith::ArithDialect,
@@ -876,8 +926,36 @@ void mlir::populateArmSMEToLLVMConversionPatterns(LLVMTypeConverter &converter,
addArmSMEConversionPatterns<
LoadTileSliceConversion, MoveTileSliceToVectorConversion,
MoveVectorToTileSliceConversion, StoreTileSliceConversion,
- OuterProductOpConversion, ZeroOpConversion, GetTileConversion,
- StreamingVLOpConversion>(patterns, converter);
+ StreamingVLOpConversion, OuterProductOpConversion,
+ OuterProductWideOpConversion<arm_sme::FMopaWide2WayOp,
+ arm_sme::aarch64_sme_mopa_wide>,
+ OuterProductWideOpConversion<arm_sme::FMopsWide2WayOp,
+ arm_sme::aarch64_sme_mops_wide>,
+ OuterProductWideOpConversion<arm_sme::SMopaWide2WayOp,
+ arm_sme::aarch64_sme_smopa_za32>,
+ OuterProductWideOpConversion<arm_sme::SMopsWide2WayOp,
+ arm_sme::aarch64_sme_smops_za32>,
+ OuterProductWideOpConversion<arm_sme::UMopaWide2WayOp,
+ arm_sme::aarch64_sme_umopa_za32>,
+ OuterProductWideOpConversion<arm_sme::UMopsWide2WayOp,
+ arm_sme::aarch64_sme_umops_za32>,
+ OuterProductWideOpConversion<arm_sme::SMopaWide4WayOp,
+ arm_sme::aarch64_sme_smopa_wide>,
+ OuterProductWideOpConversion<arm_sme::SMopsWide4WayOp,
+ arm_sme::aarch64_sme_smops_wide>,
+ OuterProductWideOpConversion<arm_sme::UMopaWide4WayOp,
+ arm_sme::aarch64_sme_umopa_wide>,
+ OuterProductWideOpConversion<arm_sme::UMopsWide4WayOp,
+ arm_sme::aarch64_sme_umops_wide>,
+ OuterProductWideOpConversion<arm_sme::SuMopaWide4WayOp,
+ arm_sme::aarch64_sme_sumopa_wide>,
+ OuterProductWideOpConversion<arm_sme::SuMopsWide4WayOp,
+ arm_sme::aarch64_sme_sumops_wide>,
+ OuterProductWideOpConversion<arm_sme::UsMopaWide4WayOp,
+ arm_sme::aarch64_sme_usmopa_wide>,
+ OuterProductWideOpConversion<arm_sme::UsMopsWide4WayOp,
+ arm_sme::aarch64_sme_usmops_wide>,
+ ZeroOpConversion, GetTileConversion>(patterns, converter);
}
std::unique_ptr<Pass> mlir::createConvertArmSMEToLLVMPass() {
diff --git a/mlir/lib/Dialect/ArmSME/Transforms/CMakeLists.txt b/mlir/lib/Dialect/ArmSME/Transforms/CMakeLists.txt
index 96eb58442043843..24942b6f28d2cdd 100644
--- a/mlir/lib/Dialect/ArmSME/Transforms/CMakeLists.txt
+++ b/mlir/lib/Dialect/ArmSME/Transforms/CMakeLists.txt
@@ -1,5 +1,6 @@
add_mlir_dialect_library(MLIRArmSMETransforms
EnableArmStreaming.cpp
+ OuterProductWidening.cpp
TileAllocation.cpp
ADDITIONAL_HEADER_DIRS
@@ -10,6 +11,7 @@ add_mlir_dialect_library(MLIRArmSMETransforms
LINK_LIBS PUBLIC
MLIRArmSMEDialect
+ MLIRArmSVEDialect
MLIRFuncDialect
MLIRLLVMCommonConversion
MLIRVectorDialect
diff --git a/mlir/lib/Dialect/ArmSME/Transforms/OuterProductWidening.cpp b/mlir/lib/Dialect/ArmSME/Transforms/OuterProductWidening.cpp
new file mode 100644
index 000000000000000..8d17d1071ddfdf1
--- /dev/null
+++ b/mlir/lib/Dialect/ArmSME/Transforms/OuterProductWidening.cpp
@@ -0,0 +1,501 @@
+//===- OuterProductWidening.cpp - Widen 'arm_sme.outerproduct' ops --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements rewrites that fold 'arm_sme.outerproduct' operations
+// into the 2-way or 4-way widening outerproduct operations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/ArmSME/IR/ArmSME.h"
+#include "mlir/Dialect/ArmSME/Transforms/Passes.h"
+#include "mlir/Dialect/ArmSME/Transforms/Transforms.h"
+#include "mlir/Dialect/ArmSVE/IR/ArmSVEDialect.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
+#include "mlir/IR/PatternMatch.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
+
+#define DEBUG_TYPE "arm-sme-outerproduct-widening"
+
+namespace mlir::arm_sme {
+#define GEN_PASS_DEF_OUTERPRODUCTWIDENING
+#include "mlir/Dialect/ArmSME/Transforms/Passes.h.inc"
+} // namespace mlir::arm_sme
+
+using namespace mlir;
+using namespace mlir::arm_sme;
+
+namespace {
+// Fold two 'arm_sme.outerproduct' operations that are chained via the
+// accumulator into a single 2-way widening outer product operation.
+//
+// For example:
+//
+// %a0_ext = arith.extf %a0 : vector<[4]xf16> to vector<[4]xf32>
+// %b0_ext = arith.extf %b0 : vector<[4]xf16> to vector<[4]xf32>
+// %0 = arm_sme.outerproduct %a0_ext, %b0_ext : vector<[4]xf32>,
+// vector<[4]xf32>
+//
+// %a1_ext = arith.extf %a1 : vector<[4]xf16> to vector<[4]xf32>
+// %b1_ext = arith.extf %b1 : vector<[4]xf16> to vector<[4]xf32>
+// %1 = arm_sme.outerproduct %a1_ext, %b1_ext acc(%0) : vector<[4]xf32>,
+// vector<[4]xf32>
+//
+// Becomes:
+//
+// %a_packed = "arm_sve.intr.zip1"(%a0, %a1) -> vector<[8]xf16> (simplified)
+// %b_packed = "arm_sve.intr.zip1"(%b0, %b1) -> vector<[8]xf16> (simplified)
+// %0 = arm_sme.fmopa_wide_2way %a_packed, %b_packed : vector<[8]xf16>,
+// vector<[8]xf16> into vector<[4]x[4]xf32>
+class OuterProduct2WayWidening
+ : public OpRewritePattern<arm_sme::OuterProductOp> {
+public:
+ using OpRewritePattern::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(arm_sme::OuterProductOp op,
+ PatternRewriter &rewriter) const override {
+ Value acc = op.getAcc();
+ if (!acc)
+ return rewriter.notifyMatchFailure(op, "no accumulator operand");
+
+ arm_sme::OuterProductOp op1 = acc.getDefiningOp<arm_sme::OuterProductOp>();
+ arm_sme::OuterProductOp op2 = op;
+ if (!op1)
+ return rewriter.notifyMatchFailure(op,
+ "defining op of accumulator operand "
+ "must be an 'arm_sme.outerproduct'");
+
+ if (op1.getKind() != op2.getKind())
+ return rewriter.notifyMatchFailure(
+ op, "combining kind (add or sub) of outer products must match");
+
+ if (!llvm::hasSingleElement(op1->getUses())) {
+ // We could still widen, but if the first outer product has an
+ // accumulator it will be used as the root for tile allocation and since
+ // the widening outer product uses the same accumulator it will get
+ // assigned the same tile ID, resulting in 3 outer products and incorrect
+ // results. No accumulator would be ok, but it's simpler to prevent this
+ // altogether, since it has no benefit.
+ return rewriter.notifyMatchFailure(
+ op, "first outer product is not single use and cannot be removed, "
+ "no benefit to widening");
+ }
+
+ auto nxnxv4i32 =
+ VectorType::get({4, 4}, rewriter.getI32Type(), {true, true});
+ auto nxnxv4f32 =
+ VectorType::get({4, 4}, rewriter.getF32Type(), {true, true});
+ auto nxv4i16 = VectorType::get({4}, rewriter.getI16Type(), true);
+ auto nxv4f16 = VectorType::get({4}, rewriter.getF16Type(), true);
+ auto nxv4bf16 = VectorType::get({4}, rewriter.getBF16Type(), true);
+ if ((failed(
+ isWidenable<arith::ExtFOp>(rewriter, op1, nxnxv4f32, nxv4f16)) ||
+ failed(
+ isWidenable<arith::ExtFOp>(rewriter, op2, nxnxv4f32, nxv4f16))) &&
+ (failed(
+ isWidenable<arith::ExtFOp>(rewriter, op1, nxnxv4f32, nxv4bf16)) ||
+ failed(
+ isWidenable<arith::ExtFOp>(rewriter, op2, nxnxv4f32, nxv4bf16))) &&
+ (failed(
+ isWidenable<arith::ExtSIOp>(rewriter, op1, nxnxv4i32, nxv4i16)) ||
+ failed(
+ isWidenable<arith::ExtSIOp>(rewriter, op2, nxnxv4i32, nxv4i16))) &&
+ (failed(
+ isWidenable<arith::ExtUIOp>(rewriter, op1, nxnxv4i32, nxv4i16)) ||
+ failed(
+ isWidenable<arith::ExtUIOp>(rewriter, op2, nxnxv4i32, nxv4i16))))
+ return failure();
+
+ auto loc = op.getLoc();
+
+ // zip(lhs, rhs)
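+    // The zip1 intrinsic interleaves two vectors of the widened type, so each
+    // narrow input is first inserted into the low part of an undef vector of
+    // that type before being interleaved.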
+ auto packInputs = [&](VectorType type, Value lhs, Value rhs) {
+ auto undef = rewriter.create<LLVM::UndefOp>(loc, type);
+ auto insertLHS =
+ rewriter.create<vector::ScalableInsertOp>(loc, lhs, undef, 0);
+ auto insertRHS =
+ rewriter.create<vector::ScalableInsertOp>(loc, rhs, undef, 0);
+ return rewriter.create<arm_sve::Zip1IntrOp>(loc, type, insertLHS,
+ insertRHS);
+ };
+
+ auto extOp = op.getLhs().getDefiningOp();
+ VectorType extSourceVectorType =
+ cast<VectorType>(extOp->getOperand(0).getType());
+ VectorType widenedVectorType =
+ VectorType::Builder(extSourceVectorType)
+ .setDim(0, extSourceVectorType.getShape()[0] * 2);
+ auto lhs = packInputs(widenedVectorType,
+ op1.getLhs().getDefiningOp()->getOperand(0),
+ op2.getLhs().getDefiningOp()->getOperand(0));
+ auto rhs = packInputs(widenedVectorType,
+ op1.getRhs().getDefiningOp()->getOperand(0),
+ op2.getRhs().getDefiningOp()->getOperand(0));
+
+ Value lhsMask, rhsMask;
+ if (op1.getLhsMask() || op2.getLhsMask()) {
+ if (!(op1.getLhsMask() && op2.getLhsMask()))
+ return rewriter.notifyMatchFailure(
+ op, "unsupported masking, either both outerproducts are masked "
+ "or neither");
+
+ VectorType maskType = VectorType::Builder(widenedVectorType)
+ .setElementType(rewriter.getI1Type());
+ lhsMask = packInputs(maskType, op1.getLhsMask(), op2.getLhsMask());
+ rhsMask = packInputs(maskType, op1.getRhsMask(), op2.getRhsMask());
+ }
+
+ arm_sme::CombiningKind kind = op.getKind();
+ assert((kind == arm_sme::CombiningKind::Add ||
+ kind == arm_sme::CombiningKind::Sub) &&
+ "unhandled arm_sme::CombiningKind!");
+
+ if (isa<arith::ExtFOp>(extOp)) {
+ if (kind == arm_sme::CombiningKind::Add)
+ rewriter.replaceOpWithNewOp<arm_sme::FMopaWide2WayOp>(
+ op2, op.getResultType(), lhs, rhs, lhsMask, rhsMask, op1.getAcc());
+ else
+ rewriter.replaceOpWithNewOp<arm_sme::FMopsWide2WayOp>(
+ op2, op.getResultType(), lhs, rhs, lhsMask, rhsMask, op1.getAcc());
+ } else if (isa<arith::ExtSIOp>(extOp)) {
+ if (kind == arm_sme::CombiningKind::Add)
+ rewriter.replaceOpWithNewOp<arm_sme::SMopaWide2WayOp>(
+ op2, op.getResultType(), lhs, rhs, lhsMask, rhsMask, op1.getAcc());
+ else
+ rewriter.replaceOpWithNewOp<arm_sme::SMopsWide2WayOp>(
+ op2, op.getResultType(), lhs, rhs, lhsMask, rhsMask, op1.getAcc());
+ } else if (isa<arith::ExtUIOp>(extOp)) {
+ if (kind == arm_sme::CombiningKind::Add)
+ rewriter.replaceOpWithNewOp<arm_sme::UMopaWide2WayOp>(
+ op2, op.getResultType(), lhs, rhs, lhsMask, rhsMask, op1.getAcc());
+ else
+ rewriter.replaceOpWithNewOp<arm_sme::UMopsWide2WayOp>(
+ op2, op.getResultType(), lhs, rhs, lhsMask, rhsMask, op1.getAcc());
+ } else
+ llvm_unreachable("unexpected extend op!");
+
+ op1.erase();
+
+ return success();
+ }
+
+private:
+ template <typename ExtOp>
+ LogicalResult isWidenable(PatternRewriter &rewriter,
+ arm_sme::OuterProductOp op, VectorType resultType,
+ VectorType inputType) const {
+ if (op.getResultType() != resultType)
+ return rewriter.notifyMatchFailure(
+ op, "unsupported result type, expected 'vector<[4]x[4]xi32>' or "
+ "'vector<[4]x[4]xf32>'");
+
+ auto lhsDefOp = op.getLhs().getDefiningOp<ExtOp>();
+ auto rhsDefOp = op.getRhs().getDefiningOp<ExtOp>();
+
+ if (!lhsDefOp || !rhsDefOp)
+ return rewriter.notifyMatchFailure(
+ op, "defining op of outerproduct operands must be 'arith.extf' or "
+ "'arith.extsi' or 'arith.extui'");
+
+ auto lhsInType = cast<VectorType>(lhsDefOp->getOperand(0).getType());
+ auto rhsInType = cast<VectorType>(rhsDefOp->getOperand(0).getType());
+
+ if (lhsInType != inputType || rhsInType != inputType)
+ return rewriter.notifyMatchFailure(
+ op, "unsupported input types, expected 'vector<[4]xi16>' or "
+ "'vector<[4]xf16>' or 'vector<[4]xbf16>'");
+ return success();
+ }
+};
+
+// Fold four 'arm_sme.outerproduct' operations that are chained via the
+// accumulator into a single 4-way widening outer product operation.
+class OuterProduct4WayWidening
+ : public OpRewritePattern<arm_sme::OuterProductOp> {
+public:
+ using OpRewritePattern::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(arm_sme::OuterProductOp op,
+ PatternRewriter &rewriter) const override {
+ Value acc = op.getAcc();
+ if (!acc)
+ return rewriter.notifyMatchFailure(op, "no accumulator operand");
+
+ arm_sme::OuterProductOp op4 = op;
+ arm_sme::OuterProductOp op3 = acc.getDefiningOp<arm_sme::OuterProductOp>();
+ if (!op3)
+ return rewriter.notifyMatchFailure(op,
+ "defining op of accumulator operand "
+ "must be an 'arm_sme.outerproduct'");
+
+ acc = op3.getAcc();
+ if (!acc)
+ return rewriter.notifyMatchFailure(op, "no accumulator operand");
+
+ arm_sme::OuterProductOp op2 = acc.getDefiningOp<arm_sme::OuterProductOp>();
+ if (!op2)
+ return rewriter.notifyMatchFailure(op,
+ "defining op of accumulator operand "
+ "must be an 'arm_sme.outerproduct'");
+
+ acc = op2.getAcc();
+ if (!acc)
+ return rewriter.notifyMatchFailure(op, "no accumulator operand");
+
+ arm_sme::OuterProductOp op1 = acc.getDefiningOp<arm_sme::OuterProductOp>();
+ if (!op1)
+ return rewriter.notifyMatchFailure(op,
+ "defining op of accumulator operand "
+ "must be an 'arm_sme.outerproduct'");
+
+ arm_sme::CombiningKind kind = op1.getKind();
+ if (op2.getKind() != kind || op3.getKind() != kind || op4.getKind() != kind)
+ return rewriter.notifyMatchFailure(
+ op, "combining kind (add or sub) of outer products must match");
+
+ if (!llvm::hasSingleElement(op1->getUses()) ||
+ !llvm::hasSingleElement(op2->getUses()) ||
+ !llvm::hasSingleElement(op3->getUses()))
+ return rewriter.notifyMatchFailure(
+ op, "outer products are not single use and cannot be removed, "
+ "no benefit to widening");
+
+ auto nxnxv4i32 =
+ VectorType::get({4, 4}, rewriter.getI32Type(), {true, true});
+ auto nxnxv2i64 =
+ VectorType::get({2, 2}, rewriter.getI64Type(), {true, true});
+ auto nxv4i8 = VectorType::get({4}, rewriter.getI8Type(), true);
+ auto nxv2i16 = VectorType::get({2}, rewriter.getI16Type(), true);
+ if (
+ // signed, i8i8i32
+ (failed(
+ isWidenable<arith::ExtSIOp>(rewriter, op1, nxnxv4i32, nxv4i8)) ||
+ failed(
+ isWidenable<arith::ExtSIOp>(rewriter, op2, nxnxv4i32, nxv4i8)) ||
+ failed(
+ isWidenable<arith::ExtSIOp>(rewriter, op3, nxnxv4i32, nxv4i8)) ||
+ failed(
+ isWidenable<arith::ExtSIOp>(rewriter, op4, nxnxv4i32, nxv4i8))) &&
+ // signed, i16i16i64
+ (failed(
+ isWidenable<arith::ExtSIOp>(rewriter, op1, nxnxv2i64, nxv2i16)) ||
+ failed(
+ isWidenable<arith::ExtSIOp>(rewriter, op2, nxnxv2i64, nxv2i16)) ||
+ failed(
+ isWidenable<arith::ExtSIOp>(rewriter, op3, nxnxv2i64, nxv2i16)) ||
+ failed(
+ isWidenable<arith::ExtSIOp>(rewriter, op4, nxnxv2i64, nxv2i16))) &&
+ // unsigned, i8i8i32
+ (failed(
+ isWidenable<arith::ExtUIOp>(rewriter, op1, nxnxv4i32, nxv4i8)) ||
+ failed(
+ isWidenable<arith::ExtUIOp>(rewriter, op2, nxnxv4i32, nxv4i8)) ||
+ failed(
+ isWidenable<arith::ExtUIOp>(rewriter, op3, nxnxv4i32, nxv4i8)) ||
+ failed(
+ isWidenable<arith::ExtUIOp>(rewriter, op4, nxnxv4i32, nxv4i8))) &&
+ // unsigned, i16i16i64
+ (failed(
+ isWidenable<arith::ExtUIOp>(rewriter, op1, nxnxv2i64, nxv2i16)) ||
+ failed(
+ isWidenable<arith::ExtUIOp>(rewriter, op2, nxnxv2i64, nxv2i16)) ||
+ failed(
+ isWidenable<arith::ExtUIOp>(rewriter, op3, nxnxv2i64, nxv2i16)) ||
+ failed(
+ isWidenable<arith::ExtUIOp>(rewriter, op4, nxnxv2i64, nxv2i16))) &&
+ // signed by unsigned, i8i8i32
+ (failed(isWidenable<arith::ExtSIOp, arith::ExtUIOp>(
+ rewriter, op1, nxnxv4i32, nxv4i8)) ||
+ failed(isWidenable<arith::ExtSIOp, arith::ExtUIOp>(
+ rewriter, op2, nxnxv4i32, nxv4i8)) ||
+ failed(isWidenable<arith::ExtSIOp, arith::ExtUIOp>(
+ rewriter, op3, nxnxv4i32, nxv4i8)) ||
+ failed(isWidenable<arith::ExtSIOp, arith::ExtUIOp>(
+ rewriter, op4, nxnxv4i32, nxv4i8))) &&
+ // signed by unsigned, i16i16i64
+ (failed(isWidenable<arith::ExtSIOp, arith::ExtUIOp>(
+ rewriter, op1, nxnxv2i64, nxv2i16)) ||
+ failed(isWidenable<arith::ExtSIOp, arith::ExtUIOp>(
+ rewriter, op2, nxnxv2i64, nxv2i16)) ||
+ failed(isWidenable<arith::ExtSIOp, arith::ExtUIOp>(
+ rewriter, op3, nxnxv2i64, nxv2i16)) ||
+ failed(isWidenable<arith::ExtSIOp, arith::ExtUIOp>(
+ rewriter, op4, nxnxv2i64, nxv2i16))) &&
+ // unsigned by signed, i8i8i32
+ (failed(isWidenable<arith::ExtUIOp, arith::ExtSIOp>(
+ rewriter, op1, nxnxv4i32, nxv4i8)) ||
+ failed(isWidenable<arith::ExtUIOp, arith::ExtSIOp>(
+ rewriter, op2, nxnxv4i32, nxv4i8)) ||
+ failed(isWidenable<arith::ExtUIOp, arith::ExtSIOp>(
+ rewriter, op3, nxnxv4i32, nxv4i8)) ||
+ failed(isWidenable<arith::ExtUIOp, arith::ExtSIOp>(
+ rewriter, op4, nxnxv4i32, nxv4i8))) &&
+ // unsigned by signed, i16i16i64
+ (failed(isWidenable<arith::ExtUIOp, arith::ExtSIOp>(
+ rewriter, op1, nxnxv2i64, nxv2i16)) ||
+ failed(isWidenable<arith::ExtUIOp, arith::ExtSIOp>(
+ rewriter, op2, nxnxv2i64, nxv2i16)) ||
+ failed(isWidenable<arith::ExtUIOp, arith::ExtSIOp>(
+ rewriter, op3, nxnxv2i64, nxv2i16)) ||
+ failed(isWidenable<arith::ExtUIOp, arith::ExtSIOp>(
+ rewriter, op4, nxnxv2i64, nxv2i16))))
+ return failure();
+
+ auto loc = op.getLoc();
+
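+    // As in the 2-way pattern: insert each narrow input into the low part of
+    // an undef vector of the widened type, then interleave pairs with zip1.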
+ auto packInputs = [&](VectorType type, Value lhs, Value rhs) {
+ auto undef = rewriter.create<LLVM::UndefOp>(loc, type);
+ auto insertLHS =
+ rewriter.create<vector::ScalableInsertOp>(loc, lhs, undef, 0);
+ auto insertRHS =
+ rewriter.create<vector::ScalableInsertOp>(loc, rhs, undef, 0);
+ return rewriter.create<arm_sve::Zip1IntrOp>(loc, type, insertLHS,
+ insertRHS);
+ };
+
+ auto lhsExtOp = op.getLhs().getDefiningOp();
+ auto rhsExtOp = op.getRhs().getDefiningOp();
+ VectorType extSourceVectorType =
+ cast<VectorType>(lhsExtOp->getOperand(0).getType());
+ VectorType widenedVectorType =
+ VectorType::Builder(extSourceVectorType)
+ .setDim(0, extSourceVectorType.getShape()[0] * 4);
+ auto lhs0 = packInputs(widenedVectorType,
+ op1.getLhs().getDefiningOp()->getOperand(0),
+ op3.getLhs().getDefiningOp()->getOperand(0));
+ auto lhs1 = packInputs(widenedVectorType,
+ op2.getLhs().getDefiningOp()->getOperand(0),
+ op4.getLhs().getDefiningOp()->getOperand(0));
+ auto lhs = rewriter.create<arm_sve::Zip1IntrOp>(loc, widenedVectorType,
+ lhs0, lhs1);
+
+ auto rhs0 = packInputs(widenedVectorType,
+ op1.getRhs().getDefiningOp()->getOperand(0),
+ op3.getRhs().getDefiningOp()->getOperand(0));
+ auto rhs1 = packInputs(widenedVectorType,
+ op2.getRhs().getDefiningOp()->getOperand(0),
+ op4.getRhs().getDefiningOp()->getOperand(0));
+ auto rhs = rewriter.create<arm_sve::Zip1IntrOp>(loc, widenedVectorType,
+ rhs0, rhs1);
+
+ Value lhsMask, rhsMask;
+ if (op1.getLhsMask() || op2.getLhsMask() || op3.getLhsMask() ||
+ op4.getLhsMask()) {
+ if (!(op1.getLhsMask() && op2.getLhsMask() && op3.getLhsMask() &&
+ op4.getLhsMask()))
+ return rewriter.notifyMatchFailure(
+ op, "unsupported masking, either all outerproducts are masked "
+ "or none");
+
+ VectorType maskType = VectorType::Builder(widenedVectorType)
+ .setElementType(rewriter.getI1Type());
+ auto lhs0Mask = packInputs(maskType, op1.getLhsMask(), op3.getLhsMask());
+ auto lhs1Mask = packInputs(maskType, op2.getLhsMask(), op4.getLhsMask());
+ lhsMask = rewriter.create<arm_sve::Zip1IntrOp>(loc, maskType, lhs0Mask,
+ lhs1Mask);
+
+ auto rhs0Mask = packInputs(maskType, op1.getRhsMask(), op3.getRhsMask());
+ auto rhs1Mask = packInputs(maskType, op2.getRhsMask(), op4.getRhsMask());
+ rhsMask = rewriter.create<arm_sve::Zip1IntrOp>(loc, maskType, rhs0Mask,
+ rhs1Mask);
+ }
+
+ assert((kind == arm_sme::CombiningKind::Add ||
+ kind == arm_sme::CombiningKind::Sub) &&
+ "unhandled arm_sme::CombiningKind!");
+ if (isa<arith::ExtSIOp>(lhsExtOp) && isa<arith::ExtSIOp>(rhsExtOp)) {
+ if (kind == arm_sme::CombiningKind::Add)
+ rewriter.replaceOpWithNewOp<arm_sme::SMopaWide4WayOp>(
+ op4, op.getResultType(), lhs, rhs, lhsMask, rhsMask, op1.getAcc());
+ else
+ rewriter.replaceOpWithNewOp<arm_sme::SMopsWide4WayOp>(
+ op4, op.getResultType(), lhs, rhs, lhsMask, rhsMask, op1.getAcc());
+ } else if (isa<arith::ExtUIOp>(lhsExtOp) && isa<arith::ExtUIOp>(rhsExtOp)) {
+ if (kind == arm_sme::CombiningKind::Add)
+ rewriter.replaceOpWithNewOp<arm_sme::UMopaWide4WayOp>(
+ op4, op.getResultType(), lhs, rhs, lhsMask, rhsMask, op1.getAcc());
+ else
+ rewriter.replaceOpWithNewOp<arm_sme::UMopsWide4WayOp>(
+ op4, op.getResultType(), lhs, rhs, lhsMask, rhsMask, op1.getAcc());
+ } else if (isa<arith::ExtSIOp>(lhsExtOp) && isa<arith::ExtUIOp>(rhsExtOp)) {
+ if (kind == arm_sme::CombiningKind::Add)
+ rewriter.replaceOpWithNewOp<arm_sme::SuMopaWide4WayOp>(
+ op4, op.getResultType(), lhs, rhs, lhsMask, rhsMask, op1.getAcc());
+ else
+ rewriter.replaceOpWithNewOp<arm_sme::SuMopsWide4WayOp>(
+ op4, op.getResultType(), lhs, rhs, lhsMask, rhsMask, op1.getAcc());
+ } else if (isa<arith::ExtUIOp>(lhsExtOp) && isa<arith::ExtSIOp>(rhsExtOp)) {
+ if (kind == arm_sme::CombiningKind::Add)
+ rewriter.replaceOpWithNewOp<arm_sme::UsMopaWide4WayOp>(
+ op4, op.getResultType(), lhs, rhs, lhsMask, rhsMask, op1.getAcc());
+ else
+ rewriter.replaceOpWithNewOp<arm_sme::UsMopsWide4WayOp>(
+ op4, op.getResultType(), lhs, rhs, lhsMask, rhsMask, op1.getAcc());
+ } else
+ llvm_unreachable("unexpected extend op!");
+
+ op3.erase();
+ op2.erase();
+ op1.erase();
+
+ return success();
+ }
+
+private:
+ template <typename LhsExtOp, typename RhsExtOp = LhsExtOp>
+ LogicalResult isWidenable(PatternRewriter &rewriter,
+ arm_sme::OuterProductOp op, VectorType resultType,
+ VectorType inputType) const {
+ if (op.getResultType() != resultType)
+ return rewriter.notifyMatchFailure(
+ op, "unsupported result type, expected 'vector<[4]x[4]xi32>' or "
+ "'vector<[2]x[2]xi64>'");
+
+ auto lhsDefOp = op.getLhs().getDefiningOp<LhsExtOp>();
+ auto rhsDefOp = op.getRhs().getDefiningOp<RhsExtOp>();
+
+ if (!lhsDefOp || !rhsDefOp)
+ return rewriter.notifyMatchFailure(
+ op, "defining op of outerproduct operands must be 'arith.extsi' or "
+ "'arith.extui'");
+
+ auto lhsInType = cast<VectorType>(lhsDefOp->getOperand(0).getType());
+ auto rhsInType = cast<VectorType>(rhsDefOp->getOperand(0).getType());
+
+ if (lhsInType != inputType || rhsInType != inputType)
+ return rewriter.notifyMatchFailure(
+ op, "unsupported input types, expected 'vector<[4]xi8>' or "
+ "'vector<[2]xi16>'");
+ return success();
+ }
+};
+
+struct OuterProductWideningPass
+ : public arm_sme::impl::OuterProductWideningBase<OuterProductWideningPass> {
+
+ void runOnOperation() override {
+ RewritePatternSet patterns(&getContext());
+ populateOuterProductWideningPatterns(patterns);
+
+ if (failed(
+ applyPatternsAndFoldGreedily(getOperation(), std::move(patterns))))
+ signalPassFailure();
+ }
+};
+
+} // namespace
+
+void mlir::arm_sme::populateOuterProductWideningPatterns(
+ RewritePatternSet &patterns) {
+ patterns.add<OuterProduct2WayWidening, OuterProduct4WayWidening>(
+ patterns.getContext());
+}
+
+std::unique_ptr<Pass> mlir::arm_sme::createOuterProductWideningPass() {
+ return std::make_unique<OuterProductWideningPass>();
+}
diff --git a/mlir/test/Conversion/ArmSMEToLLVM/arm-sme-to-llvm.mlir b/mlir/test/Conversion/ArmSMEToLLVM/arm-sme-to-llvm.mlir
index f9cf77ca15ffb9d..562e6125243251c 100644
--- a/mlir/test/Conversion/ArmSMEToLLVM/arm-sme-to-llvm.mlir
+++ b/mlir/test/Conversion/ArmSMEToLLVM/arm-sme-to-llvm.mlir
@@ -601,3 +601,275 @@ func.func @arm_sme_streaming_vl_double_words() -> index {
%svl_d = arm_sme.streaming_vl <double>
return %svl_d : index
}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.fmopa_wide_2way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+// CHECK-LABEL: arm_sme_fmopa_wide_2way_f16f16_to_f32
+// CHECK: "arm_sme.intr.mopa.wide"({{.*}}) <{tile_id = 0 : i32}> : (vector<[8]xi1>, vector<[8]xi1>, vector<[8]xf16>, vector<[8]xf16>) -> ()
+func.func @arm_sme_fmopa_wide_2way_f16f16_to_f32(%vecA: vector<[8]xf16>, %vecB: vector<[8]xf16>) -> vector<[4]x[4]xf32> {
+ %result = arm_sme.fmopa_wide_2way %vecA, %vecB : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
+ return %result : vector<[4]x[4]xf32>
+}
+
+// -----
+
+// CHECK-LABEL: arm_sme_fmopa_wide_2way_bf16bf16_to_f32
+// CHECK: "arm_sme.intr.mopa.wide"({{.*}}) <{tile_id = 0 : i32}> : (vector<[8]xi1>, vector<[8]xi1>, vector<[8]xbf16>, vector<[8]xbf16>) -> ()
+func.func @arm_sme_fmopa_wide_2way_bf16bf16_to_f32(%vecA: vector<[8]xbf16>, %vecB: vector<[8]xbf16>) -> vector<[4]x[4]xf32> {
+ %result = arm_sme.fmopa_wide_2way %vecA, %vecB : vector<[8]xbf16>, vector<[8]xbf16> into vector<[4]x[4]xf32>
+ return %result : vector<[4]x[4]xf32>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.fmops_wide_2way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+// CHECK-LABEL: arm_sme_fmops_wide_2way_f16f16_to_f32
+// CHECK: "arm_sme.intr.mops.wide"({{.*}}) <{tile_id = 0 : i32}> : (vector<[8]xi1>, vector<[8]xi1>, vector<[8]xf16>, vector<[8]xf16>) -> ()
+func.func @arm_sme_fmops_wide_2way_f16f16_to_f32(%vecA: vector<[8]xf16>, %vecB: vector<[8]xf16>) -> vector<[4]x[4]xf32> {
+ %result = arm_sme.fmops_wide_2way %vecA, %vecB : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
+ return %result : vector<[4]x[4]xf32>
+}
+
+// -----
+
+// CHECK-LABEL: arm_sme_fmops_wide_2way_bf16bf16_to_f32
+// CHECK: "arm_sme.intr.mops.wide"({{.*}}) <{tile_id = 0 : i32}> : (vector<[8]xi1>, vector<[8]xi1>, vector<[8]xbf16>, vector<[8]xbf16>) -> ()
+func.func @arm_sme_fmops_wide_2way_bf16bf16_to_f32(%vecA: vector<[8]xbf16>, %vecB: vector<[8]xbf16>) -> vector<[4]x[4]xf32> {
+ %result = arm_sme.fmops_wide_2way %vecA, %vecB : vector<[8]xbf16>, vector<[8]xbf16> into vector<[4]x[4]xf32>
+ return %result : vector<[4]x[4]xf32>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.smopa_wide_2way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+// CHECK-LABEL: arm_sme_smopa_wide_2way_i16i16_to_i32
+// CHECK: "arm_sme.intr.smopa.za32"({{.*}}) <{tile_id = 0 : i32}> : (vector<[8]xi1>, vector<[8]xi1>, vector<[8]xi16>, vector<[8]xi16>) -> ()
+func.func @arm_sme_smopa_wide_2way_i16i16_to_i32(%vecA: vector<[8]xi16>, %vecB: vector<[8]xi16>) -> vector<[4]x[4]xi32> {
+ %result = arm_sme.smopa_wide_2way %vecA, %vecB : vector<[8]xi16>, vector<[8]xi16> into vector<[4]x[4]xi32>
+ return %result : vector<[4]x[4]xi32>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.smops_wide_2way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+// CHECK-LABEL: arm_sme_smops_wide_2way_i16i16_to_i32
+// CHECK: "arm_sme.intr.smops.za32"({{.*}}) <{tile_id = 0 : i32}> : (vector<[8]xi1>, vector<[8]xi1>, vector<[8]xi16>, vector<[8]xi16>) -> ()
+func.func @arm_sme_smops_wide_2way_i16i16_to_i32(%vecA: vector<[8]xi16>, %vecB: vector<[8]xi16>) -> vector<[4]x[4]xi32> {
+ %result = arm_sme.smops_wide_2way %vecA, %vecB : vector<[8]xi16>, vector<[8]xi16> into vector<[4]x[4]xi32>
+ return %result : vector<[4]x[4]xi32>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.umopa_wide_2way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+// CHECK-LABEL: arm_sme_umopa_wide_2way_i16i16_to_i32
+// CHECK: "arm_sme.intr.umopa.za32"({{.*}}) <{tile_id = 0 : i32}> : (vector<[8]xi1>, vector<[8]xi1>, vector<[8]xi16>, vector<[8]xi16>) -> ()
+func.func @arm_sme_umopa_wide_2way_i16i16_to_i32(%vecA: vector<[8]xi16>, %vecB: vector<[8]xi16>) -> vector<[4]x[4]xi32> {
+ %result = arm_sme.umopa_wide_2way %vecA, %vecB : vector<[8]xi16>, vector<[8]xi16> into vector<[4]x[4]xi32>
+ return %result : vector<[4]x[4]xi32>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.umops_wide_2way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+// CHECK-LABEL: arm_sme_umops_wide_2way_i16i16_to_i32
+// CHECK: "arm_sme.intr.umops.za32"({{.*}}) <{tile_id = 0 : i32}> : (vector<[8]xi1>, vector<[8]xi1>, vector<[8]xi16>, vector<[8]xi16>) -> ()
+func.func @arm_sme_umops_wide_2way_i16i16_to_i32(%vecA: vector<[8]xi16>, %vecB: vector<[8]xi16>) -> vector<[4]x[4]xi32> {
+ %result = arm_sme.umops_wide_2way %vecA, %vecB : vector<[8]xi16>, vector<[8]xi16> into vector<[4]x[4]xi32>
+ return %result : vector<[4]x[4]xi32>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.smopa_wide_4way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+// CHECK-LABEL: arm_sme_smopa_wide_4way_i8i8_to_i32
+// CHECK: "arm_sme.intr.smopa.wide"({{.*}}) <{tile_id = 0 : i32}> : (vector<[16]xi1>, vector<[16]xi1>, vector<[16]xi8>, vector<[16]xi8>) -> ()
+func.func @arm_sme_smopa_wide_4way_i8i8_to_i32(%vecA: vector<[16]xi8>, %vecB: vector<[16]xi8>) -> vector<[4]x[4]xi32> {
+ %result = arm_sme.smopa_wide_4way %vecA, %vecB : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ return %result : vector<[4]x[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: arm_sme_smopa_wide_4way_i16i16_to_i64
+// CHECK: "arm_sme.intr.smopa.wide"({{.*}}) <{tile_id = 0 : i32}> : (vector<[8]xi1>, vector<[8]xi1>, vector<[8]xi16>, vector<[8]xi16>) -> ()
+func.func @arm_sme_smopa_wide_4way_i16i16_to_i64(%vecA: vector<[8]xi16>, %vecB: vector<[8]xi16>) -> vector<[2]x[2]xi64> {
+ %result = arm_sme.smopa_wide_4way %vecA, %vecB : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+ return %result : vector<[2]x[2]xi64>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.smops_wide_4way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+// CHECK-LABEL: arm_sme_smops_wide_4way_i8i8_to_i32
+// CHECK: "arm_sme.intr.smops.wide"({{.*}}) <{tile_id = 0 : i32}> : (vector<[16]xi1>, vector<[16]xi1>, vector<[16]xi8>, vector<[16]xi8>) -> ()
+func.func @arm_sme_smops_wide_4way_i8i8_to_i32(%vecA: vector<[16]xi8>, %vecB: vector<[16]xi8>) -> vector<[4]x[4]xi32> {
+ %result = arm_sme.smops_wide_4way %vecA, %vecB : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ return %result : vector<[4]x[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: arm_sme_smops_wide_4way_i16i16_to_i64
+// CHECK: "arm_sme.intr.smops.wide"({{.*}}) <{tile_id = 0 : i32}> : (vector<[8]xi1>, vector<[8]xi1>, vector<[8]xi16>, vector<[8]xi16>) -> ()
+func.func @arm_sme_smops_wide_4way_i16i16_to_i64(%vecA: vector<[8]xi16>, %vecB: vector<[8]xi16>) -> vector<[2]x[2]xi64> {
+ %result = arm_sme.smops_wide_4way %vecA, %vecB : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+ return %result : vector<[2]x[2]xi64>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.umopa_wide_4way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+// CHECK-LABEL: arm_sme_umopa_wide_4way_i8i8_to_i32
+// CHECK: "arm_sme.intr.umopa.wide"({{.*}}) <{tile_id = 0 : i32}> : (vector<[16]xi1>, vector<[16]xi1>, vector<[16]xi8>, vector<[16]xi8>) -> ()
+func.func @arm_sme_umopa_wide_4way_i8i8_to_i32(%vecA: vector<[16]xi8>, %vecB: vector<[16]xi8>) -> vector<[4]x[4]xi32> {
+ %result = arm_sme.umopa_wide_4way %vecA, %vecB : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ return %result : vector<[4]x[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: arm_sme_umopa_wide_4way_i16i16_to_i64
+// CHECK: "arm_sme.intr.umopa.wide"({{.*}}) <{tile_id = 0 : i32}> : (vector<[8]xi1>, vector<[8]xi1>, vector<[8]xi16>, vector<[8]xi16>) -> ()
+func.func @arm_sme_umopa_wide_4way_i16i16_to_i64(%vecA: vector<[8]xi16>, %vecB: vector<[8]xi16>) -> vector<[2]x[2]xi64> {
+ %result = arm_sme.umopa_wide_4way %vecA, %vecB : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+ return %result : vector<[2]x[2]xi64>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.umops_wide_4way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+// CHECK-LABEL: arm_sme_umops_wide_4way_i8i8_to_i32
+// CHECK: "arm_sme.intr.umops.wide"({{.*}}) <{tile_id = 0 : i32}> : (vector<[16]xi1>, vector<[16]xi1>, vector<[16]xi8>, vector<[16]xi8>) -> ()
+func.func @arm_sme_umops_wide_4way_i8i8_to_i32(%vecA: vector<[16]xi8>, %vecB: vector<[16]xi8>) -> vector<[4]x[4]xi32> {
+ %result = arm_sme.umops_wide_4way %vecA, %vecB : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ return %result : vector<[4]x[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: arm_sme_umops_wide_4way_i16i16_to_i64
+// CHECK: "arm_sme.intr.umops.wide"({{.*}}) <{tile_id = 0 : i32}> : (vector<[8]xi1>, vector<[8]xi1>, vector<[8]xi16>, vector<[8]xi16>) -> ()
+func.func @arm_sme_umops_wide_4way_i16i16_to_i64(%vecA: vector<[8]xi16>, %vecB: vector<[8]xi16>) -> vector<[2]x[2]xi64> {
+ %result = arm_sme.umops_wide_4way %vecA, %vecB : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+ return %result : vector<[2]x[2]xi64>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.sumopa_wide_4way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+// CHECK-LABEL: arm_sme_sumopa_wide_4way_i8i8_to_i32
+// CHECK: "arm_sme.intr.sumopa.wide"({{.*}}) <{tile_id = 0 : i32}> : (vector<[16]xi1>, vector<[16]xi1>, vector<[16]xi8>, vector<[16]xi8>) -> ()
+func.func @arm_sme_sumopa_wide_4way_i8i8_to_i32(%vecA: vector<[16]xi8>, %vecB: vector<[16]xi8>) -> vector<[4]x[4]xi32> {
+ %result = arm_sme.sumopa_wide_4way %vecA, %vecB : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ return %result : vector<[4]x[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: arm_sme_sumopa_wide_4way_i16i16_to_i64
+// CHECK: "arm_sme.intr.sumopa.wide"({{.*}}) <{tile_id = 0 : i32}> : (vector<[8]xi1>, vector<[8]xi1>, vector<[8]xi16>, vector<[8]xi16>) -> ()
+func.func @arm_sme_sumopa_wide_4way_i16i16_to_i64(%vecA: vector<[8]xi16>, %vecB: vector<[8]xi16>) -> vector<[2]x[2]xi64> {
+ %result = arm_sme.sumopa_wide_4way %vecA, %vecB : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+ return %result : vector<[2]x[2]xi64>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.sumops_wide_4way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+// CHECK-LABEL: arm_sme_sumops_wide_4way_i8i8_to_i32
+// CHECK: "arm_sme.intr.sumops.wide"({{.*}}) <{tile_id = 0 : i32}> : (vector<[16]xi1>, vector<[16]xi1>, vector<[16]xi8>, vector<[16]xi8>) -> ()
+func.func @arm_sme_sumops_wide_4way_i8i8_to_i32(%vecA: vector<[16]xi8>, %vecB: vector<[16]xi8>) -> vector<[4]x[4]xi32> {
+ %result = arm_sme.sumops_wide_4way %vecA, %vecB : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ return %result : vector<[4]x[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: arm_sme_sumops_wide_4way_i16i16_to_i64
+// CHECK: "arm_sme.intr.sumops.wide"({{.*}}) <{tile_id = 0 : i32}> : (vector<[8]xi1>, vector<[8]xi1>, vector<[8]xi16>, vector<[8]xi16>) -> ()
+func.func @arm_sme_sumops_wide_4way_i16i16_to_i64(%vecA: vector<[8]xi16>, %vecB: vector<[8]xi16>) -> vector<[2]x[2]xi64> {
+ %result = arm_sme.sumops_wide_4way %vecA, %vecB : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+ return %result : vector<[2]x[2]xi64>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.usmopa_wide_4way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+// CHECK-LABEL: arm_sme_usmopa_wide_4way_i8i8_to_i32
+// CHECK: "arm_sme.intr.usmopa.wide"({{.*}}) <{tile_id = 0 : i32}> : (vector<[16]xi1>, vector<[16]xi1>, vector<[16]xi8>, vector<[16]xi8>) -> ()
+func.func @arm_sme_usmopa_wide_4way_i8i8_to_i32(%vecA: vector<[16]xi8>, %vecB: vector<[16]xi8>) -> vector<[4]x[4]xi32> {
+  %result = arm_sme.usmopa_wide_4way %vecA, %vecB : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+  return %result : vector<[4]x[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: arm_sme_usmopa_wide_4way_i16i16_to_i64
+// CHECK: "arm_sme.intr.usmopa.wide"({{.*}}) <{tile_id = 0 : i32}> : (vector<[8]xi1>, vector<[8]xi1>, vector<[8]xi16>, vector<[8]xi16>) -> ()
+func.func @arm_sme_usmopa_wide_4way_i16i16_to_i64(%vecA: vector<[8]xi16>, %vecB: vector<[8]xi16>) -> vector<[2]x[2]xi64> {
+  %result = arm_sme.usmopa_wide_4way %vecA, %vecB : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+  return %result : vector<[2]x[2]xi64>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.usmops_wide_4way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+// CHECK-LABEL: arm_sme_usmops_wide_4way_i8i8_to_i32
+// CHECK: "arm_sme.intr.usmops.wide"({{.*}}) <{tile_id = 0 : i32}> : (vector<[16]xi1>, vector<[16]xi1>, vector<[16]xi8>, vector<[16]xi8>) -> ()
+func.func @arm_sme_usmops_wide_4way_i8i8_to_i32(%vecA: vector<[16]xi8>, %vecB: vector<[16]xi8>) -> vector<[4]x[4]xi32> {
+  %result = arm_sme.usmops_wide_4way %vecA, %vecB : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+  return %result : vector<[4]x[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: arm_sme_usmops_wide_4way_i16i16_to_i64
+// CHECK: "arm_sme.intr.usmops.wide"({{.*}}) <{tile_id = 0 : i32}> : (vector<[8]xi1>, vector<[8]xi1>, vector<[8]xi16>, vector<[8]xi16>) -> ()
+func.func @arm_sme_usmops_wide_4way_i16i16_to_i64(%vecA: vector<[8]xi16>, %vecB: vector<[8]xi16>) -> vector<[2]x[2]xi64> {
+  %result = arm_sme.usmops_wide_4way %vecA, %vecB : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+  return %result : vector<[2]x[2]xi64>
+}
diff --git a/mlir/test/Dialect/ArmSME/cse.mlir b/mlir/test/Dialect/ArmSME/cse.mlir
index 74e7293eaeca5fc..2decdb53d8659c1 100644
--- a/mlir/test/Dialect/ArmSME/cse.mlir
+++ b/mlir/test/Dialect/ArmSME/cse.mlir
@@ -1,30 +1,35 @@
-// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='builtin.module(func.func(cse))' | FileCheck %s
+// RUN: mlir-opt %s -cse -split-input-file | FileCheck %s
-// This test is checking that CSE does not remove 'arm_sme.zero/get_tile' ops as
+// These tests check that CSE does not remove 'arm_sme.zero/get_tile' ops as
// duplicates.
// CHECK-LABEL: @zero_tile
// CHECK: %[[TILE_0:.*]] = arm_sme.zero : vector<[4]x[4]xi32>
// CHECK: %[[TILE_1:.*]] = arm_sme.zero : vector<[4]x[4]xi32>
-// CHECK: "prevent.dce"(%[[TILE_0]]) : (vector<[4]x[4]xi32>) -> ()
-// CHECK: "prevent.dce"(%[[TILE_1]]) : (vector<[4]x[4]xi32>) -> ()
func.func @zero_tile() {
%tile_1 = arm_sme.zero : vector<[4]x[4]xi32>
%tile_2 = arm_sme.zero : vector<[4]x[4]xi32>
- "prevent.dce"(%tile_1) : (vector<[4]x[4]xi32>) -> ()
- "prevent.dce"(%tile_2) : (vector<[4]x[4]xi32>) -> ()
return
}
+// -----
+
// CHECK-LABEL: @get_tile
// CHECK: %[[TILE_0:.*]] = arm_sme.get_tile : vector<[4]x[4]xi32>
// CHECK: %[[TILE_1:.*]] = arm_sme.get_tile : vector<[4]x[4]xi32>
-// CHECK: "prevent.dce"(%[[TILE_0]]) : (vector<[4]x[4]xi32>) -> ()
-// CHECK: "prevent.dce"(%[[TILE_1]]) : (vector<[4]x[4]xi32>) -> ()
func.func @get_tile() {
%tile_1 = arm_sme.get_tile : vector<[4]x[4]xi32>
%tile_2 = arm_sme.get_tile : vector<[4]x[4]xi32>
- "prevent.dce"(%tile_1) : (vector<[4]x[4]xi32>) -> ()
- "prevent.dce"(%tile_2) : (vector<[4]x[4]xi32>) -> ()
+ return
+}
+
+// -----
+
+// The operation is pure, so it should be removed if it's trivially dead.
+
+// CHECK-LABEL: @dead_outerproduct
+// CHECK-NOT: arm_sme.outerproduct
+func.func @dead_outerproduct(%lhs : vector<[4]xf32>, %rhs : vector<[4]xf32>) {
+ %0 = arm_sme.outerproduct %lhs, %rhs : vector<[4]xf32>, vector<[4]xf32>
return
}
diff --git a/mlir/test/Dialect/ArmSME/invalid.mlir b/mlir/test/Dialect/ArmSME/invalid.mlir
index 85b95a8b6cf12b7..110ad4d6291deef 100644
--- a/mlir/test/Dialect/ArmSME/invalid.mlir
+++ b/mlir/test/Dialect/ArmSME/invalid.mlir
@@ -173,3 +173,69 @@ func.func @arm_sme_outerproduct__bad_vector_type(%vecA: vector<[4]xf32>, %vecB:
%0 = arm_sme.outerproduct %vecA, %vecB : vector<[4]xf32>, vector<[8]xf32>
return %0 : vector<[4]x[4]xf32>
}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.fmopa_wide_2way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+func.func @arm_sme_fmopa_wide_2way__bad_rhs_vector_type(%vecA: vector<[8]xf16>, %vecB: vector<[4]xf32>) -> vector<[4]x[4]xf32>
+{
+  // expected-error@+1 {{op failed to verify that all of {lhs, rhs} have same type}}
+ %0 = arm_sme.fmopa_wide_2way %vecA, %vecB : vector<[8]xf16>, vector<[4]xf32> into vector<[4]x[4]xf32>
+ return %0 : vector<[4]x[4]xf32>
+}
+
+// -----
+
+func.func @arm_sme_fmopa_wide_2way__bad_lhs_mask_type(%vecA: vector<[8]xf16>, %vecB: vector<[8]xf16>, %maskA : vector<[4]xi1>, %maskB : vector<[8]xi1>) -> vector<[4]x[4]xf32>
+{
+  // expected-note@-2 {{prior use here}}
+  // expected-error@+1 {{use of value '%maskA' expects different type than prior uses: 'vector<[8]xi1>' vs 'vector<[4]xi1>'}}
+ %0 = arm_sme.fmopa_wide_2way %vecA, %vecB masks(%maskA, %maskB) : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
+ return %0 : vector<[4]x[4]xf32>
+}
+
+// -----
+
+func.func @arm_sme_fmopa_wide_2way__bad_rhs_mask_type(%vecA: vector<[8]xf16>, %vecB: vector<[8]xf16>, %maskA : vector<[8]xi1>, %maskB : vector<[4]xi1>) -> vector<[4]x[4]xf32>
+{
+  // expected-note@-2 {{prior use here}}
+  // expected-error@+1 {{use of value '%maskB' expects different type than prior uses: 'vector<[8]xi1>' vs 'vector<[4]xi1>'}}
+ %0 = arm_sme.fmopa_wide_2way %vecA, %vecB masks(%maskA, %maskB) : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
+ return %0 : vector<[4]x[4]xf32>
+}
+
+// -----
+
+func.func @arm_sme_fmopa_wide_2way__no_rhs_mask(%vecA: vector<[8]xf16>, %vecB: vector<[8]xf16>, %maskA : vector<[8]xi1>) -> vector<[4]x[4]xf32>
+{
+  // expected-error@+1 {{op failed to verify that both `lhsMask` and `rhsMask` should be provided or neither}}
+ %0 = arm_sme.fmopa_wide_2way %vecA, %vecB masks(%maskA,) : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
+ return %0 : vector<[4]x[4]xf32>
+}
+
+// -----
+
+func.func @arm_sme_fmopa_wide_2way__bad_acc_type(%vecA: vector<[8]xf16>, %vecB: vector<[8]xf16>) -> vector<[4]x[4]xf32>
+{
+ %acc = arm_sme.zero : vector<[2]x[2]xi64>
+  // expected-note@-1 {{prior use here}}
+  // expected-error@+1 {{use of value '%acc' expects different type than prior uses: 'vector<[4]x[4]xf32>' vs 'vector<[2]x[2]xi64>'}}
+  %0 = arm_sme.fmopa_wide_2way %vecA, %vecB acc(%acc) : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
+ return %0 : vector<[4]x[4]xf32>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.smopa_wide_4way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+func.func @arm_sme_smopa_wide_4way__bad_tile_type(%vecA: vector<[8]xi16>, %vecB: vector<[8]xi16>) -> vector<[4]x[4]xi32>
+{
+  // expected-error@+1 {{op failed to verify that tile element size equals lhs element size * 4}}
+ %0 = arm_sme.smopa_wide_4way %vecA, %vecB : vector<[8]xi16>, vector<[8]xi16> into vector<[4]x[4]xi32>
+ return %0 : vector<[4]x[4]xi32>
+}
diff --git a/mlir/test/Dialect/ArmSME/outer-product-widening.mlir b/mlir/test/Dialect/ArmSME/outer-product-widening.mlir
new file mode 100644
index 000000000000000..c62cedc6a08a555
--- /dev/null
+++ b/mlir/test/Dialect/ArmSME/outer-product-widening.mlir
@@ -0,0 +1,785 @@
+// RUN: mlir-opt %s -arm-sme-outer-product-widening -cse -split-input-file | FileCheck %s
+
+// CHECK-LABEL: @outerproduct_add_widening_2way_f16f16f32
+// CHECK-SAME: %[[A0:.*]]: vector<[4]xf16>, %[[B0:.*]]: vector<[4]xf16>, %[[A1:.*]]: vector<[4]xf16>, %[[B1:.*]]: vector<[4]xf16>,
+// CHECK-SAME: %[[A0_MASK:.*]]: vector<[4]xi1>, %[[B0_MASK:.*]]: vector<[4]xi1>, %[[A1_MASK:.*]]: vector<[4]xi1>, %[[B1_MASK:.*]]: vector<[4]xi1>
+// CHECK-DAG: %[[ACC:.*]] = arith.constant dense<0.000000e+00> : vector<[4]x[4]xf32>
+// CHECK-DAG: %[[VEC_UNDEF:.*]] = llvm.mlir.undef : vector<[8]xf16>
+// CHECK-DAG: %[[A0_INSERT:.*]] = vector.scalable.insert %[[A0]], %[[VEC_UNDEF]][0] : vector<[4]xf16> into vector<[8]xf16>
+// CHECK-DAG: %[[B0_INSERT:.*]] = vector.scalable.insert %[[B0]], %[[VEC_UNDEF]][0] : vector<[4]xf16> into vector<[8]xf16>
+// CHECK-DAG: %[[A1_INSERT:.*]] = vector.scalable.insert %[[A1]], %[[VEC_UNDEF]][0] : vector<[4]xf16> into vector<[8]xf16>
+// CHECK-DAG: %[[B1_INSERT:.*]] = vector.scalable.insert %[[B1]], %[[VEC_UNDEF]][0] : vector<[4]xf16> into vector<[8]xf16>
+// CHECK-DAG: %[[LHS:.*]] = "arm_sve.intr.zip1"(%[[A0_INSERT]], %[[A1_INSERT]]) : (vector<[8]xf16>, vector<[8]xf16>) -> vector<[8]xf16>
+// CHECK-DAG: %[[RHS:.*]] = "arm_sve.intr.zip1"(%[[B0_INSERT]], %[[B1_INSERT]]) : (vector<[8]xf16>, vector<[8]xf16>) -> vector<[8]xf16>
+// CHECK-DAG: %[[MASK_UNDEF:.*]] = llvm.mlir.undef : vector<[8]xi1>
+// CHECK-DAG: %[[A0_MASK_INSERT:.*]] = vector.scalable.insert %[[A0_MASK]], %[[MASK_UNDEF]][0] : vector<[4]xi1> into vector<[8]xi1>
+// CHECK-DAG: %[[B0_MASK_INSERT:.*]] = vector.scalable.insert %[[B0_MASK]], %[[MASK_UNDEF]][0] : vector<[4]xi1> into vector<[8]xi1>
+// CHECK-DAG: %[[A1_MASK_INSERT:.*]] = vector.scalable.insert %[[A1_MASK]], %[[MASK_UNDEF]][0] : vector<[4]xi1> into vector<[8]xi1>
+// CHECK-DAG: %[[B1_MASK_INSERT:.*]] = vector.scalable.insert %[[B1_MASK]], %[[MASK_UNDEF]][0] : vector<[4]xi1> into vector<[8]xi1>
+// CHECK-DAG: %[[LHS_MASK:.*]] = "arm_sve.intr.zip1"(%[[A0_MASK_INSERT]], %[[A1_MASK_INSERT]]) : (vector<[8]xi1>, vector<[8]xi1>) -> vector<[8]xi1>
+// CHECK-DAG: %[[RHS_MASK:.*]] = "arm_sve.intr.zip1"(%[[B0_MASK_INSERT]], %[[B1_MASK_INSERT]]) : (vector<[8]xi1>, vector<[8]xi1>) -> vector<[8]xi1>
+// CHECK-DAG: arm_sme.fmopa_wide_2way %[[LHS]], %[[RHS]] acc(%[[ACC]]) masks(%[[LHS_MASK]], %[[RHS_MASK]]) : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
+func.func @outerproduct_add_widening_2way_f16f16f32(
+ %a0 : vector<[4]xf16>, %b0 : vector<[4]xf16>,
+ %a1 : vector<[4]xf16>, %b1 : vector<[4]xf16>,
+ %a0_mask : vector<[4]xi1>, %b0_mask : vector<[4]xi1>,
+ %a1_mask : vector<[4]xi1>, %b1_mask : vector<[4]xi1>) -> vector<[4]x[4]xf32> {
+ %a0_ext = arith.extf %a0 : vector<[4]xf16> to vector<[4]xf32>
+ %b0_ext = arith.extf %b0 : vector<[4]xf16> to vector<[4]xf32>
+ %a1_ext = arith.extf %a1 : vector<[4]xf16> to vector<[4]xf32>
+ %b1_ext = arith.extf %b1 : vector<[4]xf16> to vector<[4]xf32>
+
+ %acc = arith.constant dense<0.0> : vector<[4]x[4]xf32>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext acc(%acc) masks(%a0_mask, %b0_mask) : vector<[4]xf32>, vector<[4]xf32>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext acc(%0) masks(%a1_mask, %b1_mask) : vector<[4]xf32>, vector<[4]xf32>
+
+ return %1 : vector<[4]x[4]xf32>
+}
+
+// -----
+
+// CHECK-LABEL: @outerproduct_sub_widening_2way_f16f16f32
+// CHECK: arm_sme.fmops_wide_2way %{{.*}}, %{{.*}} acc(%{{.*}}) masks(%{{.*}}, %{{.*}}) : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
+func.func @outerproduct_sub_widening_2way_f16f16f32(
+ %a0 : vector<[4]xf16>, %b0 : vector<[4]xf16>,
+ %a1 : vector<[4]xf16>, %b1 : vector<[4]xf16>,
+ %a0_mask : vector<[4]xi1>, %b0_mask : vector<[4]xi1>,
+ %a1_mask : vector<[4]xi1>, %b1_mask : vector<[4]xi1>) -> vector<[4]x[4]xf32> {
+ %a0_ext = arith.extf %a0 : vector<[4]xf16> to vector<[4]xf32>
+ %b0_ext = arith.extf %b0 : vector<[4]xf16> to vector<[4]xf32>
+ %a1_ext = arith.extf %a1 : vector<[4]xf16> to vector<[4]xf32>
+ %b1_ext = arith.extf %b1 : vector<[4]xf16> to vector<[4]xf32>
+
+ %acc = arith.constant dense<0.0> : vector<[4]x[4]xf32>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext kind<sub> acc(%acc) masks(%a0_mask, %b0_mask) : vector<[4]xf32>, vector<[4]xf32>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext kind<sub> acc(%0) masks(%a1_mask, %b1_mask) : vector<[4]xf32>, vector<[4]xf32>
+
+ return %1 : vector<[4]x[4]xf32>
+}
+
+// -----
+
+// CHECK-LABEL: @outerproduct_add_widening_2way_bf16bf16f32
+// CHECK: arm_sme.fmopa_wide_2way %{{.*}}, %{{.*}} acc(%{{.*}}) masks(%{{.*}}, %{{.*}}) : vector<[8]xbf16>, vector<[8]xbf16> into vector<[4]x[4]xf32>
+func.func @outerproduct_add_widening_2way_bf16bf16f32(
+ %a0 : vector<[4]xbf16>, %b0 : vector<[4]xbf16>,
+ %a1 : vector<[4]xbf16>, %b1 : vector<[4]xbf16>,
+ %a0_mask : vector<[4]xi1>, %b0_mask : vector<[4]xi1>,
+ %a1_mask : vector<[4]xi1>, %b1_mask : vector<[4]xi1>) -> vector<[4]x[4]xf32> {
+ %a0_ext = arith.extf %a0 : vector<[4]xbf16> to vector<[4]xf32>
+ %b0_ext = arith.extf %b0 : vector<[4]xbf16> to vector<[4]xf32>
+ %a1_ext = arith.extf %a1 : vector<[4]xbf16> to vector<[4]xf32>
+ %b1_ext = arith.extf %b1 : vector<[4]xbf16> to vector<[4]xf32>
+
+ %acc = arith.constant dense<0.0> : vector<[4]x[4]xf32>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext acc(%acc) masks(%a0_mask, %b0_mask) : vector<[4]xf32>, vector<[4]xf32>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext acc(%0) masks(%a1_mask, %b1_mask) : vector<[4]xf32>, vector<[4]xf32>
+
+ return %1 : vector<[4]x[4]xf32>
+}
+
+// -----
+
+// CHECK-LABEL: @outerproduct_sub_widening_2way_bf16bf16f32
+// CHECK: arm_sme.fmops_wide_2way %{{.*}}, %{{.*}} acc(%{{.*}}) masks(%{{.*}}, %{{.*}}) : vector<[8]xbf16>, vector<[8]xbf16> into vector<[4]x[4]xf32>
+func.func @outerproduct_sub_widening_2way_bf16bf16f32(
+ %a0 : vector<[4]xbf16>, %b0 : vector<[4]xbf16>,
+ %a1 : vector<[4]xbf16>, %b1 : vector<[4]xbf16>,
+ %a0_mask : vector<[4]xi1>, %b0_mask : vector<[4]xi1>,
+ %a1_mask : vector<[4]xi1>, %b1_mask : vector<[4]xi1>) -> vector<[4]x[4]xf32> {
+ %a0_ext = arith.extf %a0 : vector<[4]xbf16> to vector<[4]xf32>
+ %b0_ext = arith.extf %b0 : vector<[4]xbf16> to vector<[4]xf32>
+ %a1_ext = arith.extf %a1 : vector<[4]xbf16> to vector<[4]xf32>
+ %b1_ext = arith.extf %b1 : vector<[4]xbf16> to vector<[4]xf32>
+
+ %acc = arith.constant dense<0.0> : vector<[4]x[4]xf32>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext kind<sub> acc(%acc) masks(%a0_mask, %b0_mask) : vector<[4]xf32>, vector<[4]xf32>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext kind<sub> acc(%0) masks(%a1_mask, %b1_mask) : vector<[4]xf32>, vector<[4]xf32>
+
+ return %1 : vector<[4]x[4]xf32>
+}
+
+// -----
+
+// CHECK-LABEL: @outerproduct_add_widening_2way_signed_i16i16i32
+// CHECK: arm_sme.smopa_wide_2way %{{.*}}, %{{.*}} acc(%{{.*}}) masks(%{{.*}}, %{{.*}}) : vector<[8]xi16>, vector<[8]xi16> into vector<[4]x[4]xi32>
+func.func @outerproduct_add_widening_2way_signed_i16i16i32(
+ %a0 : vector<[4]xi16>, %b0 : vector<[4]xi16>,
+ %a1 : vector<[4]xi16>, %b1 : vector<[4]xi16>,
+ %a0_mask : vector<[4]xi1>, %b0_mask : vector<[4]xi1>,
+ %a1_mask : vector<[4]xi1>, %b1_mask : vector<[4]xi1>) -> vector<[4]x[4]xi32> {
+ %a0_ext = arith.extsi %a0 : vector<[4]xi16> to vector<[4]xi32>
+ %b0_ext = arith.extsi %b0 : vector<[4]xi16> to vector<[4]xi32>
+ %a1_ext = arith.extsi %a1 : vector<[4]xi16> to vector<[4]xi32>
+ %b1_ext = arith.extsi %b1 : vector<[4]xi16> to vector<[4]xi32>
+
+ %acc = arith.constant dense<0> : vector<[4]x[4]xi32>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext acc(%acc) masks(%a0_mask, %b0_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext acc(%0) masks(%a1_mask, %b1_mask) : vector<[4]xi32>, vector<[4]xi32>
+
+ return %1 : vector<[4]x[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: @outerproduct_sub_widening_2way_signed_i16i16i32
+// CHECK: arm_sme.smops_wide_2way %{{.*}}, %{{.*}} acc(%{{.*}}) masks(%{{.*}}, %{{.*}}) : vector<[8]xi16>, vector<[8]xi16> into vector<[4]x[4]xi32>
+func.func @outerproduct_sub_widening_2way_signed_i16i16i32(
+ %a0 : vector<[4]xi16>, %b0 : vector<[4]xi16>,
+ %a1 : vector<[4]xi16>, %b1 : vector<[4]xi16>,
+ %a0_mask : vector<[4]xi1>, %b0_mask : vector<[4]xi1>,
+ %a1_mask : vector<[4]xi1>, %b1_mask : vector<[4]xi1>) -> vector<[4]x[4]xi32> {
+ %a0_ext = arith.extsi %a0 : vector<[4]xi16> to vector<[4]xi32>
+ %b0_ext = arith.extsi %b0 : vector<[4]xi16> to vector<[4]xi32>
+ %a1_ext = arith.extsi %a1 : vector<[4]xi16> to vector<[4]xi32>
+ %b1_ext = arith.extsi %b1 : vector<[4]xi16> to vector<[4]xi32>
+
+ %acc = arith.constant dense<0> : vector<[4]x[4]xi32>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext kind<sub> acc(%acc) masks(%a0_mask, %b0_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext kind<sub> acc(%0) masks(%a1_mask, %b1_mask) : vector<[4]xi32>, vector<[4]xi32>
+
+ return %1 : vector<[4]x[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: @outerproduct_add_widening_2way_unsigned_i16i16i32
+// CHECK: arm_sme.umopa_wide_2way %{{.*}}, %{{.*}} acc(%{{.*}}) masks(%{{.*}}, %{{.*}}) : vector<[8]xi16>, vector<[8]xi16> into vector<[4]x[4]xi32>
+func.func @outerproduct_add_widening_2way_unsigned_i16i16i32(
+ %a0 : vector<[4]xi16>, %b0 : vector<[4]xi16>,
+ %a1 : vector<[4]xi16>, %b1 : vector<[4]xi16>,
+ %a0_mask : vector<[4]xi1>, %b0_mask : vector<[4]xi1>,
+ %a1_mask : vector<[4]xi1>, %b1_mask : vector<[4]xi1>) -> vector<[4]x[4]xi32> {
+ %a0_ext = arith.extui %a0 : vector<[4]xi16> to vector<[4]xi32>
+ %b0_ext = arith.extui %b0 : vector<[4]xi16> to vector<[4]xi32>
+ %a1_ext = arith.extui %a1 : vector<[4]xi16> to vector<[4]xi32>
+ %b1_ext = arith.extui %b1 : vector<[4]xi16> to vector<[4]xi32>
+
+ %acc = arith.constant dense<0> : vector<[4]x[4]xi32>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext acc(%acc) masks(%a0_mask, %b0_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext acc(%0) masks(%a1_mask, %b1_mask) : vector<[4]xi32>, vector<[4]xi32>
+
+ return %1 : vector<[4]x[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: @outerproduct_sub_widening_2way_unsigned_i16i16i32
+// CHECK: arm_sme.umops_wide_2way %{{.*}}, %{{.*}} acc(%{{.*}}) masks(%{{.*}}, %{{.*}}) : vector<[8]xi16>, vector<[8]xi16> into vector<[4]x[4]xi32>
+func.func @outerproduct_sub_widening_2way_unsigned_i16i16i32(
+ %a0 : vector<[4]xi16>, %b0 : vector<[4]xi16>,
+ %a1 : vector<[4]xi16>, %b1 : vector<[4]xi16>,
+ %a0_mask : vector<[4]xi1>, %b0_mask : vector<[4]xi1>,
+ %a1_mask : vector<[4]xi1>, %b1_mask : vector<[4]xi1>) -> vector<[4]x[4]xi32> {
+ %a0_ext = arith.extui %a0 : vector<[4]xi16> to vector<[4]xi32>
+ %b0_ext = arith.extui %b0 : vector<[4]xi16> to vector<[4]xi32>
+ %a1_ext = arith.extui %a1 : vector<[4]xi16> to vector<[4]xi32>
+ %b1_ext = arith.extui %b1 : vector<[4]xi16> to vector<[4]xi32>
+
+ %acc = arith.constant dense<0> : vector<[4]x[4]xi32>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext kind<sub> acc(%acc) masks(%a0_mask, %b0_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext kind<sub> acc(%0) masks(%a1_mask, %b1_mask) : vector<[4]xi32>, vector<[4]xi32>
+
+ return %1 : vector<[4]x[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: @outerproduct_add_widening_4way_signed_i8i8i32
+// CHECK-SAME: %[[A0:.*]]: vector<[4]xi8>, %[[B0:.*]]: vector<[4]xi8>, %[[A1:.*]]: vector<[4]xi8>, %[[B1:.*]]: vector<[4]xi8>, %[[A2:.*]]: vector<[4]xi8>, %[[B2:.*]]: vector<[4]xi8>, %[[A3:.*]]: vector<[4]xi8>, %[[B3:.*]]: vector<[4]xi8>,
+// CHECK-SAME: %[[A0_MASK:.*]]: vector<[4]xi1>, %[[B0_MASK:.*]]: vector<[4]xi1>, %[[A1_MASK:.*]]: vector<[4]xi1>, %[[B1_MASK:.*]]: vector<[4]xi1>, %[[A2_MASK:.*]]: vector<[4]xi1>, %[[B2_MASK:.*]]: vector<[4]xi1>, %[[A3_MASK:.*]]: vector<[4]xi1>, %[[B3_MASK:.*]]: vector<[4]xi1>
+// CHECK-DAG: %[[ACC:.*]] = arith.constant dense<0> : vector<[4]x[4]xi32>
+// CHECK-DAG: %[[VEC_UNDEF:.*]] = llvm.mlir.undef : vector<[16]xi8>
+// CHECK-DAG: %[[A0_INSERT:.*]] = vector.scalable.insert %[[A0]], %[[VEC_UNDEF]][0] : vector<[4]xi8> into vector<[16]xi8>
+// CHECK-DAG: %[[B0_INSERT:.*]] = vector.scalable.insert %[[B0]], %[[VEC_UNDEF]][0] : vector<[4]xi8> into vector<[16]xi8>
+// CHECK-DAG: %[[A1_INSERT:.*]] = vector.scalable.insert %[[A1]], %[[VEC_UNDEF]][0] : vector<[4]xi8> into vector<[16]xi8>
+// CHECK-DAG: %[[B1_INSERT:.*]] = vector.scalable.insert %[[B1]], %[[VEC_UNDEF]][0] : vector<[4]xi8> into vector<[16]xi8>
+// CHECK-DAG: %[[A2_INSERT:.*]] = vector.scalable.insert %[[A2]], %[[VEC_UNDEF]][0] : vector<[4]xi8> into vector<[16]xi8>
+// CHECK-DAG: %[[B2_INSERT:.*]] = vector.scalable.insert %[[B2]], %[[VEC_UNDEF]][0] : vector<[4]xi8> into vector<[16]xi8>
+// CHECK-DAG: %[[A3_INSERT:.*]] = vector.scalable.insert %[[A3]], %[[VEC_UNDEF]][0] : vector<[4]xi8> into vector<[16]xi8>
+// CHECK-DAG: %[[B3_INSERT:.*]] = vector.scalable.insert %[[B3]], %[[VEC_UNDEF]][0] : vector<[4]xi8> into vector<[16]xi8>
+// CHECK-DAG: %[[LHS0:.*]] = "arm_sve.intr.zip1"(%[[A0_INSERT]], %[[A2_INSERT]]) : (vector<[16]xi8>, vector<[16]xi8>) -> vector<[16]xi8>
+// CHECK-DAG: %[[LHS1:.*]] = "arm_sve.intr.zip1"(%[[A1_INSERT]], %[[A3_INSERT]]) : (vector<[16]xi8>, vector<[16]xi8>) -> vector<[16]xi8>
+// CHECK-DAG: %[[RHS0:.*]] = "arm_sve.intr.zip1"(%[[B0_INSERT]], %[[B2_INSERT]]) : (vector<[16]xi8>, vector<[16]xi8>) -> vector<[16]xi8>
+// CHECK-DAG: %[[RHS1:.*]] = "arm_sve.intr.zip1"(%[[B1_INSERT]], %[[B3_INSERT]]) : (vector<[16]xi8>, vector<[16]xi8>) -> vector<[16]xi8>
+// CHECK-DAG: %[[LHS:.*]] = "arm_sve.intr.zip1"(%[[LHS0]], %[[LHS1]]) : (vector<[16]xi8>, vector<[16]xi8>) -> vector<[16]xi8>
+// CHECK-DAG: %[[RHS:.*]] = "arm_sve.intr.zip1"(%[[RHS0]], %[[RHS1]]) : (vector<[16]xi8>, vector<[16]xi8>) -> vector<[16]xi8>
+// CHECK-DAG: %[[MASK_UNDEF:.*]] = llvm.mlir.undef : vector<[16]xi1>
+// CHECK-DAG: %[[A0_MASK_INSERT:.*]] = vector.scalable.insert %[[A0_MASK]], %[[MASK_UNDEF]][0] : vector<[4]xi1> into vector<[16]xi1>
+// CHECK-DAG: %[[B0_MASK_INSERT:.*]] = vector.scalable.insert %[[B0_MASK]], %[[MASK_UNDEF]][0] : vector<[4]xi1> into vector<[16]xi1>
+// CHECK-DAG: %[[A1_MASK_INSERT:.*]] = vector.scalable.insert %[[A1_MASK]], %[[MASK_UNDEF]][0] : vector<[4]xi1> into vector<[16]xi1>
+// CHECK-DAG: %[[B1_MASK_INSERT:.*]] = vector.scalable.insert %[[B1_MASK]], %[[MASK_UNDEF]][0] : vector<[4]xi1> into vector<[16]xi1>
+// CHECK-DAG: %[[A2_MASK_INSERT:.*]] = vector.scalable.insert %[[A2_MASK]], %[[MASK_UNDEF]][0] : vector<[4]xi1> into vector<[16]xi1>
+// CHECK-DAG: %[[B2_MASK_INSERT:.*]] = vector.scalable.insert %[[B2_MASK]], %[[MASK_UNDEF]][0] : vector<[4]xi1> into vector<[16]xi1>
+// CHECK-DAG: %[[A3_MASK_INSERT:.*]] = vector.scalable.insert %[[A3_MASK]], %[[MASK_UNDEF]][0] : vector<[4]xi1> into vector<[16]xi1>
+// CHECK-DAG: %[[B3_MASK_INSERT:.*]] = vector.scalable.insert %[[B3_MASK]], %[[MASK_UNDEF]][0] : vector<[4]xi1> into vector<[16]xi1>
+// CHECK-DAG: %[[LHS0_MASK:.*]] = "arm_sve.intr.zip1"(%[[A0_MASK_INSERT]], %[[A2_MASK_INSERT]]) : (vector<[16]xi1>, vector<[16]xi1>) -> vector<[16]xi1>
+// CHECK-DAG: %[[LHS1_MASK:.*]] = "arm_sve.intr.zip1"(%[[A1_MASK_INSERT]], %[[A3_MASK_INSERT]]) : (vector<[16]xi1>, vector<[16]xi1>) -> vector<[16]xi1>
+// CHECK-DAG: %[[RHS0_MASK:.*]] = "arm_sve.intr.zip1"(%[[B0_MASK_INSERT]], %[[B2_MASK_INSERT]]) : (vector<[16]xi1>, vector<[16]xi1>) -> vector<[16]xi1>
+// CHECK-DAG: %[[RHS1_MASK:.*]] = "arm_sve.intr.zip1"(%[[B1_MASK_INSERT]], %[[B3_MASK_INSERT]]) : (vector<[16]xi1>, vector<[16]xi1>) -> vector<[16]xi1>
+// CHECK-DAG: %[[LHS_MASK:.*]] = "arm_sve.intr.zip1"(%[[LHS0_MASK]], %[[LHS1_MASK]]) : (vector<[16]xi1>, vector<[16]xi1>) -> vector<[16]xi1>
+// CHECK-DAG: %[[RHS_MASK:.*]] = "arm_sve.intr.zip1"(%[[RHS0_MASK]], %[[RHS1_MASK]]) : (vector<[16]xi1>, vector<[16]xi1>) -> vector<[16]xi1>
+// CHECK-DAG: arm_sme.smopa_wide_4way %[[LHS]], %[[RHS]] acc(%[[ACC]]) masks(%[[LHS_MASK]], %[[RHS_MASK]]) : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+func.func @outerproduct_add_widening_4way_signed_i8i8i32(
+ %a0 : vector<[4]xi8>, %b0 : vector<[4]xi8>,
+ %a1 : vector<[4]xi8>, %b1 : vector<[4]xi8>,
+ %a2 : vector<[4]xi8>, %b2 : vector<[4]xi8>,
+ %a3 : vector<[4]xi8>, %b3 : vector<[4]xi8>,
+ %a0_mask : vector<[4]xi1>, %b0_mask : vector<[4]xi1>,
+ %a1_mask : vector<[4]xi1>, %b1_mask : vector<[4]xi1>,
+ %a2_mask : vector<[4]xi1>, %b2_mask : vector<[4]xi1>,
+ %a3_mask : vector<[4]xi1>, %b3_mask : vector<[4]xi1>) -> vector<[4]x[4]xi32> {
+ %a0_ext = arith.extsi %a0 : vector<[4]xi8> to vector<[4]xi32>
+ %b0_ext = arith.extsi %b0 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a1_ext = arith.extsi %a1 : vector<[4]xi8> to vector<[4]xi32>
+ %b1_ext = arith.extsi %b1 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a2_ext = arith.extsi %a2 : vector<[4]xi8> to vector<[4]xi32>
+ %b2_ext = arith.extsi %b2 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a3_ext = arith.extsi %a3 : vector<[4]xi8> to vector<[4]xi32>
+ %b3_ext = arith.extsi %b3 : vector<[4]xi8> to vector<[4]xi32>
+
+ %acc = arith.constant dense<0> : vector<[4]x[4]xi32>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext acc(%acc) masks(%a0_mask, %b0_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext acc(%0) masks(%a1_mask, %b1_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %2 = arm_sme.outerproduct %a2_ext, %b2_ext acc(%1) masks(%a2_mask, %b2_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %3 = arm_sme.outerproduct %a3_ext, %b3_ext acc(%2) masks(%a3_mask, %b3_mask) : vector<[4]xi32>, vector<[4]xi32>
+
+ return %3 : vector<[4]x[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: @outerproduct_sub_widening_4way_signed_i8i8i32
+// CHECK: arm_sme.smops_wide_4way %{{.*}}, %{{.*}} acc(%{{.*}}) masks(%{{.*}}, %{{.*}}) : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+func.func @outerproduct_sub_widening_4way_signed_i8i8i32(
+ %a0 : vector<[4]xi8>, %b0 : vector<[4]xi8>,
+ %a1 : vector<[4]xi8>, %b1 : vector<[4]xi8>,
+ %a2 : vector<[4]xi8>, %b2 : vector<[4]xi8>,
+ %a3 : vector<[4]xi8>, %b3 : vector<[4]xi8>,
+ %a0_mask : vector<[4]xi1>, %b0_mask : vector<[4]xi1>,
+ %a1_mask : vector<[4]xi1>, %b1_mask : vector<[4]xi1>,
+ %a2_mask : vector<[4]xi1>, %b2_mask : vector<[4]xi1>,
+ %a3_mask : vector<[4]xi1>, %b3_mask : vector<[4]xi1>) -> vector<[4]x[4]xi32> {
+ %a0_ext = arith.extsi %a0 : vector<[4]xi8> to vector<[4]xi32>
+ %b0_ext = arith.extsi %b0 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a1_ext = arith.extsi %a1 : vector<[4]xi8> to vector<[4]xi32>
+ %b1_ext = arith.extsi %b1 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a2_ext = arith.extsi %a2 : vector<[4]xi8> to vector<[4]xi32>
+ %b2_ext = arith.extsi %b2 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a3_ext = arith.extsi %a3 : vector<[4]xi8> to vector<[4]xi32>
+ %b3_ext = arith.extsi %b3 : vector<[4]xi8> to vector<[4]xi32>
+
+ %acc = arith.constant dense<0> : vector<[4]x[4]xi32>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext kind<sub> acc(%acc) masks(%a0_mask, %b0_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext kind<sub> acc(%0) masks(%a1_mask, %b1_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %2 = arm_sme.outerproduct %a2_ext, %b2_ext kind<sub> acc(%1) masks(%a2_mask, %b2_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %3 = arm_sme.outerproduct %a3_ext, %b3_ext kind<sub> acc(%2) masks(%a3_mask, %b3_mask) : vector<[4]xi32>, vector<[4]xi32>
+
+ return %3 : vector<[4]x[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: @outerproduct_add_widening_4way_signed_i16i16i64
+// CHECK: arm_sme.smopa_wide_4way %{{.*}}, %{{.*}} acc(%{{.*}}) masks(%{{.*}}, %{{.*}}) : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+func.func @outerproduct_add_widening_4way_signed_i16i16i64(
+ %a0 : vector<[2]xi16>, %b0 : vector<[2]xi16>,
+ %a1 : vector<[2]xi16>, %b1 : vector<[2]xi16>,
+ %a2 : vector<[2]xi16>, %b2 : vector<[2]xi16>,
+ %a3 : vector<[2]xi16>, %b3 : vector<[2]xi16>,
+ %a0_mask : vector<[2]xi1>, %b0_mask : vector<[2]xi1>,
+ %a1_mask : vector<[2]xi1>, %b1_mask : vector<[2]xi1>,
+ %a2_mask : vector<[2]xi1>, %b2_mask : vector<[2]xi1>,
+ %a3_mask : vector<[2]xi1>, %b3_mask : vector<[2]xi1>) -> vector<[2]x[2]xi64> {
+ %a0_ext = arith.extsi %a0 : vector<[2]xi16> to vector<[2]xi64>
+ %b0_ext = arith.extsi %b0 : vector<[2]xi16> to vector<[2]xi64>
+
+ %a1_ext = arith.extsi %a1 : vector<[2]xi16> to vector<[2]xi64>
+ %b1_ext = arith.extsi %b1 : vector<[2]xi16> to vector<[2]xi64>
+
+ %a2_ext = arith.extsi %a2 : vector<[2]xi16> to vector<[2]xi64>
+ %b2_ext = arith.extsi %b2 : vector<[2]xi16> to vector<[2]xi64>
+
+ %a3_ext = arith.extsi %a3 : vector<[2]xi16> to vector<[2]xi64>
+ %b3_ext = arith.extsi %b3 : vector<[2]xi16> to vector<[2]xi64>
+
+ %acc = arith.constant dense<0> : vector<[2]x[2]xi64>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext acc(%acc) masks(%a0_mask, %b0_mask) : vector<[2]xi64>, vector<[2]xi64>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext acc(%0) masks(%a1_mask, %b1_mask) : vector<[2]xi64>, vector<[2]xi64>
+ %2 = arm_sme.outerproduct %a2_ext, %b2_ext acc(%1) masks(%a2_mask, %b2_mask) : vector<[2]xi64>, vector<[2]xi64>
+ %3 = arm_sme.outerproduct %a3_ext, %b3_ext acc(%2) masks(%a3_mask, %b3_mask) : vector<[2]xi64>, vector<[2]xi64>
+
+ return %3 : vector<[2]x[2]xi64>
+}
+
+// -----
+
+// CHECK-LABEL: @outerproduct_sub_widening_4way_signed_i16i16i64
+// CHECK: arm_sme.smops_wide_4way %{{.*}}, %{{.*}} acc(%{{.*}}) masks(%{{.*}}, %{{.*}}) : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+func.func @outerproduct_sub_widening_4way_signed_i16i16i64(
+ %a0 : vector<[2]xi16>, %b0 : vector<[2]xi16>,
+ %a1 : vector<[2]xi16>, %b1 : vector<[2]xi16>,
+ %a2 : vector<[2]xi16>, %b2 : vector<[2]xi16>,
+ %a3 : vector<[2]xi16>, %b3 : vector<[2]xi16>,
+ %a0_mask : vector<[2]xi1>, %b0_mask : vector<[2]xi1>,
+ %a1_mask : vector<[2]xi1>, %b1_mask : vector<[2]xi1>,
+ %a2_mask : vector<[2]xi1>, %b2_mask : vector<[2]xi1>,
+ %a3_mask : vector<[2]xi1>, %b3_mask : vector<[2]xi1>) -> vector<[2]x[2]xi64> {
+ %a0_ext = arith.extsi %a0 : vector<[2]xi16> to vector<[2]xi64>
+ %b0_ext = arith.extsi %b0 : vector<[2]xi16> to vector<[2]xi64>
+
+ %a1_ext = arith.extsi %a1 : vector<[2]xi16> to vector<[2]xi64>
+ %b1_ext = arith.extsi %b1 : vector<[2]xi16> to vector<[2]xi64>
+
+ %a2_ext = arith.extsi %a2 : vector<[2]xi16> to vector<[2]xi64>
+ %b2_ext = arith.extsi %b2 : vector<[2]xi16> to vector<[2]xi64>
+
+ %a3_ext = arith.extsi %a3 : vector<[2]xi16> to vector<[2]xi64>
+ %b3_ext = arith.extsi %b3 : vector<[2]xi16> to vector<[2]xi64>
+
+ %acc = arith.constant dense<0> : vector<[2]x[2]xi64>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext kind<sub> acc(%acc) masks(%a0_mask, %b0_mask) : vector<[2]xi64>, vector<[2]xi64>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext kind<sub> acc(%0) masks(%a1_mask, %b1_mask) : vector<[2]xi64>, vector<[2]xi64>
+ %2 = arm_sme.outerproduct %a2_ext, %b2_ext kind<sub> acc(%1) masks(%a2_mask, %b2_mask) : vector<[2]xi64>, vector<[2]xi64>
+ %3 = arm_sme.outerproduct %a3_ext, %b3_ext kind<sub> acc(%2) masks(%a3_mask, %b3_mask) : vector<[2]xi64>, vector<[2]xi64>
+
+ return %3 : vector<[2]x[2]xi64>
+}
+
+// -----
+
+// CHECK-LABEL: @outerproduct_add_widening_4way_unsigned_i8i8i32
+// CHECK: arm_sme.umopa_wide_4way %{{.*}}, %{{.*}} acc(%{{.*}}) masks(%{{.*}}, %{{.*}}) : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+func.func @outerproduct_add_widening_4way_unsigned_i8i8i32(
+ %a0 : vector<[4]xi8>, %b0 : vector<[4]xi8>,
+ %a1 : vector<[4]xi8>, %b1 : vector<[4]xi8>,
+ %a2 : vector<[4]xi8>, %b2 : vector<[4]xi8>,
+ %a3 : vector<[4]xi8>, %b3 : vector<[4]xi8>,
+ %a0_mask : vector<[4]xi1>, %b0_mask : vector<[4]xi1>,
+ %a1_mask : vector<[4]xi1>, %b1_mask : vector<[4]xi1>,
+ %a2_mask : vector<[4]xi1>, %b2_mask : vector<[4]xi1>,
+ %a3_mask : vector<[4]xi1>, %b3_mask : vector<[4]xi1>) -> vector<[4]x[4]xi32> {
+ %a0_ext = arith.extui %a0 : vector<[4]xi8> to vector<[4]xi32>
+ %b0_ext = arith.extui %b0 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a1_ext = arith.extui %a1 : vector<[4]xi8> to vector<[4]xi32>
+ %b1_ext = arith.extui %b1 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a2_ext = arith.extui %a2 : vector<[4]xi8> to vector<[4]xi32>
+ %b2_ext = arith.extui %b2 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a3_ext = arith.extui %a3 : vector<[4]xi8> to vector<[4]xi32>
+ %b3_ext = arith.extui %b3 : vector<[4]xi8> to vector<[4]xi32>
+
+ %acc = arith.constant dense<0> : vector<[4]x[4]xi32>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext acc(%acc) masks(%a0_mask, %b0_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext acc(%0) masks(%a1_mask, %b1_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %2 = arm_sme.outerproduct %a2_ext, %b2_ext acc(%1) masks(%a2_mask, %b2_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %3 = arm_sme.outerproduct %a3_ext, %b3_ext acc(%2) masks(%a3_mask, %b3_mask) : vector<[4]xi32>, vector<[4]xi32>
+
+ return %3 : vector<[4]x[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: @outerproduct_sub_widening_4way_unsigned_i8i8i32
+// CHECK: arm_sme.umops_wide_4way %{{.*}}, %{{.*}} acc(%{{.*}}) masks(%{{.*}}, %{{.*}}) : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+func.func @outerproduct_sub_widening_4way_unsigned_i8i8i32(
+ %a0 : vector<[4]xi8>, %b0 : vector<[4]xi8>,
+ %a1 : vector<[4]xi8>, %b1 : vector<[4]xi8>,
+ %a2 : vector<[4]xi8>, %b2 : vector<[4]xi8>,
+ %a3 : vector<[4]xi8>, %b3 : vector<[4]xi8>,
+ %a0_mask : vector<[4]xi1>, %b0_mask : vector<[4]xi1>,
+ %a1_mask : vector<[4]xi1>, %b1_mask : vector<[4]xi1>,
+ %a2_mask : vector<[4]xi1>, %b2_mask : vector<[4]xi1>,
+ %a3_mask : vector<[4]xi1>, %b3_mask : vector<[4]xi1>) -> vector<[4]x[4]xi32> {
+ %a0_ext = arith.extui %a0 : vector<[4]xi8> to vector<[4]xi32>
+ %b0_ext = arith.extui %b0 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a1_ext = arith.extui %a1 : vector<[4]xi8> to vector<[4]xi32>
+ %b1_ext = arith.extui %b1 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a2_ext = arith.extui %a2 : vector<[4]xi8> to vector<[4]xi32>
+ %b2_ext = arith.extui %b2 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a3_ext = arith.extui %a3 : vector<[4]xi8> to vector<[4]xi32>
+ %b3_ext = arith.extui %b3 : vector<[4]xi8> to vector<[4]xi32>
+
+ %acc = arith.constant dense<0> : vector<[4]x[4]xi32>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext kind<sub> acc(%acc) masks(%a0_mask, %b0_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext kind<sub> acc(%0) masks(%a1_mask, %b1_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %2 = arm_sme.outerproduct %a2_ext, %b2_ext kind<sub> acc(%1) masks(%a2_mask, %b2_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %3 = arm_sme.outerproduct %a3_ext, %b3_ext kind<sub> acc(%2) masks(%a3_mask, %b3_mask) : vector<[4]xi32>, vector<[4]xi32>
+
+ return %3 : vector<[4]x[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: @outerproduct_add_widening_4way_unsigned_i16i16i64
+// CHECK: arm_sme.umopa_wide_4way %{{.*}}, %{{.*}} acc(%{{.*}}) masks(%{{.*}}, %{{.*}}) : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+func.func @outerproduct_add_widening_4way_unsigned_i16i16i64(
+ %a0 : vector<[2]xi16>, %b0 : vector<[2]xi16>,
+ %a1 : vector<[2]xi16>, %b1 : vector<[2]xi16>,
+ %a2 : vector<[2]xi16>, %b2 : vector<[2]xi16>,
+ %a3 : vector<[2]xi16>, %b3 : vector<[2]xi16>,
+ %a0_mask : vector<[2]xi1>, %b0_mask : vector<[2]xi1>,
+ %a1_mask : vector<[2]xi1>, %b1_mask : vector<[2]xi1>,
+ %a2_mask : vector<[2]xi1>, %b2_mask : vector<[2]xi1>,
+ %a3_mask : vector<[2]xi1>, %b3_mask : vector<[2]xi1>) -> vector<[2]x[2]xi64> {
+ %a0_ext = arith.extui %a0 : vector<[2]xi16> to vector<[2]xi64>
+ %b0_ext = arith.extui %b0 : vector<[2]xi16> to vector<[2]xi64>
+
+ %a1_ext = arith.extui %a1 : vector<[2]xi16> to vector<[2]xi64>
+ %b1_ext = arith.extui %b1 : vector<[2]xi16> to vector<[2]xi64>
+
+ %a2_ext = arith.extui %a2 : vector<[2]xi16> to vector<[2]xi64>
+ %b2_ext = arith.extui %b2 : vector<[2]xi16> to vector<[2]xi64>
+
+ %a3_ext = arith.extui %a3 : vector<[2]xi16> to vector<[2]xi64>
+ %b3_ext = arith.extui %b3 : vector<[2]xi16> to vector<[2]xi64>
+
+ %acc = arith.constant dense<0> : vector<[2]x[2]xi64>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext acc(%acc) masks(%a0_mask, %b0_mask) : vector<[2]xi64>, vector<[2]xi64>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext acc(%0) masks(%a1_mask, %b1_mask) : vector<[2]xi64>, vector<[2]xi64>
+ %2 = arm_sme.outerproduct %a2_ext, %b2_ext acc(%1) masks(%a2_mask, %b2_mask) : vector<[2]xi64>, vector<[2]xi64>
+ %3 = arm_sme.outerproduct %a3_ext, %b3_ext acc(%2) masks(%a3_mask, %b3_mask) : vector<[2]xi64>, vector<[2]xi64>
+
+ return %3 : vector<[2]x[2]xi64>
+}
+
+// -----
+
+// CHECK-LABEL: @outerproduct_sub_widening_4way_unsigned_i16i16i64
+// CHECK: arm_sme.umops_wide_4way %{{.*}}, %{{.*}} acc(%{{.*}}) masks(%{{.*}}, %{{.*}}) : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+func.func @outerproduct_sub_widening_4way_unsigned_i16i16i64(
+ %a0 : vector<[2]xi16>, %b0 : vector<[2]xi16>,
+ %a1 : vector<[2]xi16>, %b1 : vector<[2]xi16>,
+ %a2 : vector<[2]xi16>, %b2 : vector<[2]xi16>,
+ %a3 : vector<[2]xi16>, %b3 : vector<[2]xi16>,
+ %a0_mask : vector<[2]xi1>, %b0_mask : vector<[2]xi1>,
+ %a1_mask : vector<[2]xi1>, %b1_mask : vector<[2]xi1>,
+ %a2_mask : vector<[2]xi1>, %b2_mask : vector<[2]xi1>,
+ %a3_mask : vector<[2]xi1>, %b3_mask : vector<[2]xi1>) -> vector<[2]x[2]xi64> {
+ %a0_ext = arith.extui %a0 : vector<[2]xi16> to vector<[2]xi64>
+ %b0_ext = arith.extui %b0 : vector<[2]xi16> to vector<[2]xi64>
+
+ %a1_ext = arith.extui %a1 : vector<[2]xi16> to vector<[2]xi64>
+ %b1_ext = arith.extui %b1 : vector<[2]xi16> to vector<[2]xi64>
+
+ %a2_ext = arith.extui %a2 : vector<[2]xi16> to vector<[2]xi64>
+ %b2_ext = arith.extui %b2 : vector<[2]xi16> to vector<[2]xi64>
+
+ %a3_ext = arith.extui %a3 : vector<[2]xi16> to vector<[2]xi64>
+ %b3_ext = arith.extui %b3 : vector<[2]xi16> to vector<[2]xi64>
+
+ %acc = arith.constant dense<0> : vector<[2]x[2]xi64>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext kind<sub> acc(%acc) masks(%a0_mask, %b0_mask) : vector<[2]xi64>, vector<[2]xi64>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext kind<sub> acc(%0) masks(%a1_mask, %b1_mask) : vector<[2]xi64>, vector<[2]xi64>
+ %2 = arm_sme.outerproduct %a2_ext, %b2_ext kind<sub> acc(%1) masks(%a2_mask, %b2_mask) : vector<[2]xi64>, vector<[2]xi64>
+ %3 = arm_sme.outerproduct %a3_ext, %b3_ext kind<sub> acc(%2) masks(%a3_mask, %b3_mask) : vector<[2]xi64>, vector<[2]xi64>
+
+ return %3 : vector<[2]x[2]xi64>
+}
+
+// -----
+
+// CHECK-LABEL: @outerproduct_add_widening_4way_signed_by_unsigned_i8i8i32
+// CHECK: arm_sme.sumopa_wide_4way %{{.*}}, %{{.*}} acc(%{{.*}}) masks(%{{.*}}, %{{.*}}) : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+func.func @outerproduct_add_widening_4way_signed_by_unsigned_i8i8i32(
+ %a0 : vector<[4]xi8>, %b0 : vector<[4]xi8>,
+ %a1 : vector<[4]xi8>, %b1 : vector<[4]xi8>,
+ %a2 : vector<[4]xi8>, %b2 : vector<[4]xi8>,
+ %a3 : vector<[4]xi8>, %b3 : vector<[4]xi8>,
+ %a0_mask : vector<[4]xi1>, %b0_mask : vector<[4]xi1>,
+ %a1_mask : vector<[4]xi1>, %b1_mask : vector<[4]xi1>,
+ %a2_mask : vector<[4]xi1>, %b2_mask : vector<[4]xi1>,
+ %a3_mask : vector<[4]xi1>, %b3_mask : vector<[4]xi1>) -> vector<[4]x[4]xi32> {
+ %a0_ext = arith.extsi %a0 : vector<[4]xi8> to vector<[4]xi32>
+ %b0_ext = arith.extui %b0 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a1_ext = arith.extsi %a1 : vector<[4]xi8> to vector<[4]xi32>
+ %b1_ext = arith.extui %b1 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a2_ext = arith.extsi %a2 : vector<[4]xi8> to vector<[4]xi32>
+ %b2_ext = arith.extui %b2 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a3_ext = arith.extsi %a3 : vector<[4]xi8> to vector<[4]xi32>
+ %b3_ext = arith.extui %b3 : vector<[4]xi8> to vector<[4]xi32>
+
+ %acc = arith.constant dense<0> : vector<[4]x[4]xi32>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext acc(%acc) masks(%a0_mask, %b0_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext acc(%0) masks(%a1_mask, %b1_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %2 = arm_sme.outerproduct %a2_ext, %b2_ext acc(%1) masks(%a2_mask, %b2_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %3 = arm_sme.outerproduct %a3_ext, %b3_ext acc(%2) masks(%a3_mask, %b3_mask) : vector<[4]xi32>, vector<[4]xi32>
+
+ return %3 : vector<[4]x[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: @outerproduct_sub_widening_4way_signed_by_unsigned_i8i8i32
+// CHECK: arm_sme.sumops_wide_4way %{{.*}}, %{{.*}} acc(%{{.*}}) masks(%{{.*}}, %{{.*}}) : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+func.func @outerproduct_sub_widening_4way_signed_by_unsigned_i8i8i32(
+ %a0 : vector<[4]xi8>, %b0 : vector<[4]xi8>,
+ %a1 : vector<[4]xi8>, %b1 : vector<[4]xi8>,
+ %a2 : vector<[4]xi8>, %b2 : vector<[4]xi8>,
+ %a3 : vector<[4]xi8>, %b3 : vector<[4]xi8>,
+ %a0_mask : vector<[4]xi1>, %b0_mask : vector<[4]xi1>,
+ %a1_mask : vector<[4]xi1>, %b1_mask : vector<[4]xi1>,
+ %a2_mask : vector<[4]xi1>, %b2_mask : vector<[4]xi1>,
+ %a3_mask : vector<[4]xi1>, %b3_mask : vector<[4]xi1>) -> vector<[4]x[4]xi32> {
+ %a0_ext = arith.extsi %a0 : vector<[4]xi8> to vector<[4]xi32>
+ %b0_ext = arith.extui %b0 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a1_ext = arith.extsi %a1 : vector<[4]xi8> to vector<[4]xi32>
+ %b1_ext = arith.extui %b1 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a2_ext = arith.extsi %a2 : vector<[4]xi8> to vector<[4]xi32>
+ %b2_ext = arith.extui %b2 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a3_ext = arith.extsi %a3 : vector<[4]xi8> to vector<[4]xi32>
+ %b3_ext = arith.extui %b3 : vector<[4]xi8> to vector<[4]xi32>
+
+ %acc = arith.constant dense<0> : vector<[4]x[4]xi32>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext kind<sub> acc(%acc) masks(%a0_mask, %b0_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext kind<sub> acc(%0) masks(%a1_mask, %b1_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %2 = arm_sme.outerproduct %a2_ext, %b2_ext kind<sub> acc(%1) masks(%a2_mask, %b2_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %3 = arm_sme.outerproduct %a3_ext, %b3_ext kind<sub> acc(%2) masks(%a3_mask, %b3_mask) : vector<[4]xi32>, vector<[4]xi32>
+
+ return %3 : vector<[4]x[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: @outerproduct_add_widening_4way_signed_by_unsigned_i16i16i64
+// CHECK: arm_sme.sumopa_wide_4way %{{.*}}, %{{.*}} acc(%{{.*}}) masks(%{{.*}}, %{{.*}}) : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+func.func @outerproduct_add_widening_4way_signed_by_unsigned_i16i16i64(
+ %a0 : vector<[2]xi16>, %b0 : vector<[2]xi16>,
+ %a1 : vector<[2]xi16>, %b1 : vector<[2]xi16>,
+ %a2 : vector<[2]xi16>, %b2 : vector<[2]xi16>,
+ %a3 : vector<[2]xi16>, %b3 : vector<[2]xi16>,
+ %a0_mask : vector<[2]xi1>, %b0_mask : vector<[2]xi1>,
+ %a1_mask : vector<[2]xi1>, %b1_mask : vector<[2]xi1>,
+ %a2_mask : vector<[2]xi1>, %b2_mask : vector<[2]xi1>,
+ %a3_mask : vector<[2]xi1>, %b3_mask : vector<[2]xi1>) -> vector<[2]x[2]xi64> {
+ %a0_ext = arith.extsi %a0 : vector<[2]xi16> to vector<[2]xi64>
+ %b0_ext = arith.extui %b0 : vector<[2]xi16> to vector<[2]xi64>
+
+ %a1_ext = arith.extsi %a1 : vector<[2]xi16> to vector<[2]xi64>
+ %b1_ext = arith.extui %b1 : vector<[2]xi16> to vector<[2]xi64>
+
+ %a2_ext = arith.extsi %a2 : vector<[2]xi16> to vector<[2]xi64>
+ %b2_ext = arith.extui %b2 : vector<[2]xi16> to vector<[2]xi64>
+
+ %a3_ext = arith.extsi %a3 : vector<[2]xi16> to vector<[2]xi64>
+ %b3_ext = arith.extui %b3 : vector<[2]xi16> to vector<[2]xi64>
+
+ %acc = arith.constant dense<0> : vector<[2]x[2]xi64>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext acc(%acc) masks(%a0_mask, %b0_mask) : vector<[2]xi64>, vector<[2]xi64>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext acc(%0) masks(%a1_mask, %b1_mask) : vector<[2]xi64>, vector<[2]xi64>
+ %2 = arm_sme.outerproduct %a2_ext, %b2_ext acc(%1) masks(%a2_mask, %b2_mask) : vector<[2]xi64>, vector<[2]xi64>
+ %3 = arm_sme.outerproduct %a3_ext, %b3_ext acc(%2) masks(%a3_mask, %b3_mask) : vector<[2]xi64>, vector<[2]xi64>
+
+ return %3 : vector<[2]x[2]xi64>
+}
+
+// -----
+
+// CHECK-LABEL: @outerproduct_sub_widening_4way_signed_by_unsigned_i16i16i64
+// CHECK: arm_sme.sumops_wide_4way %{{.*}}, %{{.*}} acc(%{{.*}}) masks(%{{.*}}, %{{.*}}) : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+func.func @outerproduct_sub_widening_4way_signed_by_unsigned_i16i16i64(
+ %a0 : vector<[2]xi16>, %b0 : vector<[2]xi16>,
+ %a1 : vector<[2]xi16>, %b1 : vector<[2]xi16>,
+ %a2 : vector<[2]xi16>, %b2 : vector<[2]xi16>,
+ %a3 : vector<[2]xi16>, %b3 : vector<[2]xi16>,
+ %a0_mask : vector<[2]xi1>, %b0_mask : vector<[2]xi1>,
+ %a1_mask : vector<[2]xi1>, %b1_mask : vector<[2]xi1>,
+ %a2_mask : vector<[2]xi1>, %b2_mask : vector<[2]xi1>,
+ %a3_mask : vector<[2]xi1>, %b3_mask : vector<[2]xi1>) -> vector<[2]x[2]xi64> {
+ %a0_ext = arith.extsi %a0 : vector<[2]xi16> to vector<[2]xi64>
+ %b0_ext = arith.extui %b0 : vector<[2]xi16> to vector<[2]xi64>
+
+ %a1_ext = arith.extsi %a1 : vector<[2]xi16> to vector<[2]xi64>
+ %b1_ext = arith.extui %b1 : vector<[2]xi16> to vector<[2]xi64>
+
+ %a2_ext = arith.extsi %a2 : vector<[2]xi16> to vector<[2]xi64>
+ %b2_ext = arith.extui %b2 : vector<[2]xi16> to vector<[2]xi64>
+
+ %a3_ext = arith.extsi %a3 : vector<[2]xi16> to vector<[2]xi64>
+ %b3_ext = arith.extui %b3 : vector<[2]xi16> to vector<[2]xi64>
+
+ %acc = arith.constant dense<0> : vector<[2]x[2]xi64>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext kind<sub> acc(%acc) masks(%a0_mask, %b0_mask) : vector<[2]xi64>, vector<[2]xi64>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext kind<sub> acc(%0) masks(%a1_mask, %b1_mask) : vector<[2]xi64>, vector<[2]xi64>
+ %2 = arm_sme.outerproduct %a2_ext, %b2_ext kind<sub> acc(%1) masks(%a2_mask, %b2_mask) : vector<[2]xi64>, vector<[2]xi64>
+ %3 = arm_sme.outerproduct %a3_ext, %b3_ext kind<sub> acc(%2) masks(%a3_mask, %b3_mask) : vector<[2]xi64>, vector<[2]xi64>
+
+ return %3 : vector<[2]x[2]xi64>
+}
+
+// -----
+
+// CHECK-LABEL: @outerproduct_add_widening_4way_unsigned_by_signed_i8i8i32
+// CHECK: arm_sme.usmopa_wide_4way %{{.*}}, %{{.*}} acc(%{{.*}}) masks(%{{.*}}, %{{.*}}) : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+func.func @outerproduct_add_widening_4way_unsigned_by_signed_i8i8i32(
+ %a0 : vector<[4]xi8>, %b0 : vector<[4]xi8>,
+ %a1 : vector<[4]xi8>, %b1 : vector<[4]xi8>,
+ %a2 : vector<[4]xi8>, %b2 : vector<[4]xi8>,
+ %a3 : vector<[4]xi8>, %b3 : vector<[4]xi8>,
+ %a0_mask : vector<[4]xi1>, %b0_mask : vector<[4]xi1>,
+ %a1_mask : vector<[4]xi1>, %b1_mask : vector<[4]xi1>,
+ %a2_mask : vector<[4]xi1>, %b2_mask : vector<[4]xi1>,
+ %a3_mask : vector<[4]xi1>, %b3_mask : vector<[4]xi1>) -> vector<[4]x[4]xi32> {
+ %a0_ext = arith.extui %a0 : vector<[4]xi8> to vector<[4]xi32>
+ %b0_ext = arith.extsi %b0 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a1_ext = arith.extui %a1 : vector<[4]xi8> to vector<[4]xi32>
+ %b1_ext = arith.extsi %b1 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a2_ext = arith.extui %a2 : vector<[4]xi8> to vector<[4]xi32>
+ %b2_ext = arith.extsi %b2 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a3_ext = arith.extui %a3 : vector<[4]xi8> to vector<[4]xi32>
+ %b3_ext = arith.extsi %b3 : vector<[4]xi8> to vector<[4]xi32>
+
+ %acc = arith.constant dense<0> : vector<[4]x[4]xi32>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext acc(%acc) masks(%a0_mask, %b0_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext acc(%0) masks(%a1_mask, %b1_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %2 = arm_sme.outerproduct %a2_ext, %b2_ext acc(%1) masks(%a2_mask, %b2_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %3 = arm_sme.outerproduct %a3_ext, %b3_ext acc(%2) masks(%a3_mask, %b3_mask) : vector<[4]xi32>, vector<[4]xi32>
+
+ return %3 : vector<[4]x[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: @outerproduct_sub_widening_4way_unsigned_by_signed_i8i8i32
+// CHECK: arm_sme.usmops_wide_4way %{{.*}}, %{{.*}} acc(%{{.*}}) masks(%{{.*}}, %{{.*}}) : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+func.func @outerproduct_sub_widening_4way_unsigned_by_signed_i8i8i32(
+ %a0 : vector<[4]xi8>, %b0 : vector<[4]xi8>,
+ %a1 : vector<[4]xi8>, %b1 : vector<[4]xi8>,
+ %a2 : vector<[4]xi8>, %b2 : vector<[4]xi8>,
+ %a3 : vector<[4]xi8>, %b3 : vector<[4]xi8>,
+ %a0_mask : vector<[4]xi1>, %b0_mask : vector<[4]xi1>,
+ %a1_mask : vector<[4]xi1>, %b1_mask : vector<[4]xi1>,
+ %a2_mask : vector<[4]xi1>, %b2_mask : vector<[4]xi1>,
+ %a3_mask : vector<[4]xi1>, %b3_mask : vector<[4]xi1>) -> vector<[4]x[4]xi32> {
+ %a0_ext = arith.extui %a0 : vector<[4]xi8> to vector<[4]xi32>
+ %b0_ext = arith.extsi %b0 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a1_ext = arith.extui %a1 : vector<[4]xi8> to vector<[4]xi32>
+ %b1_ext = arith.extsi %b1 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a2_ext = arith.extui %a2 : vector<[4]xi8> to vector<[4]xi32>
+ %b2_ext = arith.extsi %b2 : vector<[4]xi8> to vector<[4]xi32>
+
+ %a3_ext = arith.extui %a3 : vector<[4]xi8> to vector<[4]xi32>
+ %b3_ext = arith.extsi %b3 : vector<[4]xi8> to vector<[4]xi32>
+
+ %acc = arith.constant dense<0> : vector<[4]x[4]xi32>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext kind<sub> acc(%acc) masks(%a0_mask, %b0_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext kind<sub> acc(%0) masks(%a1_mask, %b1_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %2 = arm_sme.outerproduct %a2_ext, %b2_ext kind<sub> acc(%1) masks(%a2_mask, %b2_mask) : vector<[4]xi32>, vector<[4]xi32>
+ %3 = arm_sme.outerproduct %a3_ext, %b3_ext kind<sub> acc(%2) masks(%a3_mask, %b3_mask) : vector<[4]xi32>, vector<[4]xi32>
+
+ return %3 : vector<[4]x[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: @outerproduct_add_widening_4way_unsigned_by_signed_i16i16i64
+// CHECK: arm_sme.usmopa_wide_4way %{{.*}}, %{{.*}} acc(%{{.*}}) masks(%{{.*}}, %{{.*}}) : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+func.func @outerproduct_add_widening_4way_unsigned_by_signed_i16i16i64(
+ %a0 : vector<[2]xi16>, %b0 : vector<[2]xi16>,
+ %a1 : vector<[2]xi16>, %b1 : vector<[2]xi16>,
+ %a2 : vector<[2]xi16>, %b2 : vector<[2]xi16>,
+ %a3 : vector<[2]xi16>, %b3 : vector<[2]xi16>,
+ %a0_mask : vector<[2]xi1>, %b0_mask : vector<[2]xi1>,
+ %a1_mask : vector<[2]xi1>, %b1_mask : vector<[2]xi1>,
+ %a2_mask : vector<[2]xi1>, %b2_mask : vector<[2]xi1>,
+ %a3_mask : vector<[2]xi1>, %b3_mask : vector<[2]xi1>) -> vector<[2]x[2]xi64> {
+ %a0_ext = arith.extui %a0 : vector<[2]xi16> to vector<[2]xi64>
+ %b0_ext = arith.extsi %b0 : vector<[2]xi16> to vector<[2]xi64>
+
+ %a1_ext = arith.extui %a1 : vector<[2]xi16> to vector<[2]xi64>
+ %b1_ext = arith.extsi %b1 : vector<[2]xi16> to vector<[2]xi64>
+
+ %a2_ext = arith.extui %a2 : vector<[2]xi16> to vector<[2]xi64>
+ %b2_ext = arith.extsi %b2 : vector<[2]xi16> to vector<[2]xi64>
+
+ %a3_ext = arith.extui %a3 : vector<[2]xi16> to vector<[2]xi64>
+ %b3_ext = arith.extsi %b3 : vector<[2]xi16> to vector<[2]xi64>
+
+ %acc = arith.constant dense<0> : vector<[2]x[2]xi64>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext acc(%acc) masks(%a0_mask, %b0_mask) : vector<[2]xi64>, vector<[2]xi64>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext acc(%0) masks(%a1_mask, %b1_mask) : vector<[2]xi64>, vector<[2]xi64>
+ %2 = arm_sme.outerproduct %a2_ext, %b2_ext acc(%1) masks(%a2_mask, %b2_mask) : vector<[2]xi64>, vector<[2]xi64>
+ %3 = arm_sme.outerproduct %a3_ext, %b3_ext acc(%2) masks(%a3_mask, %b3_mask) : vector<[2]xi64>, vector<[2]xi64>
+
+ return %3 : vector<[2]x[2]xi64>
+}
+
+// -----
+
+// CHECK-LABEL: @outerproduct_sub_widening_4way_unsigned_by_signed_i16i16i64
+// CHECK: arm_sme.usmops_wide_4way %{{.*}}, %{{.*}} acc(%{{.*}}) masks(%{{.*}}, %{{.*}}) : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+func.func @outerproduct_sub_widening_4way_unsigned_by_signed_i16i16i64(
+ %a0 : vector<[2]xi16>, %b0 : vector<[2]xi16>,
+ %a1 : vector<[2]xi16>, %b1 : vector<[2]xi16>,
+ %a2 : vector<[2]xi16>, %b2 : vector<[2]xi16>,
+ %a3 : vector<[2]xi16>, %b3 : vector<[2]xi16>,
+ %a0_mask : vector<[2]xi1>, %b0_mask : vector<[2]xi1>,
+ %a1_mask : vector<[2]xi1>, %b1_mask : vector<[2]xi1>,
+ %a2_mask : vector<[2]xi1>, %b2_mask : vector<[2]xi1>,
+ %a3_mask : vector<[2]xi1>, %b3_mask : vector<[2]xi1>) -> vector<[2]x[2]xi64> {
+ %a0_ext = arith.extui %a0 : vector<[2]xi16> to vector<[2]xi64>
+ %b0_ext = arith.extsi %b0 : vector<[2]xi16> to vector<[2]xi64>
+
+ %a1_ext = arith.extui %a1 : vector<[2]xi16> to vector<[2]xi64>
+ %b1_ext = arith.extsi %b1 : vector<[2]xi16> to vector<[2]xi64>
+
+ %a2_ext = arith.extui %a2 : vector<[2]xi16> to vector<[2]xi64>
+ %b2_ext = arith.extsi %b2 : vector<[2]xi16> to vector<[2]xi64>
+
+ %a3_ext = arith.extui %a3 : vector<[2]xi16> to vector<[2]xi64>
+ %b3_ext = arith.extsi %b3 : vector<[2]xi16> to vector<[2]xi64>
+
+ %acc = arith.constant dense<0> : vector<[2]x[2]xi64>
+
+ %0 = arm_sme.outerproduct %a0_ext, %b0_ext kind<sub> acc(%acc) masks(%a0_mask, %b0_mask) : vector<[2]xi64>, vector<[2]xi64>
+ %1 = arm_sme.outerproduct %a1_ext, %b1_ext kind<sub> acc(%0) masks(%a1_mask, %b1_mask) : vector<[2]xi64>, vector<[2]xi64>
+ %2 = arm_sme.outerproduct %a2_ext, %b2_ext kind<sub> acc(%1) masks(%a2_mask, %b2_mask) : vector<[2]xi64>, vector<[2]xi64>
+ %3 = arm_sme.outerproduct %a3_ext, %b3_ext kind<sub> acc(%2) masks(%a3_mask, %b3_mask) : vector<[2]xi64>, vector<[2]xi64>
+
+ return %3 : vector<[2]x[2]xi64>
+}
diff --git a/mlir/test/Dialect/ArmSME/roundtrip.mlir b/mlir/test/Dialect/ArmSME/roundtrip.mlir
index 2ad742493408b01..2e552117bcdb4c2 100644
--- a/mlir/test/Dialect/ArmSME/roundtrip.mlir
+++ b/mlir/test/Dialect/ArmSME/roundtrip.mlir
@@ -1131,3 +1131,275 @@ func.func @arm_sme_streaming_vl_double_words() -> index {
%svl_d = arm_sme.streaming_vl <double>
return %svl_d : index
}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.fmopa_wide_2way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+func.func @arm_sme_fmopa_wide_2way_f16f16_to_f32(%vecA: vector<[8]xf16>, %vecB: vector<[8]xf16>) -> vector<[4]x[4]xf32> {
+ // CHECK: arm_sme.fmopa_wide_2way {{.*}}, {{.*}} : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
+ %result = arm_sme.fmopa_wide_2way %vecA, %vecB : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
+ return %result : vector<[4]x[4]xf32>
+}
+
+// -----
+
+func.func @arm_sme_fmopa_wide_2way_bf16bf16_to_f32(%vecA: vector<[8]xbf16>, %vecB: vector<[8]xbf16>) -> vector<[4]x[4]xf32> {
+ // CHECK: arm_sme.fmopa_wide_2way {{.*}}, {{.*}} : vector<[8]xbf16>, vector<[8]xbf16> into vector<[4]x[4]xf32>
+ %result = arm_sme.fmopa_wide_2way %vecA, %vecB : vector<[8]xbf16>, vector<[8]xbf16> into vector<[4]x[4]xf32>
+ return %result : vector<[4]x[4]xf32>
+}
+
+// -----
+
+func.func @arm_sme_fmopa_wide_2way_with_masking(%vecA: vector<[8]xf16>, %vecB: vector<[8]xf16>, %maskA: vector<[8]xi1>, %maskB: vector<[8]xi1>) -> vector<[4]x[4]xf32> {
+ // CHECK: arm_sme.fmopa_wide_2way {{.*}}, {{.*}} masks({{.*}}, {{.*}}) : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
+ %result = arm_sme.fmopa_wide_2way %vecA, %vecB masks(%maskA, %maskB) : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
+ return %result : vector<[4]x[4]xf32>
+}
+
+// -----
+
+func.func @arm_sme_fmopa_wide_2way_with_acc(%vecA: vector<[8]xf16>, %vecB: vector<[8]xf16>, %acc : vector<[4]x[4]xf32>) -> vector<[4]x[4]xf32> {
+ // CHECK: arm_sme.fmopa_wide_2way {{.*}}, {{.*}} acc({{.*}}) : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
+ %result = arm_sme.fmopa_wide_2way %vecA, %vecB acc(%acc) : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
+ return %result : vector<[4]x[4]xf32>
+}
+
+// -----
+
+func.func @arm_sme_fmopa_wide_2way_with_everything(%vecA: vector<[8]xf16>, %vecB: vector<[8]xf16>, %acc : vector<[4]x[4]xf32>, %maskA: vector<[8]xi1>, %maskB: vector<[8]xi1>) -> vector<[4]x[4]xf32> {
+ // CHECK: arm_sme.fmopa_wide_2way {{.*}}, {{.*}} acc({{.*}}) masks({{.*}}, {{.*}}) : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
+ %result = arm_sme.fmopa_wide_2way %vecA, %vecB acc(%acc) masks(%maskA, %maskB) : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
+ return %result : vector<[4]x[4]xf32>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.fmops_wide_2way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+func.func @arm_sme_fmops_wide_2way_f16f16_to_f32(%vecA: vector<[8]xf16>, %vecB: vector<[8]xf16>) -> vector<[4]x[4]xf32> {
+ // CHECK: arm_sme.fmops_wide_2way {{.*}}, {{.*}} : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
+ %result = arm_sme.fmops_wide_2way %vecA, %vecB : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
+ return %result : vector<[4]x[4]xf32>
+}
+
+// -----
+
+func.func @arm_sme_fmops_wide_2way_bf16bf16_to_f32(%vecA: vector<[8]xbf16>, %vecB: vector<[8]xbf16>) -> vector<[4]x[4]xf32> {
+ // CHECK: arm_sme.fmops_wide_2way {{.*}}, {{.*}} : vector<[8]xbf16>, vector<[8]xbf16> into vector<[4]x[4]xf32>
+ %result = arm_sme.fmops_wide_2way %vecA, %vecB : vector<[8]xbf16>, vector<[8]xbf16> into vector<[4]x[4]xf32>
+ return %result : vector<[4]x[4]xf32>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.smopa_wide_2way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+func.func @arm_sme_smopa_wide_2way_i16i16_to_i32(%vecA: vector<[8]xi16>, %vecB: vector<[8]xi16>) -> vector<[4]x[4]xi32> {
+ // CHECK: arm_sme.smopa_wide_2way {{.*}}, {{.*}} : vector<[8]xi16>, vector<[8]xi16> into vector<[4]x[4]xi32>
+ %result = arm_sme.smopa_wide_2way %vecA, %vecB : vector<[8]xi16>, vector<[8]xi16> into vector<[4]x[4]xi32>
+ return %result : vector<[4]x[4]xi32>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.smops_wide_2way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+func.func @arm_sme_smops_wide_2way_i16i16_to_i32(%vecA: vector<[8]xi16>, %vecB: vector<[8]xi16>) -> vector<[4]x[4]xi32> {
+ // CHECK: arm_sme.smops_wide_2way {{.*}}, {{.*}} : vector<[8]xi16>, vector<[8]xi16> into vector<[4]x[4]xi32>
+ %result = arm_sme.smops_wide_2way %vecA, %vecB : vector<[8]xi16>, vector<[8]xi16> into vector<[4]x[4]xi32>
+ return %result : vector<[4]x[4]xi32>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.umopa_wide_2way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+func.func @arm_sme_umopa_wide_2way_i16i16_to_i32(%vecA: vector<[8]xi16>, %vecB: vector<[8]xi16>) -> vector<[4]x[4]xi32> {
+ // CHECK: arm_sme.umopa_wide_2way {{.*}}, {{.*}} : vector<[8]xi16>, vector<[8]xi16> into vector<[4]x[4]xi32>
+ %result = arm_sme.umopa_wide_2way %vecA, %vecB : vector<[8]xi16>, vector<[8]xi16> into vector<[4]x[4]xi32>
+ return %result : vector<[4]x[4]xi32>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.umops_wide_2way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+func.func @arm_sme_umops_wide_2way_i16i16_to_i32(%vecA: vector<[8]xi16>, %vecB: vector<[8]xi16>) -> vector<[4]x[4]xi32> {
+ // CHECK: arm_sme.umops_wide_2way {{.*}}, {{.*}} : vector<[8]xi16>, vector<[8]xi16> into vector<[4]x[4]xi32>
+ %result = arm_sme.umops_wide_2way %vecA, %vecB : vector<[8]xi16>, vector<[8]xi16> into vector<[4]x[4]xi32>
+ return %result : vector<[4]x[4]xi32>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.smopa_wide_4way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+func.func @arm_sme_smopa_wide_4way_i8i8_to_i32(%vecA: vector<[16]xi8>, %vecB: vector<[16]xi8>) -> vector<[4]x[4]xi32> {
+ // CHECK: arm_sme.smopa_wide_4way {{.*}}, {{.*}} : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ %result = arm_sme.smopa_wide_4way %vecA, %vecB : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ return %result : vector<[4]x[4]xi32>
+}
+
+// -----
+
+func.func @arm_sme_smopa_wide_4way_i16i16_to_i64(%vecA: vector<[8]xi16>, %vecB: vector<[8]xi16>) -> vector<[2]x[2]xi64> {
+ // CHECK: arm_sme.smopa_wide_4way {{.*}}, {{.*}} : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+ %result = arm_sme.smopa_wide_4way %vecA, %vecB : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+ return %result : vector<[2]x[2]xi64>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.smops_wide_4way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+func.func @arm_sme_smops_wide_4way_i8i8_to_i32(%vecA: vector<[16]xi8>, %vecB: vector<[16]xi8>) -> vector<[4]x[4]xi32> {
+ // CHECK: arm_sme.smops_wide_4way {{.*}}, {{.*}} : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ %result = arm_sme.smops_wide_4way %vecA, %vecB : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ return %result : vector<[4]x[4]xi32>
+}
+
+// -----
+
+func.func @arm_sme_smops_wide_4way_i16i16_to_i64(%vecA: vector<[8]xi16>, %vecB: vector<[8]xi16>) -> vector<[2]x[2]xi64> {
+ // CHECK: arm_sme.smops_wide_4way {{.*}}, {{.*}} : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+ %result = arm_sme.smops_wide_4way %vecA, %vecB : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+ return %result : vector<[2]x[2]xi64>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.umopa_wide_4way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+func.func @arm_sme_umopa_wide_4way_i8i8_to_i32(%vecA: vector<[16]xi8>, %vecB: vector<[16]xi8>) -> vector<[4]x[4]xi32> {
+ // CHECK: arm_sme.umopa_wide_4way {{.*}}, {{.*}} : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ %result = arm_sme.umopa_wide_4way %vecA, %vecB : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ return %result : vector<[4]x[4]xi32>
+}
+
+// -----
+
+func.func @arm_sme_umopa_wide_4way_i16i16_to_i64(%vecA: vector<[8]xi16>, %vecB: vector<[8]xi16>) -> vector<[2]x[2]xi64> {
+ // CHECK: arm_sme.umopa_wide_4way {{.*}}, {{.*}} : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+ %result = arm_sme.umopa_wide_4way %vecA, %vecB : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+ return %result : vector<[2]x[2]xi64>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.umops_wide_4way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+func.func @arm_sme_umops_wide_4way_i8i8_to_i32(%vecA: vector<[16]xi8>, %vecB: vector<[16]xi8>) -> vector<[4]x[4]xi32> {
+ // CHECK: arm_sme.umops_wide_4way {{.*}}, {{.*}} : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ %result = arm_sme.umops_wide_4way %vecA, %vecB : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ return %result : vector<[4]x[4]xi32>
+}
+
+// -----
+
+func.func @arm_sme_umops_wide_4way_i16i16_to_i64(%vecA: vector<[8]xi16>, %vecB: vector<[8]xi16>) -> vector<[2]x[2]xi64> {
+ // CHECK: arm_sme.umops_wide_4way {{.*}}, {{.*}} : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+ %result = arm_sme.umops_wide_4way %vecA, %vecB : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+ return %result : vector<[2]x[2]xi64>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.sumopa_wide_4way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+func.func @arm_sme_sumopa_wide_4way_i8i8_to_i32(%vecA: vector<[16]xi8>, %vecB: vector<[16]xi8>) -> vector<[4]x[4]xi32> {
+ // CHECK: arm_sme.sumopa_wide_4way {{.*}}, {{.*}} : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ %result = arm_sme.sumopa_wide_4way %vecA, %vecB : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ return %result : vector<[4]x[4]xi32>
+}
+
+// -----
+
+func.func @arm_sme_sumopa_wide_4way_i16i16_to_i64(%vecA: vector<[8]xi16>, %vecB: vector<[8]xi16>) -> vector<[2]x[2]xi64> {
+ // CHECK: arm_sme.sumopa_wide_4way {{.*}}, {{.*}} : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+ %result = arm_sme.sumopa_wide_4way %vecA, %vecB : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+ return %result : vector<[2]x[2]xi64>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.sumops_wide_4way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+func.func @arm_sme_sumops_wide_4way_i8i8_to_i32(%vecA: vector<[16]xi8>, %vecB: vector<[16]xi8>) -> vector<[4]x[4]xi32> {
+ // CHECK: arm_sme.sumops_wide_4way {{.*}}, {{.*}} : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ %result = arm_sme.sumops_wide_4way %vecA, %vecB : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+ return %result : vector<[4]x[4]xi32>
+}
+
+// -----
+
+func.func @arm_sme_sumops_wide_4way_i16i16_to_i64(%vecA: vector<[8]xi16>, %vecB: vector<[8]xi16>) -> vector<[2]x[2]xi64> {
+ // CHECK: arm_sme.sumops_wide_4way {{.*}}, {{.*}} : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+ %result = arm_sme.sumops_wide_4way %vecA, %vecB : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+ return %result : vector<[2]x[2]xi64>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.usmopa_wide_4way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+func.func @arm_sme_usmopa_wide_4way_i8i8_to_i32(%vecA: vector<[16]xi8>, %vecB: vector<[16]xi8>) -> vector<[4]x[4]xi32> {
+ // CHECK: arm_sme.usmopa_wide_4way {{.*}}, {{.*}} : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+  %result = arm_sme.usmopa_wide_4way %vecA, %vecB : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+  return %result : vector<[4]x[4]xi32>
+}
+
+// -----
+
+func.func @arm_sme_usmopa_wide_4way_i16i16_to_i64(%vecA: vector<[8]xi16>, %vecB: vector<[8]xi16>) -> vector<[2]x[2]xi64> {
+ // CHECK: arm_sme.usmopa_wide_4way {{.*}}, {{.*}} : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+  %result = arm_sme.usmopa_wide_4way %vecA, %vecB : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+  return %result : vector<[2]x[2]xi64>
+}
+
+//===----------------------------------------------------------------------===//
+// arm_sme.usmops_wide_4way
+//===----------------------------------------------------------------------===//
+
+// -----
+
+func.func @arm_sme_usmops_wide_4way_i8i8_to_i32(%vecA: vector<[16]xi8>, %vecB: vector<[16]xi8>) -> vector<[4]x[4]xi32> {
+ // CHECK: arm_sme.usmops_wide_4way {{.*}}, {{.*}} : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+  %result = arm_sme.usmops_wide_4way %vecA, %vecB : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+  return %result : vector<[4]x[4]xi32>
+}
+
+// -----
+
+func.func @arm_sme_usmops_wide_4way_i16i16_to_i64(%vecA: vector<[8]xi16>, %vecB: vector<[8]xi16>) -> vector<[2]x[2]xi64> {
+ // CHECK: arm_sme.usmops_wide_4way {{.*}}, {{.*}} : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+  %result = arm_sme.usmops_wide_4way %vecA, %vecB : vector<[8]xi16>, vector<[8]xi16> into vector<[2]x[2]xi64>
+  return %result : vector<[2]x[2]xi64>
+}
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f16f16f32.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f16f16f32.mlir
new file mode 100644
index 000000000000000..8fbdf5d0011ce23
--- /dev/null
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f16f16f32.mlir
@@ -0,0 +1,100 @@
+// DEFINE: %{entry} = test_outerproduct_f16f16f32
+// DEFINE: %{widening_opts} = -arm-sme-outer-product-widening
+// DEFINE: %{compile} = mlir-opt %s \
+// DEFINE: -enable-arm-streaming="streaming-mode=streaming-locally za-mode=new-za" \
+// DEFINE: -convert-vector-to-arm-sme -convert-arith-to-arm-sme %{widening_opts} \
+// DEFINE: -convert-arm-sme-to-scf -allocate-arm-sme-tiles \
+// DEFINE: -convert-arm-sme-to-llvm -cse -canonicalize \
+// DEFINE: -test-lower-to-llvm -o %t
+// DEFINE: %{run} = %mcr_aarch64_cmd %t \
+// DEFINE: -march=aarch64 -mattr=+sve,+sme \
+// DEFINE: -e %{entry} -entry-point-result=void \
+// DEFINE: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils,%arm_sme_abi_shlib
+
+// RUN: %{compile}
+
+// RUN: %{run} | FileCheck %s
+
+// Check the result is the same when the outer products are not combined into
+// the widening variant (see the sketch after the RUN line below).
+
+// REDEFINE: %{widening_opts} =
+// RUN: %{run} | FileCheck %s
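+
+// As a rough sketch (operand names are illustrative, not taken from the test
+// below), with the widening pass the two chained outer products on extended
+// f16 inputs are expected to fold into a single 2-way widening op along the
+// lines of:
+//
+//   %res = arm_sme.fmopa_wide_2way %a_packed, %b_packed acc(%acc)
+//            : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
+//
+// where the f16 inputs are packed pairwise; see the 'arm_sme.fmopa_wide_2way'
+// description and 'outer-product-widening.mlir' for the exact patterns.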
+
+func.func @test_outerproduct_f16f16f32() {
+ %undef = llvm.mlir.undef : vector<[4]xf16>
+
+ %a0_data = arith.constant dense<[0., 2., 4., 6.]> : vector<4xf16>
+ %b0_data = arith.constant dense<[1., 3., 5., 7.]> : vector<4xf16>
+ %a1_data = arith.constant dense<[8., 10., 12., 14.]> : vector<4xf16>
+ %b1_data = arith.constant dense<[9., 11., 13., 15.]> : vector<4xf16>
+
+ %a0 = vector.scalable.insert %a0_data, %undef[0] : vector<4xf16> into vector<[4]xf16>
+ %b0 = vector.scalable.insert %b0_data, %undef[0] : vector<4xf16> into vector<[4]xf16>
+ %a1 = vector.scalable.insert %a1_data, %undef[0] : vector<4xf16> into vector<[4]xf16>
+ %b1 = vector.scalable.insert %b1_data, %undef[0] : vector<4xf16> into vector<[4]xf16>
+
+ %a0_ext = arith.extf %a0 : vector<[4]xf16> to vector<[4]xf32>
+ %b0_ext = arith.extf %b0 : vector<[4]xf16> to vector<[4]xf32>
+ %a1_ext = arith.extf %a1 : vector<[4]xf16> to vector<[4]xf32>
+ %b1_ext = arith.extf %b1 : vector<[4]xf16> to vector<[4]xf32>
+
+ %acc = arith.constant dense<7.0> : vector<[4]x[4]xf32>
+ %0 = vector.outerproduct %a0_ext, %b0_ext, %acc : vector<[4]xf32>, vector<[4]xf32>
+ %1 = vector.outerproduct %a1_ext, %b1_ext, %0 : vector<[4]xf32>, vector<[4]xf32>
+
+ // CHECK: ( 79, 95, 111, 127
+ // CHECK-NEXT: ( 99, 123, 147, 171
+ // CHECK-NEXT: ( 119, 151, 183, 215
+ // CHECK-NEXT: ( 139, 179, 219, 259
+ vector.print %1 : vector<[4]x[4]xf32>
+
+ return
+}
+
+// TODO: A bug in QEMU causes masked FMOPAs to hang [1]; it should be fixed in
+// QEMU 8.2.0. This test is currently not run; once that version is available
+// in CI it can be enabled. The check lines below are correct and have been
+// verified on a QEMU build that contains the fix.
+// [1] https://gitlab.com/qemu-project/qemu/-/issues/1985
+func.func @test_masked_outerproduct_f16f16f32() {
+ %undef = llvm.mlir.undef : vector<[4]xf16>
+
+ %a0_data = arith.constant dense<[0., 2., 4., 6.]> : vector<4xf16>
+ %b0_data = arith.constant dense<[1., 3., 5., 7.]> : vector<4xf16>
+ %a1_data = arith.constant dense<[8., 10., 12., 14.]> : vector<4xf16>
+ %b1_data = arith.constant dense<[9., 11., 13., 15.]> : vector<4xf16>
+
+ %a0 = vector.scalable.insert %a0_data, %undef[0] : vector<4xf16> into vector<[4]xf16>
+ %b0 = vector.scalable.insert %b0_data, %undef[0] : vector<4xf16> into vector<[4]xf16>
+ %a1 = vector.scalable.insert %a1_data, %undef[0] : vector<4xf16> into vector<[4]xf16>
+ %b1 = vector.scalable.insert %b1_data, %undef[0] : vector<4xf16> into vector<[4]xf16>
+
+ %a0_ext = arith.extf %a0 : vector<[4]xf16> to vector<[4]xf32>
+ %b0_ext = arith.extf %b0 : vector<[4]xf16> to vector<[4]xf32>
+ %a1_ext = arith.extf %a1 : vector<[4]xf16> to vector<[4]xf32>
+ %b1_ext = arith.extf %b1 : vector<[4]xf16> to vector<[4]xf32>
+
+ %acc = arith.constant dense<7.0> : vector<[4]x[4]xf32>
+
+ %c2 = arith.constant 2 : index
+ %c3 = arith.constant 3 : index
+ %mask0 = vector.create_mask %c2, %c3 : vector<[4]x[4]xi1>
+ %mask1 = vector.create_mask %c3, %c2 : vector<[4]x[4]xi1>
+
+ %0 = vector.mask %mask0 {
+ vector.outerproduct %a0_ext, %b0_ext, %acc : vector<[4]xf32>, vector<[4]xf32>
+ } : vector<[4]x[4]xi1> -> vector<[4]x[4]xf32>
+
+ %1 = vector.mask %mask1 {
+ vector.outerproduct %a1_ext, %b1_ext, %0 : vector<[4]xf32>, vector<[4]xf32>
+ } : vector<[4]x[4]xi1> -> vector<[4]x[4]xf32>
+
+ // MASKED: ( 79, 95, 7, 7
+ // MASKED-NEXT: ( 99, 123, 17, 7
+ // MASKED-NEXT: ( 115, 139, 7, 7
+ // MASKED-NEXT: ( 7, 7, 7, 7
+ vector.print %1 : vector<[4]x[4]xf32>
+
+ return
+}
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-i8i8i32.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-i8i8i32.mlir
new file mode 100644
index 000000000000000..98b26beccc25bc2
--- /dev/null
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-i8i8i32.mlir
@@ -0,0 +1,142 @@
+// DEFINE: %{entry} = test_outerproduct_i8i8i32
+// DEFINE: %{widening_opts} = -arm-sme-outer-product-widening
+// DEFINE: %{compile} = mlir-opt %s \
+// DEFINE: -enable-arm-streaming="streaming-mode=streaming-locally za-mode=new-za" \
+// DEFINE: -convert-vector-to-arm-sme %{widening_opts} \
+// DEFINE: -convert-arm-sme-to-scf -allocate-arm-sme-tiles \
+// DEFINE: -convert-arm-sme-to-llvm -cse -canonicalize \
+// DEFINE: -test-lower-to-llvm -o %t
+// DEFINE: %{run} = %mcr_aarch64_cmd %t \
+// DEFINE: -march=aarch64 -mattr=+sve,+sme \
+// DEFINE: -e %{entry} -entry-point-result=void \
+// DEFINE: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils,%arm_sme_abi_shlib
+
+// RUN: %{compile}
+
+// RUN: %{run} | FileCheck %s
+
+// REDEFINE: %{entry} = test_masked_outerproduct_i8i8i32
+// RUN: %{run} | FileCheck %s --check-prefix=WITH-MASK
+
+// NOTE: QEMU gives an incorrect result for the SME SMOPA 4-way outer product
+// instruction (versions <= 8.2.0, the latest at the time of writing), see:
+// https://gitlab.com/qemu-project/qemu/-/issues/2083
+// This test is expected to fail until a fixed version of QEMU can be used.
+
+// FIXME: Remove the 'XFAIL' below once a fixed QEMU version is available
+// (and installed on CI buildbot).
+// XFAIL: *
+
+// NOTE: There is no non-widening outer product variant for these types, so
+// this test cannot currently be lowered without the widening pass. Unlike
+// 'test-outerproduct-f16f16f32.mlir', we therefore cannot check that the
+// result is the same without the widening pass (see the sketch below).
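+//
+// As a rough sketch (operand names are illustrative, not taken from the test
+// below), the widening pass is expected to fold the four chained outer
+// products on sign-extended i8 inputs into a single 4-way widening op along
+// the lines of:
+//
+//   %res = arm_sme.smopa_wide_4way %a_packed, %b_packed
+//            : vector<[16]xi8>, vector<[16]xi8> into vector<[4]x[4]xi32>
+//
+// where the i8 inputs are packed 4-way; see the 'arm_sme.smopa_wide_4way'
+// description and 'outer-product-widening.mlir' for the exact patterns.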
+
+func.func @test_outerproduct_i8i8i32() {
+ %undef = llvm.mlir.undef : vector<[4]xi8>
+
+ %a0_data = arith.constant dense<[0, 4, 8, 12]> : vector<4xi8>
+ %a1_data = arith.constant dense<[1, 5, 9, 13]> : vector<4xi8>
+ %a2_data = arith.constant dense<[2, 6, 10, 14]> : vector<4xi8>
+ %a3_data = arith.constant dense<[3, 7, 11, 15]> : vector<4xi8>
+
+ %b0_data = arith.constant dense<[16, 20, 24, 28]> : vector<4xi8>
+ %b1_data = arith.constant dense<[17, 21, 25, 29]> : vector<4xi8>
+ %b2_data = arith.constant dense<[18, 22, 26, 30]> : vector<4xi8>
+ %b3_data = arith.constant dense<[19, 23, 27, 31]> : vector<4xi8>
+
+ %a0 = vector.scalable.insert %a0_data, %undef[0] : vector<4xi8> into vector<[4]xi8>
+ %b0 = vector.scalable.insert %b0_data, %undef[0] : vector<4xi8> into vector<[4]xi8>
+ %a1 = vector.scalable.insert %a1_data, %undef[0] : vector<4xi8> into vector<[4]xi8>
+ %b1 = vector.scalable.insert %b1_data, %undef[0] : vector<4xi8> into vector<[4]xi8>
+ %a2 = vector.scalable.insert %a2_data, %undef[0] : vector<4xi8> into vector<[4]xi8>
+ %b2 = vector.scalable.insert %b2_data, %undef[0] : vector<4xi8> into vector<[4]xi8>
+ %a3 = vector.scalable.insert %a3_data, %undef[0] : vector<4xi8> into vector<[4]xi8>
+ %b3 = vector.scalable.insert %b3_data, %undef[0] : vector<4xi8> into vector<[4]xi8>
+
+ %a0_ext = arith.extsi %a0 : vector<[4]xi8> to vector<[4]xi32>
+ %b0_ext = arith.extsi %b0 : vector<[4]xi8> to vector<[4]xi32>
+ %a1_ext = arith.extsi %a1 : vector<[4]xi8> to vector<[4]xi32>
+ %b1_ext = arith.extsi %b1 : vector<[4]xi8> to vector<[4]xi32>
+ %a2_ext = arith.extsi %a2 : vector<[4]xi8> to vector<[4]xi32>
+ %b2_ext = arith.extsi %b2 : vector<[4]xi8> to vector<[4]xi32>
+ %a3_ext = arith.extsi %a3 : vector<[4]xi8> to vector<[4]xi32>
+ %b3_ext = arith.extsi %b3 : vector<[4]xi8> to vector<[4]xi32>
+
+ %0 = vector.outerproduct %a0_ext, %b0_ext : vector<[4]xi32>, vector<[4]xi32>
+ %1 = vector.outerproduct %a1_ext, %b1_ext, %0 : vector<[4]xi32>, vector<[4]xi32>
+ %2 = vector.outerproduct %a2_ext, %b2_ext, %1 : vector<[4]xi32>, vector<[4]xi32>
+ %3 = vector.outerproduct %a3_ext, %b3_ext, %2 : vector<[4]xi32>, vector<[4]xi32>
+
+ // CHECK: ( 110, 134, 158, 182
+ // CHECK-NEXT: ( 390, 478, 566, 654
+ // CHECK-NEXT: ( 670, 822, 974, 1126
+ // CHECK-NEXT: ( 950, 1166, 1382, 1598
+ vector.print %3 : vector<[4]x[4]xi32>
+
+ return
+}
+
+func.func @test_masked_outerproduct_i8i8i32() {
+ %undef = llvm.mlir.undef : vector<[4]xi8>
+
+ %a0_data = arith.constant dense<[0, 4, 8, 12]> : vector<4xi8>
+ %a1_data = arith.constant dense<[1, 5, 9, 13]> : vector<4xi8>
+ %a2_data = arith.constant dense<[2, 6, 10, 14]> : vector<4xi8>
+ %a3_data = arith.constant dense<[3, 7, 11, 15]> : vector<4xi8>
+
+ %b0_data = arith.constant dense<[16, 20, 24, 28]> : vector<4xi8>
+ %b1_data = arith.constant dense<[17, 21, 25, 29]> : vector<4xi8>
+ %b2_data = arith.constant dense<[18, 22, 26, 30]> : vector<4xi8>
+ %b3_data = arith.constant dense<[19, 23, 27, 31]> : vector<4xi8>
+
+ %a0 = vector.scalable.insert %a0_data, %undef[0] : vector<4xi8> into vector<[4]xi8>
+ %b0 = vector.scalable.insert %b0_data, %undef[0] : vector<4xi8> into vector<[4]xi8>
+ %a1 = vector.scalable.insert %a1_data, %undef[0] : vector<4xi8> into vector<[4]xi8>
+ %b1 = vector.scalable.insert %b1_data, %undef[0] : vector<4xi8> into vector<[4]xi8>
+ %a2 = vector.scalable.insert %a2_data, %undef[0] : vector<4xi8> into vector<[4]xi8>
+ %b2 = vector.scalable.insert %b2_data, %undef[0] : vector<4xi8> into vector<[4]xi8>
+ %a3 = vector.scalable.insert %a3_data, %undef[0] : vector<4xi8> into vector<[4]xi8>
+ %b3 = vector.scalable.insert %b3_data, %undef[0] : vector<4xi8> into vector<[4]xi8>
+
+ %a0_ext = arith.extsi %a0 : vector<[4]xi8> to vector<[4]xi32>
+ %b0_ext = arith.extsi %b0 : vector<[4]xi8> to vector<[4]xi32>
+ %a1_ext = arith.extsi %a1 : vector<[4]xi8> to vector<[4]xi32>
+ %b1_ext = arith.extsi %b1 : vector<[4]xi8> to vector<[4]xi32>
+ %a2_ext = arith.extsi %a2 : vector<[4]xi8> to vector<[4]xi32>
+ %b2_ext = arith.extsi %b2 : vector<[4]xi8> to vector<[4]xi32>
+ %a3_ext = arith.extsi %a3 : vector<[4]xi8> to vector<[4]xi32>
+ %b3_ext = arith.extsi %b3 : vector<[4]xi8> to vector<[4]xi32>
+
+ %c1 = arith.constant 1 : index
+ %c2 = arith.constant 2 : index
+ %c3 = arith.constant 3 : index
+ %c4 = arith.constant 4 : index
+
+ %mask0 = vector.create_mask %c1, %c1 : vector<[4]x[4]xi1>
+ %mask1 = vector.create_mask %c1, %c2 : vector<[4]x[4]xi1>
+ %mask2 = vector.create_mask %c2, %c3 : vector<[4]x[4]xi1>
+ %mask3 = vector.create_mask %c3, %c4 : vector<[4]x[4]xi1>
+
+ %acc = arith.constant dense<2> : vector<[4]x[4]xi32>
+ %0 = vector.mask %mask0 {
+ vector.outerproduct %a0_ext, %b0_ext, %acc : vector<[4]xi32>, vector<[4]xi32>
+ } : vector<[4]x[4]xi1> -> vector<[4]x[4]xi32>
+ %1 = vector.mask %mask1 {
+ vector.outerproduct %a1_ext, %b1_ext, %0 : vector<[4]xi32>, vector<[4]xi32>
+ } : vector<[4]x[4]xi1> -> vector<[4]x[4]xi32>
+ %2 = vector.mask %mask2 {
+ vector.outerproduct %a2_ext, %b2_ext, %1 : vector<[4]xi32>, vector<[4]xi32>
+ } : vector<[4]x[4]xi1> -> vector<[4]x[4]xi32>
+ %3 = vector.mask %mask3 {
+ vector.outerproduct %a3_ext, %b3_ext, %2 : vector<[4]xi32>, vector<[4]xi32>
+ } : vector<[4]x[4]xi1> -> vector<[4]x[4]xi32>
+
+ // WITH-MASK: ( 112, 136, 135, 95
+ // WITH-MASK-NEXT: ( 243, 295, 347, 219
+ // WITH-MASK-NEXT: ( 211, 255, 299, 343
+ // WITH-MASK-NEXT: ( 2, 2, 2, 2
+ vector.print %3 : vector<[4]x[4]xi32>
+
+ return
+}
diff --git a/mlir/test/Target/LLVMIR/arm-sme.mlir b/mlir/test/Target/LLVMIR/arm-sme.mlir
index 7a42033dc04bc0f..aedb6730b06bb3d 100644
--- a/mlir/test/Target/LLVMIR/arm-sme.mlir
+++ b/mlir/test/Target/LLVMIR/arm-sme.mlir
@@ -63,6 +63,12 @@ llvm.func @arm_sme_imopa(%nxv8i16 : vector<[8]xi16>,
// CHECK: call void @llvm.aarch64.sme.usmopa.wide.nxv16i8
"arm_sme.intr.usmopa.wide"(%nxv16i1, %nxv16i1, %nxv16i8, %nxv16i8) <{tile_id = 0 : i32}> :
(vector<[16]xi1>, vector<[16]xi1>, vector<[16]xi8>, vector<[16]xi8>) -> ()
+ // CHECK: call void @llvm.aarch64.sme.smopa.za32.nxv8i16
+ "arm_sme.intr.smopa.za32"(%nxv8i1, %nxv8i1, %nxv8i16, %nxv8i16) <{tile_id = 0 : i32}> :
+ (vector<[8]xi1>, vector<[8]xi1>, vector<[8]xi16>, vector<[8]xi16>) -> ()
+ // CHECK: call void @llvm.aarch64.sme.umopa.za32.nxv8i16
+ "arm_sme.intr.umopa.za32"(%nxv8i1, %nxv8i1, %nxv8i16, %nxv8i16) <{tile_id = 0 : i32}> :
+ (vector<[8]xi1>, vector<[8]xi1>, vector<[8]xi16>, vector<[8]xi16>) -> ()
llvm.return
}
@@ -122,6 +128,12 @@ llvm.func @arm_sme_imops(%nxv8i16 : vector<[8]xi16>,
// CHECK: call void @llvm.aarch64.sme.usmops.wide.nxv16i8
"arm_sme.intr.usmops.wide"(%nxv16i1, %nxv16i1, %nxv16i8, %nxv16i8) <{tile_id = 0 : i32}> :
(vector<[16]xi1>, vector<[16]xi1>, vector<[16]xi8>, vector<[16]xi8>) -> ()
+ // CHECK: call void @llvm.aarch64.sme.smops.za32.nxv8i16
+ "arm_sme.intr.smops.za32"(%nxv8i1, %nxv8i1, %nxv8i16, %nxv8i16) <{tile_id = 0 : i32}> :
+ (vector<[8]xi1>, vector<[8]xi1>, vector<[8]xi16>, vector<[8]xi16>) -> ()
+ // CHECK: call void @llvm.aarch64.sme.umops.za32.nxv8i16
+ "arm_sme.intr.umops.za32"(%nxv8i1, %nxv8i1, %nxv8i16, %nxv8i16) <{tile_id = 0 : i32}> :
+ (vector<[8]xi1>, vector<[8]xi1>, vector<[8]xi16>, vector<[8]xi16>) -> ()
llvm.return
}
diff --git a/mlir/test/Target/LLVMIR/arm-sve.mlir b/mlir/test/Target/LLVMIR/arm-sve.mlir
index b63d3f06515690a..002b1f9d804a7ce 100644
--- a/mlir/test/Target/LLVMIR/arm-sve.mlir
+++ b/mlir/test/Target/LLVMIR/arm-sve.mlir
@@ -314,3 +314,10 @@ llvm.func @arm_sve_convert_to_svbool(
: (vector<[1]xi1>) -> vector<[16]xi1>
llvm.return
}
+
+// CHECK-LABEL: @arm_sve_zip1
+// CHECK-NEXT: call <vscale x 8 x half> @llvm.aarch64.sve.zip1.nxv8f16(<vscale x 8 x half> %{{.*}}, <vscale x 8 x half> {{.*}})
+llvm.func @arm_sve_zip1(%arg0 : vector<[8]xf16>) -> vector<[8]xf16> {
+ %0 = "arm_sve.intr.zip1"(%arg0, %arg0) : (vector<[8]xf16>, vector<[8]xf16>) -> vector<[8]xf16>
+ llvm.return %0 : vector<[8]xf16>
+}
>From 84e9b29a6ee230807ca37daec1ce040e04a73e0e Mon Sep 17 00:00:00 2001
From: Cullen Rhodes <cullen.rhodes at arm.com>
Date: Mon, 22 Jan 2024 16:19:27 +0000
Subject: [PATCH 2/3] Remove pure
---
.../mlir/Dialect/ArmSME/IR/ArmSMEOps.td | 2 --
mlir/test/Dialect/ArmSME/cse.mlir | 25 ++++++++-----------
2 files changed, 10 insertions(+), 17 deletions(-)
diff --git a/mlir/include/mlir/Dialect/ArmSME/IR/ArmSMEOps.td b/mlir/include/mlir/Dialect/ArmSME/IR/ArmSMEOps.td
index 1365dd38c115ef2..8063ccd35faf866 100644
--- a/mlir/include/mlir/Dialect/ArmSME/IR/ArmSMEOps.td
+++ b/mlir/include/mlir/Dialect/ArmSME/IR/ArmSMEOps.td
@@ -736,7 +736,6 @@ class OuterProductResultTileTypeConstraint<string operand> :
def OuterProductOp :
ArmSME_Op<"outerproduct", [
- Pure,
ArmSMETileOpInterface,
AttrSizedOperandSegments,
AllTypesMatch<["lhs", "rhs"]>,
@@ -820,7 +819,6 @@ class OuterProductWideBase<string mnemonic,
list<Type> allowedResultVectorTypes,
int numOuterProducts> :
ArmSME_Op<mnemonic, [
- Pure,
ArmSMETileOpInterface,
AttrSizedOperandSegments,
AllTypesMatch<["lhs", "rhs"]>,
diff --git a/mlir/test/Dialect/ArmSME/cse.mlir b/mlir/test/Dialect/ArmSME/cse.mlir
index 2decdb53d8659c1..74e7293eaeca5fc 100644
--- a/mlir/test/Dialect/ArmSME/cse.mlir
+++ b/mlir/test/Dialect/ArmSME/cse.mlir
@@ -1,35 +1,30 @@
-// RUN: mlir-opt %s -cse -split-input-file | FileCheck %s
+// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='builtin.module(func.func(cse))' | FileCheck %s
-// These tests check that CSE does not remove 'arm_sme.zero/get_tile' ops as
+// This test checks that CSE does not remove 'arm_sme.zero/get_tile' ops as
// duplicates.
// CHECK-LABEL: @zero_tile
// CHECK: %[[TILE_0:.*]] = arm_sme.zero : vector<[4]x[4]xi32>
// CHECK: %[[TILE_1:.*]] = arm_sme.zero : vector<[4]x[4]xi32>
+// CHECK: "prevent.dce"(%[[TILE_0]]) : (vector<[4]x[4]xi32>) -> ()
+// CHECK: "prevent.dce"(%[[TILE_1]]) : (vector<[4]x[4]xi32>) -> ()
func.func @zero_tile() {
%tile_1 = arm_sme.zero : vector<[4]x[4]xi32>
%tile_2 = arm_sme.zero : vector<[4]x[4]xi32>
+ "prevent.dce"(%tile_1) : (vector<[4]x[4]xi32>) -> ()
+ "prevent.dce"(%tile_2) : (vector<[4]x[4]xi32>) -> ()
return
}
-// -----
-
// CHECK-LABEL: @get_tile
// CHECK: %[[TILE_0:.*]] = arm_sme.get_tile : vector<[4]x[4]xi32>
// CHECK: %[[TILE_1:.*]] = arm_sme.get_tile : vector<[4]x[4]xi32>
+// CHECK: "prevent.dce"(%[[TILE_0]]) : (vector<[4]x[4]xi32>) -> ()
+// CHECK: "prevent.dce"(%[[TILE_1]]) : (vector<[4]x[4]xi32>) -> ()
func.func @get_tile() {
%tile_1 = arm_sme.get_tile : vector<[4]x[4]xi32>
%tile_2 = arm_sme.get_tile : vector<[4]x[4]xi32>
- return
-}
-
-// -----
-
-// Operation is pure and should be removed if it's trivially dead.
-
-// CHECK-LABEL: @dead_outerproduct
-// CHECK-NOT: arm_sme.outerproduct
-func.func @dead_outerproduct(%lhs : vector<[4]xf32>, %rhs : vector<[4]xf32>) {
- %0 = arm_sme.outerproduct %lhs, %rhs : vector<[4]xf32>, vector<[4]xf32>
+ "prevent.dce"(%tile_1) : (vector<[4]x[4]xi32>) -> ()
+ "prevent.dce"(%tile_2) : (vector<[4]x[4]xi32>) -> ()
return
}
>From 1a30e975960551693b6890f8f720cd880c924c50 Mon Sep 17 00:00:00 2001
From: Cullen Rhodes <cullen.rhodes at arm.com>
Date: Tue, 23 Jan 2024 15:06:08 +0000
Subject: [PATCH 3/3] vector.scalable.ins -> vector.scalable.insert
---
.../mlir/Dialect/ArmSME/IR/ArmSMEOps.td | 24 +++++++++----------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/mlir/include/mlir/Dialect/ArmSME/IR/ArmSMEOps.td b/mlir/include/mlir/Dialect/ArmSME/IR/ArmSMEOps.td
index 8063ccd35faf866..683db8766143f08 100644
--- a/mlir/include/mlir/Dialect/ArmSME/IR/ArmSMEOps.td
+++ b/mlir/include/mlir/Dialect/ArmSME/IR/ArmSMEOps.td
@@ -965,11 +965,11 @@ def FMopaWide2WayOp
```mlir
%undef = llvm.mlir.undef : vector<[8]xf16>
- %a0_ins = vector.scalable.ins %a0_ext, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
- %a1_ins = vector.scalable.ins %a1_ext, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
+ %a0_ins = vector.scalable.insert %a0_ext, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
+ %a1_ins = vector.scalable.insert %a1_ext, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
%a_packed = "arm_sve.intr.zip1"(%a0_ins, %a1_ins) : (vector<[8]xf16>, vector<[8]xf16>) -> vector<[8]xf16>
- %b0_ins = vector.scalable.ins %b0_ext, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
- %b1_ins = vector.scalable.ins %b1_ext, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
+ %b0_ins = vector.scalable.insert %b0_ext, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
+ %b1_ins = vector.scalable.insert %b1_ext, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
%b_packed = "arm_sve.intr.zip1"(%b0_ins, %b1_ins) : (vector<[8]xf16>, vector<[8]xf16>) -> vector<[8]xf16>
%0 = arm_sme.fmopa_wide_2way %a_packed, %b_packed : vector<[8]xf16>, vector<[8]xf16> into vector<[4]x[4]xf32>
```
@@ -1220,18 +1220,18 @@ def SMopaWide4WayOp
```mlir
%undef = llvm.mlir.undef : vector<[8]xf16>
- %a0_ins = vector.scalable.ins %a0_ext, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
- %a1_ins = vector.scalable.ins %a1_ext, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
- %a2_ins = vector.scalable.ins %a2_ext, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
- %a3_ins = vector.scalable.ins %a3_ext, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
+ %a0_ins = vector.scalable.insert %a0_ext, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
+ %a1_ins = vector.scalable.insert %a1_ext, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
+ %a2_ins = vector.scalable.insert %a2_ext, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
+ %a3_ins = vector.scalable.insert %a3_ext, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
%lhs0 = "arm_sve.intr.zip1"(%a0_ins, %a2_ins) : (vector<[8]xf16>, vector<[8]xf16>) -> vector<[8]xf16>
%lhs1 = "arm_sve.intr.zip1"(%a1_ins, %a3_ins) : (vector<[8]xf16>, vector<[8]xf16>) -> vector<[8]xf16>
%lhs = "arm_sve.intr.zip1"(%lhs0, %lhs1) : (vector<[8]xf16>, vector<[8]xf16>) -> vector<[8]xf16>
- %b0_ins = vector.scalable.ins %b0_ext, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
- %b1_ins = vector.scalable.ins %b1_ext, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
- %b2_ins = vector.scalable.ins %b2_ext, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
- %b3_ins = vector.scalable.ins %b3_ext, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
+ %b0_ins = vector.scalable.insert %b0_ext, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
+ %b1_ins = vector.scalable.insert %b1_ext, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
+ %b2_ins = vector.scalable.insert %b2_ext, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
+ %b3_ins = vector.scalable.insert %b3_ext, %undef[0] : vector<[4]xf16> into vector<[8]xf16>
%rhs0 = "arm_sve.intr.zip1"(%b0_ins, %b2_ins) : (vector<[8]xf16>, vector<[8]xf16>) -> vector<[8]xf16>
%rhs1 = "arm_sve.intr.zip1"(%b1_ins, %b3_ins) : (vector<[8]xf16>, vector<[8]xf16>) -> vector<[8]xf16>
%rhs = "arm_sve.intr.zip1"(%rhs0, %rhs1) : (vector<[8]xf16>, vector<[8]xf16>) -> vector<[8]xf16>