[Mlir-commits] [mlir] [MLIR][AArch64] Add an extra test for Neon I8MM (NFC) (PR #135777)

Momchil Velikov llvmlistbot at llvm.org
Tue Jun 3 02:41:53 PDT 2025


https://github.com/momchil-velikov updated https://github.com/llvm/llvm-project/pull/135777

From 9cc437b78417c4bb68d399b18fe476bc68265934 Mon Sep 17 00:00:00 2001
From: Momchil Velikov <momchil.velikov at arm.com>
Date: Tue, 15 Apr 2025 10:48:11 +0000
Subject: [PATCH 1/3] [MLIR][AArch64] Add an extra test for Neon I8MM

---
 .../Dialect/ArmNeon/lower-to-arm-neon.mlir    | 78 +++++++++++++++++++
 1 file changed, 78 insertions(+)

diff --git a/mlir/test/Dialect/ArmNeon/lower-to-arm-neon.mlir b/mlir/test/Dialect/ArmNeon/lower-to-arm-neon.mlir
index 34e098150a459..3fdaac7fb9c52 100644
--- a/mlir/test/Dialect/ArmNeon/lower-to-arm-neon.mlir
+++ b/mlir/test/Dialect/ArmNeon/lower-to-arm-neon.mlir
@@ -355,6 +355,84 @@ func.func @vector_arm_neon_k_unroll_vecmat(%lhs: vector<1x32xi8>, %rhs: vector<2
   return %res : vector<1x2xi32>
 }
 
+// CHECK-LABEL: @vector_arm_neon_mnk_unroll
+// CHECK-SAME:    %arg0: vector<4x16xi8>
+// CHECK-SAME:    %arg1: vector<4x16xi8>
+// CHECK-SAME:    %arg2: vector<4x4xi32>
+// CHECK-SAME:    -> vector<4x4xi32> {
+// CHECK-NEXT:  %cst = arith.constant dense<0> : vector<4x4xi32>
+// CHECK-NEXT:  %[[VAL_0:[0-9]+]] = vector.extract_strided_slice %arg0 {offsets = [0, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_1:[0-9]+]] = vector.extract_strided_slice %arg1 {offsets = [0, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_2:[0-9]+]] = vector.extract_strided_slice %arg2 {offsets = [0, 0], sizes = [2, 2], strides = [1, 1]} : vector<4x4xi32> to vector<2x2xi32>
+// CHECK-NEXT:  %[[VAL_3:[0-9]+]] = vector.shape_cast %[[VAL_0]] : vector<2x8xi8> to vector<16xi8>
+// CHECK-NEXT:  %[[VAL_4:[0-9]+]] = vector.shape_cast %[[VAL_1]] : vector<2x8xi8> to vector<16xi8>
+// CHECK-NEXT:  %[[VAL_5:[0-9]+]] = vector.shape_cast %[[VAL_2]] : vector<2x2xi32> to vector<4xi32>
+// CHECK-NEXT:  %[[KACC_0_v0:[0-9]+]] = arm_neon.intr.smmla %[[VAL_5]], %[[VAL_3]], %[[VAL_4]] : vector<16xi8> to vector<4xi32>
+// CHECK-NEXT:  %[[VAL_7:[0-9]+]] = vector.shape_cast %[[KACC_0_v0]] : vector<4xi32> to vector<2x2xi32>
+// CHECK-NEXT:  %[[VAL_8:[0-9]+]] = vector.insert_strided_slice %[[VAL_7]], %cst {offsets = [0, 0], strides = [1, 1]} : vector<2x2xi32> into vector<4x4xi32>
+// CHECK-NEXT:  %[[VAL_9:[0-9]+]] = vector.extract_strided_slice %arg0 {offsets = [0, 8], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_10:[0-9]+]] = vector.extract_strided_slice %arg1 {offsets = [0, 8], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_11:[0-9]+]] = vector.shape_cast %[[VAL_9]] : vector<2x8xi8> to vector<16xi8>
+// CHECK-NEXT:  %[[VAL_12:[0-9]+]] = vector.shape_cast %[[VAL_10]] : vector<2x8xi8> to vector<16xi8>
+// CHECK-NEXT:  %[[KACC_0_v1:[0-9]+]] = arm_neon.intr.smmla %[[KACC_0_v0]], %[[VAL_11]], %[[VAL_12]] : vector<16xi8> to vector<4xi32>
+// CHECK-NEXT:  %[[VAL_14:[0-9]+]] = vector.shape_cast %[[KACC_0_v1]] : vector<4xi32> to vector<2x2xi32>
+// CHECK-NEXT:  %[[VAL_15:[0-9]+]] = vector.insert_strided_slice %[[VAL_14]], %[[VAL_8]] {offsets = [0, 0], strides = [1, 1]} : vector<2x2xi32> into vector<4x4xi32>
+// CHECK-NEXT:  %[[VAL_16:[0-9]+]] = vector.extract_strided_slice %arg0 {offsets = [0, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_17:[0-9]+]] = vector.extract_strided_slice %arg1 {offsets = [2, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_18:[0-9]+]] = vector.extract_strided_slice %arg2 {offsets = [0, 2], sizes = [2, 2], strides = [1, 1]} : vector<4x4xi32> to vector<2x2xi32>
+// CHECK-NEXT:  %[[VAL_19:[0-9]+]] = vector.shape_cast %[[VAL_16]] : vector<2x8xi8> to vector<16xi8>
+// CHECK-NEXT:  %[[VAL_20:[0-9]+]] = vector.shape_cast %[[VAL_17]] : vector<2x8xi8> to vector<16xi8>
+// CHECK-NEXT:  %[[VAL_21:[0-9]+]] = vector.shape_cast %[[VAL_18]] : vector<2x2xi32> to vector<4xi32>
+// CHECK-NEXT:  %[[KACC_1_v0:[0-9]+]] = arm_neon.intr.smmla %[[VAL_21]], %[[VAL_19]], %[[VAL_20]] : vector<16xi8> to vector<4xi32>
+// CHECK-NEXT:  %[[VAL_23:[0-9]+]] = vector.shape_cast %[[KACC_1_v0]] : vector<4xi32> to vector<2x2xi32>
+// CHECK-NEXT:  %[[VAL_24:[0-9]+]] = vector.insert_strided_slice %[[VAL_23]], %[[VAL_15]] {offsets = [0, 2], strides = [1, 1]} : vector<2x2xi32> into vector<4x4xi32>
+// CHECK-NEXT:  %[[VAL_25:[0-9]+]] = vector.extract_strided_slice %arg0 {offsets = [0, 8], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_26:[0-9]+]] = vector.extract_strided_slice %arg1 {offsets = [2, 8], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_27:[0-9]+]] = vector.shape_cast %[[VAL_25]] : vector<2x8xi8> to vector<16xi8>
+// CHECK-NEXT:  %[[VAL_28:[0-9]+]] = vector.shape_cast %[[VAL_26]] : vector<2x8xi8> to vector<16xi8>
+// CHECK-NEXT:  %[[KACC_1_v1:[0-9]+]] = arm_neon.intr.smmla %[[KACC_1_v0]], %[[VAL_27]], %[[VAL_28]] : vector<16xi8> to vector<4xi32>
+// CHECK-NEXT:  %[[VAL_30:[0-9]+]] = vector.shape_cast %[[KACC_1_v1]] : vector<4xi32> to vector<2x2xi32>
+// CHECK-NEXT:  %[[VAL_31:[0-9]+]] = vector.insert_strided_slice %[[VAL_30]], %[[VAL_24]] {offsets = [0, 2], strides = [1, 1]} : vector<2x2xi32> into vector<4x4xi32>
+// CHECK-NEXT:  %[[VAL_32:[0-9]+]] = vector.extract_strided_slice %arg0 {offsets = [2, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_33:[0-9]+]] = vector.extract_strided_slice %arg1 {offsets = [0, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_34:[0-9]+]] = vector.extract_strided_slice %arg2 {offsets = [2, 0], sizes = [2, 2], strides = [1, 1]} : vector<4x4xi32> to vector<2x2xi32>
+// CHECK-NEXT:  %[[VAL_35:[0-9]+]] = vector.shape_cast %[[VAL_32]] : vector<2x8xi8> to vector<16xi8>
+// CHECK-NEXT:  %[[VAL_36:[0-9]+]] = vector.shape_cast %[[VAL_33]] : vector<2x8xi8> to vector<16xi8>
+// CHECK-NEXT:  %[[VAL_37:[0-9]+]] = vector.shape_cast %[[VAL_34]] : vector<2x2xi32> to vector<4xi32>
+// CHECK-NEXT:  %[[KACC_2_v0:[0-9]+]] = arm_neon.intr.smmla %[[VAL_37]], %[[VAL_35]], %[[VAL_36]] : vector<16xi8> to vector<4xi32>
+// CHECK-NEXT:  %[[VAL_39:[0-9]+]] = vector.shape_cast %[[KACC_2_v0]] : vector<4xi32> to vector<2x2xi32>
+// CHECK-NEXT:  %[[VAL_40:[0-9]+]] = vector.insert_strided_slice %[[VAL_39]], %[[VAL_31]] {offsets = [2, 0], strides = [1, 1]} : vector<2x2xi32> into vector<4x4xi32>
+// CHECK-NEXT:  %[[VAL_41:[0-9]+]] = vector.extract_strided_slice %arg0 {offsets = [2, 8], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_42:[0-9]+]] = vector.extract_strided_slice %arg1 {offsets = [0, 8], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_43:[0-9]+]] = vector.shape_cast %[[VAL_41]] : vector<2x8xi8> to vector<16xi8>
+// CHECK-NEXT:  %[[VAL_44:[0-9]+]] = vector.shape_cast %[[VAL_42]] : vector<2x8xi8> to vector<16xi8>
+// CHECK-NEXT:  %[[KACC_2_v1:[0-9]+]] = arm_neon.intr.smmla %[[KACC_2_v0]], %[[VAL_43]], %[[VAL_44]] : vector<16xi8> to vector<4xi32>
+// CHECK-NEXT:  %[[VAL_46:[0-9]+]] = vector.shape_cast %[[KACC_2_v1]] : vector<4xi32> to vector<2x2xi32>
+// CHECK-NEXT:  %[[VAL_47:[0-9]+]] = vector.insert_strided_slice %[[VAL_46]], %[[VAL_40]] {offsets = [2, 0], strides = [1, 1]} : vector<2x2xi32> into vector<4x4xi32>
+// CHECK-NEXT:  %[[VAL_48:[0-9]+]] = vector.extract_strided_slice %arg0 {offsets = [2, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_49:[0-9]+]] = vector.extract_strided_slice %arg1 {offsets = [2, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_50:[0-9]+]] = vector.extract_strided_slice %arg2 {offsets = [2, 2], sizes = [2, 2], strides = [1, 1]} : vector<4x4xi32> to vector<2x2xi32>
+// CHECK-NEXT:  %[[VAL_51:[0-9]+]] = vector.shape_cast %[[VAL_48]] : vector<2x8xi8> to vector<16xi8>
+// CHECK-NEXT:  %[[VAL_52:[0-9]+]] = vector.shape_cast %[[VAL_49]] : vector<2x8xi8> to vector<16xi8>
+// CHECK-NEXT:  %[[VAL_53:[0-9]+]] = vector.shape_cast %[[VAL_50]] : vector<2x2xi32> to vector<4xi32>
+// CHECK-NEXT:  %[[KACC_3_v0:[0-9]+]] = arm_neon.intr.smmla %[[VAL_53]], %[[VAL_51]], %[[VAL_52]] : vector<16xi8> to vector<4xi32>
+// CHECK-NEXT:  %[[VAL_55:[0-9]+]] = vector.shape_cast %[[KACC_3_v0]] : vector<4xi32> to vector<2x2xi32>
+// CHECK-NEXT:  %[[VAL_56:[0-9]+]] = vector.insert_strided_slice %[[VAL_55]], %[[VAL_47]] {offsets = [2, 2], strides = [1, 1]} : vector<2x2xi32> into vector<4x4xi32>
+// CHECK-NEXT:  %[[VAL_57:[0-9]+]] = vector.extract_strided_slice %arg0 {offsets = [2, 8], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_58:[0-9]+]] = vector.extract_strided_slice %arg1 {offsets = [2, 8], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_59:[0-9]+]] = vector.shape_cast %[[VAL_57]] : vector<2x8xi8> to vector<16xi8>
+// CHECK-NEXT:  %[[VAL_60:[0-9]+]] = vector.shape_cast %[[VAL_58]] : vector<2x8xi8> to vector<16xi8>
+// CHECK-NEXT:  %[[KACC_3_v1:[0-9]+]] = arm_neon.intr.smmla %[[KACC_3_v0]], %[[VAL_59]], %[[VAL_60]] : vector<16xi8> to vector<4xi32>
+// CHECK-NEXT:  %[[VAL_62:[0-9]+]] = vector.shape_cast %[[KACC_3_v1]] : vector<4xi32> to vector<2x2xi32>
+// CHECK-NEXT:  %[[VAL_63:[0-9]+]] = vector.insert_strided_slice %[[VAL_62]], %[[VAL_56]] {offsets = [2, 2], strides = [1, 1]} : vector<2x2xi32> into vector<4x4xi32>
+// CHECK-NEXT:  return %[[VAL_63]] : vector<4x4xi32>
+func.func @vector_arm_neon_mnk_unroll(%lhs: vector<4x16xi8>, %rhs: vector<4x16xi8>, %acc : vector<4x4xi32>) -> vector<4x4xi32> {
+  %lhs_extsi = arith.extsi %lhs : vector<4x16xi8> to vector<4x16xi32>
+  %rhs_extsi = arith.extsi %rhs : vector<4x16xi8> to vector<4x16xi32>
+  %res = vector.contract {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"], kind = #vector.kind<add>} %lhs_extsi, %rhs_extsi, %acc : vector<4x16xi32>, vector<4x16xi32> into vector<4x4xi32>
+  return %res : vector<4x4xi32>
+}
+
 module attributes {transform.with_named_sequence} {
   transform.named_sequence @__transform_main(%module: !transform.any_op {transform.readonly}) {
     %func = transform.structured.match ops{["func.func"]} in %module : (!transform.any_op) -> !transform.op<"func.func">

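For reference (not part of the patch), the contraction in the new test computes res[i, j] = acc[i, j] + sum_k sext(lhs[i, k]) * sext(rhs[j, k]): the indexing maps place the reduction dimension last in both operands, so the RHS is indexed as [j, k]. A minimal Python sketch of that reference semantics, using hypothetical names and plain nested lists of ints:

def contract_i8mm_ref(lhs, rhs, acc):
    # lhs: 4x16, rhs: 4x16, acc: 4x4. Values are ordinary Python ints,
    # so the i8 -> i32 sign extension is implicit.
    rows, depth = len(lhs), len(lhs[0])
    cols = len(rhs)
    res = [row[:] for row in acc]
    for i in range(rows):
        for j in range(cols):
            for k in range(depth):
                res[i][j] += lhs[i][k] * rhs[j][k]
    return res

For example, contract_i8mm_ref([[1] * 16] * 4, [[1] * 16] * 4, [[0] * 4] * 4) returns a 4x4 matrix filled with 16.
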
From 2ff98bbf35c28d8b2afc010347bcd0b2b4ef7737 Mon Sep 17 00:00:00 2001
From: Momchil Velikov <momchil.velikov at arm.com>
Date: Wed, 14 May 2025 09:44:21 +0000
Subject: [PATCH 2/3] [fixup] Add a comment explaining the added test

---
 mlir/test/Dialect/ArmNeon/lower-to-arm-neon.mlir | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/mlir/test/Dialect/ArmNeon/lower-to-arm-neon.mlir b/mlir/test/Dialect/ArmNeon/lower-to-arm-neon.mlir
index 3fdaac7fb9c52..c7c855b3acba6 100644
--- a/mlir/test/Dialect/ArmNeon/lower-to-arm-neon.mlir
+++ b/mlir/test/Dialect/ArmNeon/lower-to-arm-neon.mlir
@@ -355,6 +355,11 @@ func.func @vector_arm_neon_k_unroll_vecmat(%lhs: vector<1x32xi8>, %rhs: vector<2
   return %res : vector<1x2xi32>
 }
 
+// -----
+
+// Test with more than one iteration across all of the M, N and K dimensions.
+// Shows multiple independent accumulators as well as accumulator reuse.
+
 // CHECK-LABEL: @vector_arm_neon_mnk_unroll
 // CHECK-SAME:    %arg0: vector<4x16xi8>
 // CHECK-SAME:    %arg1: vector<4x16xi8>

From e4e0ba9f0848317e3a59ca3db36ba7c887d7bda0 Mon Sep 17 00:00:00 2001
From: Momchil Velikov <momchil.velikov at arm.com>
Date: Thu, 15 May 2025 10:17:05 +0000
Subject: [PATCH 3/3] [fixup] Some more comments

---
 .../Dialect/ArmNeon/lower-to-arm-neon.mlir    | 69 ++++++++++++-------
 1 file changed, 46 insertions(+), 23 deletions(-)

diff --git a/mlir/test/Dialect/ArmNeon/lower-to-arm-neon.mlir b/mlir/test/Dialect/ArmNeon/lower-to-arm-neon.mlir
index c7c855b3acba6..e4f7ea150c850 100644
--- a/mlir/test/Dialect/ArmNeon/lower-to-arm-neon.mlir
+++ b/mlir/test/Dialect/ArmNeon/lower-to-arm-neon.mlir
@@ -359,77 +359,100 @@ func.func @vector_arm_neon_k_unroll_vecmat(%lhs: vector<1x32xi8>, %rhs: vector<2
 
 // Test with more than one iteration across all of the M, N and K dimensions.
 // Shows multiple independent accumulators as well as accumulator reuse.
+// For each iteration [I, J, K], sub-tiles are extracted at the following offsets:
+//   LHS: [2*I, 8*K]
+//   RHS: [2*J, 8*K]
+//   ACC: [2*I, 2*J]
+// Sub-tile insert offsets for the result are the same as for ACC (there are
+// redundant inserts).
 
 // CHECK-LABEL: @vector_arm_neon_mnk_unroll
-// CHECK-SAME:    %arg0: vector<4x16xi8>
-// CHECK-SAME:    %arg1: vector<4x16xi8>
-// CHECK-SAME:    %arg2: vector<4x4xi32>
+// CHECK-SAME:    %[[LHS:arg0]]: vector<4x16xi8>
+// CHECK-SAME:    %[[RHS:arg1]]: vector<4x16xi8>
+// CHECK-SAME:    %[[ACC:arg2]]: vector<4x4xi32>
 // CHECK-SAME:    -> vector<4x4xi32> {
 // CHECK-NEXT:  %cst = arith.constant dense<0> : vector<4x4xi32>
-// CHECK-NEXT:  %[[VAL_0:[0-9]+]] = vector.extract_strided_slice %arg0 {offsets = [0, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
-// CHECK-NEXT:  %[[VAL_1:[0-9]+]] = vector.extract_strided_slice %arg1 {offsets = [0, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
-// CHECK-NEXT:  %[[VAL_2:[0-9]+]] = vector.extract_strided_slice %arg2 {offsets = [0, 0], sizes = [2, 2], strides = [1, 1]} : vector<4x4xi32> to vector<2x2xi32>
+
+// Iteration: [0, 0, 0]
+// CHECK-NEXT:  %[[VAL_0:[0-9]+]] = vector.extract_strided_slice %[[LHS]] {offsets = [0, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_1:[0-9]+]] = vector.extract_strided_slice %[[RHS]] {offsets = [0, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_2:[0-9]+]] = vector.extract_strided_slice %[[ACC]] {offsets = [0, 0], sizes = [2, 2], strides = [1, 1]} : vector<4x4xi32> to vector<2x2xi32>
 // CHECK-NEXT:  %[[VAL_3:[0-9]+]] = vector.shape_cast %[[VAL_0]] : vector<2x8xi8> to vector<16xi8>
 // CHECK-NEXT:  %[[VAL_4:[0-9]+]] = vector.shape_cast %[[VAL_1]] : vector<2x8xi8> to vector<16xi8>
 // CHECK-NEXT:  %[[VAL_5:[0-9]+]] = vector.shape_cast %[[VAL_2]] : vector<2x2xi32> to vector<4xi32>
 // CHECK-NEXT:  %[[KACC_0_v0:[0-9]+]] = arm_neon.intr.smmla %[[VAL_5]], %[[VAL_3]], %[[VAL_4]] : vector<16xi8> to vector<4xi32>
 // CHECK-NEXT:  %[[VAL_7:[0-9]+]] = vector.shape_cast %[[KACC_0_v0]] : vector<4xi32> to vector<2x2xi32>
 // CHECK-NEXT:  %[[VAL_8:[0-9]+]] = vector.insert_strided_slice %[[VAL_7]], %cst {offsets = [0, 0], strides = [1, 1]} : vector<2x2xi32> into vector<4x4xi32>
-// CHECK-NEXT:  %[[VAL_9:[0-9]+]] = vector.extract_strided_slice %arg0 {offsets = [0, 8], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
-// CHECK-NEXT:  %[[VAL_10:[0-9]+]] = vector.extract_strided_slice %arg1 {offsets = [0, 8], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+
+// Iteration: [0, 0, 1]
+// CHECK-NEXT:  %[[VAL_9:[0-9]+]] = vector.extract_strided_slice %[[LHS]] {offsets = [0, 8], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_10:[0-9]+]] = vector.extract_strided_slice %[[RHS]] {offsets = [0, 8], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
 // CHECK-NEXT:  %[[VAL_11:[0-9]+]] = vector.shape_cast %[[VAL_9]] : vector<2x8xi8> to vector<16xi8>
 // CHECK-NEXT:  %[[VAL_12:[0-9]+]] = vector.shape_cast %[[VAL_10]] : vector<2x8xi8> to vector<16xi8>
 // CHECK-NEXT:  %[[KACC_0_v1:[0-9]+]] = arm_neon.intr.smmla %[[KACC_0_v0]], %[[VAL_11]], %[[VAL_12]] : vector<16xi8> to vector<4xi32>
 // CHECK-NEXT:  %[[VAL_14:[0-9]+]] = vector.shape_cast %[[KACC_0_v1]] : vector<4xi32> to vector<2x2xi32>
 // CHECK-NEXT:  %[[VAL_15:[0-9]+]] = vector.insert_strided_slice %[[VAL_14]], %[[VAL_8]] {offsets = [0, 0], strides = [1, 1]} : vector<2x2xi32> into vector<4x4xi32>
-// CHECK-NEXT:  %[[VAL_16:[0-9]+]] = vector.extract_strided_slice %arg0 {offsets = [0, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
-// CHECK-NEXT:  %[[VAL_17:[0-9]+]] = vector.extract_strided_slice %arg1 {offsets = [2, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
-// CHECK-NEXT:  %[[VAL_18:[0-9]+]] = vector.extract_strided_slice %arg2 {offsets = [0, 2], sizes = [2, 2], strides = [1, 1]} : vector<4x4xi32> to vector<2x2xi32>
+
+// Iteration: [0, 1, 0]
+// CHECK-NEXT:  %[[VAL_16:[0-9]+]] = vector.extract_strided_slice %[[LHS]] {offsets = [0, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_17:[0-9]+]] = vector.extract_strided_slice %[[RHS]] {offsets = [2, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_18:[0-9]+]] = vector.extract_strided_slice %[[ACC]] {offsets = [0, 2], sizes = [2, 2], strides = [1, 1]} : vector<4x4xi32> to vector<2x2xi32>
 // CHECK-NEXT:  %[[VAL_19:[0-9]+]] = vector.shape_cast %[[VAL_16]] : vector<2x8xi8> to vector<16xi8>
 // CHECK-NEXT:  %[[VAL_20:[0-9]+]] = vector.shape_cast %[[VAL_17]] : vector<2x8xi8> to vector<16xi8>
 // CHECK-NEXT:  %[[VAL_21:[0-9]+]] = vector.shape_cast %[[VAL_18]] : vector<2x2xi32> to vector<4xi32>
 // CHECK-NEXT:  %[[KACC_1_v0:[0-9]+]] = arm_neon.intr.smmla %[[VAL_21]], %[[VAL_19]], %[[VAL_20]] : vector<16xi8> to vector<4xi32>
 // CHECK-NEXT:  %[[VAL_23:[0-9]+]] = vector.shape_cast %[[KACC_1_v0]] : vector<4xi32> to vector<2x2xi32>
 // CHECK-NEXT:  %[[VAL_24:[0-9]+]] = vector.insert_strided_slice %[[VAL_23]], %[[VAL_15]] {offsets = [0, 2], strides = [1, 1]} : vector<2x2xi32> into vector<4x4xi32>
-// CHECK-NEXT:  %[[VAL_25:[0-9]+]] = vector.extract_strided_slice %arg0 {offsets = [0, 8], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
-// CHECK-NEXT:  %[[VAL_26:[0-9]+]] = vector.extract_strided_slice %arg1 {offsets = [2, 8], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+
+// Iteration: [0, 1, 1]
+// CHECK-NEXT:  %[[VAL_25:[0-9]+]] = vector.extract_strided_slice %[[LHS]] {offsets = [0, 8], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_26:[0-9]+]] = vector.extract_strided_slice %[[RHS]] {offsets = [2, 8], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
 // CHECK-NEXT:  %[[VAL_27:[0-9]+]] = vector.shape_cast %[[VAL_25]] : vector<2x8xi8> to vector<16xi8>
 // CHECK-NEXT:  %[[VAL_28:[0-9]+]] = vector.shape_cast %[[VAL_26]] : vector<2x8xi8> to vector<16xi8>
 // CHECK-NEXT:  %[[KACC_1_v1:[0-9]+]] = arm_neon.intr.smmla %[[KACC_1_v0]], %[[VAL_27]], %[[VAL_28]] : vector<16xi8> to vector<4xi32>
 // CHECK-NEXT:  %[[VAL_30:[0-9]+]] = vector.shape_cast %[[KACC_1_v1]] : vector<4xi32> to vector<2x2xi32>
 // CHECK-NEXT:  %[[VAL_31:[0-9]+]] = vector.insert_strided_slice %[[VAL_30]], %[[VAL_24]] {offsets = [0, 2], strides = [1, 1]} : vector<2x2xi32> into vector<4x4xi32>
-// CHECK-NEXT:  %[[VAL_32:[0-9]+]] = vector.extract_strided_slice %arg0 {offsets = [2, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
-// CHECK-NEXT:  %[[VAL_33:[0-9]+]] = vector.extract_strided_slice %arg1 {offsets = [0, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
-// CHECK-NEXT:  %[[VAL_34:[0-9]+]] = vector.extract_strided_slice %arg2 {offsets = [2, 0], sizes = [2, 2], strides = [1, 1]} : vector<4x4xi32> to vector<2x2xi32>
+
+// Iteration: [1, 0, 0]
+// CHECK-NEXT:  %[[VAL_32:[0-9]+]] = vector.extract_strided_slice %[[LHS]] {offsets = [2, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_33:[0-9]+]] = vector.extract_strided_slice %[[RHS]] {offsets = [0, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_34:[0-9]+]] = vector.extract_strided_slice %[[ACC]] {offsets = [2, 0], sizes = [2, 2], strides = [1, 1]} : vector<4x4xi32> to vector<2x2xi32>
 // CHECK-NEXT:  %[[VAL_35:[0-9]+]] = vector.shape_cast %[[VAL_32]] : vector<2x8xi8> to vector<16xi8>
 // CHECK-NEXT:  %[[VAL_36:[0-9]+]] = vector.shape_cast %[[VAL_33]] : vector<2x8xi8> to vector<16xi8>
 // CHECK-NEXT:  %[[VAL_37:[0-9]+]] = vector.shape_cast %[[VAL_34]] : vector<2x2xi32> to vector<4xi32>
 // CHECK-NEXT:  %[[KACC_2_v0:[0-9]+]] = arm_neon.intr.smmla %[[VAL_37]], %[[VAL_35]], %[[VAL_36]] : vector<16xi8> to vector<4xi32>
 // CHECK-NEXT:  %[[VAL_39:[0-9]+]] = vector.shape_cast %[[KACC_2_v0]] : vector<4xi32> to vector<2x2xi32>
 // CHECK-NEXT:  %[[VAL_40:[0-9]+]] = vector.insert_strided_slice %[[VAL_39]], %[[VAL_31]] {offsets = [2, 0], strides = [1, 1]} : vector<2x2xi32> into vector<4x4xi32>
-// CHECK-NEXT:  %[[VAL_41:[0-9]+]] = vector.extract_strided_slice %arg0 {offsets = [2, 8], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
-// CHECK-NEXT:  %[[VAL_42:[0-9]+]] = vector.extract_strided_slice %arg1 {offsets = [0, 8], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+
+// Iteration: [1, 0, 1]
+// CHECK-NEXT:  %[[VAL_41:[0-9]+]] = vector.extract_strided_slice %[[LHS]] {offsets = [2, 8], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_42:[0-9]+]] = vector.extract_strided_slice %[[RHS]] {offsets = [0, 8], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
 // CHECK-NEXT:  %[[VAL_43:[0-9]+]] = vector.shape_cast %[[VAL_41]] : vector<2x8xi8> to vector<16xi8>
 // CHECK-NEXT:  %[[VAL_44:[0-9]+]] = vector.shape_cast %[[VAL_42]] : vector<2x8xi8> to vector<16xi8>
 // CHECK-NEXT:  %[[KACC_2_v1:[0-9]+]] = arm_neon.intr.smmla %[[KACC_2_v0]], %[[VAL_43]], %[[VAL_44]] : vector<16xi8> to vector<4xi32>
 // CHECK-NEXT:  %[[VAL_46:[0-9]+]] = vector.shape_cast %[[KACC_2_v1]] : vector<4xi32> to vector<2x2xi32>
 // CHECK-NEXT:  %[[VAL_47:[0-9]+]] = vector.insert_strided_slice %[[VAL_46]], %[[VAL_40]] {offsets = [2, 0], strides = [1, 1]} : vector<2x2xi32> into vector<4x4xi32>
-// CHECK-NEXT:  %[[VAL_48:[0-9]+]] = vector.extract_strided_slice %arg0 {offsets = [2, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
-// CHECK-NEXT:  %[[VAL_49:[0-9]+]] = vector.extract_strided_slice %arg1 {offsets = [2, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
-// CHECK-NEXT:  %[[VAL_50:[0-9]+]] = vector.extract_strided_slice %arg2 {offsets = [2, 2], sizes = [2, 2], strides = [1, 1]} : vector<4x4xi32> to vector<2x2xi32>
+
+// Iteration: [1, 1, 0]
+// CHECK-NEXT:  %[[VAL_48:[0-9]+]] = vector.extract_strided_slice %[[LHS]] {offsets = [2, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_49:[0-9]+]] = vector.extract_strided_slice %[[RHS]] {offsets = [2, 0], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_50:[0-9]+]] = vector.extract_strided_slice %[[ACC]] {offsets = [2, 2], sizes = [2, 2], strides = [1, 1]} : vector<4x4xi32> to vector<2x2xi32>
 // CHECK-NEXT:  %[[VAL_51:[0-9]+]] = vector.shape_cast %[[VAL_48]] : vector<2x8xi8> to vector<16xi8>
 // CHECK-NEXT:  %[[VAL_52:[0-9]+]] = vector.shape_cast %[[VAL_49]] : vector<2x8xi8> to vector<16xi8>
 // CHECK-NEXT:  %[[VAL_53:[0-9]+]] = vector.shape_cast %[[VAL_50]] : vector<2x2xi32> to vector<4xi32>
 // CHECK-NEXT:  %[[KACC_3_v0:[0-9]+]] = arm_neon.intr.smmla %[[VAL_53]], %[[VAL_51]], %[[VAL_52]] : vector<16xi8> to vector<4xi32>
 // CHECK-NEXT:  %[[VAL_55:[0-9]+]] = vector.shape_cast %[[KACC_3_v0]] : vector<4xi32> to vector<2x2xi32>
 // CHECK-NEXT:  %[[VAL_56:[0-9]+]] = vector.insert_strided_slice %[[VAL_55]], %[[VAL_47]] {offsets = [2, 2], strides = [1, 1]} : vector<2x2xi32> into vector<4x4xi32>
-// CHECK-NEXT:  %[[VAL_57:[0-9]+]] = vector.extract_strided_slice %arg0 {offsets = [2, 8], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
-// CHECK-NEXT:  %[[VAL_58:[0-9]+]] = vector.extract_strided_slice %arg1 {offsets = [2, 8], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+
+// Iteration: [1, 1, 1]
+// CHECK-NEXT:  %[[VAL_57:[0-9]+]] = vector.extract_strided_slice %[[LHS]] {offsets = [2, 8], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
+// CHECK-NEXT:  %[[VAL_58:[0-9]+]] = vector.extract_strided_slice %[[RHS]] {offsets = [2, 8], sizes = [2, 8], strides = [1, 1]} : vector<4x16xi8> to vector<2x8xi8>
 // CHECK-NEXT:  %[[VAL_59:[0-9]+]] = vector.shape_cast %[[VAL_57]] : vector<2x8xi8> to vector<16xi8>
 // CHECK-NEXT:  %[[VAL_60:[0-9]+]] = vector.shape_cast %[[VAL_58]] : vector<2x8xi8> to vector<16xi8>
 // CHECK-NEXT:  %[[KACC_3_v1:[0-9]+]] = arm_neon.intr.smmla %[[KACC_3_v0]], %[[VAL_59]], %[[VAL_60]] : vector<16xi8> to vector<4xi32>
 // CHECK-NEXT:  %[[VAL_62:[0-9]+]] = vector.shape_cast %[[KACC_3_v1]] : vector<4xi32> to vector<2x2xi32>
 // CHECK-NEXT:  %[[VAL_63:[0-9]+]] = vector.insert_strided_slice %[[VAL_62]], %[[VAL_56]] {offsets = [2, 2], strides = [1, 1]} : vector<2x2xi32> into vector<4x4xi32>
+
 // CHECK-NEXT:  return %[[VAL_63]] : vector<4x4xi32>
 func.func @vector_arm_neon_mnk_unroll(%lhs: vector<4x16xi8>, %rhs: vector<4x16xi8>, %acc : vector<4x4xi32>) -> vector<4x4xi32> {
   %lhs_extsi = arith.extsi %lhs : vector<4x16xi8> to vector<4x16xi32>

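As an aside (not part of the patch), the sub-tile offsets described in the comment added by the last patch can be enumerated directly. A minimal Python sketch with a hypothetical helper name, following the [I, J, K] iteration order visible in the CHECK lines:

def smmla_tile_offsets(m=4, n=4, k=16, tile_m=2, tile_n=2, tile_k=8):
    # I (rows of LHS/ACC) is outermost, then J (rows of RHS, columns of ACC),
    # then K innermost, matching the order of the smmla ops in the lowered IR.
    for i in range(m // tile_m):
        for j in range(n // tile_n):
            for kk in range(k // tile_k):
                yield ([i, j, kk],
                       [tile_m * i, tile_k * kk],  # LHS extract offset
                       [tile_n * j, tile_k * kk],  # RHS extract offset
                       [tile_m * i, tile_n * j])   # ACC extract/insert offset

For example, the sixth tuple is ([1, 0, 1], [2, 8], [0, 8], [2, 0]), which matches the %[[VAL_41]] and %[[VAL_42]] extracts and the [2, 0] insert offset in the CHECK lines for iteration [1, 0, 1] above.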

