[Mlir-commits] [mlir] andrzej/update xfer perm tests 2 (PR #123237)
Andrzej Warzyński
llvmlistbot at llvm.org
Thu Jan 16 12:21:20 PST 2025
https://github.com/banach-space created https://github.com/llvm/llvm-project/pull/123237
- **[mlir][vector] Update tests for xfer permutation lowering**
- **[mlir][vector] Update tests for xfer permutation lowering (2/N)**
From 170d7c2b9ae8040e81c5eb978e1e80c6956d3f72 Mon Sep 17 00:00:00 2001
From: Andrzej Warzynski <andrzej.warzynski at arm.com>
Date: Wed, 15 Jan 2025 16:05:26 +0000
Subject: [PATCH 1/2] [mlir][vector] Update tests for xfer permutation lowering
1. Remove `%c0 = arith.constant 0 : index` from test functions. This
extra Op is not needed (the index can be passed as a function argument
instead), so it is just noise. A minimal before/after sketch is
included below.
2. Replace `%cst_0` with `%pad` to communicate what the underlying SSA
value is intended for.
3. Unify some comments.
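For illustration only, a minimal before/after sketch of change (1);
`@example` is a placeholder name and the snippet is simplified from the
tests touched by this patch:

  // Before: the 0 index is hard-coded as an extra Op inside the test.
  func.func @example(%vec: vector<4x8xi16>, %mem: memref<2x2x8x4xi16>) {
    %c0 = arith.constant 0 : index
    vector.transfer_write %vec, %mem[%c0, %c0, %c0, %c0] {
      in_bounds = [true, true],
      permutation_map = affine_map<(d0, d1, d2, d3) -> (d3, d2)>
    } : vector<4x8xi16>, memref<2x2x8x4xi16>
    return
  }

  // After: the index is passed as a function argument, so the extra Op
  // (and the implicit assumption that the index is 0) disappears.
  func.func @example(%vec: vector<4x8xi16>, %mem: memref<2x2x8x4xi16>, %idx: index) {
    vector.transfer_write %vec, %mem[%idx, %idx, %idx, %idx] {
      in_bounds = [true, true],
      permutation_map = affine_map<(d0, d1, d2, d3) -> (d3, d2)>
    } : vector<4x8xi16>, memref<2x2x8x4xi16>
    return
  }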
---
.../vector-transfer-permutation-lowering.mlir | 150 +++++++++---------
1 file changed, 77 insertions(+), 73 deletions(-)
diff --git a/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir b/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir
index 0feaf690af2510..045d4a9cdb5ddb 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir
@@ -13,17 +13,17 @@
// CHECK-LABEL: func.func @xfer_write_transposing_permutation_map
// CHECK-SAME: %[[VEC:.*]]: vector<4x8xi16>,
-// CHECK-SAME: %[[MEM:.*]]: memref<2x2x8x4xi16>) {
+// CHECK-SAME: %[[MEM:.*]]: memref<2x2x8x4xi16>
// CHECK: %[[TR:.*]] = vector.transpose %[[VEC]], [1, 0] : vector<4x8xi16> to vector<8x4xi16>
// CHECK: vector.transfer_write
// CHECK-NOT: permutation_map
// CHECK-SAME: %[[TR]], %[[MEM]]{{.*}} {in_bounds = [true, true]} : vector<8x4xi16>, memref<2x2x8x4xi16>
func.func @xfer_write_transposing_permutation_map(
%vec: vector<4x8xi16>,
- %mem: memref<2x2x8x4xi16>) {
+ %mem: memref<2x2x8x4xi16>,
+ %idx: index) {
- %c0 = arith.constant 0 : index
- vector.transfer_write %vec, %mem[%c0, %c0, %c0, %c0] {
+ vector.transfer_write %vec, %mem[%idx, %idx, %idx, %idx] {
in_bounds = [true, true],
permutation_map = affine_map<(d0, d1, d2, d3) -> (d3, d2)>
} : vector<4x8xi16>, memref<2x2x8x4xi16>
@@ -31,24 +31,25 @@ func.func @xfer_write_transposing_permutation_map(
return
}
-// Even with out-of-bounds, it is safe to apply this pattern
+// Even with out-of-bounds accesses, it is safe to apply this pattern
+
// CHECK-LABEL: func.func @xfer_write_transposing_permutation_map_out_of_bounds
// CHECK-SAME: %[[VEC:.*]]: vector<4x8xi16>,
-// CHECK-SAME: %[[MEM:.*]]: memref<2x2x?x?xi16>) {
-// CHECK: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-SAME: %[[MEM:.*]]: memref<2x2x?x?xi16>,
+// CHECK-SAME: %[[IDX:.*]]: index) {
// CHECK: %[[TR:.*]] = vector.transpose %[[VEC]], [1, 0] : vector<4x8xi16> to vector<8x4xi16>
// Expect the in_bounds attribute to be preserved. Since we don't print it when
// all flags are "false", it should not appear in the output.
// CHECK-NOT: in_bounds
// CHECK: vector.transfer_write
// CHECK-NOT: permutation_map
-// CHECK-SAME: %[[TR]], %[[MEM]][%[[C0]], %[[C0]], %[[C0]], %[[C0]]] : vector<8x4xi16>, memref<2x2x?x?xi16>
+// CHECK-SAME: %[[TR]], %[[MEM]][%[[IDX]], %[[IDX]], %[[IDX]], %[[IDX]]] : vector<8x4xi16>, memref<2x2x?x?xi16>
func.func @xfer_write_transposing_permutation_map_out_of_bounds(
%vec: vector<4x8xi16>,
- %mem: memref<2x2x?x?xi16>) {
+ %mem: memref<2x2x?x?xi16>,
+ %idx: index) {
- %c0 = arith.constant 0 : index
- vector.transfer_write %vec, %mem[%c0, %c0, %c0, %c0] {
+ vector.transfer_write %vec, %mem[%idx, %idx, %idx, %idx] {
in_bounds = [false, false],
permutation_map = affine_map<(d0, d1, d2, d3) -> (d3, d2)>
} : vector<4x8xi16>, memref<2x2x?x?xi16>
@@ -59,7 +60,7 @@ func.func @xfer_write_transposing_permutation_map_out_of_bounds(
// CHECK-LABEL: func.func @xfer_write_transposing_permutation_map_with_mask_scalable
// CHECK-SAME: %[[VEC:.*]]: vector<4x[8]xi16>,
// CHECK-SAME: %[[MEM:.*]]: memref<2x2x?x4xi16>,
-// CHECK-SAME: %[[MASK:.*]]: vector<[8]x4xi1>) {
+// CHECK-SAME: %[[MASK:.*]]: vector<[8]x4xi1>
// CHECK: %[[TR:.*]] = vector.transpose %[[VEC]], [1, 0] : vector<4x[8]xi16> to vector<[8]x4xi16>
// CHECK: vector.transfer_write
// CHECK-NOT: permutation_map
@@ -67,10 +68,11 @@ func.func @xfer_write_transposing_permutation_map_out_of_bounds(
func.func @xfer_write_transposing_permutation_map_with_mask_scalable(
%vec: vector<4x[8]xi16>,
%mem: memref<2x2x?x4xi16>,
- %mask: vector<[8]x4xi1>) {
+ %mask: vector<[8]x4xi1>,
+ %idx: index) {
%c0 = arith.constant 0 : index
- vector.transfer_write %vec, %mem[%c0, %c0, %c0, %c0], %mask {
+ vector.transfer_write %vec, %mem[%idx, %idx, %idx, %idx], %mask {
in_bounds = [true, true],
permutation_map = affine_map<(d0, d1, d2, d3) -> (d3, d2)>
} : vector<4x[8]xi16>, memref<2x2x?x4xi16>
@@ -79,16 +81,18 @@ func.func @xfer_write_transposing_permutation_map_with_mask_scalable(
}
// Masked version is not supported
+
// CHECK-LABEL: func.func @xfer_write_transposing_permutation_map_masked
// CHECK-NOT: vector.transpose
func.func @xfer_write_transposing_permutation_map_masked(
%vec: vector<4x8xi16>,
%mem: memref<2x2x8x4xi16>,
- %mask: vector<8x4xi1>) {
+ %mask: vector<8x4xi1>,
+ %idx: index) {
%c0 = arith.constant 0 : index
vector.mask %mask {
- vector.transfer_write %vec, %mem[%c0, %c0, %c0, %c0] {
+ vector.transfer_write %vec, %mem[%idx, %idx, %idx, %idx] {
in_bounds = [true, true],
permutation_map = affine_map<(d0, d1, d2, d3) -> (d3, d2)>
} : vector<4x8xi16>, memref<2x2x8x4xi16>
@@ -128,7 +132,8 @@ func.func @xfer_write_non_transposing_permutation_map(
return
}
-// Even with out-of-bounds, it is safe to apply this pattern
+// Even with out-of-bounds accesses, it is safe to apply this pattern
+
// CHECK-LABEL: func.func @xfer_write_non_transposing_permutation_map_with_mask_out_of_bounds(
// CHECK-SAME: %[[MEM:.*]]: memref<?x?xf32>,
// CHECK-SAME: %[[VEC:.*]]: vector<7xf32>,
@@ -157,8 +162,7 @@ func.func @xfer_write_non_transposing_permutation_map_with_mask_out_of_bounds(
// CHECK: func.func @permutation_with_mask_xfer_write_scalable(
// CHECK-SAME: %[[VEC:.*]]: vector<4x[8]xi16>,
// CHECK-SAME: %[[MEM:.*]]: memref<1x4x?x1xi16>,
-// CHECK-SAME: %[[MASK:.*]]: vector<4x[8]xi1>) {
-// CHECK: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-SAME: %[[MASK:.*]]: vector<4x[8]xi1>
// CHECK: %[[BC_1:.*]] = vector.broadcast %[[VEC]] : vector<4x[8]xi16> to vector<1x4x[8]xi16>
// CHECK: %[[BC_2:.*]] = vector.broadcast %[[MASK]] : vector<4x[8]xi1> to vector<1x4x[8]xi1>
// CHECK: %[[TRANSPOSE_1:.*]] = vector.transpose %[[BC_2]], [1, 2, 0] : vector<1x4x[8]xi1> to vector<4x[8]x1xi1>
@@ -167,10 +171,10 @@ func.func @xfer_write_non_transposing_permutation_map_with_mask_out_of_bounds(
func.func @permutation_with_mask_xfer_write_scalable(
%vec: vector<4x[8]xi16>,
%mem: memref<1x4x?x1xi16>,
- %mask: vector<4x[8]xi1>){
+ %mask: vector<4x[8]xi1>,
+ %idx: index){
- %c0 = arith.constant 0 : index
- vector.transfer_write %vec, %mem[%c0, %c0, %c0, %c0], %mask {
+ vector.transfer_write %vec, %mem[%idx, %idx, %idx, %idx], %mask {
in_bounds = [true, true],
permutation_map = affine_map<(d0, d1, d2, d3) -> (d1, d2)>
} : vector<4x[8]xi16>, memref<1x4x?x1xi16>
@@ -178,7 +182,8 @@ func.func @permutation_with_mask_xfer_write_scalable(
return
}
-// transfer_write in MaskOp case not supported.
+// Masked version is not supported
+
// CHECK-LABEL: func @masked_permutation_xfer_write_fixed_width
// CHECK-SAME: %[[DEST:.*]]: tensor<?x?xf32>,
// CHECK-SAME: %[[VEC:.*]]: vector<16xf32>,
@@ -204,18 +209,19 @@ func.func @masked_permutation_xfer_write_fixed_width(
// CHECK-LABEL: func.func @masked_permutation_xfer_write_scalable(
// CHECK-SAME: %[[VEC:.*]]: vector<4x[8]xi16>,
// CHECK-SAME: %[[DEST:.*]]: tensor<?x?x?x?xf32>,
-// CHECK-SAME: %[[MASK:.*]]: vector<4x[8]xi1>)
+// CHECK-SAME: %[[MASK:.*]]: vector<4x[8]xi1>
// CHECK-SAME: -> tensor<?x?x?x?xf32> {
// CHECK-NOT: vector.transpose
// CHECK: vector.mask %[[MASK]] { vector.transfer_write %[[VEC]], %[[DEST]]{{.*}} : vector<4x[8]xi16>, tensor<?x?x?x?xf32> } : vector<4x[8]xi1> -> tensor<?x?x?x?xf32>
func.func @masked_permutation_xfer_write_scalable(
%vec: vector<4x[8]xi16>,
%dest: tensor<?x?x?x?xf32>,
- %mask: vector<4x[8]xi1>) -> tensor<?x?x?x?xf32> {
+ %mask: vector<4x[8]xi1>,
+ %idx: index) -> tensor<?x?x?x?xf32> {
%c0 = arith.constant 0 : index
%res = vector.mask %mask {
- vector.transfer_write %vec, %dest[%c0, %c0, %c0, %c0] {
+ vector.transfer_write %vec, %dest[%idx, %idx, %idx, %idx] {
in_bounds = [true, true],
permutation_map = affine_map<(d0, d1, d2, d3) -> (d1, d2)>
} : vector<4x[8]xi16>, tensor<?x?x?x?xf32>
@@ -224,22 +230,23 @@ func.func @masked_permutation_xfer_write_scalable(
return %res : tensor<?x?x?x?xf32>
}
-// transfer_write in MaskOp case not supported.
+// Masked version is not supported
+
// CHECK-LABEL: func @masked_non_permutation_xfer_write_fixed_width
// CHECK-SAME: %[[DEST:.*]]: tensor<?x?x?x?xf32>
// CHECK-SAME: %[[VEC:.*]]: vector<14x8x16xf32>
-// CHECK-SAME: %[[IDX:.*]]: index) -> tensor<?x?x?x?xf32>
+// CHECK-SAME: %[[DIM:.*]]: index, %[[IDX:.*]]: index) -> tensor<?x?x?x?xf32>
// CHECK-NOT: vector.broadcast
// CHECK: vector.mask %0 { vector.transfer_write %[[VEC]], %[[DEST]]{{.*}} : vector<14x8x16xf32>, tensor<?x?x?x?xf32> } : vector<14x8x16xi1> -> tensor<?x?x?x?xf32>
func.func @masked_non_permutation_xfer_write_fixed_width(
%dest : tensor<?x?x?x?xf32>,
%vec : vector<14x8x16xf32>,
- %dim : index) -> tensor<?x?x?x?xf32> {
+ %dim : index,
+ %idx: index) -> tensor<?x?x?x?xf32> {
- %c0 = arith.constant 0 : index
%mask = vector.create_mask %dim, %dim, %dim : vector<14x8x16xi1>
%res = vector.mask %mask {
- vector.transfer_write %vec, %dest[%c0, %c0, %c0, %c0] {
+ vector.transfer_write %vec, %dest[%idx, %idx, %idx, %idx] {
in_bounds = [false, false, true],
permutation_map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>
} : vector<14x8x16xf32>, tensor<?x?x?x?xf32>
@@ -259,25 +266,23 @@ func.func @masked_non_permutation_xfer_write_fixed_width(
// CHECK-LABEL: func.func @permutation_with_mask_xfer_read_fixed_width(
// CHECK-SAME: %[[MEM:.*]]: memref<?x?xf32>,
-// CHECK-SAME: %[[IDX_1:.*]]: index,
-// CHECK-SAME: %[[IDX_2:.*]]: index) -> vector<8x4x2xf32> {
-// CHECK: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-SAME: %[[DIM_1:.*]]: index, %[[DIM_2:.*]]: index, %[[IDX:.*]]: index) -> vector<8x4x2xf32> {
// CHECK: %[[PASS_THROUGH:.*]] = arith.constant 0.000000e+00 : f32
-// CHECK: %[[MASK:.*]] = vector.create_mask %[[IDX_2]], %[[IDX_1]] : vector<2x4xi1>
-// CHECK: %[[T_READ:.*]] = vector.transfer_read %[[MEM]]{{\[}}%[[C0]], %[[C0]]], %[[PASS_THROUGH]], %[[MASK]] {in_bounds = [true, true]} : memref<?x?xf32>, vector<2x4xf32>
+// CHECK: %[[MASK:.*]] = vector.create_mask %[[DIM_2]], %[[DIM_1]] : vector<2x4xi1>
+// CHECK: %[[T_READ:.*]] = vector.transfer_read %[[MEM]]{{\[}}%[[IDX]], %[[IDX]]], %[[PASS_THROUGH]], %[[MASK]] {in_bounds = [true, true]} : memref<?x?xf32>, vector<2x4xf32>
// CHECK: %[[BCAST:.*]] = vector.broadcast %[[T_READ]] : vector<2x4xf32> to vector<8x2x4xf32>
// CHECK: %[[TRANSPOSE:.*]] = vector.transpose %[[BCAST]], [0, 2, 1] : vector<8x2x4xf32> to vector<8x4x2xf32>
// CHECK: return %[[TRANSPOSE]] : vector<8x4x2xf32>
func.func @permutation_with_mask_xfer_read_fixed_width(
%mem: memref<?x?xf32>,
%dim_1: index,
- %dim_2: index) -> (vector<8x4x2xf32>) {
+ %dim_2: index,
+ %idx: index) -> (vector<8x4x2xf32>) {
- %c0 = arith.constant 0 : index
- %cst_0 = arith.constant 0.000000e+00 : f32
+ %pad = arith.constant 0.000000e+00 : f32
%mask = vector.create_mask %dim_2, %dim_1 : vector<2x4xi1>
- %res = vector.transfer_read %mem[%c0, %c0], %cst_0, %mask {
+ %res = vector.transfer_read %mem[%idx, %idx], %pad, %mask {
in_bounds = [true, true, true],
permutation_map = affine_map<(d0, d1) -> (0, d1, d0)>
} : memref<?x?xf32>, vector<8x4x2xf32>
@@ -287,25 +292,23 @@ func.func @permutation_with_mask_xfer_read_fixed_width(
// CHECK-LABEL: func.func @permutation_with_mask_xfer_read_scalable(
// CHECK-SAME: %[[MEM:.*]]: memref<?x?xf32>,
-// CHECK-SAME: %[[IDX_1:.*]]: index,
-// CHECK-SAME: %[[IDX_2:.*]]: index) -> vector<8x[4]x2xf32> {
-// CHECK: %[[C0:.*]] = arith.constant 0 : index
-// CHECK: %[[PASS_THROUGH:.*]] = arith.constant 0.000000e+00 : f32
-// CHECK: %[[MASK:.*]] = vector.create_mask %[[IDX_2]], %[[IDX_1]] : vector<2x[4]xi1>
-// CHECK: %[[T_READ:.*]] = vector.transfer_read %[[MEM]]{{\[}}%[[C0]], %[[C0]]], %[[PASS_THROUGH]], %[[MASK]] {in_bounds = [true, true]} : memref<?x?xf32>, vector<2x[4]xf32>
+// CHECK-SAME: %[[DIM_1:.*]]: index, %[[DIM_2:.*]]: index, %[[IDX:.*]]: index) -> vector<8x[4]x2xf32> {
+// CHECK: %[[PAD:.*]] = arith.constant 0.000000e+00 : f32
+// CHECK: %[[MASK:.*]] = vector.create_mask %[[DIM_2]], %[[DIM_1]] : vector<2x[4]xi1>
+// CHECK: %[[T_READ:.*]] = vector.transfer_read %[[MEM]]{{\[}}%[[IDX]], %[[IDX]]], %[[PAD]], %[[MASK]] {in_bounds = [true, true]} : memref<?x?xf32>, vector<2x[4]xf32>
// CHECK: %[[BCAST:.*]] = vector.broadcast %[[T_READ]] : vector<2x[4]xf32> to vector<8x2x[4]xf32>
// CHECK: %[[TRANSPOSE:.*]] = vector.transpose %[[BCAST]], [0, 2, 1] : vector<8x2x[4]xf32> to vector<8x[4]x2xf32>
// CHECK: return %[[TRANSPOSE]] : vector<8x[4]x2xf32>
func.func @permutation_with_mask_xfer_read_scalable(
%mem: memref<?x?xf32>,
%dim_1: index,
- %dim_2: index) -> (vector<8x[4]x2xf32>) {
+ %dim_2: index,
+ %idx: index) -> (vector<8x[4]x2xf32>) {
- %c0 = arith.constant 0 : index
- %cst_0 = arith.constant 0.000000e+00 : f32
+ %pad = arith.constant 0.000000e+00 : f32
%mask = vector.create_mask %dim_2, %dim_1 : vector<2x[4]xi1>
- %res = vector.transfer_read %mem[%c0, %c0], %cst_0, %mask {
+ %res = vector.transfer_read %mem[%idx, %idx], %pad, %mask {
in_bounds = [true, true, true],
permutation_map = affine_map<(d0, d1) -> (0, d1, d0)>
} : memref<?x?xf32>, vector<8x[4]x2xf32>
@@ -313,7 +316,8 @@ func.func @permutation_with_mask_xfer_read_scalable(
return %res : vector<8x[4]x2xf32>
}
-// transfer_read in MaskOp case not supported.
+// Masked version is not supported
+
// CHECK-LABEL: func @masked_permutation_xfer_read_fixed_width
// CHECK-SAME: %[[DEST:.*]]: tensor<?x1xf32>,
// CHECK-SAME: %[[MASK:.*]]: vector<4x1xi1>
@@ -321,12 +325,12 @@ func.func @permutation_with_mask_xfer_read_scalable(
// CHECK: vector.mask %[[MASK]] { vector.transfer_read %[[DEST]]{{.*}}: tensor<?x1xf32>, vector<1x4x4xf32> } : vector<4x1xi1> -> vector<1x4x4xf32>
func.func @masked_permutation_xfer_read_fixed_width(
%dest: tensor<?x1xf32>,
- %mask : vector<4x1xi1>) {
+ %mask : vector<4x1xi1>,
+ %idx: index) {
- %cst = arith.constant 0.000000e+00 : f32
- %c0 = arith.constant 0 : index
+ %pad = arith.constant 0.000000e+00 : f32
%3 = vector.mask %mask {
- vector.transfer_read %dest[%c0, %c0], %cst {
+ vector.transfer_read %dest[%idx, %idx], %pad {
permutation_map = affine_map<(d0, d1) -> (d1, 0, d0)>
} : tensor<?x1xf32>, vector<1x4x4xf32>
} : vector<4x1xi1> -> vector<1x4x4xf32>
@@ -337,18 +341,18 @@ func.func @masked_permutation_xfer_read_fixed_width(
// CHECK-LABEL: func.func @masked_permutation_xfer_read_scalable(
// CHECK-SAME: %[[DEST:.*]]: tensor<?x?xf32>,
-// CHECK-SAME: %[[MASK:.*]]: vector<2x[4]xi1>) -> vector<8x[4]x2xf32> {
+// CHECK-SAME: %[[MASK:.*]]: vector<2x[4]xi1>
// CHECK-NOT: vector.transpose
// CHECK: %[[T_READ:.*]] = vector.mask %[[MASK]] { vector.transfer_read %[[DEST]]{{.*}} : tensor<?x?xf32>, vector<8x[4]x2xf32> } : vector<2x[4]xi1> -> vector<8x[4]x2xf32>
func.func @masked_permutation_xfer_read_scalable(
%dest: tensor<?x?xf32>,
- %mask : vector<2x[4]xi1>) -> vector<8x[4]x2xf32> {
+ %mask : vector<2x[4]xi1>,
+ %idx: index) -> vector<8x[4]x2xf32> {
- %c0 = arith.constant 0 : index
- %cst_0 = arith.constant 0.000000e+00 : f32
+ %pad = arith.constant 0.000000e+00 : f32
%res = vector.mask %mask {
- vector.transfer_read %dest[%c0, %c0], %cst_0 {
+ vector.transfer_read %dest[%idx, %idx], %pad {
in_bounds = [true, true, true],
permutation_map = affine_map<(d0, d1) -> (0, d1, d0)>
} : tensor<?x?xf32>, vector<8x[4]x2xf32>
@@ -377,18 +381,16 @@ module attributes {transform.with_named_sequence} {
// CHECK: #[[MAP:.*]] = affine_map<(d0, d1, d2, d3) -> (d1, 0, d3)>
// CHECK: func.func @transfer_read_reduce_rank_scalable(
-// CHECK-SAME: %[[MEM:.*]]: memref<?x?x?x?xf32>) -> vector<8x[4]x2x3xf32> {
-// CHECK: %[[C0:.*]] = arith.constant 0 : index
-// CHECK: %[[T_READ:.*]] = vector.transfer_read %[[MEM]][%[[C0]], %[[C0]], %[[C0]], %[[C0]]]{{.*}} permutation_map = #[[MAP]]} : memref<?x?x?x?xf32>, vector<[4]x2x3xf32>
+// CHECK-SAME: %[[MEM:.*]]: memref<?x?x?x?xf32>, %[[IDX:.*]]: index) -> vector<8x[4]x2x3xf32> {
+// CHECK: %[[T_READ:.*]] = vector.transfer_read %[[MEM]][%[[IDX]], %[[IDX]], %[[IDX]], %[[IDX]]]{{.*}} permutation_map = #[[MAP]]} : memref<?x?x?x?xf32>, vector<[4]x2x3xf32>
// CHECK: %[[BC:.*]] = vector.broadcast %[[T_READ]] : vector<[4]x2x3xf32> to vector<8x[4]x2x3xf32>
// CHECK: return %[[BC]] : vector<8x[4]x2x3xf32>
func.func @transfer_read_reduce_rank_scalable(
- %mem: memref<?x?x?x?xf32>) -> vector<8x[4]x2x3xf32> {
+ %mem: memref<?x?x?x?xf32>, %idx: index) -> vector<8x[4]x2x3xf32> {
- %c0 = arith.constant 0 : index
- %cst_0 = arith.constant 0.000000e+00 : f32
+ %pad = arith.constant 0.000000e+00 : f32
- %res = vector.transfer_read %mem[%c0, %c0, %c0, %c0], %cst_0 {
+ %res = vector.transfer_read %mem[%idx, %idx, %idx, %idx], %pad {
in_bounds = [true, true, true, true],
permutation_map = affine_map<(d0, d1, d2, d3) -> (0, d1, 0, d3)>
} : memref<?x?x?x?xf32>, vector<8x[4]x2x3xf32>
@@ -396,22 +398,24 @@ func.func @transfer_read_reduce_rank_scalable(
return %res : vector<8x[4]x2x3xf32>
}
-// Masked case not supported.
+// Masked version is not supported
+
// CHECK-LABEL: func.func @masked_transfer_read_reduce_rank(
// CHECK-SAME: %[[MEM:.*]]: memref<?x?x?x?xf32>,
-// CHECK-SAME: %[[DIM:.*]]: index) -> vector<8x[4]x2x3xf32> {
+// CHECK-SAME: %[[DIM:.*]]: index,
+// CHECK-SAME: %[[IDX:.*]]: index) -> vector<8x[4]x2x3xf32> {
// CHECK-NOT: vector.broadcast
// CHECK: %[[MASK:.*]] = vector.mask %0 { vector.transfer_read %[[MEM]]{{.*}} : memref<?x?x?x?xf32>, vector<8x[4]x2x3xf32> } : vector<[4]x3xi1> -> vector<8x[4]x2x3xf32>
func.func @masked_transfer_read_reduce_rank(
%mem: memref<?x?x?x?xf32>,
- %dim: index) -> vector<8x[4]x2x3xf32> {
+ %dim: index,
+ %idx: index) -> vector<8x[4]x2x3xf32> {
- %c0 = arith.constant 0 : index
- %cst_0 = arith.constant 0.000000e+00 : f32
+ %pad = arith.constant 0.000000e+00 : f32
%mask = vector.create_mask %dim, %dim: vector<[4]x3xi1>
%res = vector.mask %mask {
- vector.transfer_read %mem[%c0, %c0, %c0, %c0], %cst_0 {
+ vector.transfer_read %mem[%idx, %idx, %idx, %idx], %pad {
in_bounds = [true, true, true, true],
permutation_map = affine_map<(d0, d1, d2, d3) -> (0, d1, 0, d3)>
} : memref<?x?x?x?xf32>, vector<8x[4]x2x3xf32>
From 2680861a064225ddc372fc4381d98341a0520957 Mon Sep 17 00:00:00 2001
From: Andrzej Warzynski <andrzej.warzynski at arm.com>
Date: Thu, 16 Jan 2025 20:16:55 +0000
Subject: [PATCH 2/2] [mlir][vector] Update tests for xfer permutation lowering
(2/N)
Unifies test function names so that it's easier to identify what the
different cases are, and improves consistency. The following naming
scheme has been adopted (an example breakdown is included below):
* `@xfer_{read|write}_{map_type}_{masked|with_mask|}_{out_of_bounds}_{scalable}`
Also updates some comments to better document the patterns that are
being exercised.
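For illustration, one of the updated names from this patch broken down
against that scheme (the grouping below is informal):

  @xfer_write_minor_identity_transposed_with_mask_scalable
    * xfer_write                - exercises vector.transfer_write
    * minor_identity_transposed - {map_type}: a transposed minor identity
                                  permutation map
    * with_mask                 - the mask is passed directly to the xfer Op
                                  (whereas "_masked" means the Op is wrapped
                                  in vector.mask)
    * scalable                  - uses scalable vectors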
---
.../vector-transfer-permutation-lowering.mlir | 124 ++++++++----------
1 file changed, 55 insertions(+), 69 deletions(-)
diff --git a/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir b/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir
index 045d4a9cdb5ddb..6705905633e0fa 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir
@@ -1,24 +1,22 @@
// RUN: mlir-opt %s --transform-interpreter --split-input-file | FileCheck %s
+/// CHECK: #[[$MAP:.*]] = affine_map<(d0, d1, d2, d3) -> (d1, 0, d3)>
+
///----------------------------------------------------------------------------------------
-/// vector.transfer_write -> vector.transpose + vector.transfer_write
/// [Pattern: TransferWritePermutationLowering]
+///
+/// IN: vector.transfer_write (_transposed_ minor identity permutation map)
+/// OUT: vector.transpose + vector.transfer_write (minor identity permutation map)
///----------------------------------------------------------------------------------------
-/// Input:
-/// * vector.transfer_write op with a permutation that under a transpose
-/// _would be_ a minor identity permutation map
-/// Output:
-/// * vector.transpose + vector.transfer_write with a permutation map which
-/// _is_ a minor identity
-
-// CHECK-LABEL: func.func @xfer_write_transposing_permutation_map
+
+// CHECK-LABEL: func.func @xfer_write_minor_identity_transposed
// CHECK-SAME: %[[VEC:.*]]: vector<4x8xi16>,
// CHECK-SAME: %[[MEM:.*]]: memref<2x2x8x4xi16>
// CHECK: %[[TR:.*]] = vector.transpose %[[VEC]], [1, 0] : vector<4x8xi16> to vector<8x4xi16>
// CHECK: vector.transfer_write
// CHECK-NOT: permutation_map
// CHECK-SAME: %[[TR]], %[[MEM]]{{.*}} {in_bounds = [true, true]} : vector<8x4xi16>, memref<2x2x8x4xi16>
-func.func @xfer_write_transposing_permutation_map(
+func.func @xfer_write_minor_identity_transposed(
%vec: vector<4x8xi16>,
%mem: memref<2x2x8x4xi16>,
%idx: index) {
@@ -33,7 +31,7 @@ func.func @xfer_write_transposing_permutation_map(
// Even with out-of-bounds accesses, it is safe to apply this pattern
-// CHECK-LABEL: func.func @xfer_write_transposing_permutation_map_out_of_bounds
+// CHECK-LABEL: func.func @xfer_write_minor_identity_transposed_out_of_bounds
// CHECK-SAME: %[[VEC:.*]]: vector<4x8xi16>,
// CHECK-SAME: %[[MEM:.*]]: memref<2x2x?x?xi16>,
// CHECK-SAME: %[[IDX:.*]]: index) {
@@ -44,7 +42,7 @@ func.func @xfer_write_transposing_permutation_map(
// CHECK: vector.transfer_write
// CHECK-NOT: permutation_map
// CHECK-SAME: %[[TR]], %[[MEM]][%[[IDX]], %[[IDX]], %[[IDX]], %[[IDX]]] : vector<8x4xi16>, memref<2x2x?x?xi16>
-func.func @xfer_write_transposing_permutation_map_out_of_bounds(
+func.func @xfer_write_minor_identity_transposed_out_of_bounds(
%vec: vector<4x8xi16>,
%mem: memref<2x2x?x?xi16>,
%idx: index) {
@@ -57,7 +55,7 @@ func.func @xfer_write_transposing_permutation_map_out_of_bounds(
return
}
-// CHECK-LABEL: func.func @xfer_write_transposing_permutation_map_with_mask_scalable
+// CHECK-LABEL: func.func @xfer_write_minor_identity_transposed_with_mask_scalable
// CHECK-SAME: %[[VEC:.*]]: vector<4x[8]xi16>,
// CHECK-SAME: %[[MEM:.*]]: memref<2x2x?x4xi16>,
// CHECK-SAME: %[[MASK:.*]]: vector<[8]x4xi1>
@@ -65,7 +63,7 @@ func.func @xfer_write_transposing_permutation_map_out_of_bounds(
// CHECK: vector.transfer_write
// CHECK-NOT: permutation_map
// CHECK-SAME: %[[TR]], %[[MEM]]{{.*}}, %[[MASK]] {in_bounds = [true, true]} : vector<[8]x4xi16>, memref<2x2x?x4xi16>
-func.func @xfer_write_transposing_permutation_map_with_mask_scalable(
+func.func @xfer_write_minor_identity_transposed_with_mask_scalable(
%vec: vector<4x[8]xi16>,
%mem: memref<2x2x?x4xi16>,
%mask: vector<[8]x4xi1>,
@@ -82,9 +80,9 @@ func.func @xfer_write_transposing_permutation_map_with_mask_scalable(
// Masked version is not supported
-// CHECK-LABEL: func.func @xfer_write_transposing_permutation_map_masked
+// CHECK-LABEL: func.func @xfer_write_minor_identity_transposed_masked
// CHECK-NOT: vector.transpose
-func.func @xfer_write_transposing_permutation_map_masked(
+func.func @xfer_write_minor_identity_transposed_masked(
%vec: vector<4x8xi16>,
%mem: memref<2x2x8x4xi16>,
%mask: vector<8x4xi1>,
@@ -102,24 +100,24 @@ func.func @xfer_write_transposing_permutation_map_masked(
}
///----------------------------------------------------------------------------------------
-/// vector.transfer_write -> vector.broadcast + vector.transpose + vector.transfer_write
/// [Patterns: TransferWriteNonPermutationLowering + TransferWritePermutationLowering]
+///
+/// IN: vector.transfer_write
+/// (neither a minor identity nor transposed minor identity map)
+/// OUT 1: vector.broadcast + vector.transfer_write
+/// (transposed minor identity)
+/// OUT 2: vector.broadcast + vector.transpose + vector.transfer_write
+/// (minor identity)
///----------------------------------------------------------------------------------------
-/// Input:
-/// * vector.transfer_write op with a map which _is not_ a permutation of a
-/// minor identity
-/// Output:
-/// * vector.broadcast + vector.transpose + vector.transfer_write with a map
-/// which _is_ a permutation of a minor identity
-
-// CHECK-LABEL: func.func @xfer_write_non_transposing_permutation_map(
+
+// CHECK-LABEL: func.func @xfer_write_non_minor_identity(
// CHECK-SAME: %[[MEM:.*]]: memref<?x?xf32>,
// CHECK-SAME: %[[VEC:.*]]: vector<7xf32>,
// CHECK-SAME: %[[IDX_1:.*]]: index, %[[IDX_2:.*]]: index) {
// CHECK: %[[BC:.*]] = vector.broadcast %[[VEC]] : vector<7xf32> to vector<1x7xf32>
// CHECK: %[[TR:.*]] = vector.transpose %[[BC]], [1, 0] : vector<1x7xf32> to vector<7x1xf32>
// CHECK: vector.transfer_write %[[TR]], %[[MEM]]{{\[}}%[[IDX_1]], %[[IDX_2]]] {in_bounds = [false, true]} : vector<7x1xf32>, memref<?x?xf32>
-func.func @xfer_write_non_transposing_permutation_map(
+func.func @xfer_write_non_minor_identity(
%mem : memref<?x?xf32>,
%vec : vector<7xf32>,
%idx_1 : index,
@@ -134,7 +132,7 @@ func.func @xfer_write_non_transposing_permutation_map(
// Even with out-of-bounds accesses, it is safe to apply this pattern
-// CHECK-LABEL: func.func @xfer_write_non_transposing_permutation_map_with_mask_out_of_bounds(
+// CHECK-LABEL: func.func @xfer_write_non_minor_identity_with_mask_out_of_bounds(
// CHECK-SAME: %[[MEM:.*]]: memref<?x?xf32>,
// CHECK-SAME: %[[VEC:.*]]: vector<7xf32>,
// CHECK-SAME: %[[IDX_1:.*]]: index, %[[IDX_2:.*]]: index,
@@ -144,7 +142,7 @@ func.func @xfer_write_non_transposing_permutation_map(
// CHECK: %[[TR_MASK:.*]] = vector.transpose %[[BC_MASK]], [1, 0] : vector<1x7xi1> to vector<7x1xi1>
// CHECK: %[[TR_VEC:.*]] = vector.transpose %[[BC_VEC]], [1, 0] : vector<1x7xf32> to vector<7x1xf32>
// CHECK: vector.transfer_write %[[TR_VEC]], %[[MEM]]{{\[}}%[[IDX_1]], %[[IDX_2]]], %[[TR_MASK]] {in_bounds = [false, true]} : vector<7x1xf32>, memref<?x?xf32>
-func.func @xfer_write_non_transposing_permutation_map_with_mask_out_of_bounds(
+func.func @xfer_write_non_minor_identity_with_mask_out_of_bounds(
%mem : memref<?x?xf32>,
%vec : vector<7xf32>,
%idx_1 : index,
@@ -159,7 +157,7 @@ func.func @xfer_write_non_transposing_permutation_map_with_mask_out_of_bounds(
return
}
-// CHECK: func.func @permutation_with_mask_xfer_write_scalable(
+// CHECK-LABEL: func.func @xfer_write_non_minor_identity_with_mask_scalable(
// CHECK-SAME: %[[VEC:.*]]: vector<4x[8]xi16>,
// CHECK-SAME: %[[MEM:.*]]: memref<1x4x?x1xi16>,
// CHECK-SAME: %[[MASK:.*]]: vector<4x[8]xi1>
@@ -168,7 +166,7 @@ func.func @xfer_write_non_transposing_permutation_map_with_mask_out_of_bounds(
// CHECK: %[[TRANSPOSE_1:.*]] = vector.transpose %[[BC_2]], [1, 2, 0] : vector<1x4x[8]xi1> to vector<4x[8]x1xi1>
// CHECK: %[[TRANSPOSE_2:.*]] = vector.transpose %[[BC_1]], [1, 2, 0] : vector<1x4x[8]xi16> to vector<4x[8]x1xi16>
// CHECK: vector.transfer_write %[[TRANSPOSE_2]], %[[MEM]]{{.*}}, %[[TRANSPOSE_1]] {in_bounds = [true, true, true]} : vector<4x[8]x1xi16>, memref<1x4x?x1xi16>
-func.func @permutation_with_mask_xfer_write_scalable(
+func.func @xfer_write_non_minor_identity_with_mask_scalable(
%vec: vector<4x[8]xi16>,
%mem: memref<1x4x?x1xi16>,
%mask: vector<4x[8]xi1>,
@@ -184,14 +182,14 @@ func.func @permutation_with_mask_xfer_write_scalable(
// Masked version is not supported
-// CHECK-LABEL: func @masked_permutation_xfer_write_fixed_width
+// CHECK-LABEL: func @xfer_write_non_minor_identity_masked
// CHECK-SAME: %[[DEST:.*]]: tensor<?x?xf32>,
// CHECK-SAME: %[[VEC:.*]]: vector<16xf32>,
// CHECK-SAME: %[[IDX:.*]]: index,
// CHECK-SAME: %[[MASK:.*]]: vector<16xi1>
// CHECK-NOT: vector.transpose
// CHECK: vector.mask %[[MASK]] { vector.transfer_write %[[VEC]], %[[DEST]]{{.*}} vector<16xf32>, tensor<?x?xf32> } : vector<16xi1> -> tensor<?x?xf32>
-func.func @masked_permutation_xfer_write_fixed_width(
+func.func @xfer_write_non_minor_identity_masked(
%dest: tensor<?x?xf32>,
%vec: vector<16xf32>,
%idx: index,
@@ -206,14 +204,14 @@ func.func @masked_permutation_xfer_write_fixed_width(
return %res : tensor<?x?xf32>
}
-// CHECK-LABEL: func.func @masked_permutation_xfer_write_scalable(
+// CHECK-LABEL: func.func @xfer_write_non_minor_identity_masked_scalable
// CHECK-SAME: %[[VEC:.*]]: vector<4x[8]xi16>,
// CHECK-SAME: %[[DEST:.*]]: tensor<?x?x?x?xf32>,
// CHECK-SAME: %[[MASK:.*]]: vector<4x[8]xi1>
// CHECK-SAME: -> tensor<?x?x?x?xf32> {
// CHECK-NOT: vector.transpose
// CHECK: vector.mask %[[MASK]] { vector.transfer_write %[[VEC]], %[[DEST]]{{.*}} : vector<4x[8]xi16>, tensor<?x?x?x?xf32> } : vector<4x[8]xi1> -> tensor<?x?x?x?xf32>
-func.func @masked_permutation_xfer_write_scalable(
+func.func @xfer_write_non_minor_identity_masked_scalable(
%vec: vector<4x[8]xi16>,
%dest: tensor<?x?x?x?xf32>,
%mask: vector<4x[8]xi1>,
@@ -232,13 +230,13 @@ func.func @masked_permutation_xfer_write_scalable(
// Masked version is not supported
-// CHECK-LABEL: func @masked_non_permutation_xfer_write_fixed_width
+// CHECK-LABEL: func @xfer_write_non_minor_identity_masked_2
// CHECK-SAME: %[[DEST:.*]]: tensor<?x?x?x?xf32>
// CHECK-SAME: %[[VEC:.*]]: vector<14x8x16xf32>
// CHECK-SAME: %[[DIM:.*]]: index, %[[IDX:.*]]: index) -> tensor<?x?x?x?xf32>
// CHECK-NOT: vector.broadcast
// CHECK: vector.mask %0 { vector.transfer_write %[[VEC]], %[[DEST]]{{.*}} : vector<14x8x16xf32>, tensor<?x?x?x?xf32> } : vector<14x8x16xi1> -> tensor<?x?x?x?xf32>
-func.func @masked_non_permutation_xfer_write_fixed_width(
+func.func @xfer_write_non_minor_identity_masked_2(
%dest : tensor<?x?x?x?xf32>,
%vec : vector<14x8x16xf32>,
%dim : index,
@@ -256,15 +254,17 @@ func.func @masked_non_permutation_xfer_write_fixed_width(
}
///----------------------------------------------------------------------------------------
-/// vector.transfer_read
+/// [Pattern: TransferOpReduceRank (for leading 0 dim) +
+/// TransferReadPermutationLowering (for transposed minor identity)]
+///
+/// IN: vector.transfer_read
+/// (_transposed_ minor identity permutation map, with 0 or more broadcast dims)
+/// OUT: vector.transfer_read + vector.transpose
+/// (minor identity permutation map with 0 or more leading broadcast dims)
///----------------------------------------------------------------------------------------
-/// Input:
-/// * vector.transfer_read op with a permutation map
-/// Output:
-/// * vector.transfer_read with a permutation map composed of leading zeros followed by a minor identiy +
-/// vector.transpose op
+/// TODO: Inner broadcast dim - see also the block at the bottom of this file
-// CHECK-LABEL: func.func @permutation_with_mask_xfer_read_fixed_width(
+// CHECK-LABEL: func.func @xfer_read_minor_identity_transposed_with_mask
// CHECK-SAME: %[[MEM:.*]]: memref<?x?xf32>,
// CHECK-SAME: %[[DIM_1:.*]]: index, %[[DIM_2:.*]]: index, %[[IDX:.*]]: index) -> vector<8x4x2xf32> {
// CHECK: %[[PASS_THROUGH:.*]] = arith.constant 0.000000e+00 : f32
@@ -273,7 +273,7 @@ func.func @masked_non_permutation_xfer_write_fixed_width(
// CHECK: %[[BCAST:.*]] = vector.broadcast %[[T_READ]] : vector<2x4xf32> to vector<8x2x4xf32>
// CHECK: %[[TRANSPOSE:.*]] = vector.transpose %[[BCAST]], [0, 2, 1] : vector<8x2x4xf32> to vector<8x4x2xf32>
// CHECK: return %[[TRANSPOSE]] : vector<8x4x2xf32>
-func.func @permutation_with_mask_xfer_read_fixed_width(
+func.func @xfer_read_minor_identity_transposed_with_mask(
%mem: memref<?x?xf32>,
%dim_1: index,
%dim_2: index,
@@ -290,7 +290,7 @@ func.func @permutation_with_mask_xfer_read_fixed_width(
return %res : vector<8x4x2xf32>
}
-// CHECK-LABEL: func.func @permutation_with_mask_xfer_read_scalable(
+// CHECK-LABEL: func.func @xfer_read_minor_identity_transposed_with_mask_scalable(
// CHECK-SAME: %[[MEM:.*]]: memref<?x?xf32>,
// CHECK-SAME: %[[DIM_1:.*]]: index, %[[DIM_2:.*]]: index, %[[IDX:.*]]: index) -> vector<8x[4]x2xf32> {
// CHECK: %[[PAD:.*]] = arith.constant 0.000000e+00 : f32
@@ -299,7 +299,7 @@ func.func @permutation_with_mask_xfer_read_fixed_width(
// CHECK: %[[BCAST:.*]] = vector.broadcast %[[T_READ]] : vector<2x[4]xf32> to vector<8x2x[4]xf32>
// CHECK: %[[TRANSPOSE:.*]] = vector.transpose %[[BCAST]], [0, 2, 1] : vector<8x2x[4]xf32> to vector<8x[4]x2xf32>
// CHECK: return %[[TRANSPOSE]] : vector<8x[4]x2xf32>
-func.func @permutation_with_mask_xfer_read_scalable(
+func.func @xfer_read_minor_identity_transposed_with_mask_scalable(
%mem: memref<?x?xf32>,
%dim_1: index,
%dim_2: index,
@@ -318,12 +318,12 @@ func.func @permutation_with_mask_xfer_read_scalable(
// Masked version is not supported
-// CHECK-LABEL: func @masked_permutation_xfer_read_fixed_width
+// CHECK-LABEL: func @xfer_read_minor_identity_transposed_masked(
// CHECK-SAME: %[[DEST:.*]]: tensor<?x1xf32>,
// CHECK-SAME: %[[MASK:.*]]: vector<4x1xi1>
// CHECK-NOT: vector.transpose
// CHECK: vector.mask %[[MASK]] { vector.transfer_read %[[DEST]]{{.*}}: tensor<?x1xf32>, vector<1x4x4xf32> } : vector<4x1xi1> -> vector<1x4x4xf32>
-func.func @masked_permutation_xfer_read_fixed_width(
+func.func @xfer_read_minor_identity_transposed_masked(
%dest: tensor<?x1xf32>,
%mask : vector<4x1xi1>,
%idx: index) {
@@ -339,12 +339,12 @@ func.func @masked_permutation_xfer_read_fixed_width(
return
}
-// CHECK-LABEL: func.func @masked_permutation_xfer_read_scalable(
+// CHECK-LABEL: func.func @xfer_read_minor_identity_transposed_masked_scalable(
// CHECK-SAME: %[[DEST:.*]]: tensor<?x?xf32>,
// CHECK-SAME: %[[MASK:.*]]: vector<2x[4]xi1>
// CHECK-NOT: vector.transpose
// CHECK: %[[T_READ:.*]] = vector.mask %[[MASK]] { vector.transfer_read %[[DEST]]{{.*}} : tensor<?x?xf32>, vector<8x[4]x2xf32> } : vector<2x[4]xi1> -> vector<8x[4]x2xf32>
-func.func @masked_permutation_xfer_read_scalable(
+func.func @xfer_read_minor_identity_transposed_masked_scalable(
%dest: tensor<?x?xf32>,
%mask : vector<2x[4]xi1>,
%idx: index) -> vector<8x[4]x2xf32> {
@@ -361,31 +361,17 @@ func.func @masked_permutation_xfer_read_scalable(
return %res : vector<8x[4]x2xf32>
}
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
- %f = transform.structured.match ops{["func.func"]} in %module_op
- : (!transform.any_op) -> !transform.any_op
- transform.apply_patterns to %f {
- transform.apply_patterns.vector.transfer_permutation_patterns
- } : !transform.any_op
- transform.yield
- }
-}
-
-// -----
-
///----------------------------------------------------------------------------------------
/// vector.transfer_read
///----------------------------------------------------------------------------------------
/// TODO: Review and categorize
-// CHECK: #[[MAP:.*]] = affine_map<(d0, d1, d2, d3) -> (d1, 0, d3)>
-// CHECK: func.func @transfer_read_reduce_rank_scalable(
+// CHECK-LABEL: func.func @xfer_read_minor_identity_bcast_dims_scalable
// CHECK-SAME: %[[MEM:.*]]: memref<?x?x?x?xf32>, %[[IDX:.*]]: index) -> vector<8x[4]x2x3xf32> {
-// CHECK: %[[T_READ:.*]] = vector.transfer_read %[[MEM]][%[[IDX]], %[[IDX]], %[[IDX]], %[[IDX]]]{{.*}} permutation_map = #[[MAP]]} : memref<?x?x?x?xf32>, vector<[4]x2x3xf32>
+// CHECK: %[[T_READ:.*]] = vector.transfer_read %[[MEM]][%[[IDX]], %[[IDX]], %[[IDX]], %[[IDX]]]{{.*}} permutation_map = #[[$MAP]]} : memref<?x?x?x?xf32>, vector<[4]x2x3xf32>
// CHECK: %[[BC:.*]] = vector.broadcast %[[T_READ]] : vector<[4]x2x3xf32> to vector<8x[4]x2x3xf32>
// CHECK: return %[[BC]] : vector<8x[4]x2x3xf32>
-func.func @transfer_read_reduce_rank_scalable(
+func.func @xfer_read_minor_identity_bcast_dims_scalable(
%mem: memref<?x?x?x?xf32>, %idx: index) -> vector<8x[4]x2x3xf32> {
%pad = arith.constant 0.000000e+00 : f32
@@ -400,13 +386,13 @@ func.func @transfer_read_reduce_rank_scalable(
// Masked version is not supported
-// CHECK-LABEL: func.func @masked_transfer_read_reduce_rank(
+// CHECK-LABEL: func.func @xfer_read_minor_identity_bcast_dims_masked
// CHECK-SAME: %[[MEM:.*]]: memref<?x?x?x?xf32>,
// CHECK-SAME: %[[DIM:.*]]: index,
// CHECK-SAME: %[[IDX:.*]]: index) -> vector<8x[4]x2x3xf32> {
// CHECK-NOT: vector.broadcast
// CHECK: %[[MASK:.*]] = vector.mask %0 { vector.transfer_read %[[MEM]]{{.*}} : memref<?x?x?x?xf32>, vector<8x[4]x2x3xf32> } : vector<[4]x3xi1> -> vector<8x[4]x2x3xf32>
-func.func @masked_transfer_read_reduce_rank(
+func.func @xfer_read_minor_identity_bcast_dims_masked(
%mem: memref<?x?x?x?xf32>,
%dim: index,
%idx: index) -> vector<8x[4]x2x3xf32> {