[Mlir-commits] [mlir] [mlir][vector] Extend TransferReadDropUnitDimsPattern to support partially-static memrefs (PR #72142)

Cullen Rhodes llvmlistbot at llvm.org
Wed Nov 15 10:57:46 PST 2023


https://github.com/c-rhodes updated https://github.com/llvm/llvm-project/pull/72142

>From 1034fdf622910aa52b2b0ae6f4057438fee04869 Mon Sep 17 00:00:00 2001
From: Cullen Rhodes <cullen.rhodes at arm.com>
Date: Thu, 9 Nov 2023 19:44:34 +0000
Subject: [PATCH 1/2] [mlir][vector] Extend vector.transfer_read drop unit dim
 pattern

This patch extends the vector.transfer_read drop unit dim pattern to
support scalable vectors with (non-scalable) unit dims, and dynamic
memrefs. The xfer op can also have a mask of type 'vector.create_mask',
which gets rewritten as long as the mask of the unit dim is a constant
of 1.
---
 .../Transforms/VectorTransferOpTransforms.cpp | 98 +++++++++++++------
 ...ctor-transfer-drop-unit-dims-patterns.mlir | 86 ++++++++++++++++
 2 files changed, 155 insertions(+), 29 deletions(-)

diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp
index a5f1b28152b9bde..95445f2081ec89c 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp
@@ -260,14 +260,22 @@ void TransferOptimization::storeToLoadForwarding(vector::TransferReadOp read) {
   opToErase.push_back(read.getOperation());
 }
 
+/// Returns a copy of `shape` without unit dims.
+static SmallVector<int64_t> getReducedShape(ArrayRef<int64_t> shape) {
+  SmallVector<int64_t> reducedShape;
+  llvm::copy_if(shape, std::back_inserter(reducedShape),
+                [](int64_t dimSize) { return dimSize != 1; });
+  return reducedShape;
+}
+
 /// Drops unit dimensions from the input MemRefType.
-static MemRefType dropUnitDims(MemRefType inputType, ArrayRef<int64_t> offsets,
-                               ArrayRef<int64_t> sizes,
-                               ArrayRef<int64_t> strides) {
-  SmallVector<int64_t> targetShape = llvm::to_vector(
-      llvm::make_filter_range(sizes, [](int64_t sz) { return sz != 1; }));
+static MemRefType dropUnitDims(MemRefType inputType,
+                               ArrayRef<OpFoldResult> offsets,
+                               ArrayRef<OpFoldResult> sizes,
+                               ArrayRef<OpFoldResult> strides) {
   Type rankReducedType = memref::SubViewOp::inferRankReducedResultType(
-      targetShape, inputType, offsets, sizes, strides);
+      getReducedShape(inputType.getShape()), inputType, offsets, sizes,
+      strides);
   return canonicalizeStridedLayout(cast<MemRefType>(rankReducedType));
 }
 
@@ -277,17 +285,18 @@ static Value rankReducingSubviewDroppingUnitDims(PatternRewriter &rewriter,
                                                  mlir::Location loc,
                                                  Value input) {
   MemRefType inputType = cast<MemRefType>(input.getType());
-  assert(inputType.hasStaticShape());
-  SmallVector<int64_t> subViewOffsets(inputType.getRank(), 0);
-  SmallVector<int64_t> subViewStrides(inputType.getRank(), 1);
-  ArrayRef<int64_t> subViewSizes = inputType.getShape();
-  MemRefType resultType =
-      dropUnitDims(inputType, subViewOffsets, subViewSizes, subViewStrides);
+  SmallVector<OpFoldResult> offsets(inputType.getRank(),
+                                    rewriter.getIndexAttr(0));
+  SmallVector<OpFoldResult> sizes = memref::getMixedSizes(rewriter, loc, input);
+  SmallVector<OpFoldResult> strides(inputType.getRank(),
+                                    rewriter.getIndexAttr(1));
+  MemRefType resultType = dropUnitDims(inputType, offsets, sizes, strides);
+
   if (canonicalizeStridedLayout(resultType) ==
       canonicalizeStridedLayout(inputType))
     return input;
-  return rewriter.create<memref::SubViewOp>(
-      loc, resultType, input, subViewOffsets, subViewSizes, subViewStrides);
+  return rewriter.create<memref::SubViewOp>(loc, resultType, input, offsets,
+                                            sizes, strides);
 }
 
 /// Returns the number of dims that aren't unit dims.
@@ -295,12 +304,18 @@ static int getReducedRank(ArrayRef<int64_t> shape) {
   return llvm::count_if(shape, [](int64_t dimSize) { return dimSize != 1; });
 }
 
-/// Returns a copy of `shape` without unit dims.
-static SmallVector<int64_t> getReducedShape(ArrayRef<int64_t> shape) {
-  SmallVector<int64_t> reducedShape;
-  llvm::copy_if(shape, std::back_inserter(reducedShape),
-                [](int64_t dimSize) { return dimSize != 1; });
-  return reducedShape;
+/// Trims non-scalable one dimensions from `oldType` and returns the result
+/// type.
+static VectorType trimUnitDims(VectorType oldType) {
+  SmallVector<int64_t> newShape;
+  SmallVector<bool> newScalableDims;
+  for (auto [dimIdx, dimSize] : llvm::enumerate(oldType.getShape())) {
+    if (dimSize == 1 && !oldType.getScalableDims()[dimIdx])
+      continue;
+    newShape.push_back(dimSize);
+    newScalableDims.push_back(oldType.getScalableDims()[dimIdx]);
+  }
+  return VectorType::get(newShape, oldType.getElementType(), newScalableDims);
 }
 
 namespace {
@@ -320,9 +335,7 @@ class TransferReadDropUnitDimsPattern
     Value source = transferReadOp.getSource();
     MemRefType sourceType = dyn_cast<MemRefType>(source.getType());
     // TODO: support tensor types.
-    if (!sourceType || !sourceType.hasStaticShape())
-      return failure();
-    if (sourceType.getNumElements() != vectorType.getNumElements())
+    if (!sourceType)
       return failure();
     // TODO: generalize this pattern, relax the requirements here.
     if (transferReadOp.hasOutOfBoundsDim())
@@ -335,23 +348,50 @@ class TransferReadDropUnitDimsPattern
       return failure();
     // Check if the reduced vector shape matches the reduced source shape.
     // Otherwise, this case is not supported yet.
-    int vectorReducedRank = getReducedRank(vectorType.getShape());
-    if (reducedRank != vectorReducedRank)
+    auto reducedVectorType = trimUnitDims(vectorType);
+    if (reducedRank != reducedVectorType.getRank())
       return failure();
     if (llvm::any_of(transferReadOp.getIndices(), [](Value v) {
           return getConstantIntValue(v) != static_cast<int64_t>(0);
         }))
       return failure();
+
+    auto maskOp = transferReadOp.getMask();
+    if (maskOp) {
+      auto createMaskOp = maskOp.getDefiningOp<vector::CreateMaskOp>();
+      if (!createMaskOp)
+        return failure();
+      auto maskType = maskOp.getType();
+      auto reducedMaskType = trimUnitDims(maskType);
+      if (reducedMaskType.getRank() == maskType.getRank())
+        return failure();
+      SmallVector<Value> maskOperands;
+      for (auto [dim, dimIsScalable, maskOperand] :
+           llvm::zip(maskType.getShape(), maskType.getScalableDims(),
+                     createMaskOp.getOperands())) {
+        if (dim == 1 && !dimIsScalable) {
+          // If the mask for the unit dim is not a constant of 1, do nothing.
+          auto constant = maskOperand.getDefiningOp<arith::ConstantIndexOp>();
+          if (!constant || (constant.value() != 1))
+            return failure();
+          continue;
+        }
+        maskOperands.push_back(maskOperand);
+      }
+      maskOp = rewriter.create<vector::CreateMaskOp>(loc, reducedMaskType,
+                                                     maskOperands);
+    }
+
     Value reducedShapeSource =
         rankReducingSubviewDroppingUnitDims(rewriter, loc, source);
     Value c0 = rewriter.create<arith::ConstantIndexOp>(loc, 0);
     SmallVector<Value> zeros(reducedRank, c0);
     auto identityMap = rewriter.getMultiDimIdentityMap(reducedRank);
-    auto reducedVectorType = VectorType::get(
-        getReducedShape(vectorType.getShape()), vectorType.getElementType());
-
+    SmallVector<bool> inBounds(reducedVectorType.getRank(), true);
     auto newTransferReadOp = rewriter.create<vector::TransferReadOp>(
-        loc, reducedVectorType, reducedShapeSource, zeros, identityMap);
+        loc, reducedVectorType, reducedShapeSource, zeros, identityMap,
+        transferReadOp.getPadding(), maskOp,
+        rewriter.getBoolArrayAttr(inBounds));
     auto shapeCast = rewriter.createOrFold<vector::ShapeCastOp>(
         loc, vectorType, newTransferReadOp);
     rewriter.replaceOp(transferReadOp, shapeCast);
diff --git a/mlir/test/Dialect/Vector/vector-transfer-drop-unit-dims-patterns.mlir b/mlir/test/Dialect/Vector/vector-transfer-drop-unit-dims-patterns.mlir
index 2852e301888cca8..688fcd114041812 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-drop-unit-dims-patterns.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-drop-unit-dims-patterns.mlir
@@ -82,6 +82,92 @@ func.func @transfer_write_and_vector_rank_reducing_to_0d(
 //       CHECK:   %[[SHCAST:.+]] = vector.shape_cast %[[VECTOR]] : vector<1x1x1xf32> to vector<f32>
 //       CHECK:   vector.transfer_write %[[SHCAST]], %[[SUBVIEW]]{{.*}} : vector<f32>, memref<f32>
 
+func.func @transfer_read_dynamic_rank_reducing(
+      %arg : memref<?x1xi8, strided<[?, ?], offset: ?>>) -> vector<[16]x1xi8> {
+    %c0 = arith.constant 0 : index
+    %pad = arith.constant 0 : i8
+    %v = vector.transfer_read %arg[%c0, %c0], %pad {in_bounds = [true, true]} :
+      memref<?x1xi8, strided<[?, ?], offset: ?>>, vector<[16]x1xi8>
+    return %v : vector<[16]x1xi8>
+}
+// CHECK-LABEL: func @transfer_read_dynamic_rank_reducing
+//  CHECK-SAME:     %[[ARG:.+]]: memref<?x1xi8
+//       CHECK:   %[[C0:.+]] = arith.constant 0 : index
+//       CHECK:   %[[DIM0:.+]] = memref.dim %[[ARG]], %[[C0]] : memref<?x1xi8, strided<[?, ?], offset: ?>>
+//       CHECK:   %[[SUBVIEW:.+]] = memref.subview %[[ARG]][0, 0] [%[[DIM0]], 1] [1, 1] : memref<?x1xi8, {{.*}}> to memref<?xi8, {{.*}}>
+//       CHECK:   vector.transfer_read %[[SUBVIEW]]{{.*}} : memref<?xi8, {{.*}}>, vector<[16]xi8>
+
+func.func @masked_transfer_read_dynamic_rank_reducing(
+      %arg : memref<?x1xi8, strided<[?, ?], offset: ?>>,
+      %mask_dim0 : index) -> vector<[16]x1xi8> {
+    %c0 = arith.constant 0 : index
+    %c1 = arith.constant 1 : index
+    %pad = arith.constant 0 : i8
+    %mask = vector.create_mask %mask_dim0, %c1 : vector<[16]x1xi1>
+    %v = vector.transfer_read %arg[%c0, %c0], %pad, %mask {in_bounds = [true, true]} :
+      memref<?x1xi8, strided<[?, ?], offset: ?>>, vector<[16]x1xi8>
+    return %v : vector<[16]x1xi8>
+}
+// CHECK-LABEL: func @masked_transfer_read_dynamic_rank_reducing
+//  CHECK-SAME:     %[[ARG:.+]]: memref<?x1xi8
+//  CHECK-SAME:     %[[MASK_DIM0:.+]]: index
+//       CHECK:   %[[C0:.+]] = arith.constant 0 : index
+//       CHECK:   %[[PAD:.+]] = arith.constant 0 : i8
+//       CHECK:   %[[MASK:.+]] = vector.create_mask %[[MASK_DIM0]] : vector<[16]xi1>
+//       CHECK:   %[[DIM0:.+]] = memref.dim %[[ARG]], %[[C0]] : memref<?x1xi8, strided<[?, ?], offset: ?>>
+//       CHECK:   %[[SUBVIEW:.+]] = memref.subview %[[ARG]][0, 0] [%[[DIM0]], 1] [1, 1] : memref<?x1xi8, {{.*}}> to memref<?xi8, {{.*}}>
+//       CHECK:   vector.transfer_read %[[SUBVIEW]][{{.*}}], %[[PAD]], %[[MASK]] {in_bounds = [true]} : memref<?xi8, {{.*}}>, vector<[16]xi8>
+
+/// Only vector.create_mask is currently supported.
+func.func @unsupported_masked_transfer_read_dynamic_rank_reducing_1(
+      %arg : memref<?x1xi8, strided<[?, ?], offset: ?>>,
+      %mask : vector<[16]x1xi1>) -> vector<[16]x1xi8> {
+    %c0 = arith.constant 0 : index
+    %pad = arith.constant 0 : i8
+    %v = vector.transfer_read %arg[%c0, %c0], %pad, %mask {in_bounds = [true, true]} :
+      memref<?x1xi8, strided<[?, ?], offset: ?>>, vector<[16]x1xi8>
+    return %v : vector<[16]x1xi8>
+}
+// CHECK-LABEL: func @unsupported_masked_transfer_read_dynamic_rank_reducing_1
+//  CHECK-SAME:     %[[ARG:.+]]: memref<?x1xi8
+//   CHECK-NOT: vector.create_mask
+//   CHECK-NOT: memref.subview
+//       CHECK: vector.transfer_read %[[ARG]]
+
+/// Unit dim mask must be constant of 1.
+func.func @unsupported_masked_transfer_read_dynamic_rank_reducing_2(
+      %arg : memref<?x1xi8, strided<[?, ?], offset: ?>>,
+      %mask_dim0 : index, %mask_dim1 : index) -> vector<[16]x1xi8> {
+    %c0 = arith.constant 0 : index
+    %c1 = arith.constant 1 : index
+    %pad = arith.constant 0 : i8
+    %mask = vector.create_mask %mask_dim0, %mask_dim1 : vector<[16]x1xi1>
+    %v = vector.transfer_read %arg[%c0, %c0], %pad, %mask {in_bounds = [true, true]} :
+      memref<?x1xi8, strided<[?, ?], offset: ?>>, vector<[16]x1xi8>
+    return %v : vector<[16]x1xi8>
+}
+// CHECK-LABEL: func @unsupported_masked_transfer_read_dynamic_rank_reducing_2
+//  CHECK-SAME:     %[[ARG:.+]]: memref<?x1xi8
+//   CHECK-NOT: memref.subview
+//       CHECK: vector.transfer_read {{.*}} vector<[16]x1xi8>
+
+/// Unit dim must be non-scalable.
+func.func @masked_transfer_read_dynamic_rank_reducing_scalable_unit_dim(
+      %arg : memref<?x1xi8, strided<[?, ?], offset: ?>>,
+      %mask_dim0 : index) -> vector<[16]x[1]xi8> {
+    %c0 = arith.constant 0 : index
+    %c1 = arith.constant 1 : index
+    %pad = arith.constant 0 : i8
+    %mask = vector.create_mask %mask_dim0, %c1 : vector<[16]x[1]xi1>
+    %v = vector.transfer_read %arg[%c0, %c0], %pad, %mask {in_bounds = [true, true]} :
+      memref<?x1xi8, strided<[?, ?], offset: ?>>, vector<[16]x[1]xi8>
+    return %v : vector<[16]x[1]xi8>
+}
+// CHECK-LABEL: func @masked_transfer_read_dynamic_rank_reducing_scalable_unit_dim
+//  CHECK-SAME:     %[[ARG:.+]]: memref<?x1xi8
+//   CHECK-NOT: memref.subview
+//       CHECK: vector.transfer_read {{.*}} vector<[16]x[1]xi8>
+
 module attributes {transform.with_named_sequence} {
   transform.named_sequence @__transform_main(%func_op: !transform.op<"func.func"> {transform.readonly}) {
     transform.apply_patterns to %func_op {

>From cc4990ab101d0badb979048629df3d77232ed5ef Mon Sep 17 00:00:00 2001
From: Cullen Rhodes <cullen.rhodes at arm.com>
Date: Wed, 15 Nov 2023 14:22:36 +0000
Subject: [PATCH 2/2] address comments

* trimUnitDims -> trimNonScalableUnitDims
* add notifyMatchFailure on unsupported mask op
* llvm::zip -> llvm::zip_equal
* add helper to rewrite vector.create_mask to drop non-scalable unit dims.
* add getReducedShape that takes mixedSizes.
* add a more complex test.
---
 .../Transforms/VectorTransferOpTransforms.cpp | 79 +++++++++++++------
 ...ctor-transfer-drop-unit-dims-patterns.mlir | 32 +++++++-
 2 files changed, 84 insertions(+), 27 deletions(-)

diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp
index 95445f2081ec89c..d2c6ba557b9bbec 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp
@@ -268,14 +268,31 @@ static SmallVector<int64_t> getReducedShape(ArrayRef<int64_t> shape) {
   return reducedShape;
 }
 
+/// Converts OpFoldResults to int64_t shape without unit dims.
+static SmallVector<int64_t> getReducedShape(ArrayRef<OpFoldResult> mixedSizes) {
+  SmallVector<int64_t> reducedShape;
+  for (const auto size : mixedSizes) {
+    if (llvm::dyn_cast_if_present<Value>(size)) {
+      reducedShape.push_back(ShapedType::kDynamic);
+      continue;
+    }
+
+    auto value = cast<IntegerAttr>(size.get<Attribute>()).getValue();
+    if (value == 1)
+      continue;
+    reducedShape.push_back(value.getSExtValue());
+  }
+  return reducedShape;
+}
+
 /// Drops unit dimensions from the input MemRefType.
 static MemRefType dropUnitDims(MemRefType inputType,
                                ArrayRef<OpFoldResult> offsets,
                                ArrayRef<OpFoldResult> sizes,
                                ArrayRef<OpFoldResult> strides) {
+  auto targetShape = getReducedShape(sizes);
   Type rankReducedType = memref::SubViewOp::inferRankReducedResultType(
-      getReducedShape(inputType.getShape()), inputType, offsets, sizes,
-      strides);
+      targetShape, inputType, offsets, sizes, strides);
   return canonicalizeStridedLayout(cast<MemRefType>(rankReducedType));
 }
 
@@ -306,7 +323,7 @@ static int getReducedRank(ArrayRef<int64_t> shape) {
 
 /// Trims non-scalable one dimensions from `oldType` and returns the result
 /// type.
-static VectorType trimUnitDims(VectorType oldType) {
+static VectorType trimNonScalableUnitDims(VectorType oldType) {
   SmallVector<int64_t> newShape;
   SmallVector<bool> newScalableDims;
   for (auto [dimIdx, dimSize] : llvm::enumerate(oldType.getShape())) {
@@ -318,6 +335,32 @@ static VectorType trimUnitDims(VectorType oldType) {
   return VectorType::get(newShape, oldType.getElementType(), newScalableDims);
 }
 
+// Rewrites vector.create_mask 'op' to drop non-scalable unit dimensions.
+static FailureOr<Value>
+createMaskDropNonScalableUnitDims(PatternRewriter &rewriter, Location loc,
+                                  vector::CreateMaskOp op) {
+  auto type = op.getType();
+  auto reducedType = trimNonScalableUnitDims(type);
+  if (reducedType.getRank() == type.getRank())
+    return failure();
+
+  SmallVector<Value> reducedOperands;
+  for (auto [dim, dimIsScalable, operand] : llvm::zip_equal(
+           type.getShape(), type.getScalableDims(), op.getOperands())) {
+    if (dim == 1 && !dimIsScalable) {
+      // If the mask for the unit dim is not a constant of 1, do nothing.
+      auto constant = operand.getDefiningOp<arith::ConstantIndexOp>();
+      if (!constant || (constant.value() != 1))
+        return failure();
+      continue;
+    }
+    reducedOperands.push_back(operand);
+  }
+  return rewriter
+      .create<vector::CreateMaskOp>(loc, reducedType, reducedOperands)
+      .getResult();
+}
+
 namespace {
 
 /// Rewrites `vector.transfer_read` ops where the source has unit dims, by
@@ -348,7 +391,7 @@ class TransferReadDropUnitDimsPattern
       return failure();
     // Check if the reduced vector shape matches the reduced source shape.
     // Otherwise, this case is not supported yet.
-    auto reducedVectorType = trimUnitDims(vectorType);
+    auto reducedVectorType = trimNonScalableUnitDims(vectorType);
     if (reducedRank != reducedVectorType.getRank())
       return failure();
     if (llvm::any_of(transferReadOp.getIndices(), [](Value v) {
@@ -356,30 +399,18 @@ class TransferReadDropUnitDimsPattern
         }))
       return failure();
 
-    auto maskOp = transferReadOp.getMask();
+    Value maskOp = transferReadOp.getMask();
     if (maskOp) {
       auto createMaskOp = maskOp.getDefiningOp<vector::CreateMaskOp>();
       if (!createMaskOp)
+        return rewriter.notifyMatchFailure(
+            transferReadOp, "unsupported mask op, only 'vector.create_mask' is "
+                            "currently supported");
+      FailureOr<Value> rankReducedCreateMask =
+          createMaskDropNonScalableUnitDims(rewriter, loc, createMaskOp);
+      if (failed(rankReducedCreateMask))
         return failure();
-      auto maskType = maskOp.getType();
-      auto reducedMaskType = trimUnitDims(maskType);
-      if (reducedMaskType.getRank() == maskType.getRank())
-        return failure();
-      SmallVector<Value> maskOperands;
-      for (auto [dim, dimIsScalable, maskOperand] :
-           llvm::zip(maskType.getShape(), maskType.getScalableDims(),
-                     createMaskOp.getOperands())) {
-        if (dim == 1 && !dimIsScalable) {
-          // If the mask for the unit dim is not a constant of 1, do nothing.
-          auto constant = maskOperand.getDefiningOp<arith::ConstantIndexOp>();
-          if (!constant || (constant.value() != 1))
-            return failure();
-          continue;
-        }
-        maskOperands.push_back(maskOperand);
-      }
-      maskOp = rewriter.create<vector::CreateMaskOp>(loc, reducedMaskType,
-                                                     maskOperands);
+      maskOp = *rankReducedCreateMask;
     }
 
     Value reducedShapeSource =
diff --git a/mlir/test/Dialect/Vector/vector-transfer-drop-unit-dims-patterns.mlir b/mlir/test/Dialect/Vector/vector-transfer-drop-unit-dims-patterns.mlir
index 688fcd114041812..735915d43565389 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-drop-unit-dims-patterns.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-drop-unit-dims-patterns.mlir
@@ -97,7 +97,7 @@ func.func @transfer_read_dynamic_rank_reducing(
 //       CHECK:   %[[SUBVIEW:.+]] = memref.subview %[[ARG]][0, 0] [%[[DIM0]], 1] [1, 1] : memref<?x1xi8, {{.*}}> to memref<?xi8, {{.*}}>
 //       CHECK:   vector.transfer_read %[[SUBVIEW]]{{.*}} : memref<?xi8, {{.*}}>, vector<[16]xi8>
 
-func.func @masked_transfer_read_dynamic_rank_reducing(
+func.func @masked_transfer_read_dynamic_rank_reducing_1(
       %arg : memref<?x1xi8, strided<[?, ?], offset: ?>>,
       %mask_dim0 : index) -> vector<[16]x1xi8> {
     %c0 = arith.constant 0 : index
@@ -108,7 +108,7 @@ func.func @masked_transfer_read_dynamic_rank_reducing(
       memref<?x1xi8, strided<[?, ?], offset: ?>>, vector<[16]x1xi8>
     return %v : vector<[16]x1xi8>
 }
-// CHECK-LABEL: func @masked_transfer_read_dynamic_rank_reducing
+// CHECK-LABEL: func @masked_transfer_read_dynamic_rank_reducing_1
 //  CHECK-SAME:     %[[ARG:.+]]: memref<?x1xi8
 //  CHECK-SAME:     %[[MASK_DIM0:.+]]: index
 //       CHECK:   %[[C0:.+]] = arith.constant 0 : index
@@ -118,7 +118,33 @@ func.func @masked_transfer_read_dynamic_rank_reducing(
 //       CHECK:   %[[SUBVIEW:.+]] = memref.subview %[[ARG]][0, 0] [%[[DIM0]], 1] [1, 1] : memref<?x1xi8, {{.*}}> to memref<?xi8, {{.*}}>
 //       CHECK:   vector.transfer_read %[[SUBVIEW]][{{.*}}], %[[PAD]], %[[MASK]] {in_bounds = [true]} : memref<?xi8, {{.*}}>, vector<[16]xi8>
 
-/// Only vector.create_mask is currently supported.
+func.func @masked_transfer_read_dynamic_rank_reducing_2(
+      %arg : memref<1x?x3x1x?x1xi8, strided<[?, ?, ?, ?, ?, ?], offset: ?>>,
+      %mask_dim1 : index, %mask_dim4 : index) -> vector<1x[1]x3x1x[16]x1xi8> {
+    %c0 = arith.constant 0 : index
+    %c1 = arith.constant 1 : index
+    %c2 = arith.constant 2 : index
+    %pad = arith.constant 0 : i8
+    %mask = vector.create_mask %c1, %mask_dim1, %c2, %c1, %mask_dim4, %c1 : vector<1x[1]x3x1x[16]x1xi1>
+    %v = vector.transfer_read %arg[%c0, %c0, %c0, %c0, %c0, %c0], %pad, %mask {in_bounds = [true, true, true, true, true, true]} :
+      memref<1x?x3x1x?x1xi8, strided<[?, ?, ?, ?, ?, ?], offset: ?>>, vector<1x[1]x3x1x[16]x1xi8>
+    return %v : vector<1x[1]x3x1x[16]x1xi8>
+}
+// CHECK-LABEL: func @masked_transfer_read_dynamic_rank_reducing_2
+//  CHECK-SAME:     %[[ARG:.+]]: memref<1x?x3x1x?x1xi8
+//  CHECK-SAME:     %[[MASK_DIM1:.+]]: index, %[[MASK_DIM4:.+]]: index
+//   CHECK-DAG:   %[[C0:.+]] = arith.constant 0 : index
+//   CHECK-DAG:   %[[C1:.+]] = arith.constant 1 : index
+//   CHECK-DAG:   %[[C2:.+]] = arith.constant 2 : index
+//   CHECK-DAG:   %[[C4:.+]] = arith.constant 4 : index
+//   CHECK-DAG:   %[[PAD:.+]] = arith.constant 0 : i8
+//       CHECK:   %[[MASK:.+]] = vector.create_mask %[[MASK_DIM1]], %[[C2]], %[[MASK_DIM4]] : vector<[1]x3x[16]xi1>
+//       CHECK:   %[[DIM1:.+]] = memref.dim %[[ARG]], %[[C1]] : memref<1x?x3x1x?x1xi8, strided<[?, ?, ?, ?, ?, ?], offset: ?>>
+//       CHECK:   %[[DIM4:.+]] = memref.dim %[[ARG]], %[[C4]] : memref<1x?x3x1x?x1xi8, strided<[?, ?, ?, ?, ?, ?], offset: ?>>
+//       CHECK:   %[[SUBVIEW:.+]] = memref.subview %[[ARG]][0, 0, 0, 0, 0, 0] [1, %[[DIM1]], 3, 1, %[[DIM4]], 1] [1, 1, 1, 1, 1, 1] : memref<1x?x3x1x?x1xi8, {{.*}}> to memref<?x3x?xi8, {{.*}}>
+//       CHECK:   vector.transfer_read %[[SUBVIEW]][{{.*}}], %[[PAD]], %[[MASK]] {in_bounds = [true, true, true]} : memref<?x3x?xi8, {{.*}}>, vector<[1]x3x[16]xi8>
+
+/// Only mask operands of vector.create_mask are currently supported.
 func.func @unsupported_masked_transfer_read_dynamic_rank_reducing_1(
       %arg : memref<?x1xi8, strided<[?, ?], offset: ?>>,
       %mask : vector<[16]x1xi1>) -> vector<[16]x1xi8> {



More information about the Mlir-commits mailing list