[Mlir-commits] [mlir] [MLIR][Vector] Allow any shaped type to be distributed for vector.wa… (PR #114215)
Petr Kurapov
llvmlistbot at llvm.org
Wed Oct 30 05:19:38 PDT 2024
https://github.com/kurapov-peter updated https://github.com/llvm/llvm-project/pull/114215
From 7d5b1b46881a77dc10608e559f37ae25a568184b Mon Sep 17 00:00:00 2001
From: Petr Kurapov <petr.a.kurapov at intel.com>
Date: Thu, 17 Oct 2024 17:29:00 +0000
Subject: [PATCH 1/2] [MLIR][Vector] Allow any shaped type to be distributed
for vector.warp_execute_on_lane_0's return values
---
mlir/lib/Dialect/Vector/IR/VectorOps.cpp | 12 ++++++------
mlir/test/Dialect/Vector/invalid.mlir | 6 +++---
2 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
index 1853ae04f45d90..af5a2a276042ca 100644
--- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
+++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
@@ -6558,14 +6558,14 @@ static LogicalResult verifyDistributedType(Type expanded, Type distributed,
// If the types match there is no distribution.
if (expanded == distributed)
return success();
- auto expandedVecType = llvm::dyn_cast<VectorType>(expanded);
- auto distributedVecType = llvm::dyn_cast<VectorType>(distributed);
+ auto expandedVecType = llvm::dyn_cast<ShapedType>(expanded);
+ auto distributedVecType = llvm::dyn_cast<ShapedType>(distributed);
if (!expandedVecType || !distributedVecType)
- return op->emitOpError("expected vector type for distributed operands.");
+ return op->emitOpError("expected shaped type for distributed operands.");
if (expandedVecType.getRank() != distributedVecType.getRank() ||
expandedVecType.getElementType() != distributedVecType.getElementType())
return op->emitOpError(
- "expected distributed vectors to have same rank and element type.");
+ "expected distributed types to have same rank and element type.");
SmallVector<int64_t> scales(expandedVecType.getRank(), 1);
for (int64_t i = 0, e = expandedVecType.getRank(); i < e; i++) {
@@ -6575,8 +6575,8 @@ static LogicalResult verifyDistributedType(Type expanded, Type distributed,
continue;
if (eDim % dDim != 0)
return op->emitOpError()
- << "expected expanded vector dimension #" << i << " (" << eDim
- << ") to be a multipler of the distributed vector dimension ("
+ << "expected expanded type dimension #" << i << " (" << eDim
+ << ") to be a multipler of the distributed type dimension ("
<< dDim << ")";
scales[i] = eDim / dDim;
}
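
For context, the verifier computes a per-dimension ratio between the expanded
(yielded) shape and the distributed (result) shape; every expanded dimension
must divide evenly by its distributed counterpart. A minimal sketch of a valid
case (illustrative only, not part of the patch; the constraint that the product
of the ratios equals the warp size is assumed from the surrounding verifier
code, which is not shown in this hunk):

func.func @warp_scales(%laneid: index) -> vector<1x4xi32> {
  // Yielding 4x8 against a 1x4 result gives per-dimension ratios [4, 2],
  // whose product (8) matches the warp size below.
  %r = vector.warp_execute_on_lane_0(%laneid)[8] -> (vector<1x4xi32>) {
    %0 = arith.constant dense<2> : vector<4x8xi32>
    vector.yield %0 : vector<4x8xi32>
  }
  return %r : vector<1x4xi32>
}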
diff --git a/mlir/test/Dialect/Vector/invalid.mlir b/mlir/test/Dialect/Vector/invalid.mlir
index 56039d04549aa5..f2b7685d79effb 100644
--- a/mlir/test/Dialect/Vector/invalid.mlir
+++ b/mlir/test/Dialect/Vector/invalid.mlir
@@ -1665,7 +1665,7 @@ func.func @warp_2_distributed_dims(%laneid: index) {
// -----
func.func @warp_2_distributed_dims(%laneid: index) {
- // expected-error @+1 {{expected expanded vector dimension #1 (8) to be a multipler of the distributed vector dimension (3)}}
+ // expected-error @+1 {{expected expanded type dimension #1 (8) to be a multiple of the distributed type dimension (3)}}
%2 = vector.warp_execute_on_lane_0(%laneid)[32] -> (vector<1x3xi32>) {
%0 = arith.constant dense<2>: vector<4x8xi32>
vector.yield %0 : vector<4x8xi32>
@@ -1676,7 +1676,7 @@ func.func @warp_2_distributed_dims(%laneid: index) {
// -----
func.func @warp_mismatch_rank(%laneid: index) {
- // expected-error @+1 {{'vector.warp_execute_on_lane_0' op expected distributed vectors to have same rank and element type.}}
+ // expected-error @+1 {{'vector.warp_execute_on_lane_0' op expected distributed types to have same rank and element type.}}
%2 = vector.warp_execute_on_lane_0(%laneid)[32] -> (vector<4x4xi32>) {
%0 = arith.constant dense<2>: vector<128xi32>
vector.yield %0 : vector<128xi32>
@@ -1687,7 +1687,7 @@ func.func @warp_mismatch_rank(%laneid: index) {
// -----
func.func @warp_mismatch_rank(%laneid: index) {
- // expected-error @+1 {{'vector.warp_execute_on_lane_0' op expected vector type for distributed operands.}}
+ // expected-error @+1 {{'vector.warp_execute_on_lane_0' op expected shaped type for distributed operands.}}
%2 = vector.warp_execute_on_lane_0(%laneid)[32] -> (i32) {
%0 = arith.constant dense<2>: vector<128xi32>
vector.yield %0 : vector<128xi32>
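
With the relaxed check, a non-vector ShapedType result now passes the
type-kind test as long as rank and element type line up. An illustrative
sketch, not taken from the patch (whether distributing a memref this way is
semantically useful is a separate question for downstream passes):

func.func @warp_memref(%laneid: index) -> memref<4xf32> {
  // 128 over 4 gives a ratio of 32, matching the warp size.
  %r = vector.warp_execute_on_lane_0(%laneid)[32] -> (memref<4xf32>) {
    %m = memref.alloc() : memref<128xf32>
    vector.yield %m : memref<128xf32>
  }
  return %r : memref<4xf32>
}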
From 834094fb87b060987c9239f91a32f9240923a42d Mon Sep 17 00:00:00 2001
From: Petr Kurapov <petr.a.kurapov at intel.com>
Date: Wed, 30 Oct 2024 12:19:22 +0000
Subject: [PATCH 2/2] Update warp_execute_on_lane_0 description
---
mlir/include/mlir/Dialect/Vector/IR/VectorOps.td | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
index 474f4ccf4891de..91b68f207f8bc9 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
@@ -3000,7 +3000,8 @@ def Vector_WarpExecuteOnLane0Op : Vector_Op<"warp_execute_on_lane_0",
Return values are distributed on all lanes using laneId as index. The
vector is distributed based on the shape ratio between the vector type of
- the yield and the result type.
+ the yield and the result type. Any ShapedType return value may be
+ distributed, supporting other vector-like types (e.g., xegpu.tensor_desc).
If the shapes are the same this means the value is broadcasted to all lanes.
In the future the distribution can be made more explicit using affine_maps
and will support having multiple Ids.
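
As a concrete illustration of the new sentence, a sketch of distributing an
xegpu.tensor_desc result (hypothetical usage; the exact xegpu syntax and its
distribution semantics are assumptions, not part of this patch):

func.func @distribute_desc(%laneid: index,
                           %desc: !xegpu.tensor_desc<16x16xf16>)
    -> !xegpu.tensor_desc<1x16xf16> {
  // The 16x16 descriptor is split into one 1x16 slice per lane (16 lanes).
  %r = vector.warp_execute_on_lane_0(%laneid)[16]
      -> (!xegpu.tensor_desc<1x16xf16>) {
    vector.yield %desc : !xegpu.tensor_desc<16x16xf16>
  }
  return %r : !xegpu.tensor_desc<1x16xf16>
}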