[Mlir-commits] [mlir] [mlir][affine] make threadid op a valid symbol. (PR #118478)
lonely eagle
llvmlistbot at llvm.org
Mon Dec 16 04:19:40 PST 2024
https://github.com/linuxlonelyeagle updated https://github.com/llvm/llvm-project/pull/118478
From ebf3898a5e9581466523b5e0c318770dd09a3a83 Mon Sep 17 00:00:00 2001
From: linuxlonelyeagle <2020382038 at qq.com>
Date: Tue, 3 Dec 2024 19:47:48 +0800
Subject: [PATCH 1/2] make threadid op a valid symbol.
---
mlir/lib/Dialect/Affine/IR/AffineOps.cpp | 6 +++
mlir/lib/Dialect/Affine/IR/CMakeLists.txt | 1 +
mlir/test/Dialect/Affine/ops.mlir | 36 +++++++++++++++
mlir/test/Dialect/GPU/transform-gpu.mlir | 56 +++++++++++------------
4 files changed, 71 insertions(+), 28 deletions(-)
diff --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
index dceebbfec586c8..cf355515deb63d 100644
--- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
@@ -8,6 +8,7 @@
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/IR/AffineValueMap.h"
+#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/UB/IR/UBOps.h"
#include "mlir/Dialect/Utils/StaticValueUtils.h"
@@ -410,6 +411,7 @@ bool mlir::affine::isValidSymbol(Value value) {
/// A value can be used as a symbol for `region` iff it meets one of the
/// following conditions:
/// *) It is a constant.
+/// *) It is the result of a gpu.thread_id op.
/// *) It is the result of an affine apply operation with symbol arguments.
/// *) It is a result of the dim op on a memref whose corresponding size is
/// a valid symbol.
@@ -443,6 +445,10 @@ bool mlir::affine::isValidSymbol(Value value, Region *region) {
if (matchPattern(defOp, m_Constant(&operandCst)))
return true;
+ // ThreadId operation is ok.
+ if (isa<gpu::ThreadIdOp>(defOp))
+ return true;
+
// Affine apply operation is ok if all of its operands are ok.
if (auto applyOp = dyn_cast<AffineApplyOp>(defOp))
return applyOp.isValidSymbol(region);
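With this check in place, a gpu.thread_id result qualifies as an affine symbol
even when it is defined inside a loop nest; previously only top-level values,
constants, dim results, and affine.apply compositions of those qualified. A
minimal sketch of the IR this legalizes, mirroring the test added below (names
and shapes are illustrative):

    affine.for %i = %c0 to %dim step 32 {
      %tid = gpu.thread_id x
      // %tid binds to symbol position s0 rather than a dim.
      %0 = affine.apply affine_map<()[s0] -> (s0 mod 32)>()[%tid]
    }
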
diff --git a/mlir/lib/Dialect/Affine/IR/CMakeLists.txt b/mlir/lib/Dialect/Affine/IR/CMakeLists.txt
index 7f7a01be891e05..9dad5cdb28cbc4 100644
--- a/mlir/lib/Dialect/Affine/IR/CMakeLists.txt
+++ b/mlir/lib/Dialect/Affine/IR/CMakeLists.txt
@@ -22,4 +22,5 @@ add_mlir_dialect_library(MLIRAffineDialect
MLIRSideEffectInterfaces
MLIRUBDialect
MLIRValueBoundsOpInterface
+ MLIRGPUDialect
)
diff --git a/mlir/test/Dialect/Affine/ops.mlir b/mlir/test/Dialect/Affine/ops.mlir
index c6bfb688db1c1d..5bd556619f3d5a 100644
--- a/mlir/test/Dialect/Affine/ops.mlir
+++ b/mlir/test/Dialect/Affine/ops.mlir
@@ -324,3 +324,39 @@ module attributes {gpu.container_module} {
// CHECK: affine.for %[[VAL_4:.*]] = %[[VAL_3]] to %[[VAL_2]] step 32 {
// CHECK: }
// CHECK: gpu.return
+
+// -----
+
+#map = affine_map<()[s0] -> (s0 mod 32)>
+
+// CHECK: #[[$ATTR_0:.+]] = affine_map<()[s0] -> (s0 mod 32)>
+
+module {
+ gpu.module @gpu {
+ gpu.func @affine_thread_id(%arg0: memref<?x?xf32>) kernel {
+ %c3 = arith.constant 3 : index
+ %dim = memref.dim %arg0, %c3 : memref<?x?xf32>
+ %c0 = arith.constant 0 : index
+ affine.for %arg3 = %c0 to %dim step 32 {
+ %thread_id_x = gpu.thread_id x
+ %0 = affine.apply #map()[%thread_id_x]
+ %c128 = arith.constant 128 : index
+ affine.for %arg4 = %0 to %c128 step 8 {
+ %c32 = arith.constant 32 : index
+ }
+ }
+ gpu.return
+ }
+ }
+}
+
+// CHECK-LABEL: @affine_thread_id
+// CHECK-SAME: (%[[VAL_0:.*]]: memref<?x?xf32>) kernel {
+// CHECK: %[[VAL_1:.*]] = arith.constant 3 : index
+// CHECK: %[[VAL_2:.*]] = memref.dim %[[VAL_0]], %[[VAL_1]] : memref<?x?xf32>
+// CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
+// CHECK: affine.for %[[VAL_4:.*]] = %[[VAL_3]] to %[[VAL_2]] step 32 {
+// CHECK: %[[VAL_5:.*]] = gpu.thread_id x
+// CHECK: %[[VAL_6:.*]] = affine.apply #[[$ATTR_0]](){{\[}}%[[VAL_5]]]
+// CHECK: %[[VAL_7:.*]] = arith.constant 128 : index
+// CHECK: affine.for %[[VAL_8:.*]] = %[[VAL_6]] to %[[VAL_7]] step 8 {
diff --git a/mlir/test/Dialect/GPU/transform-gpu.mlir b/mlir/test/Dialect/GPU/transform-gpu.mlir
index 72572c6a38de12..6018eb40bac2a8 100644
--- a/mlir/test/Dialect/GPU/transform-gpu.mlir
+++ b/mlir/test/Dialect/GPU/transform-gpu.mlir
@@ -43,7 +43,7 @@ module attributes {transform.with_named_sequence} {
!type = memref<2 x 32 x f32>
!type1d = memref<32 x f32>
-// CHECK-DAG: #[[$MAP:.*]] = affine_map<(d0) -> (d0 floordiv 128)>
+// CHECK-DAG: #[[$MAP:.*]] = affine_map<()[s0] -> (s0 floordiv 128)>
// CHECK-LABEL: func.func @warpgroup_3d(
// CHECK-SAME: %[[ARGX:[0-9a-z]+]]: memref<2x32xf32>
@@ -61,7 +61,7 @@ func.func @warpgroup_3d(%x: !type, %y: !type, %t: !type1d, %alpha : f32, %stream
// CHECK: gpu.launch
// CHECK: %[[TIDX:.*]] = gpu.thread_id x
// CHECK: %[[TIDY:.*]] = gpu.thread_id y
-// CHECK-DAG: %[[WG:.*]] = affine.apply #[[$MAP]](%[[TIDX]])
+// CHECK-DAG: %[[WG:.*]] = affine.apply #[[$MAP]]()[%[[TIDX]]]
// CHECK-DAG: %[[CMPX:.*]] = arith.cmpi ult, %[[TIDX]], %[[C384]] : index
// CHECK-DAG: %[[CMPY:.*]] = arith.cmpi ult, %[[TIDY]], %[[C1]] : index
// CHECK: %[[COND:.*]] = arith.andi %[[CMPX]], %[[CMPY]] : i1
@@ -95,7 +95,7 @@ module attributes {transform.with_named_sequence} {
!type = memref<2 x 32 x f32>
!type1d = memref<32 x f32>
-// CHECK-DAG: #[[$MAP:.*]] = affine_map<(d0) -> (d0 floordiv 16)>
+// CHECK-DAG: #[[$MAP:.*]] = affine_map<()[s0] -> (s0 floordiv 16)>
// CHECK-LABEL: func.func @warp_3d(
// CHECK-SAME: %[[ARGX:[0-9a-z]+]]: memref<2x32xf32>
@@ -114,7 +114,7 @@ func.func @warp_3d(%x: !type, %y: !type, %t: !type1d, %alpha : f32, %stream : !g
// CHECK: gpu.launch
// CHECK: %[[TIDX:.*]] = gpu.thread_id x
// CHECK: %[[TIDY:.*]] = gpu.thread_id y
-// CHECK-DAG: %[[W:.*]] = affine.apply #[[$MAP]](%[[TIDX]])
+// CHECK-DAG: %[[W:.*]] = affine.apply #[[$MAP]]()[%[[TIDX]]]
// CHECK-DAG: %[[CMPX:.*]] = arith.cmpi ult, %[[TIDX]], %[[C32]] : index
// CHECK-DAG: %[[CMPY:.*]] = arith.cmpi ult, %[[TIDY]], %[[C3]] : index
// CHECK: %[[COND:.*]] = arith.andi %[[CMPX]], %[[CMPY]] : i1
@@ -354,9 +354,9 @@ module attributes {transform.with_named_sequence} {
!type = memref<2 x 32 x f32>
!type1d = memref<32 x f32>
-// CHECK-DAG: #[[$MAPWGLIN:.*]] = affine_map<(d0, d1, d2) -> (d0 + d1 * 32 + d2 * 256)>
-// CHECK-DAG: #[[$MAPWGX:.*]] = affine_map<(d0, d1) -> (((d0 + d1 * 32) floordiv 128) mod 2)>
-// CHECK-DAG: #[[$MAPWGY:.*]] = affine_map<(d0, d1, d2) -> (d2 + ((d0 + d1 * 32) floordiv 128) floordiv 2)>
+// CHECK-DAG: #[[$MAPWGLIN:.*]] = affine_map<()[s0, s1, s2] -> (s0 + s1 * 32 + s2 * 256)>
+// CHECK-DAG: #[[$MAPWGX:.*]] = affine_map<()[s0, s1] -> (((s0 + s1 * 32) floordiv 128) mod 2)>
+// CHECK-DAG: #[[$MAPWGY:.*]] = affine_map<()[s0, s1, s2] -> (s2 + ((s0 + s1 * 32) floordiv 128) floordiv 2)>
// CHECK-LABEL: func.func @warpgroup_linear(
// CHECK-SAME: %[[ARGX:[0-9a-z]+]]: memref<2x32xf32>
@@ -376,9 +376,9 @@ func.func @warpgroup_linear(%x: !type, %y: !type, %t: !type1d, %alpha : f32, %st
// CHECK-DAG: %[[TIDX:.*]] = gpu.thread_id x
// CHECK-DAG: %[[TIDY:.*]] = gpu.thread_id y
// CHECK-DAG: %[[TIDZ:.*]] = gpu.thread_id z
-// CHECK-DAG: %[[WIDLIN:.*]] = affine.apply #[[$MAPWGLIN]](%[[TIDX]], %[[TIDY]], %[[TIDZ]])
-// CHECK-DAG: %[[WIDX:.*]] = affine.apply #[[$MAPWGX]](%[[TIDX]], %[[TIDY]])
-// CHECK-DAG: %[[WIDY:.*]] = affine.apply #[[$MAPWGY]](%[[TIDX]], %[[TIDY]], %[[TIDZ]])
+// CHECK-DAG: %[[WIDLIN:.*]] = affine.apply #[[$MAPWGLIN]]()[%[[TIDX]], %[[TIDY]], %[[TIDZ]]]
+// CHECK-DAG: %[[WIDX:.*]] = affine.apply #[[$MAPWGX]]()[%[[TIDX]], %[[TIDY]]]
+// CHECK-DAG: %[[WIDY:.*]] = affine.apply #[[$MAPWGY]]()[%[[TIDX]], %[[TIDY]], %[[TIDZ]]]
// CHECK-DAG: %[[CMPLIN:.*]] = arith.cmpi ult, %[[WIDLIN]], %[[C768]] : index
// CHECK: scf.if %[[CMPLIN]]
// CHECK: memref.load %[[ARGX]][%[[WIDX]], %[[WIDY]]]
@@ -410,9 +410,9 @@ module attributes {transform.with_named_sequence} {
!type = memref<2 x 32 x f32>
!type1d = memref<32 x f32>
-// CHECK-DAG: #[[$MAPWLIN:.*]] = affine_map<(d0, d1, d2) -> (d0 + d1 * 32 + d2 * 256)>
-// CHECK-DAG: #[[$MAPWX:.*]] = affine_map<(d0, d1, d2) -> ((d1 + d2 * 8 + d0 floordiv 32) mod 2)>
-// CHECK-DAG: #[[$MAPWY:.*]] = affine_map<(d0, d1, d2) -> ((d1 + d2 * 8 + d0 floordiv 32) floordiv 2)>
+// CHECK-DAG: #[[$MAPWLIN:.*]] = affine_map<()[s0, s1, s2] -> (s0 + s1 * 32 + s2 * 256)>
+// CHECK-DAG: #[[$MAPWX:.*]] = affine_map<()[s0, s1, s2] -> ((s1 + s2 * 8 + s0 floordiv 32) mod 2)>
+// CHECK-DAG: #[[$MAPWY:.*]] = affine_map<()[s0, s1, s2] -> ((s1 + s2 * 8 + s0 floordiv 32) floordiv 2)>
// CHECK-LABEL: func.func @warp_linear(
// CHECK-SAME: %[[ARGX:[0-9a-z]+]]: memref<2x32xf32>
@@ -432,9 +432,9 @@ func.func @warp_linear(%x: !type, %y: !type, %t: !type1d, %alpha : f32, %stream
// CHECK-DAG: %[[TIDX:.*]] = gpu.thread_id x
// CHECK-DAG: %[[TIDY:.*]] = gpu.thread_id y
// CHECK-DAG: %[[TIDZ:.*]] = gpu.thread_id z
-// CHECK-DAG: %[[WIDLIN:.*]] = affine.apply #[[$MAPWLIN]](%[[TIDX]], %[[TIDY]], %[[TIDZ]])
-// CHECK-DAG: %[[WIDX:.*]] = affine.apply #[[$MAPWX]](%[[TIDX]], %[[TIDY]], %[[TIDZ]])
-// CHECK-DAG: %[[WIDY:.*]] = affine.apply #[[$MAPWY]](%[[TIDX]], %[[TIDY]], %[[TIDZ]])
+// CHECK-DAG: %[[WIDLIN:.*]] = affine.apply #[[$MAPWLIN]]()[%[[TIDX]], %[[TIDY]], %[[TIDZ]]]
+// CHECK-DAG: %[[WIDX:.*]] = affine.apply #[[$MAPWX]]()[%[[TIDX]], %[[TIDY]], %[[TIDZ]]]
+// CHECK-DAG: %[[WIDY:.*]] = affine.apply #[[$MAPWY]]()[%[[TIDX]], %[[TIDY]], %[[TIDZ]]]
// CHECK-DAG: %[[CMPLIN:.*]] = arith.cmpi ult, %[[WIDLIN]], %[[C192]] : index
// CHECK: scf.if %[[CMPLIN]]
// CHECK: memref.load %[[ARGX]][%[[WIDX]], %[[WIDY]]]
@@ -466,12 +466,12 @@ module attributes {transform.with_named_sequence} {
!type = memref<2 x 32 x f32>
!type1d = memref<32 x f32>
-// CHECK-DAG: #[[$MAPWX:.*]] = affine_map<(d0, d1) -> (((d0 + d1 * 18) floordiv 32) mod 3)>
-// CHECK-DAG: #[[$MAPWY:.*]] = affine_map<(d0, d1) -> ((((d0 + d1 * 18) floordiv 32) mod 6) floordiv 3)>
+// CHECK-DAG: #[[$MAPWX:.*]] = affine_map<()[s0, s1] -> (((s0 + s1 * 18) floordiv 32) mod 3)>
+// CHECK-DAG: #[[$MAPWY:.*]] = affine_map<()[s0, s1] -> ((((s0 + s1 * 18) floordiv 32) mod 6) floordiv 3)>
-// CHECK-DAG: #[[$MAPLIN:.*]] = affine_map<(d0, d1) -> (d0 + d1 * 18)>
-// CHECK-DAG: #[[$MAPLX:.*]] = affine_map<(d0, d1) -> ((d0 + d1 * 18) mod 10)>
-// CHECK-DAG: #[[$MAPLY:.*]] = affine_map<(d0, d1) -> ((d0 + d1 * 18) floordiv 10)>
+// CHECK-DAG: #[[$MAPLIN:.*]] = affine_map<()[s0, s1] -> (s0 + s1 * 18)>
+// CHECK-DAG: #[[$MAPLX:.*]] = affine_map<()[s0, s1] -> ((s0 + s1 * 18) mod 10)>
+// CHECK-DAG: #[[$MAPLY:.*]] = affine_map<()[s0, s1] -> ((s0 + s1 * 18) floordiv 10)>
// CHECK-LABEL: func.func @map_multi_level_linear(
func.func @map_multi_level_linear(%x: !type, %y: !type, %t: !type1d, %alpha : f32, %stream : !gpu.async.token) -> !type {
@@ -504,9 +504,9 @@ func.func @map_multi_level_linear(%x: !type, %y: !type, %t: !type1d, %alpha : f3
memref.store %6, %y[%i, %j] : !type
} { mapping = [#gpu.thread<y>, #gpu.thread<x>]}
- // CHECK-DAG: %[[LIN:.*]] = affine.apply #[[$MAPLIN]](%[[TIDX]], %[[TIDY]])
- // CHECK-DAG: %[[WIDX:.*]] = affine.apply #[[$MAPWX]](%[[TIDX]], %[[TIDY]])
- // CHECK-DAG: %[[WIDY:.*]] = affine.apply #[[$MAPWY]](%[[TIDX]], %[[TIDY]])
+ // CHECK-DAG: %[[LIN:.*]] = affine.apply #[[$MAPLIN]]()[%[[TIDX]], %[[TIDY]]]
+ // CHECK-DAG: %[[WIDX:.*]] = affine.apply #[[$MAPWX]]()[%[[TIDX]], %[[TIDY]]]
+ // CHECK-DAG: %[[WIDY:.*]] = affine.apply #[[$MAPWY]]()[%[[TIDX]], %[[TIDY]]]
// CHECK-DAG: %[[CMPLIN:.*]] = arith.cmpi ult, %[[LIN]], %[[C192]] : index
// CHECK: scf.if %[[CMPLIN]]
scf.forall (%i, %j, %k) in (%c3, %c2, %c1) {
@@ -515,8 +515,8 @@ func.func @map_multi_level_linear(%x: !type, %y: !type, %t: !type1d, %alpha : f3
memref.store %8, %y[%i, %j] : !type
} {mapping = [#gpu.warp<linear_dim_0>, #gpu.warp<linear_dim_1>, #gpu.warp<linear_dim_2>] }
- // CHECK-DAG: %[[LIDX:.*]] = affine.apply #[[$MAPLX]](%[[TIDX]], %[[TIDY]])
- // CHECK-DAG: %[[LIDY:.*]] = affine.apply #[[$MAPLY]](%[[TIDX]], %[[TIDY]])
+ // CHECK-DAG: %[[LIDX:.*]] = affine.apply #[[$MAPLX]]()[%[[TIDX]], %[[TIDY]]]
+ // CHECK-DAG: %[[LIDY:.*]] = affine.apply #[[$MAPLY]]()[%[[TIDX]], %[[TIDY]]]
// CHECK-DAG: %[[COND:.*]] = arith.cmpi ult, %[[LIN]], %[[C20]] : index
// CHECK: scf.if %[[COND]]
// CHECK: memref.load %{{.*}}[%[[LIDX]]] : memref<32xf32>
@@ -648,7 +648,7 @@ module attributes {transform.with_named_sequence} {
#map1 = affine_map<(d0) -> (d0 * 32)>
// CHECK-DAG: #[[$MAPB:.*]] = affine_map<(d0) -> (d0 * 128)>
-// CHECK-DAG: #[[$MAPW:.*]] = affine_map<(d0, d1, d2) -> (d2 * 32 + ((d0 + d1 * 4) floordiv 32) * 32)>
+// CHECK-DAG: #[[$MAPW:.*]] = affine_map<()[s0, s1, s2] -> (s2 * 32 + ((s0 + s1 * 4) floordiv 32) * 32)>
// CHECK-LABEL: func.func @simple_fill(
func.func @simple_fill(%arg0: memref<128xf32>) -> memref<128xf32> {
@@ -667,7 +667,7 @@ func.func @simple_fill(%arg0: memref<128xf32>) -> memref<128xf32> {
// CHECK: %[[TIDX:.*]] = gpu.thread_id x
// CHECK: %[[TIDY:.*]] = gpu.thread_id y
// CHECK: %[[TIDZ:.*]] = gpu.thread_id z
-// CHECK: %[[THX:.*]] = affine.apply #[[$MAPW]](%[[TIDX]], %[[TIDY]], %[[TIDZ]])
+// CHECK: %[[THX:.*]] = affine.apply #[[$MAPW]]()[%[[TIDX]], %[[TIDY]], %[[TIDZ]]]
// CHECK-NOT: scf.if
// CHECK: memref.subview %{{.*}}[%[[THX]]]
%1 = affine.apply #map1(%arg2)
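
The transform-gpu.mlir churn above is mechanical: once thread ids are valid
symbols, the maps produced when composing affine.apply ops bind them in symbol
position (()[s0]) instead of dim position ((d0)), so every affine.apply call
site changes shape accordingly. A before/after sketch with an illustrative map:

    // Before: thread id bound as a dim.
    %w = affine.apply affine_map<(d0) -> (d0 floordiv 128)>(%tid)
    // After: thread id bound as a symbol.
    %w = affine.apply affine_map<()[s0] -> (s0 floordiv 128)>()[%tid]

Binding as a symbol is generally preferable, since affine analyses treat
symbols as loop-invariant parameters.
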
From 10557b73713ea7f1f53cd91b9a81a12de4168dab Mon Sep 17 00:00:00 2001
From: linuxlonelyeagle <2020382038 at qq.com>
Date: Mon, 16 Dec 2024 18:09:38 +0800
Subject: [PATCH 2/2] add AffineSymbol trait and attach it to the threadid op.
---
mlir/include/mlir/Dialect/GPU/IR/GPUOps.td | 2 +-
mlir/include/mlir/IR/OpBase.td | 2 ++
mlir/include/mlir/IR/OpDefinition.h | 34 ++++++++++++++--------
mlir/lib/Dialect/Affine/IR/AffineOps.cpp | 7 ++---
mlir/lib/Dialect/Affine/IR/CMakeLists.txt | 1 -
mlir/lib/IR/Operation.cpp | 12 ++++++--
6 files changed, 38 insertions(+), 20 deletions(-)
diff --git a/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td b/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
index d08e7ceb9e6c69..5c51dfc1471623 100644
--- a/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
+++ b/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
@@ -223,7 +223,7 @@ def GPU_GridDimOp : GPU_IndexOp<"grid_dim"> {
There is an implicit upper bound of `kMaxDim` (currently uint32_t::max).
}];
}
-def GPU_ThreadIdOp : GPU_IndexOp<"thread_id"> {
+def GPU_ThreadIdOp : GPU_IndexOp<"thread_id", [AffineSymbol]> {
let description = [{
Returns the thread id, i.e. the index of the current thread within the block
along the x, y, or z `dimension`.
diff --git a/mlir/include/mlir/IR/OpBase.td b/mlir/include/mlir/IR/OpBase.td
index 5c82c041c62eeb..3dc6341b0e24fe 100644
--- a/mlir/include/mlir/IR/OpBase.td
+++ b/mlir/include/mlir/IR/OpBase.td
@@ -63,6 +63,8 @@ class PredOpTrait<string descr, Pred pred, list<Trait> traits = []>
// Op defines an affine scope.
def AffineScope : NativeOpTrait<"AffineScope">;
+// Op defines an affine symbol.
+def AffineSymbol : NativeOpTrait<"AffineSymbol">;
// Op defines an automatic allocation scope.
def AutomaticAllocationScope :
NativeOpTrait<"AutomaticAllocationScope">;
diff --git a/mlir/include/mlir/IR/OpDefinition.h b/mlir/include/mlir/IR/OpDefinition.h
index 59f094d6690991..3c2f4a89eaadba 100644
--- a/mlir/include/mlir/IR/OpDefinition.h
+++ b/mlir/include/mlir/IR/OpDefinition.h
@@ -365,6 +365,7 @@ LogicalResult verifyResultSizeAttr(Operation *op, StringRef sizeAttrName);
LogicalResult verifyNoRegionArguments(Operation *op);
LogicalResult verifyElementwise(Operation *op);
LogicalResult verifyIsIsolatedFromAbove(Operation *op);
+LogicalResult verifyIndexResultType(Operation *op);
} // namespace impl
/// Helper class for implementing traits. Clients are not expected to interact
@@ -1268,6 +1269,16 @@ class AffineScope : public TraitBase<ConcreteType, AffineScope> {
}
};
+/// A trait for operations whose results are valid affine symbols: the
+/// result of an op carrying this trait can be used as an affine symbol.
+template <typename ConcreteType>
+class AffineSymbol : public TraitBase<ConcreteType, AffineSymbol> {
+public:
+ static LogicalResult verifyTrait(Operation *op) {
+ return impl::verifyIndexResultType(op);
+ }
+};
+
/// A trait of region holding operations that define a new scope for automatic
/// allocations, i.e., allocations that are freed when control is transferred
/// back from the operation's region. Any operations performing such allocations
@@ -1778,9 +1789,9 @@ class Op : public OpState, public Traits<ConcreteType>... {
llvm::is_detected<has_single_result_fold_t, T>::value;
/// Trait to check if T provides a general 'fold' method.
template <typename T, typename... Args>
- using has_fold_t = decltype(std::declval<T>().fold(
- std::declval<ArrayRef<Attribute>>(),
- std::declval<SmallVectorImpl<OpFoldResult> &>()));
+ using has_fold_t = decltype(
+ std::declval<T>().fold(std::declval<ArrayRef<Attribute>>(),
+ std::declval<SmallVectorImpl<OpFoldResult> &>()));
template <typename T>
constexpr static bool has_fold_v = llvm::is_detected<has_fold_t, T>::value;
/// Trait to check if T provides a 'fold' method with a FoldAdaptor for a
@@ -1793,9 +1804,9 @@ class Op : public OpState, public Traits<ConcreteType>... {
llvm::is_detected<has_fold_adaptor_single_result_fold_t, T>::value;
/// Trait to check if T provides a general 'fold' method with a FoldAdaptor.
template <typename T, typename... Args>
- using has_fold_adaptor_fold_t = decltype(std::declval<T>().fold(
- std::declval<typename T::FoldAdaptor>(),
- std::declval<SmallVectorImpl<OpFoldResult> &>()));
+ using has_fold_adaptor_fold_t = decltype(
+ std::declval<T>().fold(std::declval<typename T::FoldAdaptor>(),
+ std::declval<SmallVectorImpl<OpFoldResult> &>()));
template <class T>
constexpr static bool has_fold_adaptor_v =
llvm::is_detected<has_fold_adaptor_fold_t, T>::value;
@@ -1810,18 +1821,17 @@ class Op : public OpState, public Traits<ConcreteType>... {
/// Trait to check if printProperties(OpAsmPrinter, T, ArrayRef<StringRef>)
/// exist
template <typename T, typename... Args>
- using has_print_properties =
- decltype(printProperties(std::declval<OpAsmPrinter &>(),
- std::declval<T>(),
- std::declval<ArrayRef<StringRef>>()));
+ using has_print_properties = decltype(
+ printProperties(std::declval<OpAsmPrinter &>(), std::declval<T>(),
+ std::declval<ArrayRef<StringRef>>()));
template <typename T>
using detect_has_print_properties =
llvm::is_detected<has_print_properties, T>;
/// Trait to check if parseProperties(OpAsmParser, T) exist
template <typename T, typename... Args>
- using has_parse_properties = decltype(parseProperties(
- std::declval<OpAsmParser &>(), std::declval<T &>()));
+ using has_parse_properties = decltype(
+ parseProperties(std::declval<OpAsmParser &>(), std::declval<T &>()));
template <typename T>
using detect_has_parse_properties =
llvm::is_detected<has_parse_properties, T>;
diff --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
index cf355515deb63d..8f30b3e29d955b 100644
--- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
@@ -8,7 +8,6 @@
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/IR/AffineValueMap.h"
-#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/UB/IR/UBOps.h"
#include "mlir/Dialect/Utils/StaticValueUtils.h"
@@ -411,7 +410,7 @@ bool mlir::affine::isValidSymbol(Value value) {
/// A value can be used as a symbol for `region` iff it meets one of the
/// following conditions:
/// *) It is a constant.
-/// *) It is the result of a gpu.thread_id op.
+/// *) It is the result of an op with the AffineSymbol trait.
/// *) It is the result of an affine apply operation with symbol arguments.
/// *) It is a result of the dim op on a memref whose corresponding size is
/// a valid symbol.
@@ -445,8 +444,8 @@ bool mlir::affine::isValidSymbol(Value value, Region *region) {
if (matchPattern(defOp, m_Constant(&operandCst)))
return true;
- // ThreadId operation is ok.
- if (isa<gpu::ThreadIdOp>(defOp))
+  // Ops with the AffineSymbol trait are ok.
+ if (defOp->hasTrait<OpTrait::AffineSymbol>())
return true;
// Affine apply operation is ok if all of its operands are ok.
diff --git a/mlir/lib/Dialect/Affine/IR/CMakeLists.txt b/mlir/lib/Dialect/Affine/IR/CMakeLists.txt
index 9dad5cdb28cbc4..7f7a01be891e05 100644
--- a/mlir/lib/Dialect/Affine/IR/CMakeLists.txt
+++ b/mlir/lib/Dialect/Affine/IR/CMakeLists.txt
@@ -22,5 +22,4 @@ add_mlir_dialect_library(MLIRAffineDialect
MLIRSideEffectInterfaces
MLIRUBDialect
MLIRValueBoundsOpInterface
- MLIRGPUDialect
)
diff --git a/mlir/lib/IR/Operation.cpp b/mlir/lib/IR/Operation.cpp
index fe0fee0f8db2ce..1e1c0bda024e8b 100644
--- a/mlir/lib/IR/Operation.cpp
+++ b/mlir/lib/IR/Operation.cpp
@@ -322,8 +322,8 @@ void Operation::setAttrs(DictionaryAttr newAttrs) {
}
void Operation::setAttrs(ArrayRef<NamedAttribute> newAttrs) {
if (getPropertiesStorageSize()) {
- // We're spliting the providing array of attributes by removing the inherentAttr
- // which will be stored in the properties.
+    // We're splitting the provided array of attributes by removing the
+    // inherentAttr, which will be stored in the properties.
SmallVector<NamedAttribute> discardableAttrs;
discardableAttrs.reserve(newAttrs.size());
for (NamedAttribute attr : newAttrs) {
@@ -1390,6 +1390,14 @@ LogicalResult OpTrait::impl::verifyIsIsolatedFromAbove(Operation *isolatedOp) {
return success();
}
+LogicalResult OpTrait::impl::verifyIndexResultType(Operation *op) {
+  if (op->getNumResults() != 1)
+    return op->emitError("operation must have exactly one result");
+  if (!mlir::isa<IndexType>(op->getResult(0).getType()))
+    return op->emitError("operation's result type must be index");
+  return success();
+}
+
bool OpTrait::hasElementwiseMappableTraits(Operation *op) {
return op->hasTrait<Elementwise>() && op->hasTrait<Scalarizable>() &&
op->hasTrait<Vectorizable>() && op->hasTrait<Tensorizable>();