[Mlir-commits] [mlir] 2648e2d - [mlir][OpDSL] Rename `AttributeDef` to `IndexAttrDef`.

llvmlistbot at llvm.org
Fri Jan 7 04:17:08 PST 2022


Author: gysit
Date: 2022-01-07T12:09:25Z
New Revision: 2648e2d5ddd753b9480372db3186721ef470b7ac

URL: https://github.com/llvm/llvm-project/commit/2648e2d5ddd753b9480372db3186721ef470b7ac
DIFF: https://github.com/llvm/llvm-project/commit/2648e2d5ddd753b9480372db3186721ef470b7ac.diff

LOG: [mlir][OpDSL] Rename `AttributeDef` to `IndexAttrDef`.

Renaming `AttributeDef` to `IndexAttrDef` prepares OpDSL to support additional kinds of attributes and more closely reflects the purpose of the attribute: defining index symbols that are replaced by integer values at compile time.
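At the use site the change is a pure rename; a minimal sketch, mirroring the `strided_copy` example from OpDSL.md in the diff below (the wildcard import follows the OpDSL test files):

```python
from mlir.dialects.linalg.opdsl.lang import *

@linalg_structured_op
def strided_copy(I=TensorDef(T, S.IH, S.IW),
                 O=TensorDef(T, S.OH, S.OW, output=True),
                 strides=IndexAttrDef(S.SH, S.SW)):  # formerly AttributeDef(S.SH, S.SW)
  """Copy a subset of the input tensor elements to the output tensor."""
  O[D.oh, D.ow] = I[D.oh * S.SH, D.ow * S.SW]
```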

Reviewed By: nicolasvasilache

Differential Revision: https://reviews.llvm.org/D115237

Added: 
    

Modified: 
    mlir/docs/Dialects/Linalg/OpDSL.md
    mlir/python/mlir/dialects/linalg/opdsl/lang/comprehension.py
    mlir/python/mlir/dialects/linalg/opdsl/lang/dsl.py
    mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py
    mlir/test/mlir-linalg-ods-gen/test-linalg-ods-yaml-gen.yaml
    mlir/test/python/dialects/linalg/opdsl/arguments.py
    mlir/test/python/dialects/linalg/opdsl/emit_convolution.py
    mlir/test/python/dialects/linalg/opdsl/emit_pooling.py

Removed: 
    


################################################################################
diff  --git a/mlir/docs/Dialects/Linalg/OpDSL.md b/mlir/docs/Dialects/Linalg/OpDSL.md
index 50115fa2b4391..271056e167582 100644
--- a/mlir/docs/Dialects/Linalg/OpDSL.md
+++ b/mlir/docs/Dialects/Linalg/OpDSL.md
@@ -118,13 +118,13 @@ The following example demonstrates the use of attributes:
 @linalg_structured_op
 def strided_copy(I=TensorDef(T, S.IH, S.IW),
                  O=TensorDef(T, S.OH, S.OW, output=True),
-                 strides=AttributeDef(S.SH, S.SW)):
+                 strides=IndexAttrDef(S.SH, S.SW)):
   """Copy a subset of the input tensor elements to the output tensor"""
   O[D.oh, D.ow] = I[D.oh * S.SH, D.ow * S.SW]
 ```
 
 The operation implements a strided copy from the input tensor `I` to the output
-tensor `O`. The `strides` attribute is bound to an `AttributeDef`. It defines
+tensor `O`. The `strides` attribute is bound to an `IndexAttrDef`. It defines
 the symbols `S.SH` and `S.SW`, which are used to index the input tensor `I`.
 When instantiating the operation, the attribute is set using a named argument:
 
@@ -157,8 +157,8 @@ def pooling_poly(
     I=TensorDef(T1, S.N, S.H, S.W, S.C),
     K=TensorDef(T2, S.KH, S.KW, index_dims=[D.kh, D.kw]),
     O=TensorDef(U, S.N, S.OH, S.OW, S.C, output=True),
-    strides=AttributeDef(S.SH, S.SW),
-    dilations=AttributeDef(S.DH, S.DW)):
+    strides=IndexAttrDef(S.SH, S.SW),
+    dilations=IndexAttrDef(S.DH, S.DW)):
   O[D.n, D.oh, D.ow, D.c] += \
       cast(U, I[D.n, D.oh * S.SH + D.kh * S.DH, D.ow * S.SW + D.kw * S.DW, D.c])
 ```
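As the doc hunk above notes, the index attribute is set with a named argument when the op is instantiated. A hedged usage sketch; the tensor names and stride values are illustrative only:

```python
# Hypothetical call: strides=[1, 2] substitutes S.SH=1 and S.SW=2 at
# compile time; in_tensor and out_tensor stand for previously created values.
strided_copy(in_tensor, outs=[out_tensor], strides=[1, 2])
```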

diff  --git a/mlir/python/mlir/dialects/linalg/opdsl/lang/comprehension.py b/mlir/python/mlir/dialects/linalg/opdsl/lang/comprehension.py
index 732cacfff63ab..0edd5d13d5dbe 100644
--- a/mlir/python/mlir/dialects/linalg/opdsl/lang/comprehension.py
+++ b/mlir/python/mlir/dialects/linalg/opdsl/lang/comprehension.py
@@ -261,18 +261,17 @@ def to_scalar_expression(self) -> ScalarExpression:
     return ScalarArg(self.scalar_name).expr()
 
 
-class AttributeDef:
+class IndexAttrDef:
   """Index Attribute definition.
 
   Index attributes provide a way to define and set symbols that can be used in
   indexing expressions. Every attribute specifies a tuple of symbols that at
   compile-time are replaced by integer values.
   """
-  yaml_tag = "!LinalgAttributeDef"
 
   def __init__(self, *sizes: SymbolDef):
     if any(not isinstance(size, SymbolDef) for size in sizes):
-      raise ValueError(f"AttributeDef requires sizes of type SymbolDef but got "
+      raise ValueError(f"IndexAttrDef requires sizes of type SymbolDef but got "
                        f"{sizes}")
     self.operand_def = OperandDef(OperandKind.Attribute, I64, size_exprs=sizes)
 
@@ -501,6 +500,7 @@ def __init__(self, cpp_name: str):
 ContractionOpInterface = OpInterfaceDef("LinalgContractionOpInterface")
 ConvolutionOpInterface = OpInterfaceDef("LinalgConvolutionOpInterface")
 
+
 class OpMetadataDef(YAMLObject):
   """Metadata about the op (generally not behavior impacting)."""
   yaml_tag = "!LinalgOpMetadata"

diff  --git a/mlir/python/mlir/dialects/linalg/opdsl/lang/dsl.py b/mlir/python/mlir/dialects/linalg/opdsl/lang/dsl.py
index a65350ccd430d..459b1206af459 100644
--- a/mlir/python/mlir/dialects/linalg/opdsl/lang/dsl.py
+++ b/mlir/python/mlir/dialects/linalg/opdsl/lang/dsl.py
@@ -20,6 +20,7 @@
 StructuredOpOuts = Union[ir.Operation, ir.OpView, ir.OpResultList,
                          Sequence[Union[ir.Value, ir.Operation, ir.OpView]]]
 
+
 @contextmanager
 def bind_op_def(model: LinalgOpDef):
   if hasattr(_CONTEXT, "current_op_def"):
@@ -128,12 +129,12 @@ def linalg_structured_op(dsl_func=None,
   sig = inspect.signature(dsl_func)
   for param_name, param in sig.parameters.items():
     param_default = param.default
-    if isinstance(param_default, (TensorDef, ScalarDef, AttributeDef)):
+    if isinstance(param_default, (TensorDef, ScalarDef, IndexAttrDef)):
       tc_model.add_operand(param_name, param_default.operand_def)
     else:
       raise ValueError(
           f"@linalg_structured_op function parameters must be defaulted as "
-          f"TensorDef(...), ScalarDef(...), or AttributeDef(...): "
+          f"TensorDef(...), ScalarDef(...), or IndexAttrDef(...): "
           f"Found {param_name}: {param_default}")
     dsl_func_args.append(param_default)
 

diff  --git a/mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py b/mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py
index 85bed25febdd2..1e78e624f7165 100644
--- a/mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py
+++ b/mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py
@@ -20,6 +20,7 @@ def matmul(
   implements(ContractionOpInterface)
   C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.k, D.n])
 
+
 @linalg_structured_op
 def matmul_unsigned(
     A=TensorDef(T1, S.M, S.K),
@@ -34,6 +35,7 @@ def matmul_unsigned(
   implements(ContractionOpInterface)
   C[D.m, D.n] += cast_unsigned(U, A[D.m, D.k]) * cast_unsigned(U, B[D.k, D.n])
 
+
 @linalg_structured_op
 def quantized_matmul(
     A=TensorDef(T1, S.M, S.K),
@@ -49,13 +51,15 @@ def quantized_matmul(
   matmul.
   """
   domain(D.m, D.n, D.k)
-  C[D.m, D.n] += (cast(U, A[D.m, D.k]) - cast(U, AZp)) * (cast(U, B[D.k, D.n]) - cast(U, BZp))
+  C[D.m, D.n] += (cast(U, A[D.m, D.k]) - cast(U, AZp)) * (
+      cast(U, B[D.k, D.n]) - cast(U, BZp))
+
 
 @linalg_structured_op
-def mmt4d(lhs=TensorDef(TV.LhsType, S.M, S.K, S.M0, S.K0),
-          rhs=TensorDef(TV.RhsType, S.N, S.K, S.N0, S.K0),
-          accum=TensorDef(TV.AccumType, S.M, S.N, S.M0, S.N0,
-                                  output=True)):
+def mmt4d(
+    lhs=TensorDef(TV.LhsType, S.M, S.K, S.M0, S.K0),
+    rhs=TensorDef(TV.RhsType, S.N, S.K, S.N0, S.K0),
+    accum=TensorDef(TV.AccumType, S.M, S.N, S.M0, S.N0, output=True)):
   """Performs a matrix-matrix-transpose multiplication of two 4D inputs.
 
     Differences from linalg.matmul:
@@ -68,7 +72,10 @@ def mmt4d(lhs=TensorDef(TV.LhsType, S.M, S.K, S.M0, S.K0),
   """
   domain(D.m, D.n, D.k, D.m0, D.n0, D.k0)
   implements(ContractionOpInterface)
-  accum[D.m, D.n, D.m0, D.n0] += cast(TV.AccumType, lhs[D.m, D.k, D.m0, D.k0]) * cast(TV.AccumType, rhs[D.n, D.k, D.n0, D.k0])
+  accum[D.m, D.n, D.m0,
+        D.n0] += cast(TV.AccumType, lhs[D.m, D.k, D.m0, D.k0]) * cast(
+            TV.AccumType, rhs[D.n, D.k, D.n0, D.k0])
+
 
 @linalg_structured_op
 def batch_matmul(
@@ -84,6 +91,7 @@ def batch_matmul(
   implements(ContractionOpInterface)
   C[D.b, D.m, D.n] += cast(U, A[D.b, D.m, D.k]) * cast(U, B[D.b, D.k, D.n])
 
+
 @linalg_structured_op
 def quantized_batch_matmul(
     A=TensorDef(T1, Batch, S.M, S.K),
@@ -99,7 +107,8 @@ def quantized_batch_matmul(
   matmul.
   """
   domain(D.b, D.m, D.n, D.k)
-  C[D.b, D.m, D.n] += (cast(U, A[D.b, D.m, D.k]) - cast(U, AZp)) * (cast(U, B[D.b, D.k, D.n]) - cast(U, BZp))
+  C[D.b, D.m, D.n] += (cast(U, A[D.b, D.m, D.k]) - cast(U, AZp)) * (
+      cast(U, B[D.b, D.k, D.n]) - cast(U, BZp))
 
 
 @linalg_structured_op
@@ -136,7 +145,7 @@ def vecmat(
 def batch_matvec(
     A=TensorDef(T1, Batch, S.M, S.K),
     B=TensorDef(T2, Batch, S.K),
-    C=TensorDef(U,  Batch, S.M, output=True)):
+    C=TensorDef(U, Batch, S.M, output=True)):
   """Performs a batched matrix-vector multiplication.
 
   Numeric casting is performed on the operands to the inner multiply, promoting
@@ -158,6 +167,7 @@ def dot(
   implements(ContractionOpInterface)
   C[None] += cast(U, A[D.m]) * cast(U, B[D.m])
 
+
 @linalg_structured_op
 def conv_1d(
     I=TensorDef(T1, S.OW + S.KW),
@@ -170,8 +180,8 @@ def conv_1d(
   """
   implements(ConvolutionOpInterface)
   domain(D.ow, D.kw)
-  O[D.ow] += cast(
-      U, I[D.ow + D.kw]) * cast(U, K[D.kw])
+  O[D.ow] += cast(U, I[D.ow + D.kw]) * cast(U, K[D.kw])
+
 
 @linalg_structured_op
 def conv_2d(
@@ -185,8 +195,8 @@ def conv_2d(
   """
   implements(ConvolutionOpInterface)
   domain(D.oh, D.ow, D.kh, D.kw)
-  O[D.oh, D.ow] += cast(
-      U, I[D.oh + D.kh, D.ow + D.kw]) * cast(U, K[D.kh, D.kw])
+  O[D.oh, D.ow] += cast(U, I[D.oh + D.kh, D.ow + D.kw]) * cast(U, K[D.kh, D.kw])
+
 
 @linalg_structured_op
 def conv_3d(
@@ -200,16 +210,18 @@ def conv_3d(
   """
   implements(ConvolutionOpInterface)
   domain(D.od, D.oh, D.ow, D.kd, D.kh, D.kw)
-  O[D.od, D.oh, D.ow] += cast(
-      U, I[D.od + D.kd, D.oh + D.kh, D.ow + D.kw]) * cast(U, K[D.kd, D.kh, D.kw])
+  O[D.od, D.oh,
+    D.ow] += cast(U, I[D.od + D.kd, D.oh + D.kh, D.ow + D.kw]) * cast(
+        U, K[D.kd, D.kh, D.kw])
+
 
 @linalg_structured_op
 def conv_1d_nwc_wcf(
     I=TensorDef(T1, S.N, S.OW * S.SW + S.KW * S.DW, S.C),
     K=TensorDef(T2, S.KW, S.C, S.F),
     O=TensorDef(U, S.N, S.OW, S.F, output=True),
-    strides=AttributeDef(S.SW),
-    dilations=AttributeDef(S.DW)):
+    strides=IndexAttrDef(S.SW),
+    dilations=IndexAttrDef(S.DW)):
   """Performs 1-D convolution.
 
   Numeric casting is performed on the operands to the inner multiply, promoting
@@ -217,17 +229,18 @@ def conv_1d_nwc_wcf(
   """
   implements(ConvolutionOpInterface)
   domain(D.n, D.ow, D.f, D.kw, D.c)
-  O[D.n, D.ow, D.f] += cast(
-      U, I[D.n, D.ow * S.SW + D.kw * S.DW, D.c
-           ]) * cast(U, K[D.kw, D.c, D.f])
+  O[D.n, D.ow, D.f] += cast(U, I[D.n, D.ow * S.SW + D.kw * S.DW, D.c]) * cast(
+      U, K[D.kw, D.c, D.f])
+
 
 @linalg_structured_op
 def conv_2d_nhwc_hwcf(
-    I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW, S.C),
+    I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW,
+                S.C),
     K=TensorDef(T2, S.KH, S.KW, S.C, S.F),
     O=TensorDef(U, S.N, S.OH, S.OW, S.F, output=True),
-    strides=AttributeDef(S.SH, S.SW),
-    dilations=AttributeDef(S.DH, S.DW)):
+    strides=IndexAttrDef(S.SH, S.SW),
+    dilations=IndexAttrDef(S.DH, S.DW)):
   """Performs 2-D convolution.
 
   Layout:
@@ -240,18 +253,20 @@ def conv_2d_nhwc_hwcf(
   implements(ConvolutionOpInterface)
   domain(D.n, D.oh, D.ow, D.f, D.kh, D.kw, D.c)
   O[D.n, D.oh, D.ow, D.f] += cast(
-      U, I[D.n, D.oh * S.SH + D.kh * S.DH, D.ow * S.SW + D.kw * S.DW, D.c
-           ]) * cast(U, K[D.kh, D.kw, D.c, D.f])
+      U, I[D.n, D.oh * S.SH + D.kh * S.DH, D.ow * S.SW + D.kw * S.DW,
+           D.c]) * cast(U, K[D.kh, D.kw, D.c, D.f])
+
 
 @linalg_structured_op
 def conv_2d_nhwc_hwcf_q(
-    I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW, S.C),
+    I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW,
+                S.C),
     K=TensorDef(T2, S.KH, S.KW, S.C, S.F),
     IZp=ScalarDef(I32),
     KZp=ScalarDef(I32),
     O=TensorDef(U, S.N, S.OH, S.OW, S.F, output=True),
-    strides=AttributeDef(S.SH, S.SW),
-    dilations=AttributeDef(S.DH, S.DW)):
+    strides=IndexAttrDef(S.SH, S.SW),
+    dilations=IndexAttrDef(S.DH, S.DW)):
   """Performs 2-D convolution with zero point offsets.
 
   Layout:
@@ -264,17 +279,21 @@ def conv_2d_nhwc_hwcf_q(
   """
   implements(ConvolutionOpInterface)
   domain(D.n, D.oh, D.ow, D.f, D.kh, D.kw, D.c)
-  O[D.n, D.oh, D.ow, D.f] += (cast(
-      U, I[D.n, D.oh * S.SH + D.kh * S.DH, D.ow * S.SW + D.kw * S.DW, D.c
-           ]) - cast(U, IZp)) * (cast(U, K[D.kh, D.kw, D.c, D.f]) - cast(U, KZp))
+  O[D.n, D.oh, D.ow,
+    D.f] += (cast(
+        U, I[D.n, D.oh * S.SH + D.kh * S.DH, D.ow * S.SW + D.kw * S.DW, D.c]) -
+             cast(U, IZp)) * (
+                 cast(U, K[D.kh, D.kw, D.c, D.f]) - cast(U, KZp))
+
 
 @linalg_structured_op
 def conv_2d_nchw_fchw(
-    I=TensorDef(T1, S.N, S.C, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW),
+    I=TensorDef(T1, S.N, S.C, S.OH * S.SH + S.KH * S.DH,
+                S.OW * S.SW + S.KW * S.DW),
     K=TensorDef(T2, S.F, S.C, S.KH, S.KW),
     O=TensorDef(U, S.N, S.F, S.OH, S.OW, output=True),
-    strides=AttributeDef(S.SH, S.SW),
-    dilations=AttributeDef(S.DH, S.DW)):
+    strides=IndexAttrDef(S.SH, S.SW),
+    dilations=IndexAttrDef(S.DH, S.DW)):
   """Performs 2-D convolution.
 
   Layout:
@@ -287,17 +306,18 @@ def conv_2d_nchw_fchw(
   implements(ConvolutionOpInterface)
   domain(D.n, D.f, D.oh, D.ow, D.c, D.kh, D.kw)
   O[D.n, D.f, D.oh, D.ow] += cast(
-      U, I[D.n, D.c, D.oh * S.SH + D.kh * S.DH, D.ow * S.SW + D.kw * S.DW
-           ]) * cast(U, K[D.f, D.c, D.kh, D.kw])
+      U, I[D.n, D.c, D.oh * S.SH + D.kh * S.DH,
+           D.ow * S.SW + D.kw * S.DW]) * cast(U, K[D.f, D.c, D.kh, D.kw])
+
 
 @linalg_structured_op
 def conv_3d_ndhwc_dhwcf(
-    I=TensorDef(T1, S.N, S.OD * S.SD + S.KD * S.DD,
-                S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW, S.C),
+    I=TensorDef(T1, S.N, S.OD * S.SD + S.KD * S.DD, S.OH * S.SH + S.KH * S.DH,
+                S.OW * S.SW + S.KW * S.DW, S.C),
     K=TensorDef(T2, S.KD, S.KH, S.KW, S.C, S.F),
     O=TensorDef(U, S.N, S.OD, S.OH, S.OW, S.F, output=True),
-    strides=AttributeDef(S.SD, S.SH, S.SW),
-    dilations=AttributeDef(S.DD, S.DH, S.DW)):
+    strides=IndexAttrDef(S.SD, S.SH, S.SW),
+    dilations=IndexAttrDef(S.DD, S.DH, S.DW)):
   """Performs 3-D convolution.
 
   Numeric casting is performed on the operands to the inner multiply, promoting
@@ -306,16 +326,18 @@ def conv_3d_ndhwc_dhwcf(
   implements(ConvolutionOpInterface)
   domain(D.n, D.od, D.oh, D.ow, D.f, D.kd, D.kh, D.kw, D.c)
   O[D.n, D.od, D.oh, D.ow, D.f] += cast(
-      U, I[D.n, D.od * S.SD + D.kd * S.DD, D.oh * S.SH + D.kh * S.DH, D.ow * S.SW + D.kw * S.DW, D.c
-           ]) * cast(U, K[D.kd, D.kh, D.kw, D.c, D.f])
+      U, I[D.n, D.od * S.SD + D.kd * S.DD, D.oh * S.SH + D.kh * S.DH,
+           D.ow * S.SW + D.kw * S.DW, D.c]) * cast(
+               U, K[D.kd, D.kh, D.kw, D.c, D.f])
+
 
 @linalg_structured_op
 def depthwise_conv_1d_nwc_wc(
     I=TensorDef(T1, S.N, S.OW * S.SW + S.KW * S.DW, S.IC),
     K=TensorDef(T2, S.KW, S.IC),
     O=TensorDef(U, S.N, S.OW, S.IC, output=True),
-    strides=AttributeDef(S.SW),
-    dilations=AttributeDef(S.DW)):
+    strides=IndexAttrDef(S.SW),
+    dilations=IndexAttrDef(S.DW)):
   """Performs depth-wise 1-D convolution.
 
   Numeric casting is performed on the operands to the inner multiply, promoting
@@ -328,13 +350,15 @@ def depthwise_conv_1d_nwc_wc(
       cast(U, I[D.n, D.ow * S.SW + D.kw * S.DW, D.ic]) * \
       cast(U, K[D.kw, D.ic])
 
+
 @linalg_structured_op
 def depthwise_conv_2d_nhwc_hwc(
-    I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW, S.IC),
+    I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW,
+                S.IC),
     K=TensorDef(T2, S.KH, S.KW, S.IC),
     O=TensorDef(U, S.N, S.OH, S.OW, S.IC, output=True),
-    strides=AttributeDef(S.SH, S.SW),
-    dilations=AttributeDef(S.DH, S.DW)):
+    strides=IndexAttrDef(S.SH, S.SW),
+    dilations=IndexAttrDef(S.DH, S.DW)):
   """Performs depth-wise 2-D convolution.
 
   Numeric casting is performed on the operands to the inner multiply, promoting
@@ -347,15 +371,17 @@ def depthwise_conv_2d_nhwc_hwc(
       U, I[D.n, D.oh * S.SH + D.kh * S.DH, D.ow * S.SW + D.kw * S.DW,
            D.ic]) * cast(U, K[D.kh, D.kw, D.ic])
 
+
 @linalg_structured_op
 def depthwise_conv_2d_nhwc_hwc_q(
-    I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW, S.IC),
+    I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW,
+                S.IC),
     K=TensorDef(T2, S.KH, S.KW, S.IC),
     IZp=ScalarDef(I32),
     KZp=ScalarDef(I32),
     O=TensorDef(U, S.N, S.OH, S.OW, S.IC, output=True),
-    strides=AttributeDef(S.SH, S.SW),
-    dilations=AttributeDef(S.DH, S.DW)):
+    strides=IndexAttrDef(S.SH, S.SW),
+    dilations=IndexAttrDef(S.DH, S.DW)):
   """Performs depth-wise 2-D convolution.
 
   Numeric casting is performed on the operands to the inner multiply, promoting
@@ -368,13 +394,15 @@ def depthwise_conv_2d_nhwc_hwc_q(
                  D.ic]) - cast(U, IZp)) *
       (cast(U, K[D.kh, D.kw, D.ic]) - cast(U, KZp)))
 
+
 @linalg_structured_op
 def depthwise_conv_2d_nhwc_hwcm(
-    I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW, S.IC),
+    I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW,
+                S.IC),
     K=TensorDef(T2, S.KH, S.KW, S.IC, S.CM),
     O=TensorDef(U, S.N, S.OH, S.OW, S.IC, S.CM, output=True),
-    strides=AttributeDef(S.SH, S.SW),
-    dilations=AttributeDef(S.DH, S.DW)):
+    strides=IndexAttrDef(S.SH, S.SW),
+    dilations=IndexAttrDef(S.DH, S.DW)):
   """Performs depth-wise 2-D convolution.
 
   Numeric casting is performed on the operands to the inner multiply, promoting
@@ -386,15 +414,17 @@ def depthwise_conv_2d_nhwc_hwcm(
       U, I[D.n, D.oh * S.SH + D.kh * S.DH, D.ow * S.SW + D.kw * S.DW,
            D.ic]) * cast(U, K[D.kh, D.kw, D.ic, D.cm])
 
+
 @linalg_structured_op
 def depthwise_conv_2d_nhwc_hwcm_q(
-    I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW, S.IC),
+    I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW,
+                S.IC),
     K=TensorDef(T2, S.KH, S.KW, S.IC, S.CM),
     IZp=ScalarDef(I32),
     KZp=ScalarDef(I32),
     O=TensorDef(U, S.N, S.OH, S.OW, S.IC, S.CM, output=True),
-    strides=AttributeDef(S.SH, S.SW),
-    dilations=AttributeDef(S.DH, S.DW)):
+    strides=IndexAttrDef(S.SH, S.SW),
+    dilations=IndexAttrDef(S.DH, S.DW)):
   """Performs depth-wise 2-D convolution.
 
   Numeric casting is performed on the operands to the inner multiply, promoting
@@ -410,11 +440,12 @@ def depthwise_conv_2d_nhwc_hwcm_q(
 
 @linalg_structured_op
 def pooling_nhwc_sum(
-    I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW, S.C),
+    I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW,
+                S.C),
     K=TensorDef(T2, S.KH, S.KW, index_dims=[D.kh, D.kw]),
     O=TensorDef(U, S.N, S.OH, S.OW, S.C, output=True),
-    strides=AttributeDef(S.SH, S.SW),
-    dilations=AttributeDef(S.DH, S.DW)):
+    strides=IndexAttrDef(S.SH, S.SW),
+    dilations=IndexAttrDef(S.DH, S.DW)):
   """Performs sum pooling.
 
   Numeric casting is performed on the input operand, promoting it to the same
@@ -428,11 +459,12 @@ def pooling_nhwc_sum(
 
 @linalg_structured_op
 def pooling_nhwc_max(
-    I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW, S.C),
+    I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW,
+                S.C),
     K=TensorDef(T2, S.KH, S.KW, index_dims=[D.kh, D.kw]),
     O=TensorDef(U, S.N, S.OH, S.OW, S.C, output=True),
-    strides=AttributeDef(S.SH, S.SW),
-    dilations=AttributeDef(S.DH, S.DW)):
+    strides=IndexAttrDef(S.SH, S.SW),
+    dilations=IndexAttrDef(S.DH, S.DW)):
   """Performs max pooling.
 
   Numeric casting is performed on the input operand, promoting it to the same
@@ -444,13 +476,15 @@ def pooling_nhwc_max(
       cast(U, I[D.n, D.oh * S.SH + D.kh * S.DH, D.ow * S.SW + D.kw * S.DW,
                 D.c]))
 
+
 @linalg_structured_op
 def pooling_nhwc_max_unsigned(
-    I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW, S.C),
+    I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW,
+                S.C),
     K=TensorDef(T2, S.KH, S.KW, index_dims=[D.kh, D.kw]),
     O=TensorDef(U, S.N, S.OH, S.OW, S.C, output=True),
-    strides=AttributeDef(S.SH, S.SW),
-    dilations=AttributeDef(S.DH, S.DW)):
+    strides=IndexAttrDef(S.SH, S.SW),
+    dilations=IndexAttrDef(S.DH, S.DW)):
   """Performs unsigned max pooling.
 
   Numeric casting is performed on the input operand, promoting it to the same
@@ -462,13 +496,15 @@ def pooling_nhwc_max_unsigned(
       cast_unsigned(
           U, I[D.n, D.oh * S.SH + D.kh * S.DH, D.ow * S.SW + D.kw * S.DW, D.c]))
 
+
 @linalg_structured_op
 def pooling_nchw_max(
-    I=TensorDef(T1, S.N, S.C, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW),
+    I=TensorDef(T1, S.N, S.C, S.OH * S.SH + S.KH * S.DH,
+                S.OW * S.SW + S.KW * S.DW),
     K=TensorDef(T2, S.KH, S.KW, index_dims=[D.kh, D.kw]),
     O=TensorDef(U, S.N, S.C, S.OH, S.OW, output=True),
-    strides=AttributeDef(S.SH, S.SW),
-    dilations=AttributeDef(S.DH, S.DW)):
+    strides=IndexAttrDef(S.SH, S.SW),
+    dilations=IndexAttrDef(S.DH, S.DW)):
   """Performs max pooling.
 
   Numeric casting is performed on the input operand, promoting it to the same
@@ -477,16 +513,18 @@ def pooling_nchw_max(
   implements(ConvolutionOpInterface)
   domain(D.n, D.c, D.oh, D.ow, D.kh, D.kw)
   O[D.n, D.c, D.oh, D.ow] = ReduceFn.max(D.kh, D.kw)(
-      cast(U, I[D.n, D.c, D.oh * S.SH + D.kh * S.DH, D.ow * S.SW + D.kw * S.DW,
-                ]))
+      cast(U, I[D.n, D.c, D.oh * S.SH + D.kh * S.DH,
+                D.ow * S.SW + D.kw * S.DW,]))
+
 
 @linalg_structured_op
 def pooling_nhwc_min(
-    I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW, S.C),
+    I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW,
+                S.C),
     K=TensorDef(T2, S.KH, S.KW, index_dims=[D.kh, D.kw]),
     O=TensorDef(U, S.N, S.OH, S.OW, S.C, output=True),
-    strides=AttributeDef(S.SH, S.SW),
-    dilations=AttributeDef(S.DH, S.DW)):
+    strides=IndexAttrDef(S.SH, S.SW),
+    dilations=IndexAttrDef(S.DH, S.DW)):
   """Performs min pooling.
 
   Numeric casting is performed on the input operand, promoting it to the same
@@ -498,13 +536,15 @@ def pooling_nhwc_min(
       cast(U, I[D.n, D.oh * S.SH + D.kh * S.DH, D.ow * S.SW + D.kw * S.DW,
                 D.c]))
 
+
 @linalg_structured_op
 def pooling_nhwc_min_unsigned(
-    I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW, S.C),
+    I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW,
+                S.C),
     K=TensorDef(T2, S.KH, S.KW, index_dims=[D.kh, D.kw]),
     O=TensorDef(U, S.N, S.OH, S.OW, S.C, output=True),
-    strides=AttributeDef(S.SH, S.SW),
-    dilations=AttributeDef(S.DH, S.DW)):
+    strides=IndexAttrDef(S.SH, S.SW),
+    dilations=IndexAttrDef(S.DH, S.DW)):
   """Performs unsigned min pooling.
 
   Numeric casting is performed on the input operand, promoting it to the same
@@ -516,14 +556,15 @@ def pooling_nhwc_min_unsigned(
       cast_unsigned(
           U, I[D.n, D.oh * S.SH + D.kh * S.DH, D.ow * S.SW + D.kw * S.DW, D.c]))
 
+
 @linalg_structured_op
 def pooling_ndhwc_sum(
     I=TensorDef(T1, S.N, S.OD * S.SD + S.KD * S.DD, S.OH * S.SH + S.KH * S.DH,
                 S.OW * S.SW + S.KW * S.DW, S.C),
     K=TensorDef(T2, S.KD, S.KH, S.KW, index_dims=[D.kd, D.kh, D.kw]),
     O=TensorDef(U, S.N, S.OD, S.OH, S.OW, S.C, output=True),
-    strides=AttributeDef(S.SD, S.SH, S.SW),
-    dilations=AttributeDef(S.DD, S.DH, S.DW)):
+    strides=IndexAttrDef(S.SD, S.SH, S.SW),
+    dilations=IndexAttrDef(S.DD, S.DH, S.DW)):
   """Performs 3D sum pooling.
 
   Numeric casting is performed on the input operand, promoting it to the same
@@ -542,8 +583,8 @@ def pooling_ndhwc_max(
                 S.OW * S.SW + S.KW * S.DW, S.C),
     K=TensorDef(T2, S.KD, S.KH, S.KW, index_dims=[D.kd, D.kh, D.kw]),
     O=TensorDef(U, S.N, S.OD, S.OH, S.OW, S.C, output=True),
-    strides=AttributeDef(S.SD, S.SH, S.SW),
-    dilations=AttributeDef(S.DD, S.DH, S.DW)):
+    strides=IndexAttrDef(S.SD, S.SH, S.SW),
+    dilations=IndexAttrDef(S.DD, S.DH, S.DW)):
   """Performs 3D max pooling.
 
   Numeric casting is performed on the input operand, promoting it to the same
@@ -563,8 +604,8 @@ def pooling_ndhwc_min(
                 S.OW * S.SW + S.KW * S.DW, S.C),
     K=TensorDef(T2, S.KD, S.KH, S.KW, index_dims=[D.kd, D.kh, D.kw]),
     O=TensorDef(U, S.N, S.OD, S.OH, S.OW, S.C, output=True),
-    strides=AttributeDef(S.SD, S.SH, S.SW),
-    dilations=AttributeDef(S.DD, S.DH, S.DW)):
+    strides=IndexAttrDef(S.SD, S.SH, S.SW),
+    dilations=IndexAttrDef(S.DD, S.DH, S.DW)):
   """Performs 3D min pooling.
 
   Numeric casting is performed on the input operand, promoting it to the same

diff  --git a/mlir/test/mlir-linalg-ods-gen/test-linalg-ods-yaml-gen.yaml b/mlir/test/mlir-linalg-ods-gen/test-linalg-ods-yaml-gen.yaml
index b8edbea19a401..ba346f3cc75dc 100644
--- a/mlir/test/mlir-linalg-ods-gen/test-linalg-ods-yaml-gen.yaml
+++ b/mlir/test/mlir-linalg-ods-gen/test-linalg-ods-yaml-gen.yaml
@@ -95,7 +95,7 @@ structured_op: !LinalgStructuredOpConfig
 # @linalg_structured_op
 # def test2(I=TensorDef(T, S.M, S.N),
 #           O=TensorDef(T, S.M, S.N, output=True),
-#           strides=AttributeDef(S.SM, S.SN)):
+#           strides=IndexAttrDef(S.SM, S.SN)):
 #   """Title.
 
 #   Detailed description.

diff  --git a/mlir/test/python/dialects/linalg/opdsl/arguments.py b/mlir/test/python/dialects/linalg/opdsl/arguments.py
index 572c811d93a4b..1d56cb9a40a33 100644
--- a/mlir/test/python/dialects/linalg/opdsl/arguments.py
+++ b/mlir/test/python/dialects/linalg/opdsl/arguments.py
@@ -57,5 +57,5 @@ def fill(value=ScalarDef(T), O=TensorDef(T, S.M, S.K, output=True)):
 def strided_copy(
     I=TensorDef(T, S.IH, S.IW),
     O=TensorDef(T, S.OH, S.OW, output=True),
-    strides=AttributeDef(S.SH, S.SW)):
+    strides=IndexAttrDef(S.SH, S.SW)):
   O[D.oh, D.ow] = I[D.oh * S.SH, D.ow * S.SW]

diff  --git a/mlir/test/python/dialects/linalg/opdsl/emit_convolution.py b/mlir/test/python/dialects/linalg/opdsl/emit_convolution.py
index 44b3c771f2f0f..5c85c30c085e3 100644
--- a/mlir/test/python/dialects/linalg/opdsl/emit_convolution.py
+++ b/mlir/test/python/dialects/linalg/opdsl/emit_convolution.py
@@ -16,8 +16,8 @@ def conv_poly(
     I=TensorDef(T1, S.N, S.IH, S.IW, S.C),
     K=TensorDef(T2, S.KH, S.KW, S.C),
     O=TensorDef(U, S.N, S.OH, S.OW, S.C, output=True),
-    strides=AttributeDef(S.SH, S.SW),
-    dilations=AttributeDef(S.DH, S.DW)):
+    strides=IndexAttrDef(S.SH, S.SW),
+    dilations=IndexAttrDef(S.DH, S.DW)):
   domain(D.n, D.oh, D.ow, D.kh, D.kw, D.c)
   O[D.n, D.oh, D.ow, D.c] += cast(
       U, I[D.n, D.oh * S.SH + D.kh * S.DH, D.ow * S.SW + D.kw * S.DW,

diff  --git a/mlir/test/python/dialects/linalg/opdsl/emit_pooling.py b/mlir/test/python/dialects/linalg/opdsl/emit_pooling.py
index 2bc8be3e79625..4f6f6db535d5a 100644
--- a/mlir/test/python/dialects/linalg/opdsl/emit_pooling.py
+++ b/mlir/test/python/dialects/linalg/opdsl/emit_pooling.py
@@ -16,8 +16,8 @@ def pooling_max_poly(
     I=TensorDef(T1, S.N, S.H, S.W, S.C),
     K=TensorDef(T2, S.KH, S.KW, index_dims=[D.kh, D.kw]),
     O=TensorDef(U, S.N, S.OH, S.OW, S.C, output=True),
-    strides=AttributeDef(S.SH, S.SW),
-    dilations=AttributeDef(S.DH, S.DW)):
+    strides=IndexAttrDef(S.SH, S.SW),
+    dilations=IndexAttrDef(S.DH, S.DW)):
   domain(D.n, D.oh, D.ow, D.kh, D.kw, D.c)
   O[D.n, D.oh, D.ow, D.c] = ReduceFn.max(D.kh, D.kw)(
       cast(U, I[D.n, D.oh * S.SH + D.kh * S.DH, D.ow * S.SW + D.kw * S.DW,
@@ -29,8 +29,8 @@ def pooling_max_unsigned_poly(
     I=TensorDef(T1, S.N, S.H, S.W, S.C),
     K=TensorDef(T2, S.KH, S.KW, index_dims=[D.kh, D.kw]),
     O=TensorDef(U, S.N, S.OH, S.OW, S.C, output=True),
-    strides=AttributeDef(S.SH, S.SW),
-    dilations=AttributeDef(S.DH, S.DW)):
+    strides=IndexAttrDef(S.SH, S.SW),
+    dilations=IndexAttrDef(S.DH, S.DW)):
   domain(D.n, D.oh, D.ow, D.kh, D.kw, D.c)
   O[D.n, D.oh, D.ow, D.c] = ReduceFn.max_unsigned(D.kh, D.kw)(
       cast_unsigned(
@@ -42,8 +42,8 @@ def pooling_min_poly(
     I=TensorDef(T1, S.N, S.H, S.W, S.C),
     K=TensorDef(T2, S.KH, S.KW, index_dims=[D.kh, D.kw]),
     O=TensorDef(U, S.N, S.OH, S.OW, S.C, output=True),
-    strides=AttributeDef(S.SH, S.SW),
-    dilations=AttributeDef(S.DH, S.DW)):
+    strides=IndexAttrDef(S.SH, S.SW),
+    dilations=IndexAttrDef(S.DH, S.DW)):
   domain(D.n, D.oh, D.ow, D.kh, D.kw, D.c)
   O[D.n, D.oh, D.ow, D.c] = ReduceFn.min(D.kh, D.kw)(
       cast(U, I[D.n, D.oh * S.SH + D.kh * S.DH, D.ow * S.SW + D.kw * S.DW,
@@ -55,8 +55,8 @@ def pooling_min_unsigned_poly(
     I=TensorDef(T1, S.N, S.H, S.W, S.C),
     K=TensorDef(T2, S.KH, S.KW, index_dims=[D.kh, D.kw]),
     O=TensorDef(U, S.N, S.OH, S.OW, S.C, output=True),
-    strides=AttributeDef(S.SH, S.SW),
-    dilations=AttributeDef(S.DH, S.DW)):
+    strides=IndexAttrDef(S.SH, S.SW),
+    dilations=IndexAttrDef(S.DH, S.DW)):
   domain(D.n, D.oh, D.ow, D.kh, D.kw, D.c)
   O[D.n, D.oh, D.ow, D.c] = ReduceFn.min_unsigned(D.kh, D.kw)(
       cast_unsigned(

