[Mlir-commits] [mlir] f4939d5 - [mlir][OpDSL] Simplify index and constant tests.

llvmlistbot at llvm.org llvmlistbot at llvm.org
Tue Mar 8 09:11:26 PST 2022


Author: gysit
Date: 2022-03-08T17:11:03Z
New Revision: f4939d56184e68d1302e8896f7ab32c4405ff609

URL: https://github.com/llvm/llvm-project/commit/f4939d56184e68d1302e8896f7ab32c4405ff609
DIFF: https://github.com/llvm/llvm-project/commit/f4939d56184e68d1302e8896f7ab32c4405ff609.diff

LOG: [mlir][OpDSL] Simplify index and constant tests.

Simplify tests that use `linalg.fill_rng_2d` to focus on testing the `const` and `index` functions. Additionally, cleanup emit_misc.py to use simpler test functions and fix an error message in config.py.

Reviewed By: nicolasvasilache

Differential Revision: https://reviews.llvm.org/D120734

Added: 
    

Modified: 
    mlir/python/mlir/dialects/linalg/opdsl/lang/config.py
    mlir/test/Dialect/Linalg/generalize-named-polymorphic-ops.mlir
    mlir/test/python/dialects/linalg/opdsl/emit_misc.py

Removed: 
    


################################################################################
diff --git a/mlir/python/mlir/dialects/linalg/opdsl/lang/config.py b/mlir/python/mlir/dialects/linalg/opdsl/lang/config.py
index ed30b8e5fc9a0..2a0da68295265 100644
--- a/mlir/python/mlir/dialects/linalg/opdsl/lang/config.py
+++ b/mlir/python/mlir/dialects/linalg/opdsl/lang/config.py
@@ -265,8 +265,8 @@ def __init__(self,
     for index in collected_indices:
       if index.dim_def.dimname not in self.affine_state.all_dims:
         raise ValueError(
-            f"The dimension {index.dim.dimname} is not part of the iteration "
-            f"domain {self.affine_state.all_dims}")
+            f"The dimension {index.dim_def.dimname} is not part of the "
+            f"iteration domain {self.affine_state.all_dims}")
       index.resolve_dimension_name(self.affine_state)
 
     # Generate the scalar assignments (used to build a body).

diff --git a/mlir/test/Dialect/Linalg/generalize-named-polymorphic-ops.mlir b/mlir/test/Dialect/Linalg/generalize-named-polymorphic-ops.mlir
index 21f00268a9c12..425eeb8373276 100644
--- a/mlir/test/Dialect/Linalg/generalize-named-polymorphic-ops.mlir
+++ b/mlir/test/Dialect/Linalg/generalize-named-polymorphic-ops.mlir
@@ -250,45 +250,28 @@ func @generalize_fill_2d(%value: f64, %O: memref<16x32xf32>) {
 
 // -----
 
-func @generalize_fill_rng_2d_f32(%min: f64, %max: f64, %seed: i32, %O: tensor<16x32xf32>) -> tensor<16x32xf32> {
+func @generalize_index(%min: f64, %max: f64, %seed: i32, %O: tensor<16x32xf32>) -> tensor<16x32xf32> {
   %0 = linalg.fill_rng_2d ins(%min, %max, %seed: f64, f64, i32) outs(%O : tensor<16x32xf32>) -> tensor<16x32xf32>
   return %0: tensor<16x32xf32>
 }
 
-// CHECK-LABEL: @generalize_fill_rng_2d_f32
-// CHECK-DAG:  ^{{.*}}(%[[MIN:.+]]: f64, %[[MAX:.+]]: f64, %[[SEED:.+]]: i32, %[[O:.+]]: f32
+// CHECK-LABEL: @generalize_index
 // CHECK-DAG:    %[[IDX0:.+]] = linalg.index 0 : index
 // CHECK-DAG:    %[[IDX1:.+]] = linalg.index 1 : index
 // CHECK-DAG:    %[[IDX0_CAST:.+]] = arith.index_cast %[[IDX0]] : index to i32
 // CHECK-DAG:    %[[IDX1_CAST:.+]] = arith.index_cast %[[IDX1]] : index to i32
-// CHECK-DAG:    %[[VAL0:.+]] = arith.addi %[[IDX0_CAST]], %[[SEED]] : i32
-// CHECK-DAG:    %[[CST0:.+]] = arith.constant 1103515245 : i32
-// CHECK-DAG:    %[[CST1:.+]] = arith.constant 12345 : i32
-// CHECK-DAG:    %[[VAL1:.+]] = arith.muli %[[VAL0]], %[[CST0]] : i32
-// CHECK-DAG:    %[[VAL2:.+]] = arith.addi %[[VAL1]], %[[CST1]] : i32
-// Skip random number computation for the second index.
-// CHECK-DAG:    %[[DIFF:.+]] = arith.subf %[[MAX]], %[[MIN]] : f64
-// CHECK-DAG:    %[[CST2:.+]] = arith.constant 2.3283063999999999E-10 : f64
-// CHECK-DAG:    %[[FACT:.+]] = arith.mulf %[[DIFF]], %[[CST2]] : f64
-// CHECK-DAG:    %[[VAL4:.+]] = arith.mulf %{{.+}}, %[[FACT]] : f64
-// CHECK-DAG:    %[[VAL5:.+]] = arith.addf %[[VAL4]], %[[MIN]] : f64
-// CHECK-DAG:    %[[VAL6:.+]] = arith.truncf %[[VAL5]] : f64 to f32
-// CHECK-NEXT:   linalg.yield %[[VAL6]] : f32
-// CHECK-NEXT: -> tensor<16x32xf32>
 
 // -----
 
-func @generalize_fill_rng_2d_i32(%min: f64, %max: f64, %seed: i32, %O: tensor<16x32xi32>) -> tensor<16x32xi32> {
-  %0 = linalg.fill_rng_2d ins(%min, %max, %seed: f64, f64, i32) outs(%O : tensor<16x32xi32>) -> tensor<16x32xi32>
-  return %0: tensor<16x32xi32>
+func @generalize_const(%min: f64, %max: f64, %seed: i32, %O: tensor<16x32xf32>) -> tensor<16x32xf32> {
+  %0 = linalg.fill_rng_2d ins(%min, %max, %seed: f64, f64, i32) outs(%O : tensor<16x32xf32>) -> tensor<16x32xf32>
+  return %0: tensor<16x32xf32>
 }
 
-// CHECK-LABEL: @generalize_fill_rng_2d_i32
-// CHECK: ^{{.*}}(%[[MIN:.+]]: f64, %[[MAX:.+]]: f64, %[[SEED:.+]]: i32, %[[O:.+]]: i32
-// Verifies floating point to integer cast.
-// CHECK:        %[[VAL6:.+]] = arith.fptosi %{{.+}} : f64 to i32
-// CHECK-NEXT:   linalg.yield %[[VAL6]] : i32
-// CHECK-NEXT: -> tensor<16x32xi32>
+// CHECK-LABEL: @generalize_const
+// CHECK-DAG:    %[[CST0:.+]] = arith.constant 1103515245 : i32
+// CHECK-DAG:    %[[CST1:.+]] = arith.constant 12345 : i32
+// CHECK-DAG:    %[[CST2:.+]] = arith.constant 2.3283063999999999E-10 : f64
 
 // -----
 

diff --git a/mlir/test/python/dialects/linalg/opdsl/emit_misc.py b/mlir/test/python/dialects/linalg/opdsl/emit_misc.py
index 81de8fc0f2045..e69d71dcb337d 100644
--- a/mlir/test/python/dialects/linalg/opdsl/emit_misc.py
+++ b/mlir/test/python/dialects/linalg/opdsl/emit_misc.py
@@ -8,7 +8,7 @@
 from mlir.dialects.linalg.opdsl.lang import *
 
 # This tests miscellaneous features of the emitter that are not tested by the
-# matmul, convolution, or, pooling tests. The features include:
+# fill, matmul, convolution, or pooling tests. The features include:
 # - constant defined in the body
 # - fix/predefined types
 # - exponential functions
@@ -16,28 +16,24 @@
 
 
 @linalg_structured_op
-def fill_rng_poly(
-    min=ScalarDef(F64),
-    max=ScalarDef(F64),
-    seed=ScalarDef(I32),
-    O=TensorDef(T, S.M, S.N, output=True)):
-  multiplier = TypeFn.cast_signed(I32, const(1103515245))
-  increment = TypeFn.cast_signed(I32, const(12345))
-  rand1 = (TypeFn.cast_signed(I32, index(D.m)) + seed) * multiplier + increment
-  rand2 = (TypeFn.cast_signed(I32, index(D.n)) + rand1) * multiplier + increment
-  inv_range = TypeFn.cast_signed(F64, const(2.3283064e-10))
-  offset = TypeFn.cast_signed(F64, const(2147483647))
-  scaling = (max - min) * inv_range
-  O[D.m, D.n] = TypeFn.cast_signed(
-      T, (offset + TypeFn.cast_signed(F64, rand2)) * scaling + min)
+def test_const(O=TensorDef(F32, S.M, S.N, output=True)):
+  O[D.m, D.n] = TypeFn.cast_unsigned(F32, const(42)) + TypeFn.cast_unsigned(
+      F32, const(2.3283064e-10))
 
 
 @linalg_structured_op
-def soft_plus_poly(
-    I=TensorDef(T, S.M, S.N), O=TensorDef(U, S.M, S.N, output=True)):
-  O[D.m, D.n] = UnaryFn.log(
-      TypeFn.cast_signed(U, const(1.0)) +
-      TypeFn.cast_signed(U, UnaryFn.exp(I[D.m, D.n])))
+def test_index(O=TensorDef(I32, S.M, S.N, output=True)):
+  O[D.m, D.n] = TypeFn.cast_signed(I32, index(D.m)) + TypeFn.cast_signed(
+      I32, index(D.n))
+
+
+@linalg_structured_op
+def elemwise_unary_poly(
+    I=TensorDef(T),
+    O=TensorDef(U, output=True),
+    fun=UnaryFnAttrDef(default=UnaryFn.exp),
+    cast=TypeFnAttrDef(default=TypeFn.cast_signed)):
+  O[None] = fun(cast(U, I[None]))
 
 
 @linalg_structured_op(op_name="custom_op_name")
@@ -48,42 +44,50 @@ def non_default_op_name(I=TensorDef(T, S.N), O=TensorDef(T, S.N, output=True)):
 with Context() as ctx, Location.unknown():
   module = Module.create()
   f32 = F32Type.get()
-  f64 = F64Type.get()
   i32 = IntegerType.get_signless(32)
   with InsertionPoint(module.body):
 
-    # CHECK-LABEL: @test_i32_fill_rng
-    # CHECK:      ^{{.*}}(%[[MIN:.+]]: f64, %[[MAX:.+]]: f64, %[[SEED:.+]]: i32, %{{.*}}
+    # CHECK-LABEL: @test_f32_const
+    # CHECK-DAG:    %[[CST0:.+]] = arith.constant 42 : i64
+    # CHECK-DAG:    %[[CST0_CAST:.+]] = arith.uitofp %[[CST0]] : i64 to f32
+    # CHECK-DAG:    %[[CST1:.+]] = arith.constant 2.3283063999999999E-10 : f64
+    # CHECK-DAG:    %[[CST1_CAST:.+]] = arith.truncf %[[CST1]] : f64 to f32
+    # CHECK-DAG:    %[[SUM:.+]] = arith.addf %[[CST0_CAST]], %[[CST1_CAST]] : f32
+    # CHECK-NEXT:   linalg.yield %[[SUM]] : f32
+    @builtin.FuncOp.from_py_func(RankedTensorType.get((4, 16), f32))
+    def test_f32_const(init_result):
+      return test_const(outs=[init_result])
+
+    # CHECK-LABEL: @test_i32_index
     # CHECK-DAG:    %[[IDX0:.+]] = linalg.index 0 : index
+    # CHECK-DAG:    %[[IDX1:.+]] = linalg.index 1 : index
     # CHECK-DAG:    %[[IDX0_CAST:.+]] = arith.index_cast %[[IDX0]] : index to i32
-    # CHECK-DAG:    %[[RND0:.+]] = arith.addi %[[IDX0_CAST]], %[[SEED]] : i32
-    # CHECK-DAG:    %[[CST0:.+]] = arith.constant 1103515245 : i64
-    # CHECK-DAG:    %[[CST0_CAST:.+]] = arith.trunci %[[CST0]] : i64 to i32
-    # Skip the remaining random number computation and match the scaling logic.
-    # CHECK-DAG:    %[[DIFF:.+]] = arith.subf %[[MAX]], %[[MIN]] : f64
-    # CHECK-DAG:    %[[CST3:.+]] = arith.constant 2.3283063999999999E-10 : f64
-    # CHECK-DAG:    %[[FACT:.+]] = arith.mulf %[[DIFF]], %[[CST3]] : f64
-    # CHECK-DAG:    %[[RND4:.+]] = arith.mulf %{{.+}}, %[[FACT]] : f64
-    # CHECK-DAG:    %[[RND5:.+]] = arith.addf %[[RND4]], %[[MIN]] : f64
-    # CHECK-DAG:    %{{.*}} = arith.fptosi %[[RND5]] : f64 to i32
-    @builtin.FuncOp.from_py_func(f64, f64, i32,
-                                 RankedTensorType.get((4, 16), i32))
-    def test_i32_fill_rng(min, max, seed, init_result):
-      return fill_rng_poly(min, max, seed, outs=[init_result])
-
-    # CHECK-LABEL: @test_f32_soft_plus
+    # CHECK-DAG:    %[[IDX1_CAST:.+]] = arith.index_cast %[[IDX1]] : index to i32
+    # CHECK-DAG:    %[[SUM:.+]] = arith.addi %[[IDX0_CAST]], %[[IDX1_CAST]] : i32
+    # CHECK-NEXT:   linalg.yield %[[SUM]] : i32
+    @builtin.FuncOp.from_py_func(RankedTensorType.get((4, 16), i32))
+    def test_i32_index(init_result):
+      return test_index(outs=[init_result])
+
+    # CHECK-LABEL: @test_f32_elemwise_exp
     # CHECK:      ^{{.*}}(%[[IN:.+]]: f32, %[[OUT:.+]]: f32)
-    # CHECK-NEXT:   %[[C1:.+]] = arith.constant 1.000000e+00 : f64
-    # CHECK-NEXT:   %[[C1_CAST:.+]] = arith.truncf %[[C1]] : f64 to f32
     # CHECK-NEXT:   %[[EXP:.+]] = math.exp %[[IN]] : f32
-    # CHECK-NEXT:   %[[SUM:.+]] = arith.addf %[[C1_CAST]], %[[EXP]] : f32
-    # CHECK-NEXT:   %[[LOG:.+]] = math.log %[[SUM]] : f32
+    # CHECK-NEXT:   linalg.yield %[[EXP]] : f32
+    # CHECK-NEXT: -> tensor<4x16xf32>
+    @builtin.FuncOp.from_py_func(
+        RankedTensorType.get((4, 16), f32), RankedTensorType.get((4, 16), f32))
+    def test_f32_elemwise_exp(input, init_result):
+      return elemwise_unary_poly(input, outs=[init_result], fun=UnaryFn.exp)
+
+    # CHECK-LABEL: @test_f32_elemwise_log
+    # CHECK:      ^{{.*}}(%[[IN:.+]]: f32, %[[OUT:.+]]: f32)
+    # CHECK-NEXT:   %[[LOG:.+]] = math.log %[[IN]] : f32
     # CHECK-NEXT:   linalg.yield %[[LOG]] : f32
     # CHECK-NEXT: -> tensor<4x16xf32>
     @builtin.FuncOp.from_py_func(
         RankedTensorType.get((4, 16), f32), RankedTensorType.get((4, 16), f32))
-    def test_f32_soft_plus(input, init_result):
-      return soft_plus_poly(input, outs=[init_result])
+    def test_f32_elemwise_log(input, init_result):
+      return elemwise_unary_poly(input, outs=[init_result], fun=UnaryFn.log)
 
     # Just check that we don't assert out on name mismatch.
     # CHECK-LABEL: @test_non_default_op_name


        


More information about the Mlir-commits mailing list