[Mlir-commits] [mlir] f5a9bfd - [mlir] NFC - Move invalid.mlir tests to the proper dialects

Nicolas Vasilache llvmlistbot@llvm.org
Sun Nov 28 13:30:45 PST 2021


Author: Nicolas Vasilache
Date: 2021-11-28T21:30:40Z
New Revision: f5a9bfdf8f4a278ba725fee50d5b5054f95e88f2

URL: https://github.com/llvm/llvm-project/commit/f5a9bfdf8f4a278ba725fee50d5b5054f95e88f2
DIFF: https://github.com/llvm/llvm-project/commit/f5a9bfdf8f4a278ba725fee50d5b5054f95e88f2.diff

LOG: [mlir] NFC - Move invalid.mlir tests to the proper dialects
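
Tests exercising memref ops (view, subview, reinterpret_cast, cast, prefetch,
alloc, alloca, store, assume_alignment) move from mlir/test/IR/invalid-ops.mlir
to mlir/test/Dialect/MemRef/invalid.mlir, and tests exercising tensor.dim and
tensor.extract_slice move to mlir/test/Dialect/Tensor/invalid.mlir, so that
each op's invalid-IR tests live next to the dialect that defines it.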

Added: 
    

Modified: 
    mlir/test/Dialect/MemRef/invalid.mlir
    mlir/test/Dialect/Tensor/invalid.mlir
    mlir/test/IR/invalid-ops.mlir

Removed: 
    


################################################################################
diff --git a/mlir/test/Dialect/MemRef/invalid.mlir b/mlir/test/Dialect/MemRef/invalid.mlir
index c8c6d7f92e55e..861478d7cc171 100644
--- a/mlir/test/Dialect/MemRef/invalid.mlir
+++ b/mlir/test/Dialect/MemRef/invalid.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -split-input-file %s -verify-diagnostics
+// RUN: mlir-opt -allow-unregistered-dialect -split-input-file %s -verify-diagnostics
 
 func @dma_start_not_enough_operands() {
   // expected-error@+1 {{expected at least 4 operands}}
@@ -488,9 +488,323 @@ func @collapse_shape_illegal_mixed_memref_2(%arg0 : memref<?x4x5xf32>)
 
 // -----
 
+func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
+  %0 = memref.alloc() : memref<2048xi8>
+  // expected-error@+1 {{expects 1 offset operand}}
+  %1 = memref.view %0[][%arg0, %arg1]
+    : memref<2048xi8> to memref<?x?xf32>
+  return
+}
+
+// -----
+
+func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
+  %0 = memref.alloc() : memref<2048xi8, affine_map<(d0) -> (d0 floordiv 8, d0 mod 8)>>
+  // expected-error@+1 {{unsupported map for base memref type}}
+  %1 = memref.view %0[%arg2][%arg0, %arg1]
+    : memref<2048xi8, affine_map<(d0) -> (d0 floordiv 8, d0 mod 8)>> to
+      memref<?x?xf32, affine_map<(d0, d1)[s0] -> (d0 * 4 + d1 + s0)>>
+  return
+}
+
+// -----
+
+func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
+  %0 = memref.alloc() : memref<2048xi8>
+  // expected-error@+1 {{unsupported map for result memref type}}
+  %1 = memref.view %0[%arg2][%arg0, %arg1]
+    : memref<2048xi8> to memref<?x?xf32, affine_map<(d0, d1)[s0] -> (d0, d1, s0)>>
+  return
+}
+
+// -----
+
+func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
+  %0 = memref.alloc() : memref<2048xi8, 2>
+  // expected-error@+1 {{different memory spaces}}
+  %1 = memref.view %0[%arg2][%arg0, %arg1] :  memref<2048xi8, 2> to memref<?x?xf32, 1>
+  return
+}
+
+// -----
+
+func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
+  %0 = memref.alloc() : memref<2048xi8>
+  // expected-error@+1 {{incorrect number of size operands for type}}
+  %1 = memref.view %0[%arg2][%arg0]
+    : memref<2048xi8> to memref<?x?xf32>
+  return
+}
+
+// -----
+
+func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
+  %0 = memref.alloc() : memref<8x16x4xf32>
+  // expected-error@+1 {{expected mixed offsets rank to match mixed sizes rank (2 vs 3) so the rank of the result type is well-formed}}
+  %1 = memref.subview %0[0, 0][2, 2, 2][1, 1, 1]
+    : memref<8x16x4xf32> to memref<8x16x4xf32>
+  return
+}
+
+// -----
+
+func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
+  %0 = memref.alloc() : memref<8x16x4xf32>
+  // expected-error@+1 {{expected mixed sizes rank to match mixed strides rank (3 vs 2) so the rank of the result type is well-formed}}
+  %1 = memref.subview %0[0, 0, 0][2, 2, 2][1, 1]
+    : memref<8x16x4xf32> to memref<8x16x4xf32>
+  return
+}
+
+// -----
+
+func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
+  %0 = memref.alloc() : memref<8x16x4xf32>
+  // expected-error@+1 {{expected mixed sizes rank to match mixed strides rank (3 vs 2) so the rank of the result type is well-formed}}
+  %1 = memref.reinterpret_cast %0 to offset: [0], sizes: [2, 2, 2], strides:[1, 1]
+    : memref<8x16x4xf32> to memref<8x16x4xf32>
+  return
+}
+
+// -----
+
+func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
+  %0 = memref.alloc() : memref<8x16x4xf32, offset: 0, strides: [64, 4, 1], 2>
+  // expected-error@+1 {{different memory spaces}}
+  %1 = memref.subview %0[0, 0, 0][%arg2, %arg2, %arg2][1, 1, 1]
+    : memref<8x16x4xf32, offset: 0, strides: [64, 4, 1], 2> to
+      memref<8x?x4xf32, affine_map<(d0, d1, d2)[s0] -> (d0 * s0 + d1 * 4 + d2)>>
+  return
+}
+
+// -----
+
+func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
+  %0 = memref.alloc() : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 + d1, d1 + d2, d2)>>
+  // expected-error@+1 {{is not strided}}
+  %1 = memref.subview %0[0, 0, 0][%arg2, %arg2, %arg2][1, 1, 1]
+    : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 + d1, d1 + d2, d2)>> to
+      memref<8x?x4xf32, offset: 0, strides: [?, 4, 1]>
+  return
+}
+
+// -----
+
+func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
+  %0 = memref.alloc() : memref<8x16x4xf32>
+  // expected-error@+1 {{expected <= 3 offset values}}
+  %1 = memref.subview %0[%arg0, %arg1, 0, 0][%arg2, 0, 0, 0][1, 1, 1, 1]
+    : memref<8x16x4xf32> to
+      memref<8x?x4xf32, offset: 0, strides:[?, ?, 4]>
+  return
+}
+
+// -----
+
+func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
+  %0 = memref.alloc() : memref<8x16x4xf32>
+  // expected-error@+1 {{expected result element type to be 'f32'}}
+  %1 = memref.subview %0[0, 0, 0][8, 16, 4][1, 1, 1]
+    : memref<8x16x4xf32> to
+      memref<8x16x4xi32>
+  return
+}
+
+// -----
+
+func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
+  %0 = memref.alloc() : memref<8x16x4xf32>
+  // expected-error@+1 {{expected result rank to be smaller or equal to the source rank.}}
+  %1 = memref.subview %0[0, 0, 0][8, 16, 4][1, 1, 1]
+    : memref<8x16x4xf32> to
+      memref<8x16x4x3xi32>
+  return
+}
+
+// -----
+
+func @invalid_rank_reducing_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
+  %0 = memref.alloc() : memref<8x16x4xf32>
+  // expected-error@+1 {{expected result type to be 'memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>>' or a rank-reduced version. (mismatch of result sizes)}}
+  %1 = memref.subview %0[0, 0, 0][8, 16, 4][1, 1, 1]
+    : memref<8x16x4xf32> to memref<16x4xf32>
+  return
+}
+
+// -----
+
+func @invalid_rank_reducing_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
+  %0 = memref.alloc() : memref<8x16x4xf32>
+  // expected-error@+1 {{expected result type to be 'memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2 + 8)>>' or a rank-reduced version. (mismatch of result sizes)}}
+  %1 = memref.subview %0[0, 2, 0][8, 16, 4][1, 1, 1]
+    : memref<8x16x4xf32> to memref<16x4xf32>
+  return
+}
+
+// -----
+
+func @invalid_rank_reducing_subview(%arg0 : memref<?x?xf32>, %arg1 : index, %arg2 : index) {
+  // expected-error@+1 {{expected result type to be 'memref<?x1xf32, affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>>' or a rank-reduced version. (mismatch of result sizes)}}
+  %0 = memref.subview %arg0[0, %arg1][%arg2, 1][1, 1] : memref<?x?xf32> to memref<?xf32>
+  return
+}
+
+// -----
+
 func @static_stride_to_dynamic_stride(%arg0 : memref<?x?x?xf32>, %arg1 : index,
     %arg2 : index) -> memref<?x?xf32, offset:?, strides: [?, ?]> {
   // expected-error @+1 {{expected result type to be 'memref<1x?x?xf32, affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>>' or a rank-reduced version. (mismatch of result sizes)}}
   %0 = memref.subview %arg0[0, 0, 0] [1, %arg1, %arg2] [1, 1, 1] : memref<?x?x?xf32> to memref<?x?xf32, offset: ?, strides: [?, ?]>
   return %0 : memref<?x?xf32, offset: ?, strides: [?, ?]>
 }
+
+// -----
+
+func @invalid_memref_cast(%arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]>) {
+  // expected-error@+1{{operand type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 16 + d2)>>' and result type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 128 + d1 * 32 + d2 * 2)>>' are cast incompatible}}
+  %0 = memref.cast %arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]> to memref<12x4x16xf32, offset:0, strides:[128, 32, 2]>
+  return
+}
+
+// -----
+
+func @invalid_memref_cast(%arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]>) {
+  // expected-error@+1{{operand type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 16 + d2)>>' and result type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 16 + d2 + 16)>>' are cast incompatible}}
+  %0 = memref.cast %arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]> to memref<12x4x16xf32, offset:16, strides:[64, 16, 1]>
+  return
+}
+
+// -----
+
+// incompatible element types
+func @invalid_memref_cast() {
+  %0 = memref.alloc() : memref<2x5xf32, 0>
+  // expected-error@+1 {{operand type 'memref<2x5xf32>' and result type 'memref<*xi32>' are cast incompatible}}
+  %1 = memref.cast %0 : memref<2x5xf32, 0> to memref<*xi32>
+  return
+}
+
+// -----
+
+func @invalid_prefetch_rw(%i : index) {
+  %0 = memref.alloc() : memref<10xf32>
+  // expected-error@+1 {{rw specifier has to be 'read' or 'write'}}
+  memref.prefetch %0[%i], rw, locality<0>, data  : memref<10xf32>
+  return
+}
+
+// -----
+
+func @invalid_prefetch_cache_type(%i : index) {
+  %0 = memref.alloc() : memref<10xf32>
+  // expected-error@+1 {{cache type has to be 'data' or 'instr'}}
+  memref.prefetch %0[%i], read, locality<0>, false  : memref<10xf32>
+  return
+}
+
+// -----
+
+func @invalid_prefetch_locality_hint(%i : index) {
+  %0 = memref.alloc() : memref<10xf32>
+  // expected-error@+1 {{32-bit signless integer attribute whose minimum value is 0 whose maximum value is 3}}
+  memref.prefetch %0[%i], read, locality<5>, data  : memref<10xf32>
+  return
+}
+
+// -----
+
+// incompatible memory space
+func @invalid_memref_cast() {
+  %0 = memref.alloc() : memref<2x5xf32, 0>
+  // expected-error@+1 {{operand type 'memref<2x5xf32>' and result type 'memref<*xf32, 1>' are cast incompatible}}
+  %1 = memref.cast %0 : memref<2x5xf32, 0> to memref<*xf32, 1>
+  return
+}
+
+// -----
+
+// unranked to unranked
+func @invalid_memref_cast() {
+  %0 = memref.alloc() : memref<2x5xf32, 0>
+  %1 = memref.cast %0 : memref<2x5xf32, 0> to memref<*xf32, 0>
+  // expected-error@+1 {{operand type 'memref<*xf32>' and result type 'memref<*xf32>' are cast incompatible}}
+  %2 = memref.cast %1 : memref<*xf32, 0> to memref<*xf32, 0>
+  return
+}
+
+// -----
+
+// alignment is not power of 2.
+func @assume_alignment(%0: memref<4x4xf16>) {
+  // expected-error@+1 {{alignment must be power of 2}}
+  memref.assume_alignment %0, 12 : memref<4x4xf16>
+  return
+}
+
+// -----
+
+// 0 alignment value.
+func @assume_alignment(%0: memref<4x4xf16>) {
+  // expected-error@+1 {{attribute 'alignment' failed to satisfy constraint: 32-bit signless integer attribute whose value is positive}}
+  memref.assume_alignment %0, 0 : memref<4x4xf16>
+  return
+}
+
+// -----
+
+"alloca_without_scoped_alloc_parent"() ( {
+  memref.alloca() : memref<1xf32>
+  // expected-error@-1 {{requires an ancestor op with AutomaticAllocationScope trait}}
+  return
+}) : () -> ()
+
+// -----
+
+func @bad_alloc_wrong_dynamic_dim_count() {
+^bb0:
+  %0 = arith.constant 7 : index
+  // Test alloc with wrong number of dynamic dimensions.
+  // expected-error@+1 {{dimension operand count does not equal memref dynamic dimension count}}
+  %1 = memref.alloc(%0)[%0] : memref<2x4xf32, affine_map<(d0, d1)[s0] -> ((d0 + s0), d1)>, 1>
+  return
+}
+
+// -----
+
+func @bad_alloc_wrong_symbol_count() {
+^bb0:
+  %0 = arith.constant 7 : index
+  // Test alloc with wrong number of symbols
+  // expected-error@+1 {{symbol operand count does not equal memref symbol count}}
+  %1 = memref.alloc(%0) : memref<2x?xf32, affine_map<(d0, d1)[s0] -> ((d0 + s0), d1)>, 1>
+  return
+}
+
+// -----
+
+func @test_store_zero_results() {
+^bb0:
+  %0 = memref.alloc() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
+  %1 = arith.constant 0 : index
+  %2 = arith.constant 1 : index
+  %3 = memref.load %0[%1, %2] : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
+  // Test that store returns zero results.
+  %4 = memref.store %3, %0[%1, %2] : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1> // expected-error {{cannot name an operation with no results}}
+  return
+}
+
+// -----
+
+func @test_store_zero_results2(%x: i32, %p: memref<i32>) {
+  "memref.store"(%x,%p) : (i32, memref<i32>) -> i32  // expected-error {{'memref.store' op requires zero results}}
+  return
+}
+
+// -----
+
+func @test_alloc_memref_map_rank_mismatch() {
+^bb0:
+  // expected-error@+1 {{memref layout mismatch between rank and affine map: 2 != 1}}
+  %0 = memref.alloc() : memref<1024x64xf32, affine_map<(d0) -> (d0)>, 1>
+  return
+}

diff --git a/mlir/test/Dialect/Tensor/invalid.mlir b/mlir/test/Dialect/Tensor/invalid.mlir
index f82e6bfa12ec7..51b67a76d14c3 100644
--- a/mlir/test/Dialect/Tensor/invalid.mlir
+++ b/mlir/test/Dialect/Tensor/invalid.mlir
@@ -1,5 +1,13 @@
 // RUN: mlir-opt <%s -split-input-file -verify-diagnostics
 
+func @dim(%arg : tensor<1x?xf32>) {
+  %c2 = arith.constant 2 : index
+  tensor.dim %arg, %c2 : tensor<1x?xf32> // expected-error {{'tensor.dim' op index is out of range}}
+  return
+}
+
+// -----
+
 func @tensor.cast_mismatching_constants(%arg0: tensor<1xf32>) {
   // expected-error at +1 {{operand type 'tensor<1xf32>' and result type 'tensor<2xf32>' are cast incompatible}}
   %0 = tensor.cast %arg0 : tensor<1xf32> to tensor<2xf32>
@@ -138,3 +146,23 @@ func @tensor.reshape_num_elements_mismatch(
   tensor.reshape %buf(%shape)
     : (tensor<1xf32>, tensor<1xi32>) -> tensor<10xf32>
 }
+
+// -----
+
+func @slice_wrong_dynamic_type(%t: tensor<8x16x4xf32>, %idx : index) {
+      // expected-error @+1 {{expected result type to be 'tensor<4x4x4xf32>' or a rank-reduced version. (mismatch of result sizes)}}
+  %0 = tensor.extract_slice %t[0, 2, 0][4, 4, 4][1, 1, 1]
+    : tensor<8x16x4xf32> to tensor<?x4x4xf32>
+
+  return
+}
+
+// -----
+
+func @slice_wrong_static_type(%t: tensor<8x16x4xf32>, %idx : index) {
+      // expected-error @+1 {{expected result type to be 'tensor<?x3x?xf32>' or a rank-reduced version. (mismatch of result sizes)}}
+  %0 = tensor.extract_slice %t[0, 0, 0][%idx, 3, %idx][1, 1, 1]
+    : tensor<8x16x4xf32> to tensor<4x4x4xf32>
+
+  return
+}

diff --git a/mlir/test/IR/invalid-ops.mlir b/mlir/test/IR/invalid-ops.mlir
index 4913d672e4f0d..13cfd16daf9a4 100644
--- a/mlir/test/IR/invalid-ops.mlir
+++ b/mlir/test/IR/invalid-ops.mlir
@@ -1,13 +1,5 @@
 // RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -verify-diagnostics
 
-func @dim(%arg : tensor<1x?xf32>) {
-  %c2 = arith.constant 2 : index
-  tensor.dim %arg, %c2 : tensor<1x?xf32> // expected-error {{'tensor.dim' op index is out of range}}
-  return
-}
-
-// -----
-
 func @rank(f32) {
 ^bb(%0: f32):
   "std.rank"(%0): (f32)->index // expected-error {{'std.rank' op operand #0 must be any memref or tensor type}}
@@ -60,57 +52,6 @@ func @unknown_std_op() {
 
 // -----
 
-func @bad_alloc_wrong_dynamic_dim_count() {
-^bb0:
-  %0 = arith.constant 7 : index
-  // Test alloc with wrong number of dynamic dimensions.
-  // expected-error@+1 {{dimension operand count does not equal memref dynamic dimension count}}
-  %1 = memref.alloc(%0)[%0] : memref<2x4xf32, affine_map<(d0, d1)[s0] -> ((d0 + s0), d1)>, 1>
-  return
-}
-
-// -----
-
-func @bad_alloc_wrong_symbol_count() {
-^bb0:
-  %0 = arith.constant 7 : index
-  // Test alloc with wrong number of symbols
-  // expected-error@+1 {{symbol operand count does not equal memref symbol count}}
-  %1 = memref.alloc(%0) : memref<2x?xf32, affine_map<(d0, d1)[s0] -> ((d0 + s0), d1)>, 1>
-  return
-}
-
-// -----
-
-func @test_store_zero_results() {
-^bb0:
-  %0 = memref.alloc() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
-  %1 = arith.constant 0 : index
-  %2 = arith.constant 1 : index
-  %3 = memref.load %0[%1, %2] : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
-  // Test that store returns zero results.
-  %4 = memref.store %3, %0[%1, %2] : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1> // expected-error {{cannot name an operation with no results}}
-  return
-}
-
-// -----
-
-func @test_store_zero_results2(%x: i32, %p: memref<i32>) {
-  "memref.store"(%x,%p) : (i32, memref<i32>) -> i32  // expected-error {{'memref.store' op requires zero results}}
-  return
-}
-
-// -----
-
-func @test_alloc_memref_map_rank_mismatch() {
-^bb0:
-  // expected-error@+1 {{memref layout mismatch between rank and affine map: 2 != 1}}
-  %0 = memref.alloc() : memref<1024x64xf32, affine_map<(d0) -> (d0)>, 1>
-  return
-}
-
-// -----
-
 func @calls(%arg0: i32) {
   %x = call @calls() : () -> i32  // expected-error {{incorrect number of operands for callee}}
   return
@@ -197,243 +138,6 @@ func @invalid_splat(%v : f32) { // expected-note {{prior use here}}
 
 // -----
 
-func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
-  %0 = memref.alloc() : memref<2048xi8>
-  // expected-error@+1 {{expects 1 offset operand}}
-  %1 = memref.view %0[][%arg0, %arg1]
-    : memref<2048xi8> to memref<?x?xf32>
-  return
-}
-
-// -----
-
-func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
-  %0 = memref.alloc() : memref<2048xi8, affine_map<(d0) -> (d0 floordiv 8, d0 mod 8)>>
-  // expected-error@+1 {{unsupported map for base memref type}}
-  %1 = memref.view %0[%arg2][%arg0, %arg1]
-    : memref<2048xi8, affine_map<(d0) -> (d0 floordiv 8, d0 mod 8)>> to
-      memref<?x?xf32, affine_map<(d0, d1)[s0] -> (d0 * 4 + d1 + s0)>>
-  return
-}
-
-// -----
-
-func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
-  %0 = memref.alloc() : memref<2048xi8>
-  // expected-error@+1 {{unsupported map for result memref type}}
-  %1 = memref.view %0[%arg2][%arg0, %arg1]
-    : memref<2048xi8> to memref<?x?xf32, affine_map<(d0, d1)[s0] -> (d0, d1, s0)>>
-  return
-}
-
-// -----
-
-func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
-  %0 = memref.alloc() : memref<2048xi8, 2>
-  // expected-error@+1 {{different memory spaces}}
-  %1 = memref.view %0[%arg2][%arg0, %arg1] :  memref<2048xi8, 2> to memref<?x?xf32, 1>
-  return
-}
-
-// -----
-
-func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
-  %0 = memref.alloc() : memref<2048xi8>
-  // expected-error@+1 {{incorrect number of size operands for type}}
-  %1 = memref.view %0[%arg2][%arg0]
-    : memref<2048xi8> to memref<?x?xf32>
-  return
-}
-
-// -----
-
-func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
-  %0 = memref.alloc() : memref<8x16x4xf32>
-  // expected-error@+1 {{expected mixed offsets rank to match mixed sizes rank (2 vs 3) so the rank of the result type is well-formed}}
-  %1 = memref.subview %0[0, 0][2, 2, 2][1, 1, 1]
-    : memref<8x16x4xf32> to memref<8x16x4xf32>
-  return
-}
-
-// -----
-
-func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
-  %0 = memref.alloc() : memref<8x16x4xf32>
-  // expected-error@+1 {{expected mixed sizes rank to match mixed strides rank (3 vs 2) so the rank of the result type is well-formed}}
-  %1 = memref.subview %0[0, 0, 0][2, 2, 2][1, 1]
-    : memref<8x16x4xf32> to memref<8x16x4xf32>
-  return
-}
-
-// -----
-
-func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
-  %0 = memref.alloc() : memref<8x16x4xf32>
-  // expected-error@+1 {{expected mixed sizes rank to match mixed strides rank (3 vs 2) so the rank of the result type is well-formed}}
-  %1 = memref.reinterpret_cast %0 to offset: [0], sizes: [2, 2, 2], strides:[1, 1]
-    : memref<8x16x4xf32> to memref<8x16x4xf32>
-  return
-}
-
-// -----
-
-func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
-  %0 = memref.alloc() : memref<8x16x4xf32, offset: 0, strides: [64, 4, 1], 2>
-  // expected-error@+1 {{different memory spaces}}
-  %1 = memref.subview %0[0, 0, 0][%arg2, %arg2, %arg2][1, 1, 1]
-    : memref<8x16x4xf32, offset: 0, strides: [64, 4, 1], 2> to
-      memref<8x?x4xf32, affine_map<(d0, d1, d2)[s0] -> (d0 * s0 + d1 * 4 + d2)>>
-  return
-}
-
-// -----
-
-func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
-  %0 = memref.alloc() : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 + d1, d1 + d2, d2)>>
-  // expected-error@+1 {{is not strided}}
-  %1 = memref.subview %0[0, 0, 0][%arg2, %arg2, %arg2][1, 1, 1]
-    : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 + d1, d1 + d2, d2)>> to
-      memref<8x?x4xf32, offset: 0, strides: [?, 4, 1]>
-  return
-}
-
-// -----
-
-func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
-  %0 = memref.alloc() : memref<8x16x4xf32>
-  // expected-error@+1 {{expected <= 3 offset values}}
-  %1 = memref.subview %0[%arg0, %arg1, 0, 0][%arg2, 0, 0, 0][1, 1, 1, 1]
-    : memref<8x16x4xf32> to
-      memref<8x?x4xf32, offset: 0, strides:[?, ?, 4]>
-  return
-}
-
-// -----
-
-func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
-  %0 = memref.alloc() : memref<8x16x4xf32>
-  // expected-error@+1 {{expected result element type to be 'f32'}}
-  %1 = memref.subview %0[0, 0, 0][8, 16, 4][1, 1, 1]
-    : memref<8x16x4xf32> to
-      memref<8x16x4xi32>
-  return
-}
-
-// -----
-
-func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
-  %0 = memref.alloc() : memref<8x16x4xf32>
-  // expected-error@+1 {{expected result rank to be smaller or equal to the source rank.}}
-  %1 = memref.subview %0[0, 0, 0][8, 16, 4][1, 1, 1]
-    : memref<8x16x4xf32> to
-      memref<8x16x4x3xi32>
-  return
-}
-
-// -----
-
-func @invalid_rank_reducing_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
-  %0 = memref.alloc() : memref<8x16x4xf32>
-  // expected-error@+1 {{expected result type to be 'memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>>' or a rank-reduced version. (mismatch of result sizes)}}
-  %1 = memref.subview %0[0, 0, 0][8, 16, 4][1, 1, 1]
-    : memref<8x16x4xf32> to memref<16x4xf32>
-  return
-}
-
-// -----
-
-func @invalid_rank_reducing_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
-  %0 = memref.alloc() : memref<8x16x4xf32>
-  // expected-error@+1 {{expected result type to be 'memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2 + 8)>>' or a rank-reduced version. (mismatch of result sizes)}}
-  %1 = memref.subview %0[0, 2, 0][8, 16, 4][1, 1, 1]
-    : memref<8x16x4xf32> to memref<16x4xf32>
-  return
-}
-
-// -----
-
-func @invalid_rank_reducing_subview(%arg0 : memref<?x?xf32>, %arg1 : index, %arg2 : index) {
-  // expected-error@+1 {{expected result type to be 'memref<?x1xf32, affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>>' or a rank-reduced version. (mismatch of result sizes)}}
-  %0 = memref.subview %arg0[0, %arg1][%arg2, 1][1, 1] : memref<?x?xf32> to memref<?xf32>
-  return
-}
-
-// -----
-
-func @invalid_memref_cast(%arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]>) {
-  // expected-error@+1{{operand type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 16 + d2)>>' and result type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 128 + d1 * 32 + d2 * 2)>>' are cast incompatible}}
-  %0 = memref.cast %arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]> to memref<12x4x16xf32, offset:0, strides:[128, 32, 2]>
-  return
-}
-
-// -----
-
-func @invalid_memref_cast(%arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]>) {
-  // expected-error@+1{{operand type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 16 + d2)>>' and result type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 16 + d2 + 16)>>' are cast incompatible}}
-  %0 = memref.cast %arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]> to memref<12x4x16xf32, offset:16, strides:[64, 16, 1]>
-  return
-}
-
-// -----
-
-// incompatible element types
-func @invalid_memref_cast() {
-  %0 = memref.alloc() : memref<2x5xf32, 0>
-  // expected-error@+1 {{operand type 'memref<2x5xf32>' and result type 'memref<*xi32>' are cast incompatible}}
-  %1 = memref.cast %0 : memref<2x5xf32, 0> to memref<*xi32>
-  return
-}
-
-// -----
-
-func @invalid_prefetch_rw(%i : index) {
-  %0 = memref.alloc() : memref<10xf32>
-  // expected-error@+1 {{rw specifier has to be 'read' or 'write'}}
-  memref.prefetch %0[%i], rw, locality<0>, data  : memref<10xf32>
-  return
-}
-
-// -----
-
-func @invalid_prefetch_cache_type(%i : index) {
-  %0 = memref.alloc() : memref<10xf32>
-  // expected-error@+1 {{cache type has to be 'data' or 'instr'}}
-  memref.prefetch %0[%i], read, locality<0>, false  : memref<10xf32>
-  return
-}
-
-// -----
-
-func @invalid_prefetch_locality_hint(%i : index) {
-  %0 = memref.alloc() : memref<10xf32>
-  // expected-error@+1 {{32-bit signless integer attribute whose minimum value is 0 whose maximum value is 3}}
-  memref.prefetch %0[%i], read, locality<5>, data  : memref<10xf32>
-  return
-}
-
-// -----
-
-// incompatible memory space
-func @invalid_memref_cast() {
-  %0 = memref.alloc() : memref<2x5xf32, 0>
-  // expected-error@+1 {{operand type 'memref<2x5xf32>' and result type 'memref<*xf32, 1>' are cast incompatible}}
-  %1 = memref.cast %0 : memref<2x5xf32, 0> to memref<*xf32, 1>
-  return
-}
-
-// -----
-
-// unranked to unranked
-func @invalid_memref_cast() {
-  %0 = memref.alloc() : memref<2x5xf32, 0>
-  %1 = memref.cast %0 : memref<2x5xf32, 0> to memref<*xf32, 0>
-  // expected-error@+1 {{operand type 'memref<*xf32>' and result type 'memref<*xf32>' are cast incompatible}}
-  %2 = memref.cast %1 : memref<*xf32, 0> to memref<*xf32, 0>
-  return
-}
-
-// -----
-
 func @atomic_rmw_idxs_rank_mismatch(%I: memref<16x10xf32>, %i : index, %val : f32) {
   // expected-error@+1 {{expects the number of subscripts to be equal to memref rank}}
   %x = atomic_rmw addf %val, %I[%i] : (f32, memref<16x10xf32>) -> f32
@@ -518,52 +222,6 @@ func @atomic_yield_type_mismatch(%I: memref<10xf32>, %i : index) {
 
 // -----
 
-// alignment is not power of 2.
-func @assume_alignment(%0: memref<4x4xf16>) {
-  // expected-error@+1 {{alignment must be power of 2}}
-  memref.assume_alignment %0, 12 : memref<4x4xf16>
-  return
-}
-
-// -----
-
-// 0 alignment value.
-func @assume_alignment(%0: memref<4x4xf16>) {
-  // expected-error@+1 {{attribute 'alignment' failed to satisfy constraint: 32-bit signless integer attribute whose value is positive}}
-  memref.assume_alignment %0, 0 : memref<4x4xf16>
-  return
-}
-
-// -----
-
-"alloca_without_scoped_alloc_parent"() ( {
-  memref.alloca() : memref<1xf32>
-  // expected-error@-1 {{requires an ancestor op with AutomaticAllocationScope trait}}
-  return
-}) : () -> ()
-
-// -----
-
-func @slice_wrong_dynamic_type(%t: tensor<8x16x4xf32>, %idx : index) {
-      // expected-error @+1 {{expected result type to be 'tensor<4x4x4xf32>' or a rank-reduced version. (mismatch of result sizes)}}
-  %0 = tensor.extract_slice %t[0, 2, 0][4, 4, 4][1, 1, 1]
-    : tensor<8x16x4xf32> to tensor<?x4x4xf32>
-
-  return
-}
-
-// -----
-
-func @slice_wrong_static_type(%t: tensor<8x16x4xf32>, %idx : index) {
-      // expected-error @+1 {{expected result type to be 'tensor<?x3x?xf32>' or a rank-reduced version. (mismatch of result sizes)}}
-  %0 = tensor.extract_slice %t[0, 0, 0][%idx, 3, %idx][1, 1, 1]
-    : tensor<8x16x4xf32> to tensor<4x4x4xf32>
-
-  return
-}
-
-// -----
-
 func @no_zero_bit_integer_attrs() {
   // expected-error @+1 {{integer constant out of range for attribute}}
   %x = "some.op"(){value = 0 : i0} : () -> f32

More information about the Mlir-commits mailing list