[Mlir-commits] [mlir] 0254b0b - [mlir][NFC] Update textual references of `func` to `func.func` in LLVM/Math/MemRef/NVGPU/OpenACC/OpenMP/Quant/SCF/Shape tests
River Riddle
llvmlistbot@llvm.org
Wed Apr 20 22:24:34 PDT 2022
Author: River Riddle
Date: 2022-04-20T22:17:28-07:00
New Revision: 0254b0bcf0a0fc96e7568abb4ff67cda454c6444
URL: https://github.com/llvm/llvm-project/commit/0254b0bcf0a0fc96e7568abb4ff67cda454c6444
DIFF: https://github.com/llvm/llvm-project/commit/0254b0bcf0a0fc96e7568abb4ff67cda454c6444.diff
LOG: [mlir][NFC] Update textual references of `func` to `func.func` in LLVM/Math/MemRef/NVGPU/OpenACC/OpenMP/Quant/SCF/Shape tests
The special-case parsing of `func` operations is being removed.
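As a minimal sketch of the spelling change these tests pick up (the function name `@example` is hypothetical, not taken from the patch): the sugared form relied on the special-case parser, while the updated tests spell out the fully qualified op name.

  // Sugared form, accepted only through the special-case `func` parsing:
  func @example() {
    return
  }

  // Fully qualified form now used in the tests:
  func.func @example() {
    return
  }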
Added:
Modified:
mlir/test/Dialect/LLVMIR/canonicalize.mlir
mlir/test/Dialect/LLVMIR/global.mlir
mlir/test/Dialect/LLVMIR/invalid.mlir
mlir/test/Dialect/LLVMIR/layout.mlir
mlir/test/Dialect/LLVMIR/nvvm.mlir
mlir/test/Dialect/LLVMIR/rocdl.mlir
mlir/test/Dialect/LLVMIR/roundtrip.mlir
mlir/test/Dialect/LLVMIR/terminator.mlir
mlir/test/Dialect/LLVMIR/types-invalid.mlir
mlir/test/Dialect/LLVMIR/types.mlir
mlir/test/Dialect/Math/algebraic-simplification.mlir
mlir/test/Dialect/Math/canonicalize.mlir
mlir/test/Dialect/Math/expand-tanh.mlir
mlir/test/Dialect/Math/ops.mlir
mlir/test/Dialect/Math/polynomial-approximation.mlir
mlir/test/Dialect/MemRef/canonicalize.mlir
mlir/test/Dialect/MemRef/expand-ops.mlir
mlir/test/Dialect/MemRef/fold-subview-ops.mlir
mlir/test/Dialect/MemRef/invalid.mlir
mlir/test/Dialect/MemRef/multibuffer.mlir
mlir/test/Dialect/MemRef/ops.mlir
mlir/test/Dialect/MemRef/subview.mlir
mlir/test/Dialect/NVGPU/roundtrip.mlir
mlir/test/Dialect/OpenACC/canonicalize.mlir
mlir/test/Dialect/OpenACC/ops.mlir
mlir/test/Dialect/OpenMP/invalid.mlir
mlir/test/Dialect/OpenMP/ops.mlir
mlir/test/Dialect/PDLInterp/ops.mlir
mlir/test/Dialect/Quant/canonicalize.mlir
mlir/test/Dialect/Quant/convert-const.mlir
mlir/test/Dialect/Quant/convert-fakequant-invalid.mlir
mlir/test/Dialect/Quant/convert-fakequant.mlir
mlir/test/Dialect/Quant/parse-any.mlir
mlir/test/Dialect/Quant/parse-calibrated.mlir
mlir/test/Dialect/Quant/parse-ops-invalid.mlir
mlir/test/Dialect/Quant/parse-ops.mlir
mlir/test/Dialect/Quant/parse-uniform.mlir
mlir/test/Dialect/Quant/quant_region.mlir
mlir/test/Dialect/SCF/bufferize.mlir
mlir/test/Dialect/SCF/canonicalize.mlir
mlir/test/Dialect/SCF/control-flow-sink.mlir
mlir/test/Dialect/SCF/for-loop-canonicalization.mlir
mlir/test/Dialect/SCF/for-loop-peeling.mlir
mlir/test/Dialect/SCF/for-loop-specialization.mlir
mlir/test/Dialect/SCF/for-loop-to-while-loop.mlir
mlir/test/Dialect/SCF/invalid.mlir
mlir/test/Dialect/SCF/loop-pipelining.mlir
mlir/test/Dialect/SCF/loop-range.mlir
mlir/test/Dialect/SCF/loop-unroll.mlir
mlir/test/Dialect/SCF/ops.mlir
mlir/test/Dialect/SCF/parallel-loop-fusion.mlir
mlir/test/Dialect/SCF/parallel-loop-specialization.mlir
mlir/test/Dialect/SCF/parallel-loop-tiling-inbound-check.mlir
mlir/test/Dialect/SCF/parallel-loop-tiling.mlir
mlir/test/Dialect/Shape/bufferize.mlir
mlir/test/Dialect/Shape/canonicalize.mlir
mlir/test/Dialect/Shape/invalid.mlir
mlir/test/Dialect/Shape/ops.mlir
mlir/test/Dialect/Shape/remove-shape-constraints.mlir
mlir/test/Dialect/Shape/shape-to-shape.mlir
Removed:
################################################################################
diff --git a/mlir/test/Dialect/LLVMIR/canonicalize.mlir b/mlir/test/Dialect/LLVMIR/canonicalize.mlir
index 3e7b545875662..8557a51a2a888 100644
--- a/mlir/test/Dialect/LLVMIR/canonicalize.mlir
+++ b/mlir/test/Dialect/LLVMIR/canonicalize.mlir
@@ -106,7 +106,7 @@ llvm.func @fold_gep(%x : !llvm.ptr<i8>) -> !llvm.ptr<i8> {
// resulting constant is created in the arith dialect because the last folded
// operation belongs to it.
// CHECK-LABEL: llvm_constant
-func @llvm_constant() -> i32 {
+func.func @llvm_constant() -> i32 {
// CHECK-NOT: llvm.mlir.constant
%0 = llvm.mlir.constant(40 : i32) : i32
%1 = llvm.mlir.constant(42 : i32) : i32
diff --git a/mlir/test/Dialect/LLVMIR/global.mlir b/mlir/test/Dialect/LLVMIR/global.mlir
index ab0f0c48042a0..49b976a62d68f 100644
--- a/mlir/test/Dialect/LLVMIR/global.mlir
+++ b/mlir/test/Dialect/LLVMIR/global.mlir
@@ -63,7 +63,7 @@ llvm.mlir.global external @has_thr_local(42 : i64) {thr_local} : i64
llvm.mlir.global external @has_dso_local(42 : i64) {dso_local} : i64
// CHECK-LABEL: references
-func @references() {
+func.func @references() {
// CHECK: llvm.mlir.addressof @global : !llvm.ptr<i64>
%0 = llvm.mlir.addressof @global : !llvm.ptr<i64>
@@ -109,7 +109,7 @@ llvm.mlir.global internal constant @constant(37.0) : !llvm.label
// -----
-func @foo() {
+func.func @foo() {
// expected-error @+1 {{must appear at the module level}}
llvm.mlir.global internal @bar(42) : i32
@@ -135,14 +135,14 @@ llvm.mlir.global internal @more_than_one_type(0) : i64, i32
llvm.mlir.global internal @foo(0: i32) : i32
-func @bar() {
+func.func @bar() {
// expected-error @+2{{expected ':'}}
llvm.mlir.addressof @foo
}
// -----
-func @foo() {
+func.func @foo() {
// The attribute parser will consume the first colon-type, so we put two of
// them to trigger the attribute type mismatch error.
// expected-error @+1 {{invalid kind of attribute specified}}
@@ -151,7 +151,7 @@ func @foo() {
// -----
-func @foo() {
+func.func @foo() {
// expected-error @+1 {{must reference a global defined by 'llvm.mlir.global'}}
llvm.mlir.addressof @foo : !llvm.ptr<func<void ()>>
}
@@ -160,7 +160,7 @@ func @foo() {
llvm.mlir.global internal @foo(0: i32) : i32
-func @bar() {
+func.func @bar() {
// expected-error @+1 {{the type must be a pointer to the type of the referenced global}}
llvm.mlir.addressof @foo : !llvm.ptr<i64>
}
@@ -200,7 +200,7 @@ llvm.mlir.global internal @g(43 : i64) : i64 {
// -----
llvm.mlir.global internal @g(32 : i64) {addr_space = 3: i32} : i64
-func @mismatch_addr_space_implicit_global() {
+func.func @mismatch_addr_space_implicit_global() {
// expected-error @+1 {{op the type must be a pointer to the type of the referenced global}}
llvm.mlir.addressof @g : !llvm.ptr<i64>
}
@@ -208,7 +208,7 @@ func @mismatch_addr_space_implicit_global() {
// -----
llvm.mlir.global internal @g(32 : i64) {addr_space = 3: i32} : i64
-func @mismatch_addr_space() {
+func.func @mismatch_addr_space() {
// expected-error @+1 {{op the type must be a pointer to the type of the referenced global}}
llvm.mlir.addressof @g : !llvm.ptr<i64, 4>
}
diff --git a/mlir/test/Dialect/LLVMIR/invalid.mlir b/mlir/test/Dialect/LLVMIR/invalid.mlir
index c260568b17888..f5ccae3abe6a2 100644
--- a/mlir/test/Dialect/LLVMIR/invalid.mlir
+++ b/mlir/test/Dialect/LLVMIR/invalid.mlir
@@ -36,14 +36,14 @@ llvm.mlir.global_dtors {dtors = [@dtor], priorities = [0 : i32]}
// -----
// expected-error@+1{{expected llvm.noalias argument attribute to be a unit attribute}}
-func @invalid_noalias(%arg0: i32 {llvm.noalias = 3}) {
+func.func @invalid_noalias(%arg0: i32 {llvm.noalias = 3}) {
"llvm.return"() : () -> ()
}
// -----
// expected-error@+1{{llvm.align argument attribute of non integer type}}
-func @invalid_align(%arg0: i32 {llvm.align = "foo"}) {
+func.func @invalid_align(%arg0: i32 {llvm.align = "foo"}) {
"llvm.return"() : () -> ()
}
@@ -53,7 +53,7 @@ func @invalid_align(%arg0: i32 {llvm.align = "foo"}) {
// -----
-func @icmp_non_string(%arg0 : i32, %arg1 : i16) {
+func.func @icmp_non_string(%arg0 : i32, %arg1 : i16) {
// expected-error@+1 {{invalid kind of attribute specified}}
llvm.icmp 42 %arg0, %arg0 : i32
return
@@ -61,7 +61,7 @@ func @icmp_non_string(%arg0 : i32, %arg1 : i16) {
// -----
-func @icmp_wrong_string(%arg0 : i32, %arg1 : i16) {
+func.func @icmp_wrong_string(%arg0 : i32, %arg1 : i16) {
// expected-error@+1 {{'foo' is an incorrect value of the 'predicate' attribute}}
llvm.icmp "foo" %arg0, %arg0 : i32
return
@@ -69,156 +69,156 @@ func @icmp_wrong_string(%arg0 : i32, %arg1 : i16) {
// -----
-func @alloca_missing_input_result_type(%size : i64) {
+func.func @alloca_missing_input_result_type(%size : i64) {
// expected-error@+1 {{expected trailing function type with one argument and one result}}
llvm.alloca %size x i32 : () -> ()
}
// -----
-func @alloca_missing_input_type() {
+func.func @alloca_missing_input_type() {
// expected-error@+1 {{expected trailing function type with one argument and one result}}
llvm.alloca %size x i32 : () -> (!llvm.ptr<i32>)
}
// -----
-func @alloca_missing_result_type() {
+func.func @alloca_missing_result_type() {
// expected-error@+1 {{expected trailing function type with one argument and one result}}
llvm.alloca %size x i32 : (i64) -> ()
}
// -----
-func @alloca_non_function_type() {
+func.func @alloca_non_function_type() {
// expected-error@+1 {{expected trailing function type with one argument and one result}}
llvm.alloca %size x i32 : !llvm.ptr<i32>
}
// -----
-func @alloca_non_integer_alignment() {
+func.func @alloca_non_integer_alignment() {
// expected-error@+1 {{expected integer alignment}}
llvm.alloca %size x i32 {alignment = 3.0} : !llvm.ptr<i32>
}
// -----
-func @alloca_opaque_ptr_no_type(%sz : i64) {
+func.func @alloca_opaque_ptr_no_type(%sz : i64) {
// expected-error@below {{expected 'elem_type' attribute if opaque pointer type is used}}
"llvm.alloca"(%sz) : (i64) -> !llvm.ptr
}
// -----
-func @alloca_ptr_type_attr_non_opaque_ptr(%sz : i64) {
+func.func @alloca_ptr_type_attr_non_opaque_ptr(%sz : i64) {
// expected-error@below {{unexpected 'elem_type' attribute when non-opaque pointer type is used}}
"llvm.alloca"(%sz) { elem_type = i32 } : (i64) -> !llvm.ptr<i32>
}
// -----
-func @gep_missing_input_result_type(%pos : i64, %base : !llvm.ptr<f32>) {
+func.func @gep_missing_input_result_type(%pos : i64, %base : !llvm.ptr<f32>) {
// expected-error@+1 {{2 operands present, but expected 0}}
llvm.getelementptr %base[%pos] : () -> ()
}
// -----
-func @gep_missing_input_type(%pos : i64, %base : !llvm.ptr<f32>) {
+func.func @gep_missing_input_type(%pos : i64, %base : !llvm.ptr<f32>) {
// expected-error@+1 {{2 operands present, but expected 0}}
llvm.getelementptr %base[%pos] : () -> (!llvm.ptr<f32>)
}
// -----
-func @gep_missing_result_type(%pos : i64, %base : !llvm.ptr<f32>) {
+func.func @gep_missing_result_type(%pos : i64, %base : !llvm.ptr<f32>) {
// expected-error@+1 {{op requires one result}}
llvm.getelementptr %base[%pos] : (!llvm.ptr<f32>, i64) -> ()
}
// -----
-func @gep_non_function_type(%pos : i64, %base : !llvm.ptr<f32>) {
+func.func @gep_non_function_type(%pos : i64, %base : !llvm.ptr<f32>) {
// expected-error@+1 {{invalid kind of type specified}}
llvm.getelementptr %base[%pos] : !llvm.ptr<f32>
}
// -----
-func @load_non_llvm_type(%foo : memref<f32>) {
+func.func @load_non_llvm_type(%foo : memref<f32>) {
// expected-error@+1 {{expected LLVM pointer type}}
llvm.load %foo : memref<f32>
}
// -----
-func @load_non_ptr_type(%foo : f32) {
+func.func @load_non_ptr_type(%foo : f32) {
// expected-error@+1 {{expected LLVM pointer type}}
llvm.load %foo : f32
}
// -----
-func @store_non_llvm_type(%foo : memref<f32>, %bar : f32) {
+func.func @store_non_llvm_type(%foo : memref<f32>, %bar : f32) {
// expected-error@+1 {{expected LLVM pointer type}}
llvm.store %bar, %foo : memref<f32>
}
// -----
-func @store_non_ptr_type(%foo : f32, %bar : f32) {
+func.func @store_non_ptr_type(%foo : f32, %bar : f32) {
// expected-error@+1 {{expected LLVM pointer type}}
llvm.store %bar, %foo : f32
}
// -----
-func @store_malformed_elem_type(%foo: !llvm.ptr, %bar: f32) {
+func.func @store_malformed_elem_type(%foo: !llvm.ptr, %bar: f32) {
// expected-error@+1 {{expected non-function type}}
llvm.store %bar, %foo : !llvm.ptr, "f32"
}
// -----
-func @call_non_function_type(%callee : !llvm.func<i8 (i8)>, %arg : i8) {
+func.func @call_non_function_type(%callee : !llvm.func<i8 (i8)>, %arg : i8) {
// expected-error@+1 {{expected function type}}
llvm.call %callee(%arg) : !llvm.func<i8 (i8)>
}
// -----
-func @invalid_call() {
+func.func @invalid_call() {
// expected-error@+1 {{'llvm.call' op must have either a `callee` attribute or at least an operand}}
"llvm.call"() : () -> ()
}
// -----
-func @call_non_function_type(%callee : !llvm.func<i8 (i8)>, %arg : i8) {
+func.func @call_non_function_type(%callee : !llvm.func<i8 (i8)>, %arg : i8) {
// expected-error@+1 {{expected function type}}
llvm.call %callee(%arg) : !llvm.func<i8 (i8)>
}
// -----
-func @call_unknown_symbol() {
+func.func @call_unknown_symbol() {
// expected-error@+1 {{'llvm.call' op 'missing_callee' does not reference a symbol in the current scope}}
llvm.call @missing_callee() : () -> ()
}
// -----
-func private @standard_func_callee()
+func.func private @standard_func_callee()
-func @call_non_llvm() {
+func.func @call_non_llvm() {
// expected-error@+1 {{'llvm.call' op 'standard_func_callee' does not reference a valid LLVM function}}
llvm.call @standard_func_callee() : () -> ()
}
// -----
-func @call_non_llvm_indirect(%arg0 : tensor<*xi32>) {
+func.func @call_non_llvm_indirect(%arg0 : tensor<*xi32>) {
// expected-error@+1 {{'llvm.call' op operand #0 must be LLVM dialect-compatible type}}
"llvm.call"(%arg0) : (tensor<*xi32>) -> ()
}
@@ -227,14 +227,14 @@ func @call_non_llvm_indirect(%arg0 : tensor<*xi32>) {
llvm.func @callee_func(i8) -> ()
-func @callee_arg_mismatch(%arg0 : i32) {
+func.func @callee_arg_mismatch(%arg0 : i32) {
// expected-error@+1 {{'llvm.call' op operand type mismatch for operand 0: 'i32' != 'i8'}}
llvm.call @callee_func(%arg0) : (i32) -> ()
}
// -----
-func @indirect_callee_arg_mismatch(%arg0 : i32, %callee : !llvm.ptr<func<void(i8)>>) {
+func.func @indirect_callee_arg_mismatch(%arg0 : i32, %callee : !llvm.ptr<func<void(i8)>>) {
// expected-error@+1 {{'llvm.call' op operand type mismatch for operand 0: 'i32' != 'i8'}}
"llvm.call"(%callee, %arg0) : (!llvm.ptr<func<void(i8)>>, i32) -> ()
}
@@ -243,35 +243,35 @@ func @indirect_callee_arg_mismatch(%arg0 : i32, %callee : !llvm.ptr<func<void(i8
llvm.func @callee_func() -> (i8)
-func @callee_return_mismatch() {
+func.func @callee_return_mismatch() {
// expected-error@+1 {{'llvm.call' op result type mismatch: 'i32' != 'i8'}}
%res = llvm.call @callee_func() : () -> (i32)
}
// -----
-func @indirect_callee_return_mismatch(%callee : !llvm.ptr<func<i8()>>) {
+func.func @indirect_callee_return_mismatch(%callee : !llvm.ptr<func<i8()>>) {
// expected-error@+1 {{'llvm.call' op result type mismatch: 'i32' != 'i8'}}
"llvm.call"(%callee) : (!llvm.ptr<func<i8()>>) -> (i32)
}
// -----
-func @call_too_many_results(%callee : () -> (i32,i32)) {
+func.func @call_too_many_results(%callee : () -> (i32,i32)) {
// expected-error@+1 {{expected function with 0 or 1 result}}
llvm.call %callee() : () -> (i32, i32)
}
// -----
-func @call_non_llvm_result(%callee : () -> (tensor<*xi32>)) {
+func.func @call_non_llvm_result(%callee : () -> (tensor<*xi32>)) {
// expected-error@+1 {{expected result to have LLVM type}}
llvm.call %callee() : () -> (tensor<*xi32>)
}
// -----
-func @call_non_llvm_input(%callee : (tensor<*xi32>) -> (), %arg : tensor<*xi32>) {
+func.func @call_non_llvm_input(%callee : (tensor<*xi32>) -> (), %arg : tensor<*xi32>) {
// expected-error@+1 {{expected LLVM types as inputs}}
llvm.call %callee(%arg) : (tensor<*xi32>) -> ()
}
@@ -302,14 +302,14 @@ llvm.func @func_result_mismatch(%arg0: f32) -> i32 {
// -----
-func @constant_wrong_type() {
+func.func @constant_wrong_type() {
// expected-error@+1 {{only supports integer, float, string or elements attributes}}
llvm.mlir.constant(@constant_wrong_type) : !llvm.ptr<func<void ()>>
}
// -----
-func @constant_wrong_type_string() {
+func.func @constant_wrong_type_string() {
// expected-error@below {{expected array type of 3 i8 elements for the string constant}}
llvm.mlir.constant("foo") : !llvm.ptr<i8>
}
@@ -364,14 +364,14 @@ llvm.func @struct_wrong_element_types() -> !llvm.struct<(!llvm.array<2 x f64>, !
// -----
-func @insertvalue_non_llvm_type(%a : i32, %b : i32) {
+func.func @insertvalue_non_llvm_type(%a : i32, %b : i32) {
// expected-error@+1 {{expected LLVM IR Dialect type}}
llvm.insertvalue %a, %b[0] : tensor<*xi32>
}
// -----
-func @insertvalue_non_array_position() {
+func.func @insertvalue_non_array_position() {
// Note the double-type, otherwise attribute parsing consumes the trailing
// type of the op as the (wrong) attribute type.
// expected-error@+1 {{invalid kind of attribute specified}}
@@ -380,35 +380,35 @@ func @insertvalue_non_array_position() {
// -----
-func @insertvalue_non_integer_position() {
+func.func @insertvalue_non_integer_position() {
// expected-error@+1 {{expected an array of integer literals}}
llvm.insertvalue %a, %b[0.0] : !llvm.struct<(i32)>
}
// -----
-func @insertvalue_struct_out_of_bounds() {
+func.func @insertvalue_struct_out_of_bounds() {
// expected-error@+1 {{position out of bounds}}
llvm.insertvalue %a, %b[1] : !llvm.struct<(i32)>
}
// -----
-func @insertvalue_array_out_of_bounds() {
+func.func @insertvalue_array_out_of_bounds() {
// expected-error@+1 {{position out of bounds}}
llvm.insertvalue %a, %b[1] : !llvm.array<1 x i32>
}
// -----
-func @insertvalue_wrong_nesting() {
+func.func @insertvalue_wrong_nesting() {
// expected-error@+1 {{expected LLVM IR structure/array type}}
llvm.insertvalue %a, %b[0,0] : !llvm.struct<(i32)>
}
// -----
-func @insertvalue_invalid_type(%a : !llvm.array<1 x i32>) -> !llvm.array<1 x i32> {
+func.func @insertvalue_invalid_type(%a : !llvm.array<1 x i32>) -> !llvm.array<1 x i32> {
// expected-error@+1 {{'llvm.insertvalue' op Type mismatch: cannot insert '!llvm.array<1 x i32>' into '!llvm.array<1 x i32>'}}
%b = "llvm.insertvalue"(%a, %a) {position = [0]} : (!llvm.array<1 x i32>, !llvm.array<1 x i32>) -> !llvm.array<1 x i32>
return %b : !llvm.array<1 x i32>
@@ -416,7 +416,7 @@ func @insertvalue_invalid_type(%a : !llvm.array<1 x i32>) -> !llvm.array<1 x i32
// -----
-func @extractvalue_invalid_type(%a : !llvm.array<4 x vector<8xf32>>) -> !llvm.array<4 x vector<8xf32>> {
+func.func @extractvalue_invalid_type(%a : !llvm.array<4 x vector<8xf32>>) -> !llvm.array<4 x vector<8xf32>> {
// expected-error@+1 {{'llvm.extractvalue' op Type mismatch: extracting from '!llvm.array<4 x vector<8xf32>>' should produce 'vector<8xf32>' but this op returns '!llvm.array<4 x vector<8xf32>>'}}
%b = "llvm.extractvalue"(%a) {position = [1]}
: (!llvm.array<4 x vector<8xf32>>) -> !llvm.array<4 x vector<8xf32>>
@@ -426,14 +426,14 @@ func @extractvalue_invalid_type(%a : !llvm.array<4 x vector<8xf32>>) -> !llvm.ar
// -----
-func @extractvalue_non_llvm_type(%a : i32, %b : tensor<*xi32>) {
+func.func @extractvalue_non_llvm_type(%a : i32, %b : tensor<*xi32>) {
// expected-error@+1 {{expected LLVM IR Dialect type}}
llvm.extractvalue %b[0] : tensor<*xi32>
}
// -----
-func @extractvalue_non_array_position() {
+func.func @extractvalue_non_array_position() {
// Note the double-type, otherwise attribute parsing consumes the trailing
// type of the op as the (wrong) attribute type.
// expected-error@+1 {{invalid kind of attribute specified}}
@@ -442,56 +442,56 @@ func @extractvalue_non_array_position() {
// -----
-func @extractvalue_non_integer_position() {
+func.func @extractvalue_non_integer_position() {
// expected-error@+1 {{expected an array of integer literals}}
llvm.extractvalue %b[0.0] : !llvm.struct<(i32)>
}
// -----
-func @extractvalue_struct_out_of_bounds() {
+func.func @extractvalue_struct_out_of_bounds() {
// expected-error@+1 {{position out of bounds}}
llvm.extractvalue %b[1] : !llvm.struct<(i32)>
}
// -----
-func @extractvalue_array_out_of_bounds() {
+func.func @extractvalue_array_out_of_bounds() {
// expected-error@+1 {{position out of bounds}}
llvm.extractvalue %b[1] : !llvm.array<1 x i32>
}
// -----
-func @extractvalue_wrong_nesting() {
+func.func @extractvalue_wrong_nesting() {
// expected-error@+1 {{expected LLVM IR structure/array type}}
llvm.extractvalue %b[0,0] : !llvm.struct<(i32)>
}
// -----
-func @invalid_vector_type_1(%arg0: vector<4xf32>, %arg1: i32, %arg2: f32) {
+func.func @invalid_vector_type_1(%arg0: vector<4xf32>, %arg1: i32, %arg2: f32) {
// expected-error@+1 {{expected LLVM dialect-compatible vector type for operand #1}}
%0 = llvm.extractelement %arg2[%arg1 : i32] : f32
}
// -----
-func @invalid_vector_type_2(%arg0: vector<4xf32>, %arg1: i32, %arg2: f32) {
+func.func @invalid_vector_type_2(%arg0: vector<4xf32>, %arg1: i32, %arg2: f32) {
// expected-error@+1 {{expected LLVM dialect-compatible vector type for operand #1}}
%0 = llvm.insertelement %arg2, %arg2[%arg1 : i32] : f32
}
// -----
-func @invalid_vector_type_3(%arg0: vector<4xf32>, %arg1: i32, %arg2: f32) {
+func.func @invalid_vector_type_3(%arg0: vector<4xf32>, %arg1: i32, %arg2: f32) {
// expected-error@+1 {{expected LLVM IR dialect vector type for operand #1}}
%0 = llvm.shufflevector %arg2, %arg2 [0 : i32, 0 : i32, 0 : i32, 0 : i32, 7 : i32] : f32, f32
}
// -----
-func @invalid_vector_type_4(%a : vector<4xf32>, %idx : i32) -> vector<4xf32> {
+func.func @invalid_vector_type_4(%a : vector<4xf32>, %idx : i32) -> vector<4xf32> {
// expected-error@+1 {{'llvm.extractelement' op Type mismatch: extracting from 'vector<4xf32>' should produce 'f32' but this op returns 'vector<4xf32>'}}
%b = "llvm.extractelement"(%a, %idx) : (vector<4xf32>, i32) -> vector<4xf32>
return %b : vector<4xf32>
@@ -499,7 +499,7 @@ func @invalid_vector_type_4(%a : vector<4xf32>, %idx : i32) -> vector<4xf32> {
// -----
-func @invalid_vector_type_5(%a : vector<4xf32>, %idx : i32) -> vector<4xf32> {
+func.func @invalid_vector_type_5(%a : vector<4xf32>, %idx : i32) -> vector<4xf32> {
// expected-error@+1 {{'llvm.insertelement' op Type mismatch: cannot insert 'vector<4xf32>' into 'vector<4xf32>'}}
%b = "llvm.insertelement"(%a, %a, %idx) : (vector<4xf32>, vector<4xf32>, i32) -> vector<4xf32>
return %b : vector<4xf32>
@@ -507,35 +507,35 @@ func @invalid_vector_type_5(%a : vector<4xf32>, %idx : i32) -> vector<4xf32> {
// -----
-func @null_non_llvm_type() {
+func.func @null_non_llvm_type() {
// expected-error@+1 {{must be LLVM pointer type, but got 'i32'}}
llvm.mlir.null : i32
}
// -----
-func @nvvm_invalid_shfl_pred_1(%arg0 : i32, %arg1 : i32, %arg2 : i32, %arg3 : i32) {
+func.func @nvvm_invalid_shfl_pred_1(%arg0 : i32, %arg1 : i32, %arg2 : i32, %arg3 : i32) {
// expected-error@+1 {{expected return type to be a two-element struct with i1 as the second element}}
%0 = nvvm.shfl.sync bfly %arg0, %arg3, %arg1, %arg2 {return_value_and_is_valid} : i32 -> i32
}
// -----
-func @nvvm_invalid_shfl_pred_2(%arg0 : i32, %arg1 : i32, %arg2 : i32, %arg3 : i32) {
+func.func @nvvm_invalid_shfl_pred_2(%arg0 : i32, %arg1 : i32, %arg2 : i32, %arg3 : i32) {
// expected-error@+1 {{expected return type to be a two-element struct with i1 as the second element}}
%0 = nvvm.shfl.sync bfly %arg0, %arg3, %arg1, %arg2 {return_value_and_is_valid} : i32 -> !llvm.struct<(i32)>
}
// -----
-func @nvvm_invalid_shfl_pred_3(%arg0 : i32, %arg1 : i32, %arg2 : i32, %arg3 : i32) {
+func.func @nvvm_invalid_shfl_pred_3(%arg0 : i32, %arg1 : i32, %arg2 : i32, %arg3 : i32) {
// expected-error@+1 {{expected return type to be a two-element struct with i1 as the second element}}
%0 = nvvm.shfl.sync bfly %arg0, %arg3, %arg1, %arg2 {return_value_and_is_valid} : i32 -> !llvm.struct<(i32, i32)>
}
// -----
-func @nvvm_invalid_mma_0(%a0 : f16, %a1 : f16,
+func.func @nvvm_invalid_mma_0(%a0 : f16, %a1 : f16,
%b0 : vector<2xf16>, %b1 : vector<2xf16>,
%c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
%c4 : f32, %c5 : f32, %c6 : f32, %c7 : f32) {
@@ -547,7 +547,7 @@ func @nvvm_invalid_mma_0(%a0 : f16, %a1 : f16,
// -----
-func @nvvm_invalid_mma_1(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
+func.func @nvvm_invalid_mma_1(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
%b0 : vector<2xf16>, %b1 : vector<2xf16>,
%c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
%c4 : f32, %c5 : f32, %c6 : f32, %c7 : f32) {
@@ -559,7 +559,7 @@ func @nvvm_invalid_mma_1(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
// -----
-func @nvvm_invalid_mma_2(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
+func.func @nvvm_invalid_mma_2(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
%b0 : vector<2xf16>, %b1 : vector<2xf16>,
%c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
%c4 : f32, %c5 : f32, %c6 : f32, %c7 : f32) {
@@ -571,7 +571,7 @@ func @nvvm_invalid_mma_2(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
// -----
-func @nvvm_invalid_mma_3(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
+func.func @nvvm_invalid_mma_3(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
%b0 : vector<2xf16>, %b1 : vector<2xf16>,
%c0 : vector<2xf16>, %c1 : vector<2xf16>) {
// expected-error at +1 {{unimplemented variant for MMA shape <8, 8, 16>}}
@@ -581,7 +581,7 @@ func @nvvm_invalid_mma_3(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
// -----
-func @nvvm_invalid_mma_8(%a0 : i32, %a1 : i32,
+func.func @nvvm_invalid_mma_8(%a0 : i32, %a1 : i32,
%b0 : i32,
%c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32) {
// expected-error at +1 {{op requires b1Op attribute}}
@@ -594,7 +594,7 @@ func @nvvm_invalid_mma_8(%a0 : i32, %a1 : i32,
// -----
-func @atomicrmw_expected_ptr(%f32 : f32) {
+func.func @atomicrmw_expected_ptr(%f32 : f32) {
// expected-error@+1 {{operand #0 must be LLVM pointer to floating point LLVM type or integer}}
%0 = "llvm.atomicrmw"(%f32, %f32) {bin_op=11, ordering=1} : (f32, f32) -> f32
llvm.return
@@ -602,7 +602,7 @@ func @atomicrmw_expected_ptr(%f32 : f32) {
// -----
-func @atomicrmw_mismatched_operands(%f32_ptr : !llvm.ptr<f32>, %i32 : i32) {
+func.func @atomicrmw_mismatched_operands(%f32_ptr : !llvm.ptr<f32>, %i32 : i32) {
// expected-error@+1 {{expected LLVM IR element type for operand #0 to match type for operand #1}}
%0 = "llvm.atomicrmw"(%f32_ptr, %i32) {bin_op=11, ordering=1} : (!llvm.ptr<f32>, i32) -> f32
llvm.return
@@ -610,7 +610,7 @@ func @atomicrmw_mismatched_operands(%f32_ptr : !llvm.ptr<f32>, %i32 : i32) {
// -----
-func @atomicrmw_mismatched_operands(%f32_ptr : !llvm.ptr<f32>, %f32 : f32) {
+func.func @atomicrmw_mismatched_operands(%f32_ptr : !llvm.ptr<f32>, %f32 : f32) {
// expected-error@+1 {{expected LLVM IR result type to match type for operand #1}}
%0 = "llvm.atomicrmw"(%f32_ptr, %f32) {bin_op=11, ordering=1} : (!llvm.ptr<f32>, f32) -> i32
llvm.return
@@ -618,7 +618,7 @@ func @atomicrmw_mismatched_operands(%f32_ptr : !llvm.ptr<f32>, %f32 : f32) {
// -----
-func @atomicrmw_expected_float(%i32_ptr : !llvm.ptr<i32>, %i32 : i32) {
+func.func @atomicrmw_expected_float(%i32_ptr : !llvm.ptr<i32>, %i32 : i32) {
// expected-error@+1 {{expected LLVM IR floating point type}}
%0 = llvm.atomicrmw fadd %i32_ptr, %i32 unordered : i32
llvm.return
@@ -626,7 +626,7 @@ func @atomicrmw_expected_float(%i32_ptr : !llvm.ptr<i32>, %i32 : i32) {
// -----
-func @atomicrmw_unexpected_xchg_type(%i1_ptr : !llvm.ptr<i1>, %i1 : i1) {
+func.func @atomicrmw_unexpected_xchg_type(%i1_ptr : !llvm.ptr<i1>, %i1 : i1) {
// expected-error@+1 {{unexpected LLVM IR type for 'xchg' bin_op}}
%0 = llvm.atomicrmw xchg %i1_ptr, %i1 unordered : i1
llvm.return
@@ -634,7 +634,7 @@ func @atomicrmw_unexpected_xchg_type(%i1_ptr : !llvm.ptr<i1>, %i1 : i1) {
// -----
-func @atomicrmw_expected_int(%f32_ptr : !llvm.ptr<f32>, %f32 : f32) {
+func.func @atomicrmw_expected_int(%f32_ptr : !llvm.ptr<f32>, %f32 : f32) {
// expected-error@+1 {{expected LLVM IR integer type}}
%0 = llvm.atomicrmw max %f32_ptr, %f32 unordered : f32
llvm.return
@@ -642,7 +642,7 @@ func @atomicrmw_expected_int(%f32_ptr : !llvm.ptr<f32>, %f32 : f32) {
// -----
-func @cmpxchg_expected_ptr(%f32_ptr : !llvm.ptr<f32>, %f32 : f32) {
+func.func @cmpxchg_expected_ptr(%f32_ptr : !llvm.ptr<f32>, %f32 : f32) {
// expected-error@+1 {{op operand #0 must be LLVM pointer to integer or LLVM pointer type}}
%0 = "llvm.cmpxchg"(%f32, %f32, %f32) {success_ordering=2,failure_ordering=2} : (f32, f32, f32) -> !llvm.struct<(f32, i1)>
llvm.return
@@ -650,7 +650,7 @@ func @cmpxchg_expected_ptr(%f32_ptr : !llvm.ptr<f32>, %f32 : f32) {
// -----
-func @cmpxchg_mismatched_operands(%i64_ptr : !llvm.ptr<i64>, %i32 : i32) {
+func.func @cmpxchg_mismatched_operands(%i64_ptr : !llvm.ptr<i64>, %i32 : i32) {
// expected-error@+1 {{expected LLVM IR element type for operand #0 to match type for all other operands}}
%0 = "llvm.cmpxchg"(%i64_ptr, %i32, %i32) {success_ordering=2,failure_ordering=2} : (!llvm.ptr<i64>, i32, i32) -> !llvm.struct<(i32, i1)>
llvm.return
@@ -658,7 +658,7 @@ func @cmpxchg_mismatched_operands(%i64_ptr : !llvm.ptr<i64>, %i32 : i32) {
// -----
-func @cmpxchg_unexpected_type(%i1_ptr : !llvm.ptr<i1>, %i1 : i1) {
+func.func @cmpxchg_unexpected_type(%i1_ptr : !llvm.ptr<i1>, %i1 : i1) {
// expected-error@+1 {{unexpected LLVM IR type}}
%0 = llvm.cmpxchg %i1_ptr, %i1, %i1 monotonic monotonic : i1
llvm.return
@@ -666,7 +666,7 @@ func @cmpxchg_unexpected_type(%i1_ptr : !llvm.ptr<i1>, %i1 : i1) {
// -----
-func @cmpxchg_at_least_monotonic_success(%i32_ptr : !llvm.ptr<i32>, %i32 : i32) {
+func.func @cmpxchg_at_least_monotonic_success(%i32_ptr : !llvm.ptr<i32>, %i32 : i32) {
// expected-error@+1 {{ordering must be at least 'monotonic'}}
%0 = llvm.cmpxchg %i32_ptr, %i32, %i32 unordered monotonic : i32
llvm.return
@@ -674,7 +674,7 @@ func @cmpxchg_at_least_monotonic_success(%i32_ptr : !llvm.ptr<i32>, %i32 : i32)
// -----
-func @cmpxchg_at_least_monotonic_failure(%i32_ptr : !llvm.ptr<i32>, %i32 : i32) {
+func.func @cmpxchg_at_least_monotonic_failure(%i32_ptr : !llvm.ptr<i32>, %i32 : i32) {
// expected-error@+1 {{ordering must be at least 'monotonic'}}
%0 = llvm.cmpxchg %i32_ptr, %i32, %i32 monotonic unordered : i32
llvm.return
@@ -682,7 +682,7 @@ func @cmpxchg_at_least_monotonic_failure(%i32_ptr : !llvm.ptr<i32>, %i32 : i32)
// -----
-func @cmpxchg_failure_release(%i32_ptr : !llvm.ptr<i32>, %i32 : i32) {
+func.func @cmpxchg_failure_release(%i32_ptr : !llvm.ptr<i32>, %i32 : i32) {
// expected-error@+1 {{failure ordering cannot be 'release' or 'acq_rel'}}
%0 = llvm.cmpxchg %i32_ptr, %i32, %i32 acq_rel release : i32
llvm.return
@@ -690,7 +690,7 @@ func @cmpxchg_failure_release(%i32_ptr : !llvm.ptr<i32>, %i32 : i32) {
// -----
-func @cmpxchg_failure_acq_rel(%i32_ptr : !llvm.ptr<i32>, %i32 : i32) {
+func.func @cmpxchg_failure_acq_rel(%i32_ptr : !llvm.ptr<i32>, %i32 : i32) {
// expected-error@+1 {{failure ordering cannot be 'release' or 'acq_rel'}}
%0 = llvm.cmpxchg %i32_ptr, %i32, %i32 acq_rel acq_rel : i32
llvm.return
@@ -781,7 +781,7 @@ llvm.func @caller(%arg0: i32) -> i32 {
// -----
-func @invalid_ordering_in_fence() {
+func.func @invalid_ordering_in_fence() {
// expected-error @+1 {{can be given only acquire, release, acq_rel, and seq_cst orderings}}
llvm.fence syncscope("agent") monotonic
}
@@ -790,12 +790,12 @@ func @invalid_ordering_in_fence() {
// expected-error @+1 {{invalid data layout descriptor}}
module attributes {llvm.data_layout = "#vjkr32"} {
- func @invalid_data_layout()
+ func.func @invalid_data_layout()
}
// -----
-func @switch_wrong_number_of_weights(%arg0 : i32) {
+func.func @switch_wrong_number_of_weights(%arg0 : i32) {
// expected-error@+1 {{expects number of branch weights to match number of successors: 3 vs 2}}
llvm.switch %arg0 : i32, ^bb1 [
42: ^bb2(%arg0, %arg0 : i32, i32)
@@ -1245,7 +1245,7 @@ llvm.func @callee() -> !llvm.struct<(i32, f32)>
// -----
-func @bitcast(%arg0: vector<2x3xf32>) {
+func.func @bitcast(%arg0: vector<2x3xf32>) {
// expected-error @below {{op operand #0 must be LLVM-compatible non-aggregate type}}
llvm.bitcast %arg0 : vector<2x3xf32> to vector<2x3xi32>
return
@@ -1253,7 +1253,7 @@ func @bitcast(%arg0: vector<2x3xf32>) {
// -----
-func @cp_async(%arg0: !llvm.ptr<i8, 3>, %arg1: !llvm.ptr<i8, 1>) {
+func.func @cp_async(%arg0: !llvm.ptr<i8, 3>, %arg1: !llvm.ptr<i8, 1>) {
// expected-error @below {{expected byte size to be either 4, 8 or 16.}}
nvvm.cp.async.shared.global %arg0, %arg1, 32
return
@@ -1261,7 +1261,7 @@ func @cp_async(%arg0: !llvm.ptr<i8, 3>, %arg1: !llvm.ptr<i8, 1>) {
// -----
-func @gep_struct_variable(%arg0: !llvm.ptr<struct<(i32)>>, %arg1: i32, %arg2: i32) {
+func.func @gep_struct_variable(%arg0: !llvm.ptr<struct<(i32)>>, %arg1: i32, %arg2: i32) {
// expected-error @below {{op expected index 1 indexing a struct to be constant}}
llvm.getelementptr %arg0[%arg1, %arg1] : (!llvm.ptr<struct<(i32)>>, i32, i32) -> !llvm.ptr<i32>
return
@@ -1269,7 +1269,7 @@ func @gep_struct_variable(%arg0: !llvm.ptr<struct<(i32)>>, %arg1: i32, %arg2: i3
// -----
-func @gep_out_of_bounds(%ptr: !llvm.ptr<struct<(i32, struct<(i32, f32)>)>>, %idx: i64) {
+func.func @gep_out_of_bounds(%ptr: !llvm.ptr<struct<(i32, struct<(i32, f32)>)>>, %idx: i64) {
// expected-error @below {{index 2 indexing a struct is out of bounds}}
llvm.getelementptr %ptr[%idx, 1, 3] : (!llvm.ptr<struct<(i32, struct<(i32, f32)>)>>, i64) -> !llvm.ptr<i32>
return
@@ -1277,7 +1277,7 @@ func @gep_out_of_bounds(%ptr: !llvm.ptr<struct<(i32, struct<(i32, f32)>)>>, %idx
// -----
-func @non_splat_shuffle_on_scalable_vector(%arg0: vector<[4]xf32>) {
+func.func @non_splat_shuffle_on_scalable_vector(%arg0: vector<[4]xf32>) {
// expected-error@below {{expected a splat operation for scalable vectors}}
%0 = llvm.shufflevector %arg0, %arg0 [0 : i32, 0 : i32, 0 : i32, 1 : i32] : vector<[4]xf32>, vector<[4]xf32>
return
@@ -1296,62 +1296,62 @@ llvm.mlir.global internal @side_effecting_global() : !llvm.struct<(i8)> {
// -----
// expected-error@+1 {{'llvm.struct_attrs' is permitted only in argument or result attributes}}
-func @struct_attrs_in_op() attributes {llvm.struct_attrs = []} {
+func.func @struct_attrs_in_op() attributes {llvm.struct_attrs = []} {
return
}
// -----
// expected-error@+1 {{expected 'llvm.struct_attrs' to annotate '!llvm.struct' or '!llvm.ptr<struct<...>>'}}
-func @invalid_struct_attr_arg_type(%arg0 : i32 {llvm.struct_attrs = []}) {
+func.func @invalid_struct_attr_arg_type(%arg0 : i32 {llvm.struct_attrs = []}) {
return
}
// -----
// expected-error@+1 {{expected 'llvm.struct_attrs' to annotate '!llvm.struct' or '!llvm.ptr<struct<...>>'}}
-func @invalid_struct_attr_pointer_arg_type(%arg0 : !llvm.ptr<i32> {llvm.struct_attrs = []}) {
+func.func @invalid_struct_attr_pointer_arg_type(%arg0 : !llvm.ptr<i32> {llvm.struct_attrs = []}) {
return
}
// -----
// expected-error@+1 {{expected 'llvm.struct_attrs' to be an array attribute}}
-func @invalid_arg_struct_attr_value(%arg0 : !llvm.struct<(i32)> {llvm.struct_attrs = {}}) {
+func.func @invalid_arg_struct_attr_value(%arg0 : !llvm.struct<(i32)> {llvm.struct_attrs = {}}) {
return
}
// -----
// expected-error@+1 {{size of 'llvm.struct_attrs' must match the size of the annotated '!llvm.struct'}}
-func @invalid_arg_struct_attr_size(%arg0 : !llvm.struct<(i32)> {llvm.struct_attrs = []}) {
+func.func @invalid_arg_struct_attr_size(%arg0 : !llvm.struct<(i32)> {llvm.struct_attrs = []}) {
return
}
// -----
// expected-error@+1 {{expected 'llvm.struct_attrs' to annotate '!llvm.struct' or '!llvm.ptr<struct<...>>'}}
-func @invalid_struct_attr_res_type(%arg0 : i32) -> (i32 {llvm.struct_attrs = []}) {
+func.func @invalid_struct_attr_res_type(%arg0 : i32) -> (i32 {llvm.struct_attrs = []}) {
return %arg0 : i32
}
// -----
// expected-error@+1 {{expected 'llvm.struct_attrs' to annotate '!llvm.struct' or '!llvm.ptr<struct<...>>'}}
-func @invalid_struct_attr_pointer_res_type(%arg0 : !llvm.ptr<i32>) -> (!llvm.ptr<i32> {llvm.struct_attrs = []}) {
+func.func @invalid_struct_attr_pointer_res_type(%arg0 : !llvm.ptr<i32>) -> (!llvm.ptr<i32> {llvm.struct_attrs = []}) {
return %arg0 : !llvm.ptr<i32>
}
// -----
// expected-error@+1 {{expected 'llvm.struct_attrs' to be an array attribute}}
-func @invalid_res_struct_attr_value(%arg0 : !llvm.struct<(i32)>) -> (!llvm.struct<(i32)> {llvm.struct_attrs = {}}) {
+func.func @invalid_res_struct_attr_value(%arg0 : !llvm.struct<(i32)>) -> (!llvm.struct<(i32)> {llvm.struct_attrs = {}}) {
return %arg0 : !llvm.struct<(i32)>
}
// -----
// expected-error@+1 {{size of 'llvm.struct_attrs' must match the size of the annotated '!llvm.struct'}}
-func @invalid_res_struct_attr_size(%arg0 : !llvm.struct<(i32)>) -> (!llvm.struct<(i32)> {llvm.struct_attrs = []}) {
+func.func @invalid_res_struct_attr_size(%arg0 : !llvm.struct<(i32)>) -> (!llvm.struct<(i32)> {llvm.struct_attrs = []}) {
return %arg0 : !llvm.struct<(i32)>
}
diff --git a/mlir/test/Dialect/LLVMIR/layout.mlir b/mlir/test/Dialect/LLVMIR/layout.mlir
index dc6d5a91376a7..df1e0ea7940ff 100644
--- a/mlir/test/Dialect/LLVMIR/layout.mlir
+++ b/mlir/test/Dialect/LLVMIR/layout.mlir
@@ -2,7 +2,7 @@
module {
// CHECK: @no_spec
- func @no_spec() {
+ func.func @no_spec() {
// CHECK: alignment = 8
// CHECK: bitsize = 64
// CHECK: preferred = 8
@@ -44,7 +44,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<
#dlti.dl_entry<!llvm.ptr<i8, 5>, dense<[64, 64, 64]> : vector<3xi32>>
>} {
// CHECK: @spec
- func @spec() {
+ func.func @spec() {
// CHECK: alignment = 4
// CHECK: bitsize = 32
// CHECK: preferred = 8
@@ -85,7 +85,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<
module attributes { dlti.dl_spec = #dlti.dl_spec<
#dlti.dl_entry<!llvm.ptr<i32>, dense<[64, 64, 64]> : vector<3xi32>>
>} {
- func @pointer() {
+ func.func @pointer() {
return
}
}
@@ -96,7 +96,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<
module attributes { dlti.dl_spec = #dlti.dl_spec<
#dlti.dl_entry<!llvm.ptr<i8>, dense<[64.0, 64.0, 64.0]> : vector<3xf32>>
>} {
- func @pointer() {
+ func.func @pointer() {
return
}
}
@@ -107,7 +107,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<
module attributes { dlti.dl_spec = #dlti.dl_spec<
#dlti.dl_entry<!llvm.ptr<i8>, dense<[64, 64, 32]> : vector<3xi32>>
>} {
- func @pointer() {
+ func.func @pointer() {
return
}
}
@@ -116,7 +116,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<
module {
// CHECK: @no_spec
- func @no_spec() {
+ func.func @no_spec() {
// simple case
// CHECK: alignment = 4
// CHECK: bitsize = 32
@@ -161,7 +161,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<
#dlti.dl_entry<!llvm.struct<()>, dense<[32, 32]> : vector<2xi32>>
>} {
// CHECK: @spec
- func @spec() {
+ func.func @spec() {
// Strict alignment is applied
// CHECK: alignment = 4
// CHECK: bitsize = 16
@@ -199,7 +199,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<
#dlti.dl_entry<!llvm.struct<()>, dense<[32]> : vector<1xi32>>
>} {
// CHECK: @spec_without_preferred
- func @spec_without_preferred() {
+ func.func @spec_without_preferred() {
// abi alignment is applied to both preferred and abi
// CHECK: alignment = 4
// CHECK: bitsize = 16
@@ -216,7 +216,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<
module attributes { dlti.dl_spec = #dlti.dl_spec<
#dlti.dl_entry<!llvm.struct<(i8)>, dense<[64, 64]> : vector<2xi32>>
>} {
- func @struct() {
+ func.func @struct() {
return
}
}
@@ -227,7 +227,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<
module attributes { dlti.dl_spec = #dlti.dl_spec<
#dlti.dl_entry<!llvm.struct<()>, dense<[64, 64, 64]> : vector<3xi32>>
>} {
- func @struct() {
+ func.func @struct() {
return
}
}
@@ -238,7 +238,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<
module attributes { dlti.dl_spec = #dlti.dl_spec<
#dlti.dl_entry<!llvm.struct<()>, dense<[64, 32]> : vector<2xi32>>
>} {
- func @struct() {
+ func.func @struct() {
return
}
}
@@ -247,7 +247,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<
module {
// CHECK: @arrays
- func @arrays() {
+ func.func @arrays() {
// simple case
// CHECK: alignment = 4
// CHECK: bitsize = 64
@@ -278,7 +278,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<
#dlti.dl_entry<!llvm.struct<()>, dense<[64]> : vector<1xi32>>
>} {
// CHECK: @overaligned
- func @overaligned() {
+ func.func @overaligned() {
// Over aligned element types are respected
// CHECK: alignment = 8
// CHECK: bitsize = 128
diff --git a/mlir/test/Dialect/LLVMIR/nvvm.mlir b/mlir/test/Dialect/LLVMIR/nvvm.mlir
index c2e1db76f251c..cf3679904ecc4 100644
--- a/mlir/test/Dialect/LLVMIR/nvvm.mlir
+++ b/mlir/test/Dialect/LLVMIR/nvvm.mlir
@@ -1,6 +1,6 @@
// RUN: mlir-opt %s -split-input-file -verify-diagnostics | FileCheck %s
-func @nvvm_special_regs() -> i32 {
+func.func @nvvm_special_regs() -> i32 {
// CHECK: nvvm.read.ptx.sreg.tid.x : i32
%0 = nvvm.read.ptx.sreg.tid.x : i32
// CHECK: nvvm.read.ptx.sreg.tid.y : i32
@@ -28,13 +28,13 @@ func @nvvm_special_regs() -> i32 {
llvm.return %0 : i32
}
-func @llvm.nvvm.barrier0() {
+func.func @llvm.nvvm.barrier0() {
// CHECK: nvvm.barrier0
nvvm.barrier0
llvm.return
}
-func @nvvm_shfl(
+func.func @nvvm_shfl(
%arg0 : i32, %arg1 : i32, %arg2 : i32,
%arg3 : i32, %arg4 : f32) -> i32 {
// CHECK: nvvm.shfl.sync bfly %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : i32 -> i32
@@ -50,7 +50,7 @@ func @nvvm_shfl(
llvm.return %0 : i32
}
-func @nvvm_shfl_pred(
+func.func @nvvm_shfl_pred(
%arg0 : i32, %arg1 : i32, %arg2 : i32,
%arg3 : i32, %arg4 : f32) -> !llvm.struct<(i32, i1)> {
// CHECK: nvvm.shfl.sync bfly %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} {return_value_and_is_valid} : i32 -> !llvm.struct<(i32, i1)>
@@ -60,14 +60,14 @@ func @nvvm_shfl_pred(
llvm.return %0 : !llvm.struct<(i32, i1)>
}
-func @nvvm_vote(%arg0 : i32, %arg1 : i1) -> i32 {
+func.func @nvvm_vote(%arg0 : i32, %arg1 : i1) -> i32 {
// CHECK: nvvm.vote.ballot.sync %{{.*}}, %{{.*}} : i32
%0 = nvvm.vote.ballot.sync %arg0, %arg1 : i32
llvm.return %0 : i32
}
// CHECK-LABEL: @nvvm_mma_m8n8k4_row_col_f32_f32
-func @nvvm_mma_m8n8k4_row_col_f32_f32(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
+func.func @nvvm_mma_m8n8k4_row_col_f32_f32(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
%b0 : vector<2xf16>, %b1 : vector<2xf16>,
%c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32, %c4 : f32, %c5 : f32, %c6 : f32, %c7 : f32) {
// CHECK: nvvm.mma.sync
@@ -77,7 +77,7 @@ func @nvvm_mma_m8n8k4_row_col_f32_f32(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
llvm.return %0 : !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f32)>
}
-func @nvvm_mma_m8n8k4_f16_f16(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
+func.func @nvvm_mma_m8n8k4_f16_f16(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
%b0 : vector<2xf16>, %b1 : vector<2xf16>,
%c0 : vector<2xf16>, %c1 : vector<2xf16>, %c2 : vector<2xf16>, %c3 : vector<2xf16>) {
// CHECK: nvvm.mma.sync A[{{.*}}] B[{{.*}}] C[{{.*}}]
@@ -87,7 +87,7 @@ func @nvvm_mma_m8n8k4_f16_f16(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
llvm.return %0 : !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)>
}
-func @nvvm_mma_m8n8k16_s8_s8(%a0 : i32, %b0 : i32,
+func.func @nvvm_mma_m8n8k16_s8_s8(%a0 : i32, %b0 : i32,
%c0 : i32, %c1 : i32) {
// CHECK: nvvm.mma.sync A[{{.*}}] B[{{.*}}] C[{{.*}}, {{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, layoutA = #nvvm.mma_layout<row>, layoutB = #nvvm.mma_layout<col>, multiplicandAPtxType = #nvvm.mma_type<s8>, multiplicandBPtxType = #nvvm.mma_type<s8>, shape = {k = 16 : i32, m = 8 : i32, n = 8 : i32}} : (i32, i32, i32) -> !llvm.struct<(i32, i32)>
%0 = nvvm.mma.sync A[%a0] B[%b0] C[%c0, %c1]
@@ -98,7 +98,7 @@ func @nvvm_mma_m8n8k16_s8_s8(%a0 : i32, %b0 : i32,
llvm.return %0 : !llvm.struct<(i32, i32)>
}
-func @nvvm_mma_m16n8k8_f16_f16(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
+func.func @nvvm_mma_m16n8k8_f16_f16(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
%b0 : vector<2xf16>,
%c0 : vector<2xf16>, %c1 : vector<2xf16>) {
// CHECK: nvvm.mma.sync A[%{{.*}}, %{{.*}}] B[%{{.*}}] C[%{{.*}}, %{{.*}}] {{{.*}}} : (vector<2xf16>, vector<2xf16>, vector<2xf16>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>
@@ -108,7 +108,7 @@ func @nvvm_mma_m16n8k8_f16_f16(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
llvm.return %0 : !llvm.struct<(vector<2xf16>, vector<2xf16>)>
}
-func @nvvm_mma_m16n8k16_f16_f16(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
+func.func @nvvm_mma_m16n8k16_f16_f16(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
%a2 : vector<2xf16>, %a3 : vector<2xf16>,
%b0 : vector<2xf16>, %b1 : vector<2xf16>,
%c0 : vector<2xf16>, %c1 : vector<2xf16>) {
@@ -119,7 +119,7 @@ func @nvvm_mma_m16n8k16_f16_f16(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
llvm.return %0 : !llvm.struct<(vector<2xf16>, vector<2xf16>)>
}
-func @nvvm_mma_m16n8k16_f32_f16(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
+func.func @nvvm_mma_m16n8k16_f32_f16(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
%a2 : vector<2xf16>, %a3 : vector<2xf16>,
%b0 : vector<2xf16>, %b1 : vector<2xf16>,
%c0 : vector<2xf16>, %c1 : vector<2xf16>) {
@@ -130,7 +130,7 @@ func @nvvm_mma_m16n8k16_f32_f16(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
llvm.return %0 : !llvm.struct<(f32, f32, f32, f32)>
}
-func @nvvm_mma_m16n8k16_f16_f32(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
+func.func @nvvm_mma_m16n8k16_f16_f32(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
%a2 : vector<2xf16>, %a3 : vector<2xf16>,
%b0 : vector<2xf16>, %b1 : vector<2xf16>,
%c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32) {
@@ -141,7 +141,7 @@ func @nvvm_mma_m16n8k16_f16_f32(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
llvm.return %0 : !llvm.struct<(vector<2xf16>, vector<2xf16>)>
}
-func @nvvm_mma_m16n8k16_f32_f32(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
+func.func @nvvm_mma_m16n8k16_f32_f32(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
%a2 : vector<2xf16>, %a3 : vector<2xf16>,
%b0 : vector<2xf16>, %b1 : vector<2xf16>,
%c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32) {
@@ -152,7 +152,7 @@ func @nvvm_mma_m16n8k16_f32_f32(%a0 : vector<2xf16>, %a1 : vector<2xf16>,
llvm.return %0 : !llvm.struct<(f32, f32, f32, f32)>
}
-func @nvvm_mma_m16n8k16_s8_s8(%a0 : i32, %a1 : i32, %b0 : i32,
+func.func @nvvm_mma_m16n8k16_s8_s8(%a0 : i32, %a1 : i32, %b0 : i32,
%c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32) {
// CHECK: nvvm.mma.sync A[{{.*}}, {{.*}}] B[{{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, layoutA = #nvvm.mma_layout<row>, layoutB = #nvvm.mma_layout<col>, multiplicandAPtxType = #nvvm.mma_type<s8>, multiplicandBPtxType = #nvvm.mma_type<s8>, shape = {k = 16 : i32, m = 16 : i32, n = 8 : i32}} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
%0 = nvvm.mma.sync A[%a0, %a1] B[%b0] C[%c0, %c1, %c2, %c3]
@@ -163,7 +163,7 @@ func @nvvm_mma_m16n8k16_s8_s8(%a0 : i32, %a1 : i32, %b0 : i32,
llvm.return %0 : !llvm.struct<(i32,i32,i32,i32)>
}
-func @nvvm_mma_m16n8k16_s8_u8(%a0 : i32, %a1 : i32,
+func.func @nvvm_mma_m16n8k16_s8_u8(%a0 : i32, %a1 : i32,
%b0 : i32,
%c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32) {
// CHECK: nvvm.mma.sync A[{{.*}}, {{.*}}] B[{{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<satfinite>, layoutA = #nvvm.mma_layout<row>, layoutB = #nvvm.mma_layout<col>, multiplicandAPtxType = #nvvm.mma_type<s8>, multiplicandBPtxType = #nvvm.mma_type<u8>, shape = {k = 16 : i32, m = 16 : i32, n = 8 : i32}} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
@@ -175,7 +175,7 @@ func @nvvm_mma_m16n8k16_s8_u8(%a0 : i32, %a1 : i32,
llvm.return %0 : !llvm.struct<(i32,i32,i32,i32)>
}
-func @nvvm_mma_m16n8k256_b1_b1(%a0 : i32, %a1 : i32, %a2 : i32, %a3 : i32,
+func.func @nvvm_mma_m16n8k256_b1_b1(%a0 : i32, %a1 : i32, %a2 : i32, %a3 : i32,
%b0 : i32, %b1 : i32,
%c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32) {
// CHECK: nvvm.mma.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] {b1Op = #nvvm.mma_b1op<xor_popc>, layoutA = #nvvm.mma_layout<row>, layoutB = #nvvm.mma_layout<col>, multiplicandAPtxType = #nvvm.mma_type<b1>, multiplicandBPtxType = #nvvm.mma_type<b1>, shape = {k = 256 : i32, m = 16 : i32, n = 8 : i32}} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
@@ -186,7 +186,7 @@ func @nvvm_mma_m16n8k256_b1_b1(%a0 : i32, %a1 : i32, %a2 : i32, %a3 : i32,
llvm.return %0 : !llvm.struct<(i32,i32,i32,i32)>
}
-func @nvvm_mma_m16n8k128_b1_b1(%a0 : i32, %a1 : i32,
+func.func @nvvm_mma_m16n8k128_b1_b1(%a0 : i32, %a1 : i32,
%b0 : i32,
%c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32) {
// CHECK: nvvm.mma.sync A[{{.*}}, {{.*}}] B[{{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] {b1Op = #nvvm.mma_b1op<xor_popc>, layoutA = #nvvm.mma_layout<row>, layoutB = #nvvm.mma_layout<col>, multiplicandAPtxType = #nvvm.mma_type<b1>, multiplicandBPtxType = #nvvm.mma_type<b1>, shape = {k = 128 : i32, m = 16 : i32, n = 8 : i32}} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
@@ -199,7 +199,7 @@ func @nvvm_mma_m16n8k128_b1_b1(%a0 : i32, %a1 : i32,
}
// CHECK-LABEL: @nvvm_mma_m8n8k128_b1_b1
-func @nvvm_mma_m8n8k128_b1_b1(%a0 : i32,
+func.func @nvvm_mma_m8n8k128_b1_b1(%a0 : i32,
%b0 : i32,
%c0 : i32, %c1 : i32) {
// CHECK: nvvm.mma.sync A[{{.*}}] B[{{.*}}] C[{{.*}}, {{.*}}] {b1Op = #nvvm.mma_b1op<xor_popc>, layoutA = #nvvm.mma_layout<row>, layoutB = #nvvm.mma_layout<col>, multiplicandAPtxType = #nvvm.mma_type<b1>, multiplicandBPtxType = #nvvm.mma_type<b1>, shape = {k = 128 : i32, m = 8 : i32, n = 8 : i32}} : (i32, i32, i32) -> !llvm.struct<(i32, i32)>
@@ -211,7 +211,7 @@ func @nvvm_mma_m8n8k128_b1_b1(%a0 : i32,
}
// CHECK-LABEL: @nvvm_mma_m16n8k32_s4_s4
-func @nvvm_mma_m16n8k32_s4_s4(%a0 : i32, %a1 : i32,
+func.func @nvvm_mma_m16n8k32_s4_s4(%a0 : i32, %a1 : i32,
%b0 : i32,
%c0 : i32, %c1 : i32, %c2 : i32, %c3 : i32) {
// CHECK: nvvm.mma.sync A[{{.*}}, {{.*}}] B[{{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] {intOverflowBehavior = #nvvm.mma_int_overflow<wrapped>, layoutA = #nvvm.mma_layout<row>, layoutB = #nvvm.mma_layout<col>, multiplicandAPtxType = #nvvm.mma_type<s4>, multiplicandBPtxType = #nvvm.mma_type<s4>, shape = {k = 32 : i32, m = 16 : i32, n = 8 : i32}} : (i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32)>
@@ -224,7 +224,7 @@ func @nvvm_mma_m16n8k32_s4_s4(%a0 : i32, %a1 : i32,
}
// CHECK-LABEL: @nvvm_wmma_load_tf32
-func @nvvm_wmma_load_tf32(%arg0: !llvm.ptr<i32>, %arg1 : i32) -> !llvm.struct<(i32, i32, i32, i32)> {
+func.func @nvvm_wmma_load_tf32(%arg0: !llvm.ptr<i32>, %arg1 : i32) -> !llvm.struct<(i32, i32, i32, i32)> {
// CHECK: nvvm.wmma.load {{.*}} {eltype = #nvvm.mma_type<tf32>, frag = #nvvm.mma_frag<a>, k = 8 : i32, layout = #nvvm.mma_layout<row>, m = 16 : i32, n = 16 : i32}
%0 = nvvm.wmma.load %arg0, %arg1
{eltype = #nvvm.mma_type<tf32>, frag = #nvvm.mma_frag<a>, k = 8 : i32, layout = #nvvm.mma_layout<row>, m = 16 : i32, n = 16 : i32}
@@ -232,7 +232,7 @@ func @nvvm_wmma_load_tf32(%arg0: !llvm.ptr<i32>, %arg1 : i32) -> !llvm.struct<(i
llvm.return %0 : !llvm.struct<(i32, i32, i32, i32)>
}
-func @nvvm_wmma_mma(%0 : i32, %1 : i32, %2 : i32, %3 : i32, %4 : i32, %5 : i32,
+func.func @nvvm_wmma_mma(%0 : i32, %1 : i32, %2 : i32, %3 : i32, %4 : i32, %5 : i32,
%6 : i32, %7 : i32, %8 : f32, %9 : f32, %10 : f32,
%11 : f32, %12 : f32, %13 : f32, %14 : f32, %15 : f32)
-> !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f32)> {
@@ -267,4 +267,4 @@ llvm.func @ld_matrix(%arg0: !llvm.ptr<i32, 3>) {
// -----
// expected-error@below {{attribute attached to unexpected op}}
-func private @expected_llvm_func() attributes { nvvm.kernel }
+func.func private @expected_llvm_func() attributes { nvvm.kernel }
diff --git a/mlir/test/Dialect/LLVMIR/rocdl.mlir b/mlir/test/Dialect/LLVMIR/rocdl.mlir
index e9a3a59d85019..a7aca0754af02 100644
--- a/mlir/test/Dialect/LLVMIR/rocdl.mlir
+++ b/mlir/test/Dialect/LLVMIR/rocdl.mlir
@@ -1,6 +1,6 @@
// RUN: mlir-opt %s -split-input-file -verify-diagnostics | FileCheck %s
-func @rocdl_special_regs() -> i32 {
+func.func @rocdl_special_regs() -> i32 {
// CHECK-LABEL: rocdl_special_regs
// CHECK: rocdl.workitem.id.x : i32
%0 = rocdl.workitem.id.x : i32
@@ -29,13 +29,13 @@ func @rocdl_special_regs() -> i32 {
llvm.return %0 : i32
}
-func @rocdl.barrier() {
+func.func @rocdl.barrier() {
// CHECK: rocdl.barrier
rocdl.barrier
llvm.return
}
-func @rocdl.xdlops(%arg0 : f32, %arg1 : f32,
+func.func @rocdl.xdlops(%arg0 : f32, %arg1 : f32,
%arg2 : vector<32xf32>, %arg3 : i32,
%arg4 : vector<16xf32>, %arg5 : vector<4xf32>,
%arg6 : vector<4xf16>, %arg7 : vector<32xi32>,
@@ -170,4 +170,4 @@ llvm.func @rocdl.mubuf(%rsrc : vector<4xi32>, %vindex : i32,
// -----
// expected-error@below {{attribute attached to unexpected op}}
-func private @expected_llvm_func() attributes { rocdl.kernel }
+func.func private @expected_llvm_func() attributes { rocdl.kernel }
diff --git a/mlir/test/Dialect/LLVMIR/roundtrip.mlir b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
index 1e6b7f282aeb8..cc4da8e095a79 100644
--- a/mlir/test/Dialect/LLVMIR/roundtrip.mlir
+++ b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
@@ -2,7 +2,7 @@
// CHECK-LABEL: func @ops
// CHECK-SAME: (%[[I32:.*]]: i32, %[[FLOAT:.*]]: f32, %[[I8PTR1:.*]]: !llvm.ptr<i8>, %[[I8PTR2:.*]]: !llvm.ptr<i8>, %[[BOOL:.*]]: i1, %[[VI8PTR1:.*]]: !llvm.vec<2 x ptr<i8>>)
-func @ops(%arg0: i32, %arg1: f32,
+func.func @ops(%arg0: i32, %arg1: f32,
%arg2: !llvm.ptr<i8>, %arg3: !llvm.ptr<i8>,
%arg4: i1, %arg5 : !llvm.vec<2x!llvm.ptr<i8>>) {
// Integer arithmetic binary operations.
@@ -241,7 +241,7 @@ llvm.func @foo(%arg0: i32) -> !llvm.struct<(i32, f64, i32)> {
// CHECK-LABEL: @casts
// CHECK-SAME: (%[[I32:.*]]: i32, %[[I64:.*]]: i64, %[[V4I32:.*]]: vector<4xi32>, %[[V4I64:.*]]: vector<4xi64>, %[[I32PTR:.*]]: !llvm.ptr<i32>)
-func @casts(%arg0: i32, %arg1: i64, %arg2: vector<4xi32>,
+func.func @casts(%arg0: i32, %arg1: i64, %arg2: vector<4xi32>,
%arg3: vector<4xi64>, %arg4: !llvm.ptr<i32>) {
// CHECK: = llvm.sext %[[I32]] : i32 to i56
%0 = llvm.sext %arg0 : i32 to i56
@@ -269,7 +269,7 @@ func @casts(%arg0: i32, %arg1: i64, %arg2: vector<4xi32>,
}
// CHECK-LABEL: @vect
-func @vect(%arg0: vector<4xf32>, %arg1: i32, %arg2: f32) {
+func.func @vect(%arg0: vector<4xf32>, %arg1: i32, %arg2: f32) {
// CHECK: = llvm.extractelement {{.*}} : vector<4xf32>
%0 = llvm.extractelement %arg0[%arg1 : i32] : vector<4xf32>
// CHECK: = llvm.insertelement {{.*}} : vector<4xf32>
@@ -282,7 +282,7 @@ func @vect(%arg0: vector<4xf32>, %arg1: i32, %arg2: f32) {
}
// CHECK-LABEL: @scalable_vect
-func @scalable_vect(%arg0: vector<[4]xf32>, %arg1: i32, %arg2: f32) {
+func.func @scalable_vect(%arg0: vector<[4]xf32>, %arg1: i32, %arg2: f32) {
// CHECK: = llvm.extractelement {{.*}} : vector<[4]xf32>
%0 = llvm.extractelement %arg0[%arg1 : i32] : vector<[4]xf32>
// CHECK: = llvm.insertelement {{.*}} : vector<[4]xf32>
@@ -295,7 +295,7 @@ func @scalable_vect(%arg0: vector<[4]xf32>, %arg1: i32, %arg2: f32) {
}
// CHECK-LABEL: @alloca
-func @alloca(%size : i64) {
+func.func @alloca(%size : i64) {
// CHECK: llvm.alloca %{{.*}} x i32 : (i64) -> !llvm.ptr<i32>
llvm.alloca %size x i32 {alignment = 0} : (i64) -> (!llvm.ptr<i32>)
// CHECK: llvm.alloca %{{.*}} x i32 {alignment = 8 : i64} : (i64) -> !llvm.ptr<i32>
@@ -304,7 +304,7 @@ func @alloca(%size : i64) {
}
// CHECK-LABEL: @null
-func @null() {
+func.func @null() {
// CHECK: llvm.mlir.null : !llvm.ptr<i8>
%0 = llvm.mlir.null : !llvm.ptr<i8>
// CHECK: llvm.mlir.null : !llvm.ptr<struct<(ptr<func<void (i32, ptr<func<void ()>>)>>, i64)>>
@@ -313,14 +313,14 @@ func @null() {
}
// CHECK-LABEL: @atomicrmw
-func @atomicrmw(%ptr : !llvm.ptr<f32>, %val : f32) {
+func.func @atomicrmw(%ptr : !llvm.ptr<f32>, %val : f32) {
// CHECK: llvm.atomicrmw fadd %{{.*}}, %{{.*}} monotonic : f32
%0 = llvm.atomicrmw fadd %ptr, %val monotonic : f32
llvm.return
}
// CHECK-LABEL: @cmpxchg
-func @cmpxchg(%ptr : !llvm.ptr<i32>, %cmp : i32, %new : i32) {
+func.func @cmpxchg(%ptr : !llvm.ptr<i32>, %cmp : i32, %new : i32) {
// CHECK: llvm.cmpxchg %{{.*}}, %{{.*}}, %{{.*}} acq_rel monotonic : i32
%0 = llvm.cmpxchg %ptr, %cmp, %new acq_rel monotonic : i32
llvm.return
@@ -379,7 +379,7 @@ llvm.func @invokeLandingpad() -> i32 attributes { personality = @__gxx_personali
}
// CHECK-LABEL: @useFreezeOp
-func @useFreezeOp(%arg0: i32) {
+func.func @useFreezeOp(%arg0: i32) {
// CHECK: = llvm.freeze %[[ARG0:.*]] : i32
%0 = llvm.freeze %arg0 : i32
// CHECK: %[[x:.*]] = llvm.mlir.undef : i8
@@ -390,7 +390,7 @@ func @useFreezeOp(%arg0: i32) {
}
// CHECK-LABEL: @useFenceInst
-func @useFenceInst() {
+func.func @useFenceInst() {
// CHECK: syncscope("agent") seq_cst
llvm.fence syncscope("agent") seq_cst
// CHECK: seq_cst
@@ -421,7 +421,7 @@ llvm.func @useInlineAsm(%arg0: i32) {
}
// CHECK-LABEL: @fastmathFlags
-func @fastmathFlags(%arg0: f32, %arg1: f32, %arg2: i32) {
+func.func @fastmathFlags(%arg0: f32, %arg1: f32, %arg2: i32) {
// CHECK: {{.*}} = llvm.fadd %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
// CHECK: {{.*}} = llvm.fsub %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
// CHECK: {{.*}} = llvm.fmul %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
diff --git a/mlir/test/Dialect/LLVMIR/terminator.mlir b/mlir/test/Dialect/LLVMIR/terminator.mlir
index 4902e89ff0beb..6c2a2bf00f09e 100644
--- a/mlir/test/Dialect/LLVMIR/terminator.mlir
+++ b/mlir/test/Dialect/LLVMIR/terminator.mlir
@@ -3,7 +3,7 @@
// CHECK-LABEL: @return
// CHECK: llvm.return
-func @return() {
+func.func @return() {
llvm.return
}
@@ -11,7 +11,7 @@ func @return() {
// CHECK: llvm.br
// CHECK: llvm.cond_br
// CHECK: llvm.return
-func @control_flow(%cond : i1) {
+func.func @control_flow(%cond : i1) {
llvm.br ^bb1
^bb1:
llvm.cond_br %cond, ^bb2, ^bb1
diff --git a/mlir/test/Dialect/LLVMIR/types-invalid.mlir b/mlir/test/Dialect/LLVMIR/types-invalid.mlir
index cfeab15267e01..d8ac523b86d93 100644
--- a/mlir/test/Dialect/LLVMIR/types-invalid.mlir
+++ b/mlir/test/Dialect/LLVMIR/types-invalid.mlir
@@ -1,34 +1,34 @@
// RUN: mlir-opt --allow-unregistered-dialect -split-input-file -verify-diagnostics %s
-func @array_of_void() {
+func.func @array_of_void() {
// expected-error @+1 {{invalid array element type}}
"some.op"() : () -> !llvm.array<4 x void>
}
// -----
-func @function_returning_function() {
+func.func @function_returning_function() {
// expected-error @+1 {{invalid function result type}}
"some.op"() : () -> !llvm.func<func<void ()> ()>
}
// -----
-func @function_taking_function() {
+func.func @function_taking_function() {
// expected-error @+1 {{invalid function argument type}}
"some.op"() : () -> !llvm.func<void (func<void ()>)>
}
// -----
-func @void_pointer() {
+func.func @void_pointer() {
// expected-error @+1 {{invalid pointer element type}}
"some.op"() : () -> !llvm.ptr<void>
}
// -----
-func @repeated_struct_name() {
+func.func @repeated_struct_name() {
"some.op"() : () -> !llvm.struct<"a", (ptr<struct<"a">>)>
// expected-error @+1 {{identified type already used with a different body}}
"some.op"() : () -> !llvm.struct<"a", (i32)>
@@ -36,7 +36,7 @@ func @repeated_struct_name() {
// -----
-func @repeated_struct_name_packed() {
+func.func @repeated_struct_name_packed() {
"some.op"() : () -> !llvm.struct<"a", packed (i32)>
// expected-error @+1 {{identified type already used with a different body}}
"some.op"() : () -> !llvm.struct<"a", (i32)>
@@ -44,7 +44,7 @@ func @repeated_struct_name_packed() {
// -----
-func @repeated_struct_opaque() {
+func.func @repeated_struct_opaque() {
"some.op"() : () -> !llvm.struct<"a", opaque>
// expected-error @+1 {{identified type already used with a different body}}
"some.op"() : () -> !llvm.struct<"a", ()>
@@ -52,7 +52,7 @@ func @repeated_struct_opaque() {
// -----
-func @repeated_struct_opaque_non_empty() {
+func.func @repeated_struct_opaque_non_empty() {
"some.op"() : () -> !llvm.struct<"a", opaque>
// expected-error @+1 {{identified type already used with a different body}}
"some.op"() : () -> !llvm.struct<"a", (i32, i32)>
@@ -60,7 +60,7 @@ func @repeated_struct_opaque_non_empty() {
// -----
-func @repeated_struct_opaque_redefinition() {
+func.func @repeated_struct_opaque_redefinition() {
"some.op"() : () -> !llvm.struct<"a", ()>
// expected-error @+1 {{redeclaring defined struct as opaque}}
"some.op"() : () -> !llvm.struct<"a", opaque>
@@ -68,28 +68,28 @@ func @repeated_struct_opaque_redefinition() {
// -----
-func @struct_literal_opaque() {
+func.func @struct_literal_opaque() {
// expected-error @+1 {{only identified structs can be opaque}}
"some.op"() : () -> !llvm.struct<opaque>
}
// -----
-func @unexpected_type() {
+func.func @unexpected_type() {
// expected-error @+1 {{unexpected type, expected keyword}}
"some.op"() : () -> !llvm.tensor<*xf32>
}
// -----
-func @unexpected_type() {
+func.func @unexpected_type() {
// expected-error @+1 {{unknown LLVM type}}
"some.op"() : () -> !llvm.ifoo
}
// -----
-func @explicitly_opaque_struct() {
+func.func @explicitly_opaque_struct() {
"some.op"() : () -> !llvm.struct<"a", opaque>
// expected-error @+1 {{identified type already used with a different body}}
"some.op"() : () -> !llvm.struct<"a", ()>
@@ -97,56 +97,56 @@ func @explicitly_opaque_struct() {
// -----
-func @literal_struct_with_void() {
+func.func @literal_struct_with_void() {
// expected-error @+1 {{invalid LLVM structure element type}}
"some.op"() : () -> !llvm.struct<(void)>
}
// -----
-func @identified_struct_with_void() {
+func.func @identified_struct_with_void() {
// expected-error @+1 {{invalid LLVM structure element type}}
"some.op"() : () -> !llvm.struct<"a", (void)>
}
// -----
-func @dynamic_vector() {
+func.func @dynamic_vector() {
// expected-error @+1 {{expected '? x <integer> x <type>' or '<integer> x <type>'}}
"some.op"() : () -> !llvm.vec<? x ptr<f32>>
}
// -----
-func @dynamic_scalable_vector() {
+func.func @dynamic_scalable_vector() {
// expected-error @+1 {{expected '? x <integer> x <type>' or '<integer> x <type>'}}
"some.op"() : () -> !llvm.vec<?x? x ptr<f32>>
}
// -----
-func @unscalable_vector() {
+func.func @unscalable_vector() {
// expected-error @+1 {{expected '? x <integer> x <type>' or '<integer> x <type>'}}
"some.op"() : () -> !llvm.vec<4x4 x ptr<i32>>
}
// -----
-func @zero_vector() {
+func.func @zero_vector() {
// expected-error @+1 {{the number of vector elements must be positive}}
"some.op"() : () -> !llvm.vec<0 x ptr<i32>>
}
// -----
-func @nested_vector() {
+func.func @nested_vector() {
// expected-error @+1 {{invalid vector element type}}
"some.op"() : () -> !llvm.vec<2 x vector<2xi32>>
}
// -----
-func @scalable_void_vector() {
+func.func @scalable_void_vector() {
// expected-error @+1 {{invalid vector element type}}
"some.op"() : () -> !llvm.vec<?x4 x void>
}
@@ -154,14 +154,14 @@ func @scalable_void_vector() {
// -----
// expected-error @+1 {{unexpected type, expected keyword}}
-func private @unexpected_type() -> !llvm.tensor<*xf32>
+func.func private @unexpected_type() -> !llvm.tensor<*xf32>
// -----
// expected-error @+1 {{unexpected type, expected keyword}}
-func private @unexpected_type() -> !llvm.f32
+func.func private @unexpected_type() -> !llvm.f32
// -----
// expected-error @below {{cannot use !llvm.vec for built-in primitives, use 'vector' instead}}
-func private @llvm_vector_primitive() -> !llvm.vec<4 x f32>
+func.func private @llvm_vector_primitive() -> !llvm.vec<4 x f32>
diff --git a/mlir/test/Dialect/LLVMIR/types.mlir b/mlir/test/Dialect/LLVMIR/types.mlir
index d67d2b6c4f71a..22ba9f03aa304 100644
--- a/mlir/test/Dialect/LLVMIR/types.mlir
+++ b/mlir/test/Dialect/LLVMIR/types.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file | mlir-opt -allow-unregistered-dialect | FileCheck %s
// CHECK-LABEL: @primitive
-func @primitive() {
+func.func @primitive() {
// CHECK: !llvm.void
"some.op"() : () -> !llvm.void
// CHECK: !llvm.ppc_fp128
@@ -18,7 +18,7 @@ func @primitive() {
}
// CHECK-LABEL: @func
-func @func() {
+func.func @func() {
// CHECK: !llvm.func<void ()>
"some.op"() : () -> !llvm.func<void ()>
// CHECK: !llvm.func<void (i32)>
@@ -37,7 +37,7 @@ func @func() {
}
// CHECK-LABEL: @integer
-func @integer() {
+func.func @integer() {
// CHECK: i1
"some.op"() : () -> i1
// CHECK: i8
@@ -56,7 +56,7 @@ func @integer() {
}
// CHECK-LABEL: @ptr
-func @ptr() {
+func.func @ptr() {
// CHECK: !llvm.ptr<i8>
"some.op"() : () -> !llvm.ptr<i8>
// CHECK: !llvm.ptr<f32>
@@ -81,7 +81,7 @@ func @ptr() {
}
// CHECK-LABEL: @vec
-func @vec() {
+func.func @vec() {
// CHECK: vector<4xi32>
"some.op"() : () -> vector<4xi32>
// CHECK: vector<4xf32>
@@ -96,7 +96,7 @@ func @vec() {
}
// CHECK-LABEL: @array
-func @array() {
+func.func @array() {
// CHECK: !llvm.array<10 x i32>
"some.op"() : () -> !llvm.array<10 x i32>
// CHECK: !llvm.array<8 x f32>
@@ -109,7 +109,7 @@ func @array() {
}
// CHECK-LABEL: @literal_struct
-func @literal_struct() {
+func.func @literal_struct() {
// CHECK: !llvm.struct<()>
"some.op"() : () -> !llvm.struct<()>
// CHECK: !llvm.struct<(i32)>
@@ -142,7 +142,7 @@ func @literal_struct() {
}
// CHECK-LABEL: @identified_struct
-func @identified_struct() {
+func.func @identified_struct() {
// CHECK: !llvm.struct<"empty", ()>
"some.op"() : () -> !llvm.struct<"empty", ()>
// CHECK: !llvm.struct<"opaque", opaque>
@@ -174,7 +174,7 @@ func @identified_struct() {
return
}
-func @verbose() {
+func.func @verbose() {
// CHECK: !llvm.struct<(i64, struct<(f32)>)>
"some.op"() : () -> !llvm.struct<(i64, !llvm.struct<(f32)>)>
return
@@ -183,7 +183,7 @@ func @verbose() {
// CHECK-LABEL: @ptr_elem_interface
// CHECK-COUNT-3: !llvm.ptr<!test.smpla>
// CHECK: llvm.mlir.undef : !llvm.ptr<!test.smpla>
-func @ptr_elem_interface(%arg0: !llvm.ptr<!test.smpla>) {
+func.func @ptr_elem_interface(%arg0: !llvm.ptr<!test.smpla>) {
%0 = llvm.load %arg0 : !llvm.ptr<!test.smpla>
llvm.store %0, %arg0 : !llvm.ptr<!test.smpla>
llvm.mlir.undef : !llvm.ptr<!test.smpla>
diff --git a/mlir/test/Dialect/Math/algebraic-simplification.mlir b/mlir/test/Dialect/Math/algebraic-simplification.mlir
index 13aaa5c4c686a..d4f4efe0de1fa 100644
--- a/mlir/test/Dialect/Math/algebraic-simplification.mlir
+++ b/mlir/test/Dialect/Math/algebraic-simplification.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s -test-math-algebraic-simplification | FileCheck %s --dump-input=always
// CHECK-LABEL: @pow_noop
-func @pow_noop(%arg0: f32, %arg1 : vector<4xf32>) -> (f32, vector<4xf32>) {
+func.func @pow_noop(%arg0: f32, %arg1 : vector<4xf32>) -> (f32, vector<4xf32>) {
// CHECK: return %arg0, %arg1
%c = arith.constant 1.0 : f32
%v = arith.constant dense <1.0> : vector<4xf32>
@@ -11,7 +11,7 @@ func @pow_noop(%arg0: f32, %arg1 : vector<4xf32>) -> (f32, vector<4xf32>) {
}
// CHECK-LABEL: @pow_square
-func @pow_square(%arg0: f32, %arg1 : vector<4xf32>) -> (f32, vector<4xf32>) {
+func.func @pow_square(%arg0: f32, %arg1 : vector<4xf32>) -> (f32, vector<4xf32>) {
// CHECK: %[[SCALAR:.*]] = arith.mulf %arg0, %arg0
// CHECK: %[[VECTOR:.*]] = arith.mulf %arg1, %arg1
// CHECK: return %[[SCALAR]], %[[VECTOR]]
@@ -23,7 +23,7 @@ func @pow_square(%arg0: f32, %arg1 : vector<4xf32>) -> (f32, vector<4xf32>) {
}
// CHECK-LABEL: @pow_cube
-func @pow_cube(%arg0: f32, %arg1 : vector<4xf32>) -> (f32, vector<4xf32>) {
+func.func @pow_cube(%arg0: f32, %arg1 : vector<4xf32>) -> (f32, vector<4xf32>) {
// CHECK: %[[TMP_S:.*]] = arith.mulf %arg0, %arg0
// CHECK: %[[SCALAR:.*]] = arith.mulf %arg0, %[[TMP_S]]
// CHECK: %[[TMP_V:.*]] = arith.mulf %arg1, %arg1
@@ -37,7 +37,7 @@ func @pow_cube(%arg0: f32, %arg1 : vector<4xf32>) -> (f32, vector<4xf32>) {
}
// CHECK-LABEL: @pow_recip
-func @pow_recip(%arg0: f32, %arg1 : vector<4xf32>) -> (f32, vector<4xf32>) {
+func.func @pow_recip(%arg0: f32, %arg1 : vector<4xf32>) -> (f32, vector<4xf32>) {
// CHECK: %[[CST_S:.*]] = arith.constant 1.0{{.*}} : f32
// CHECK: %[[CST_V:.*]] = arith.constant dense<1.0{{.*}}> : vector<4xf32>
// CHECK: %[[SCALAR:.*]] = arith.divf %[[CST_S]], %arg0
@@ -51,7 +51,7 @@ func @pow_recip(%arg0: f32, %arg1 : vector<4xf32>) -> (f32, vector<4xf32>) {
}
// CHECK-LABEL: @pow_sqrt
-func @pow_sqrt(%arg0: f32, %arg1 : vector<4xf32>) -> (f32, vector<4xf32>) {
+func.func @pow_sqrt(%arg0: f32, %arg1 : vector<4xf32>) -> (f32, vector<4xf32>) {
// CHECK: %[[SCALAR:.*]] = math.sqrt %arg0
// CHECK: %[[VECTOR:.*]] = math.sqrt %arg1
// CHECK: return %[[SCALAR]], %[[VECTOR]]
@@ -63,7 +63,7 @@ func @pow_sqrt(%arg0: f32, %arg1 : vector<4xf32>) -> (f32, vector<4xf32>) {
}
// CHECK-LABEL: @pow_rsqrt
-func @pow_rsqrt(%arg0: f32, %arg1 : vector<4xf32>) -> (f32, vector<4xf32>) {
+func.func @pow_rsqrt(%arg0: f32, %arg1 : vector<4xf32>) -> (f32, vector<4xf32>) {
// CHECK: %[[SCALAR:.*]] = math.rsqrt %arg0
// CHECK: %[[VECTOR:.*]] = math.rsqrt %arg1
// CHECK: return %[[SCALAR]], %[[VECTOR]]
diff --git a/mlir/test/Dialect/Math/canonicalize.mlir b/mlir/test/Dialect/Math/canonicalize.mlir
index 45b13b455a2f0..e29941f966aab 100644
--- a/mlir/test/Dialect/Math/canonicalize.mlir
+++ b/mlir/test/Dialect/Math/canonicalize.mlir
@@ -3,7 +3,7 @@
// CHECK-LABEL: @ceil_fold
// CHECK: %[[cst:.+]] = arith.constant 1.000000e+00 : f32
// CHECK: return %[[cst]]
-func @ceil_fold() -> f32 {
+func.func @ceil_fold() -> f32 {
%c = arith.constant 0.3 : f32
%r = math.ceil %c : f32
return %r : f32
@@ -12,7 +12,7 @@ func @ceil_fold() -> f32 {
// CHECK-LABEL: @ceil_fold2
// CHECK: %[[cst:.+]] = arith.constant 2.000000e+00 : f32
// CHECK: return %[[cst]]
-func @ceil_fold2() -> f32 {
+func.func @ceil_fold2() -> f32 {
%c = arith.constant 2.0 : f32
%r = math.ceil %c : f32
return %r : f32
@@ -21,7 +21,7 @@ func @ceil_fold2() -> f32 {
// CHECK-LABEL: @log2_fold
// CHECK: %[[cst:.+]] = arith.constant 2.000000e+00 : f32
// CHECK: return %[[cst]]
-func @log2_fold() -> f32 {
+func.func @log2_fold() -> f32 {
%c = arith.constant 4.0 : f32
%r = math.log2 %c : f32
return %r : f32
@@ -30,7 +30,7 @@ func @log2_fold() -> f32 {
// CHECK-LABEL: @log2_fold2
// CHECK: %[[cst:.+]] = arith.constant 0xFF800000 : f32
// CHECK: return %[[cst]]
-func @log2_fold2() -> f32 {
+func.func @log2_fold2() -> f32 {
%c = arith.constant 0.0 : f32
%r = math.log2 %c : f32
return %r : f32
@@ -40,7 +40,7 @@ func @log2_fold2() -> f32 {
// CHECK: %[[cst:.+]] = arith.constant -1.000000e+00 : f32
// CHECK: %[[res:.+]] = math.log2 %[[cst]] : f32
// CHECK: return %[[res]]
-func @log2_nofold2() -> f32 {
+func.func @log2_nofold2() -> f32 {
%c = arith.constant -1.0 : f32
%r = math.log2 %c : f32
return %r : f32
@@ -49,7 +49,7 @@ func @log2_nofold2() -> f32 {
// CHECK-LABEL: @log2_fold_64
// CHECK: %[[cst:.+]] = arith.constant 2.000000e+00 : f64
// CHECK: return %[[cst]]
-func @log2_fold_64() -> f64 {
+func.func @log2_fold_64() -> f64 {
%c = arith.constant 4.0 : f64
%r = math.log2 %c : f64
return %r : f64
@@ -58,7 +58,7 @@ func @log2_fold_64() -> f64 {
// CHECK-LABEL: @log2_fold2_64
// CHECK: %[[cst:.+]] = arith.constant 0xFFF0000000000000 : f64
// CHECK: return %[[cst]]
-func @log2_fold2_64() -> f64 {
+func.func @log2_fold2_64() -> f64 {
%c = arith.constant 0.0 : f64
%r = math.log2 %c : f64
return %r : f64
@@ -68,7 +68,7 @@ func @log2_fold2_64() -> f64 {
// CHECK: %[[cst:.+]] = arith.constant -1.000000e+00 : f64
// CHECK: %[[res:.+]] = math.log2 %[[cst]] : f64
// CHECK: return %[[res]]
-func @log2_nofold2_64() -> f64 {
+func.func @log2_nofold2_64() -> f64 {
%c = arith.constant -1.0 : f64
%r = math.log2 %c : f64
return %r : f64
@@ -77,7 +77,7 @@ func @log2_nofold2_64() -> f64 {
// CHECK-LABEL: @powf_fold
// CHECK: %[[cst:.+]] = arith.constant 4.000000e+00 : f32
// CHECK: return %[[cst]]
-func @powf_fold() -> f32 {
+func.func @powf_fold() -> f32 {
%c = arith.constant 2.0 : f32
%r = math.powf %c, %c : f32
return %r : f32
@@ -86,7 +86,7 @@ func @powf_fold() -> f32 {
// CHECK-LABEL: @sqrt_fold
// CHECK: %[[cst:.+]] = arith.constant 2.000000e+00 : f32
// CHECK: return %[[cst]]
-func @sqrt_fold() -> f32 {
+func.func @sqrt_fold() -> f32 {
%c = arith.constant 4.0 : f32
%r = math.sqrt %c : f32
return %r : f32
@@ -95,7 +95,7 @@ func @sqrt_fold() -> f32 {
// CHECK-LABEL: @abs_fold
// CHECK: %[[cst:.+]] = arith.constant 4.000000e+00 : f32
// CHECK: return %[[cst]]
-func @abs_fold() -> f32 {
+func.func @abs_fold() -> f32 {
%c = arith.constant -4.0 : f32
%r = math.abs %c : f32
return %r : f32
@@ -104,7 +104,7 @@ func @abs_fold() -> f32 {
// CHECK-LABEL: @copysign_fold
// CHECK: %[[cst:.+]] = arith.constant -4.000000e+00 : f32
// CHECK: return %[[cst]]
-func @copysign_fold() -> f32 {
+func.func @copysign_fold() -> f32 {
%c1 = arith.constant 4.0 : f32
%c2 = arith.constant -9.0 : f32
%r = math.copysign %c1, %c2 : f32
@@ -114,7 +114,7 @@ func @copysign_fold() -> f32 {
// CHECK-LABEL: @ctlz_fold1
// CHECK: %[[cst:.+]] = arith.constant 31 : i32
// CHECK: return %[[cst]]
-func @ctlz_fold1() -> i32 {
+func.func @ctlz_fold1() -> i32 {
%c = arith.constant 1 : i32
%r = math.ctlz %c : i32
return %r : i32
@@ -123,7 +123,7 @@ func @ctlz_fold1() -> i32 {
// CHECK-LABEL: @ctlz_fold2
// CHECK: %[[cst:.+]] = arith.constant 7 : i8
// CHECK: return %[[cst]]
-func @ctlz_fold2() -> i8 {
+func.func @ctlz_fold2() -> i8 {
%c = arith.constant 1 : i8
%r = math.ctlz %c : i8
return %r : i8
@@ -132,7 +132,7 @@ func @ctlz_fold2() -> i8 {
// CHECK-LABEL: @cttz_fold
// CHECK: %[[cst:.+]] = arith.constant 8 : i32
// CHECK: return %[[cst]]
-func @cttz_fold() -> i32 {
+func.func @cttz_fold() -> i32 {
%c = arith.constant 256 : i32
%r = math.cttz %c : i32
return %r : i32
@@ -141,7 +141,7 @@ func @cttz_fold() -> i32 {
// CHECK-LABEL: @ctpop_fold
// CHECK: %[[cst:.+]] = arith.constant 16 : i32
// CHECK: return %[[cst]]
-func @ctpop_fold() -> i32 {
+func.func @ctpop_fold() -> i32 {
%c = arith.constant 0xFF0000FF : i32
%r = math.ctpop %c : i32
return %r : i32
diff --git a/mlir/test/Dialect/Math/expand-tanh.mlir b/mlir/test/Dialect/Math/expand-tanh.mlir
index 20e5410ffb705..6724268c9a36b 100644
--- a/mlir/test/Dialect/Math/expand-tanh.mlir
+++ b/mlir/test/Dialect/Math/expand-tanh.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s -test-expand-tanh | FileCheck %s
// CHECK-LABEL: func @tanh
-func @tanh(%arg: f32) -> f32 {
+func.func @tanh(%arg: f32) -> f32 {
%res = math.tanh %arg : f32
return %res : f32
}
diff --git a/mlir/test/Dialect/Math/ops.mlir b/mlir/test/Dialect/Math/ops.mlir
index 67ea78f4e797c..a1bb9af1786fd 100644
--- a/mlir/test/Dialect/Math/ops.mlir
+++ b/mlir/test/Dialect/Math/ops.mlir
@@ -3,7 +3,7 @@
// CHECK-LABEL: func @atan(
// CHECK-SAME: %[[F:.*]]: f32, %[[V:.*]]: vector<4xf32>, %[[T:.*]]: tensor<4x4x?xf32>)
-func @atan(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
+func.func @atan(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK: %{{.*}} = math.atan %[[F]] : f32
%0 = math.atan %f : f32
// CHECK: %{{.*}} = math.atan %[[V]] : vector<4xf32>
@@ -16,7 +16,7 @@ func @atan(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK-LABEL: func @atan2(
// CHECK-SAME: %[[F:.*]]: f32, %[[V:.*]]: vector<4xf32>, %[[T:.*]]: tensor<4x4x?xf32>)
-func @atan2(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
+func.func @atan2(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK: %{{.*}} = math.atan2 %[[F]], %[[F]] : f32
%0 = math.atan2 %f, %f : f32
// CHECK: %{{.*}} = math.atan2 %[[V]], %[[V]] : vector<4xf32>
@@ -28,7 +28,7 @@ func @atan2(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK-LABEL: func @cos(
// CHECK-SAME: %[[F:.*]]: f32, %[[V:.*]]: vector<4xf32>, %[[T:.*]]: tensor<4x4x?xf32>)
-func @cos(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
+func.func @cos(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK: %{{.*}} = math.cos %[[F]] : f32
%0 = math.cos %f : f32
// CHECK: %{{.*}} = math.cos %[[V]] : vector<4xf32>
@@ -40,7 +40,7 @@ func @cos(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK-LABEL: func @sin(
// CHECK-SAME: %[[F:.*]]: f32, %[[V:.*]]: vector<4xf32>, %[[T:.*]]: tensor<4x4x?xf32>)
-func @sin(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
+func.func @sin(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK: %{{.*}} = math.sin %[[F]] : f32
%0 = math.sin %f : f32
// CHECK: %{{.*}} = math.sin %[[V]] : vector<4xf32>
@@ -52,7 +52,7 @@ func @sin(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK-LABEL: func @erf(
// CHECK-SAME: %[[F:.*]]: f32, %[[V:.*]]: vector<4xf32>, %[[T:.*]]: tensor<4x4x?xf32>)
-func @erf(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
+func.func @erf(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK: %{{.*}} = math.erf %[[F]] : f32
%0 = math.erf %f : f32
// CHECK: %{{.*}} = math.erf %[[V]] : vector<4xf32>
@@ -64,7 +64,7 @@ func @erf(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK-LABEL: func @exp(
// CHECK-SAME: %[[F:.*]]: f32, %[[V:.*]]: vector<4xf32>, %[[T:.*]]: tensor<4x4x?xf32>)
-func @exp(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
+func.func @exp(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK: %{{.*}} = math.exp %[[F]] : f32
%0 = math.exp %f : f32
// CHECK: %{{.*}} = math.exp %[[V]] : vector<4xf32>
@@ -76,7 +76,7 @@ func @exp(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK-LABEL: func @exp2(
// CHECK-SAME: %[[F:.*]]: f32, %[[V:.*]]: vector<4xf32>, %[[T:.*]]: tensor<4x4x?xf32>)
-func @exp2(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
+func.func @exp2(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK: %{{.*}} = math.exp2 %[[F]] : f32
%0 = math.exp2 %f : f32
// CHECK: %{{.*}} = math.exp2 %[[V]] : vector<4xf32>
@@ -88,7 +88,7 @@ func @exp2(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK-LABEL: func @expm1(
// CHECK-SAME: %[[F:.*]]: f32, %[[V:.*]]: vector<4xf32>, %[[T:.*]]: tensor<4x4x?xf32>)
-func @expm1(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
+func.func @expm1(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK: %{{.*}} = math.expm1 %[[F]] : f32
%0 = math.expm1 %f : f32
// CHECK: %{{.*}} = math.expm1 %[[V]] : vector<4xf32>
@@ -100,7 +100,7 @@ func @expm1(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK-LABEL: func @log(
// CHECK-SAME: %[[F:.*]]: f32, %[[V:.*]]: vector<4xf32>, %[[T:.*]]: tensor<4x4x?xf32>)
-func @log(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
+func.func @log(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK: %{{.*}} = math.log %[[F]] : f32
%0 = math.log %f : f32
// CHECK: %{{.*}} = math.log %[[V]] : vector<4xf32>
@@ -112,7 +112,7 @@ func @log(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK-LABEL: func @log10(
// CHECK-SAME: %[[F:.*]]: f32, %[[V:.*]]: vector<4xf32>, %[[T:.*]]: tensor<4x4x?xf32>)
-func @log10(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
+func.func @log10(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK: %{{.*}} = math.log10 %[[F]] : f32
%0 = math.log10 %f : f32
// CHECK: %{{.*}} = math.log10 %[[V]] : vector<4xf32>
@@ -124,7 +124,7 @@ func @log10(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK-LABEL: func @log1p(
// CHECK-SAME: %[[F:.*]]: f32, %[[V:.*]]: vector<4xf32>, %[[T:.*]]: tensor<4x4x?xf32>)
-func @log1p(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
+func.func @log1p(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK: %{{.*}} = math.log1p %[[F]] : f32
%0 = math.log1p %f : f32
// CHECK: %{{.*}} = math.log1p %[[V]] : vector<4xf32>
@@ -136,7 +136,7 @@ func @log1p(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK-LABEL: func @log2(
// CHECK-SAME: %[[F:.*]]: f32, %[[V:.*]]: vector<4xf32>, %[[T:.*]]: tensor<4x4x?xf32>)
-func @log2(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
+func.func @log2(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK: %{{.*}} = math.log2 %[[F]] : f32
%0 = math.log2 %f : f32
// CHECK: %{{.*}} = math.log2 %[[V]] : vector<4xf32>
@@ -148,7 +148,7 @@ func @log2(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK-LABEL: func @powf(
// CHECK-SAME: %[[F:.*]]: f32, %[[V:.*]]: vector<4xf32>, %[[T:.*]]: tensor<4x4x?xf32>)
-func @powf(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
+func.func @powf(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK: %{{.*}} = math.powf %[[F]], %[[F]] : f32
%0 = math.powf %f, %f : f32
// CHECK: %{{.*}} = math.powf %[[V]], %[[V]] : vector<4xf32>
@@ -160,7 +160,7 @@ func @powf(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK-LABEL: func @rsqrt(
// CHECK-SAME: %[[F:.*]]: f32, %[[V:.*]]: vector<4xf32>, %[[T:.*]]: tensor<4x4x?xf32>)
-func @rsqrt(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
+func.func @rsqrt(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK: %{{.*}} = math.rsqrt %[[F]] : f32
%0 = math.rsqrt %f : f32
// CHECK: %{{.*}} = math.rsqrt %[[V]] : vector<4xf32>
@@ -173,7 +173,7 @@ func @rsqrt(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK-LABEL: func @sqrt(
// CHECK-SAME: %[[F:.*]]: f32, %[[V:.*]]: vector<4xf32>, %[[T:.*]]: tensor<4x4x?xf32>)
-func @sqrt(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
+func.func @sqrt(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK: %{{.*}} = math.sqrt %[[F]] : f32
%0 = math.sqrt %f : f32
// CHECK: %{{.*}} = math.sqrt %[[V]] : vector<4xf32>
@@ -185,7 +185,7 @@ func @sqrt(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK-LABEL: func @tanh(
// CHECK-SAME: %[[F:.*]]: f32, %[[V:.*]]: vector<4xf32>, %[[T:.*]]: tensor<4x4x?xf32>)
-func @tanh(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
+func.func @tanh(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) {
// CHECK: %{{.*}} = math.tanh %[[F]] : f32
%0 = math.tanh %f : f32
// CHECK: %{{.*}} = math.tanh %[[V]] : vector<4xf32>
diff --git a/mlir/test/Dialect/Math/polynomial-approximation.mlir b/mlir/test/Dialect/Math/polynomial-approximation.mlir
index cff92f3042c06..1ba82696744dd 100644
--- a/mlir/test/Dialect/Math/polynomial-approximation.mlir
+++ b/mlir/test/Dialect/Math/polynomial-approximation.mlir
@@ -76,7 +76,7 @@
// CHECK: %[[val_36:.*]] = arith.select %[[val_0]], %[[val_35]], %[[val_34]] : f32
// CHECK: return %[[val_36]] : f32
// CHECK: }
-func @erf_scalar(%arg0: f32) -> f32 {
+func.func @erf_scalar(%arg0: f32) -> f32 {
%0 = math.erf %arg0 : f32
return %0 : f32
}
@@ -89,7 +89,7 @@ func @erf_scalar(%arg0: f32) -> f32 {
// CHECK: %[[res:.*]] = arith.select
// CHECK: return %[[res]] : vector<8xf32>
// CHECK: }
-func @erf_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
+func.func @erf_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
%0 = math.erf %arg0 : vector<8xf32>
return %0 : vector<8xf32>
}
@@ -139,7 +139,7 @@ func @erf_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
// CHECK: %[[VAL_40:.*]] = arith.select %[[VAL_33]], %[[VAL_9]], %[[VAL_39]] : f32
// CHECK: %[[VAL_41:.*]] = arith.select %[[IS_NAN]], %[[VAL_0]], %[[VAL_40]] : f32
// CHECK: return %[[VAL_41]] : f32
-func @exp_scalar(%arg0: f32) -> f32 {
+func.func @exp_scalar(%arg0: f32) -> f32 {
%0 = math.exp %arg0 : f32
return %0 : f32
}
@@ -151,7 +151,7 @@ func @exp_scalar(%arg0: f32) -> f32 {
// CHECK-COUNT-4: select
// CHECK: %[[VAL_40:.*]] = arith.select
// CHECK: return %[[VAL_40]] : vector<8xf32>
-func @exp_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
+func.func @exp_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
%0 = math.exp %arg0 : vector<8xf32>
return %0 : vector<8xf32>
}
@@ -179,7 +179,7 @@ func @exp_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
// CHECK: %[[VAL_109:.*]] = arith.select %[[IS_ONE_OR_NAN]], %[[X]], %[[VAL_108]] : f32
// CHECK: return %[[VAL_109]] : f32
// CHECK: }
-func @expm1_scalar(%arg0: f32) -> f32 {
+func.func @expm1_scalar(%arg0: f32) -> f32 {
%0 = math.expm1 %arg0 : f32
return %0 : f32
}
@@ -196,7 +196,7 @@ func @expm1_scalar(%arg0: f32) -> f32 {
// CHECK: %[[VAL_115:.*]] = arith.select
// CHECK: return %[[VAL_115]] : vector<8x8xf32>
// CHECK: }
-func @expm1_vector(%arg0: vector<8x8xf32>) -> vector<8x8xf32> {
+func.func @expm1_vector(%arg0: vector<8x8xf32>) -> vector<8x8xf32> {
%0 = math.expm1 %arg0 : vector<8x8xf32>
return %0 : vector<8x8xf32>
}
@@ -264,7 +264,7 @@ func @expm1_vector(%arg0: vector<8x8xf32>) -> vector<8x8xf32> {
// CHECK: %[[VAL_58:.*]] = arith.select %[[VAL_54]], %[[VAL_5]], %[[VAL_57]] : f32
// CHECK: return %[[VAL_58]] : f32
// CHECK: }
-func @log_scalar(%arg0: f32) -> f32 {
+func.func @log_scalar(%arg0: f32) -> f32 {
%0 = math.log %arg0 : f32
return %0 : f32
}
@@ -276,7 +276,7 @@ func @log_scalar(%arg0: f32) -> f32 {
// CHECK: %[[VAL_71:.*]] = arith.select
// CHECK: return %[[VAL_71]] : vector<8xf32>
// CHECK: }
-func @log_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
+func.func @log_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
%0 = math.log %arg0 : vector<8xf32>
return %0 : vector<8xf32>
}
@@ -288,7 +288,7 @@ func @log_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
// CHECK: %[[VAL_65:.*]] = arith.select
// CHECK: return %[[VAL_65]] : f32
// CHECK: }
-func @log2_scalar(%arg0: f32) -> f32 {
+func.func @log2_scalar(%arg0: f32) -> f32 {
%0 = math.log2 %arg0 : f32
return %0 : f32
}
@@ -300,7 +300,7 @@ func @log2_scalar(%arg0: f32) -> f32 {
// CHECK: %[[VAL_71:.*]] = arith.select
// CHECK: return %[[VAL_71]] : vector<8xf32>
// CHECK: }
-func @log2_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
+func.func @log2_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
%0 = math.log2 %arg0 : vector<8xf32>
return %0 : vector<8xf32>
}
@@ -321,7 +321,7 @@ func @log2_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
// CHECK: %[[APPROX:.*]] = arith.select %[[VAL_72]], %[[X]], %[[LOG_LARGE]] : f32
// CHECK: return %[[APPROX]] : f32
// CHECK: }
-func @log1p_scalar(%arg0: f32) -> f32 {
+func.func @log1p_scalar(%arg0: f32) -> f32 {
%0 = math.log1p %arg0 : f32
return %0 : f32
}
@@ -333,7 +333,7 @@ func @log1p_scalar(%arg0: f32) -> f32 {
// CHECK: %[[VAL_79:.*]] = arith.select
// CHECK: return %[[VAL_79]] : vector<8xf32>
// CHECK: }
-func @log1p_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
+func.func @log1p_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
%0 = math.log1p %arg0 : vector<8xf32>
return %0 : vector<8xf32>
}
@@ -376,7 +376,7 @@ func @log1p_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
// CHECK: %[[VAL_33:.*]] = arith.select %[[VAL_20]], %[[VAL_18]], %[[VAL_32]] : f32
// CHECK: return %[[VAL_33]] : f32
// CHECK: }
-func @tanh_scalar(%arg0: f32) -> f32 {
+func.func @tanh_scalar(%arg0: f32) -> f32 {
%0 = math.tanh %arg0 : f32
return %0 : f32
}
@@ -389,7 +389,7 @@ func @tanh_scalar(%arg0: f32) -> f32 {
// CHECK: %[[VAL_33:.*]] = arith.select
// CHECK: return %[[VAL_33]] : vector<8xf32>
// CHECK: }
-func @tanh_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
+func.func @tanh_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
%0 = math.tanh %arg0 : vector<8xf32>
return %0 : vector<8xf32>
}
@@ -399,7 +399,7 @@ func @tanh_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
// AVX2-LABEL: func @rsqrt_scalar
// CHECK: math.rsqrt
// AVX2: math.rsqrt
-func @rsqrt_scalar(%arg0: f32) -> f32 {
+func.func @rsqrt_scalar(%arg0: f32) -> f32 {
%0 = math.rsqrt %arg0 : f32
return %0 : f32
}
@@ -423,7 +423,7 @@ func @rsqrt_scalar(%arg0: f32) -> f32 {
// AVX2: %[[VAL_13:.*]] = arith.select %[[VAL_8]], %[[VAL_9]], %[[VAL_12]] : vector<8xi1>, vector<8xf32>
// AVX2: return %[[VAL_13]] : vector<8xf32>
// AVX2: }
-func @rsqrt_vector_8xf32(%arg0: vector<8xf32>) -> vector<8xf32> {
+func.func @rsqrt_vector_8xf32(%arg0: vector<8xf32>) -> vector<8xf32> {
%0 = math.rsqrt %arg0 : vector<8xf32>
return %0 : vector<8xf32>
}
@@ -434,7 +434,7 @@ func @rsqrt_vector_8xf32(%arg0: vector<8xf32>) -> vector<8xf32> {
// CHECK: math.rsqrt
// AVX2-LABEL: func @rsqrt_vector_5xf32
// AVX2: math.rsqrt
-func @rsqrt_vector_5xf32(%arg0: vector<5xf32>) -> vector<5xf32> {
+func.func @rsqrt_vector_5xf32(%arg0: vector<5xf32>) -> vector<5xf32> {
%0 = math.rsqrt %arg0 : vector<5xf32>
return %0 : vector<5xf32>
}
@@ -456,7 +456,7 @@ func @rsqrt_vector_5xf32(%arg0: vector<5xf32>) -> vector<5xf32> {
// AVX2: %[[RESULT0:.*]] = vector.insert %[[RSQRT0]], %[[INIT]] [0]
// AVX2: %[[RESULT1:.*]] = vector.insert %[[RSQRT1]], %[[RESULT0]] [1]
// AVX2: %[[RSQRT:.*]] = vector.shape_cast %[[RESULT1]] : vector<2x8xf32> to vector<16xf32>
-func @rsqrt_vector_16xf32(%arg0: vector<16xf32>) -> vector<16xf32> {
+func.func @rsqrt_vector_16xf32(%arg0: vector<16xf32>) -> vector<16xf32> {
%0 = math.rsqrt %arg0 : vector<16xf32>
return %0 : vector<16xf32>
}
@@ -477,7 +477,7 @@ func @rsqrt_vector_16xf32(%arg0: vector<16xf32>) -> vector<16xf32> {
// AVX2: %[[RESULT0:.*]] = vector.insert %[[RSQRT0]], %[[INIT]] [0]
// AVX2: %[[RESULT1:.*]] = vector.insert %[[RSQRT1]], %[[RESULT0]] [1]
// AVX2-NOT: vector.shape_cast
-func @rsqrt_vector_2x8xf32(%arg0: vector<2x8xf32>) -> vector<2x8xf32> {
+func.func @rsqrt_vector_2x8xf32(%arg0: vector<2x8xf32>) -> vector<2x8xf32> {
%0 = math.rsqrt %arg0 : vector<2x8xf32>
return %0 : vector<2x8xf32>
}
@@ -505,7 +505,7 @@ func @rsqrt_vector_2x8xf32(%arg0: vector<2x8xf32>) -> vector<2x8xf32> {
// AVX2: %[[RESULT2:.*]] = vector.insert %[[RSQRT10]], %[[RESULT1]] [1, 0]
// AVX2: %[[RESULT3:.*]] = vector.insert %[[RSQRT11]], %[[RESULT2]] [1, 1]
// AVX2: %[[RSQRT:.*]] = vector.shape_cast %[[RESULT3]] : vector<2x2x8xf32> to vector<2x16xf32>
-func @rsqrt_vector_2x16xf32(%arg0: vector<2x16xf32>) -> vector<2x16xf32> {
+func.func @rsqrt_vector_2x16xf32(%arg0: vector<2x16xf32>) -> vector<2x16xf32> {
%0 = math.rsqrt %arg0 : vector<2x16xf32>
return %0 : vector<2x16xf32>
}
@@ -529,7 +529,7 @@ func @rsqrt_vector_2x16xf32(%arg0: vector<2x16xf32>) -> vector<2x16xf32> {
// CHECK-DAG: %[[EST:.+]] = arith.select %[[CMP]], %[[P3]], %[[SUB]]
// CHECK-DAG: %[[RES:.+]] = math.copysign %[[EST]], %arg0
// CHECK: return %[[RES]]
-func @atan_scalar(%arg0: f32) -> f32 {
+func.func @atan_scalar(%arg0: f32) -> f32 {
%0 = math.atan %arg0 : f32
return %0 : f32
}
@@ -589,7 +589,7 @@ func @atan_scalar(%arg0: f32) -> f32 {
// CHECK: %[[RET:.+]] = arith.truncf %[[EDGE3]]
// CHECK: return %[[RET]]
-func @atan2_scalar(%arg0: f16, %arg1: f16) -> f16 {
+func.func @atan2_scalar(%arg0: f16, %arg1: f16) -> f16 {
%0 = math.atan2 %arg0, %arg1 : f16
return %0 : f16
}
diff --git a/mlir/test/Dialect/MemRef/canonicalize.mlir b/mlir/test/Dialect/MemRef/canonicalize.mlir
index 1a01460a24dc9..b3a9b7e31a309 100644
--- a/mlir/test/Dialect/MemRef/canonicalize.mlir
+++ b/mlir/test/Dialect/MemRef/canonicalize.mlir
@@ -5,7 +5,7 @@
// CHECK: %[[S:.+]] = memref.subview %[[ARG0]][0, 1, 0, 0] [1, 1, 16, 32] [1, 1, 1, 1] : memref<4x6x16x32xi8> to memref<16x32xi8, #{{.*}}>
// CHECK: %[[M:.+]] = memref.cast %[[S]] : memref<16x32xi8, #{{.*}}> to memref<16x32xi8, #{{.*}}>
// CHECK: return %[[M]] : memref<16x32xi8, #{{.*}}>
-func @subview_of_size_memcast(%arg : memref<4x6x16x32xi8>) ->
+func.func @subview_of_size_memcast(%arg : memref<4x6x16x32xi8>) ->
memref<16x32xi8, affine_map<(d0, d1)[s0] -> (d0 * 32 + d1 + s0)>>{
%0 = memref.cast %arg : memref<4x6x16x32xi8> to memref<?x?x16x32xi8>
%1 = memref.subview %0[0, 1, 0, 0] [1, 1, 16, 32] [1, 1, 1, 1] :
@@ -29,7 +29,7 @@ func @subview_of_size_memcast(%arg : memref<4x6x16x32xi8>) ->
// CHECK: %[[M:.+]] = memref.cast %[[S]]
// CHECK-SAME: to memref<1x4xf32, #[[MAP1]]>
// CHECK: return %[[M]]
-func @subview_of_strides_memcast(%arg : memref<1x1x?xf32, #map0>) -> memref<1x4xf32, #map2> {
+func.func @subview_of_strides_memcast(%arg : memref<1x1x?xf32, #map0>) -> memref<1x4xf32, #map2> {
%0 = memref.cast %arg : memref<1x1x?xf32, #map0> to memref<1x1x?xf32, #map1>
%1 = memref.subview %0[0, 0, 0] [1, 1, 4] [1, 1, 1] : memref<1x1x?xf32, #map1> to memref<1x4xf32, #map2>
return %1 : memref<1x4xf32, #map2>
@@ -41,7 +41,7 @@ func @subview_of_strides_memcast(%arg : memref<1x1x?xf32, #map0>) -> memref<1x4x
// CHECK-SAME: %[[ARG0:.+]]: memref<4x6x16x32xi8>
// CHECK-NOT: memref.subview
// CHECK: return %[[ARG0]] : memref<4x6x16x32xi8>
-func @subview_of_static_full_size(%arg0 : memref<4x6x16x32xi8>) -> memref<4x6x16x32xi8> {
+func.func @subview_of_static_full_size(%arg0 : memref<4x6x16x32xi8>) -> memref<4x6x16x32xi8> {
%0 = memref.subview %arg0[0, 0, 0, 0] [4, 6, 16, 32] [1, 1, 1, 1] : memref<4x6x16x32xi8> to memref<4x6x16x32xi8>
return %0 : memref<4x6x16x32xi8>
}
@@ -49,7 +49,7 @@ func @subview_of_static_full_size(%arg0 : memref<4x6x16x32xi8>) -> memref<4x6x16
// -----
#map0 = affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3)>
-func @subview_canonicalize(%arg0 : memref<?x?x?xf32>, %arg1 : index,
+func.func @subview_canonicalize(%arg0 : memref<?x?x?xf32>, %arg1 : index,
%arg2 : index) -> memref<?x?x?xf32, #map0>
{
%c0 = arith.constant 0 : index
@@ -69,7 +69,7 @@ func @subview_canonicalize(%arg0 : memref<?x?x?xf32>, %arg1 : index,
// -----
#map0 = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-func @rank_reducing_subview_canonicalize(%arg0 : memref<?x?x?xf32>, %arg1 : index,
+func.func @rank_reducing_subview_canonicalize(%arg0 : memref<?x?x?xf32>, %arg1 : index,
%arg2 : index) -> memref<?x?xf32, #map0>
{
%c0 = arith.constant 0 : index
@@ -88,7 +88,7 @@ func @rank_reducing_subview_canonicalize(%arg0 : memref<?x?x?xf32>, %arg1 : inde
// -----
-func @multiple_reducing_dims(%arg0 : memref<1x384x384xf32>,
+func.func @multiple_reducing_dims(%arg0 : memref<1x384x384xf32>,
%arg1 : index, %arg2 : index, %arg3 : index) -> memref<?xf32, offset: ?, strides: [1]>
{
%c1 = arith.constant 1 : index
@@ -106,7 +106,7 @@ func @multiple_reducing_dims(%arg0 : memref<1x384x384xf32>,
// -----
-func @multiple_reducing_dims_dynamic(%arg0 : memref<?x?x?xf32>,
+func.func @multiple_reducing_dims_dynamic(%arg0 : memref<?x?x?xf32>,
%arg1 : index, %arg2 : index, %arg3 : index) -> memref<?xf32, offset: ?, strides: [1]>
{
%c1 = arith.constant 1 : index
@@ -124,7 +124,7 @@ func @multiple_reducing_dims_dynamic(%arg0 : memref<?x?x?xf32>,
// -----
-func @multiple_reducing_dims_all_dynamic(%arg0 : memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>,
+func.func @multiple_reducing_dims_all_dynamic(%arg0 : memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>,
%arg1 : index, %arg2 : index, %arg3 : index) -> memref<?xf32, offset: ?, strides: [?]>
{
%c1 = arith.constant 1 : index
@@ -149,7 +149,7 @@ func @multiple_reducing_dims_all_dynamic(%arg0 : memref<?x?x?xf32, offset: ?, st
// CHECK-SAME: %{{[a-z0-9A-Z_]+}}: memref<?xi8>
// CHECK-SAME: %[[SIZE:.[a-z0-9A-Z_]+]]: index
// CHECK: return %[[SIZE]] : index
-func @dim_of_sized_view(%arg : memref<?xi8>, %size: index) -> index {
+func.func @dim_of_sized_view(%arg : memref<?xi8>, %size: index) -> index {
%c0 = arith.constant 0 : index
%0 = memref.reinterpret_cast %arg to offset: [0], sizes: [%size], strides: [1] : memref<?xi8> to memref<?xi8>
%1 = memref.dim %0, %c0 : memref<?xi8>
@@ -161,7 +161,7 @@ func @dim_of_sized_view(%arg : memref<?xi8>, %size: index) -> index {
// CHECK-LABEL: func @no_fold_of_store
// CHECK: %[[cst:.+]] = memref.cast %arg
// CHECK: memref.store %[[cst]]
-func @no_fold_of_store(%arg : memref<32xi8>, %holder: memref<memref<?xi8>>) {
+func.func @no_fold_of_store(%arg : memref<32xi8>, %holder: memref<memref<?xi8>>) {
%0 = memref.cast %arg : memref<32xi8> to memref<?xi8>
memref.store %0, %holder[] : memref<memref<?xi8>>
return
@@ -173,7 +173,7 @@ func @no_fold_of_store(%arg : memref<32xi8>, %holder: memref<memref<?xi8>>) {
// CHECK-LABEL: func @dim_of_alloca(
// CHECK-SAME: %[[SIZE:[0-9a-z]+]]: index
// CHECK-NEXT: return %[[SIZE]] : index
-func @dim_of_alloca(%size: index) -> index {
+func.func @dim_of_alloca(%size: index) -> index {
%0 = memref.alloca(%size) : memref<?xindex>
%c0 = arith.constant 0 : index
%1 = memref.dim %0, %c0 : memref<?xindex>
@@ -187,7 +187,7 @@ func @dim_of_alloca(%size: index) -> index {
// CHECK-SAME: %[[MEM:[0-9a-z]+]]: memref<*xf32>
// CHECK-NEXT: %[[RANK:.*]] = memref.rank %[[MEM]] : memref<*xf32>
// CHECK-NEXT: return %[[RANK]] : index
-func @dim_of_alloca_with_dynamic_size(%arg0: memref<*xf32>) -> index {
+func.func @dim_of_alloca_with_dynamic_size(%arg0: memref<*xf32>) -> index {
%0 = memref.rank %arg0 : memref<*xf32>
%1 = memref.alloca(%0) : memref<?xindex>
%c0 = arith.constant 0 : index
@@ -206,7 +206,7 @@ func @dim_of_alloca_with_dynamic_size(%arg0: memref<*xf32>) -> index {
// CHECK-NEXT: memref.store
// CHECK-NOT: memref.dim
// CHECK: return %[[DIM]] : index
-func @dim_of_memref_reshape(%arg0: memref<*xf32>, %arg1: memref<?xindex>)
+func.func @dim_of_memref_reshape(%arg0: memref<*xf32>, %arg1: memref<?xindex>)
-> index {
%c3 = arith.constant 3 : index
%0 = memref.reshape %arg0(%arg1)
@@ -228,7 +228,7 @@ func @dim_of_memref_reshape(%arg0: memref<*xf32>, %arg1: memref<?xindex>)
// CHECK-NEXT: %[[CAST:.*]] = arith.index_cast %[[DIM]]
// CHECK-NOT: memref.dim
// CHECK: return %[[CAST]] : index
-func @dim_of_memref_reshape_i32(%arg0: memref<*xf32>, %arg1: memref<?xi32>)
+func.func @dim_of_memref_reshape_i32(%arg0: memref<*xf32>, %arg1: memref<?xi32>)
-> index {
%c3 = arith.constant 3 : index
%0 = memref.reshape %arg0(%arg1)
@@ -240,7 +240,7 @@ func @dim_of_memref_reshape_i32(%arg0: memref<*xf32>, %arg1: memref<?xi32>)
// -----
// CHECK-LABEL: func @alloc_const_fold
-func @alloc_const_fold() -> memref<?xf32> {
+func.func @alloc_const_fold() -> memref<?xf32> {
// CHECK-NEXT: %0 = memref.alloc() : memref<4xf32>
%c4 = arith.constant 4 : index
%a = memref.alloc(%c4) : memref<?xf32>
@@ -253,7 +253,7 @@ func @alloc_const_fold() -> memref<?xf32> {
// -----
// CHECK-LABEL: func @alloc_alignment_const_fold
-func @alloc_alignment_const_fold() -> memref<?xf32> {
+func.func @alloc_alignment_const_fold() -> memref<?xf32> {
// CHECK-NEXT: %0 = memref.alloc() {alignment = 4096 : i64} : memref<4xf32>
%c4 = arith.constant 4 : index
%a = memref.alloc(%c4) {alignment = 4096 : i64} : memref<?xf32>
@@ -270,7 +270,7 @@ func @alloc_alignment_const_fold() -> memref<?xf32> {
// CHECK: %[[mem1:.+]] = memref.alloc({{.*}})[%[[c1]], %[[c1]]] : memref<?xi32, #map>
// CHECK: return %[[mem1]] : memref<?xi32, #map>
#map0 = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
-func @alloc_const_fold_with_symbols1(%arg0 : index) -> memref<?xi32, #map0> {
+func.func @alloc_const_fold_with_symbols1(%arg0 : index) -> memref<?xi32, #map0> {
%c1 = arith.constant 1 : index
%0 = memref.alloc(%arg0)[%c1, %c1] : memref<?xi32, #map0>
return %0 : memref<?xi32, #map0>
@@ -284,7 +284,7 @@ func @alloc_const_fold_with_symbols1(%arg0 : index) -> memref<?xi32, #map0> {
// CHECK: %[[mem2:.+]] = memref.cast %[[mem1]] : memref<1xi32, #map> to memref<?xi32, #map>
// CHECK: return %[[mem2]] : memref<?xi32, #map>
#map0 = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
-func @alloc_const_fold_with_symbols2() -> memref<?xi32, #map0> {
+func.func @alloc_const_fold_with_symbols2() -> memref<?xi32, #map0> {
%c1 = arith.constant 1 : index
%0 = memref.alloc(%c1)[%c1, %c1] : memref<?xi32, #map0>
return %0 : memref<?xi32, #map0>
@@ -294,7 +294,7 @@ func @alloc_const_fold_with_symbols2() -> memref<?xi32, #map0> {
// CHECK-LABEL: func @allocator
// CHECK: %[[alloc:.+]] = memref.alloc
// CHECK: memref.store %[[alloc:.+]], %arg0
-func @allocator(%arg0 : memref<memref<?xi32>>, %arg1 : index) {
+func.func @allocator(%arg0 : memref<memref<?xi32>>, %arg1 : index) {
%0 = memref.alloc(%arg1) : memref<?xi32>
memref.store %0, %arg0[] : memref<memref<?xi32>>
return
@@ -302,7 +302,7 @@ func @allocator(%arg0 : memref<memref<?xi32>>, %arg1 : index) {
// -----
-func @compose_collapse_of_collapse_zero_dim(%arg0 : memref<1x1x1xf32>)
+func.func @compose_collapse_of_collapse_zero_dim(%arg0 : memref<1x1x1xf32>)
-> memref<f32> {
%0 = memref.collapse_shape %arg0 [[0, 1, 2]]
: memref<1x1x1xf32> into memref<1xf32>
@@ -315,7 +315,7 @@ func @compose_collapse_of_collapse_zero_dim(%arg0 : memref<1x1x1xf32>)
// -----
-func @compose_collapse_of_collapse(%arg0 : memref<?x?x?x?x?xf32>)
+func.func @compose_collapse_of_collapse(%arg0 : memref<?x?x?x?x?xf32>)
-> memref<?x?xf32> {
%0 = memref.collapse_shape %arg0 [[0, 1], [2], [3, 4]]
: memref<?x?x?x?x?xf32> into memref<?x?x?xf32>
@@ -329,7 +329,7 @@ func @compose_collapse_of_collapse(%arg0 : memref<?x?x?x?x?xf32>)
// -----
-func @do_not_compose_collapse_of_expand_non_identity_layout(
+func.func @do_not_compose_collapse_of_expand_non_identity_layout(
%arg0: memref<?x?xf32, offset : 0, strides : [?, 1]>)
-> memref<?xf32> {
%1 = memref.expand_shape %arg0 [[0, 1], [2]] :
@@ -346,7 +346,7 @@ func @do_not_compose_collapse_of_expand_non_identity_layout(
// -----
-func @compose_expand_of_expand(%arg0 : memref<?x?xf32>)
+func.func @compose_expand_of_expand(%arg0 : memref<?x?xf32>)
-> memref<?x6x4x5x?xf32> {
%0 = memref.expand_shape %arg0 [[0, 1], [2]]
: memref<?x?xf32> into memref<?x4x?xf32>
@@ -360,7 +360,7 @@ func @compose_expand_of_expand(%arg0 : memref<?x?xf32>)
// -----
-func @compose_expand_of_expand_of_zero_dim(%arg0 : memref<f32>)
+func.func @compose_expand_of_expand_of_zero_dim(%arg0 : memref<f32>)
-> memref<1x1x1xf32> {
%0 = memref.expand_shape %arg0 [] : memref<f32> into memref<1xf32>
%1 = memref.expand_shape %0 [[0, 1, 2]]
@@ -373,7 +373,7 @@ func @compose_expand_of_expand_of_zero_dim(%arg0 : memref<f32>)
// -----
-func @fold_collapse_of_expand(%arg0 : memref<12x4xf32>) -> memref<12x4xf32> {
+func.func @fold_collapse_of_expand(%arg0 : memref<12x4xf32>) -> memref<12x4xf32> {
%0 = memref.expand_shape %arg0 [[0, 1], [2]]
: memref<12x4xf32> into memref<3x4x4xf32>
%1 = memref.collapse_shape %0 [[0, 1], [2]]
@@ -385,7 +385,7 @@ func @fold_collapse_of_expand(%arg0 : memref<12x4xf32>) -> memref<12x4xf32> {
// -----
-func @fold_collapse_collapse_of_expand(%arg0 : memref<?x?xf32>)
+func.func @fold_collapse_collapse_of_expand(%arg0 : memref<?x?xf32>)
-> memref<?x?xf32> {
%0 = memref.expand_shape %arg0 [[0, 1], [2]]
: memref<?x?xf32> into memref<?x4x?xf32>
@@ -398,7 +398,7 @@ func @fold_collapse_collapse_of_expand(%arg0 : memref<?x?xf32>)
// -----
-func @fold_memref_expand_cast(%arg0 : memref<?x?xf32>) -> memref<2x4x4xf32> {
+func.func @fold_memref_expand_cast(%arg0 : memref<?x?xf32>) -> memref<2x4x4xf32> {
%0 = memref.cast %arg0 : memref<?x?xf32> to memref<8x4xf32>
%1 = memref.expand_shape %0 [[0, 1], [2]]
: memref<8x4xf32> into memref<2x4x4xf32>
@@ -418,7 +418,7 @@ func @fold_memref_expand_cast(%arg0 : memref<?x?xf32>) -> memref<2x4x4xf32> {
// CHECK-SAME: memref<?x512xf32> to memref<?x?xf32>
// CHECK: return %[[DYNAMIC]] : memref<?x?xf32>
// CHECK: }
-func @collapse_after_memref_cast_type_change(%arg0 : memref<?x512x1x1xf32>) -> memref<?x?xf32> {
+func.func @collapse_after_memref_cast_type_change(%arg0 : memref<?x512x1x1xf32>) -> memref<?x?xf32> {
%dynamic = memref.cast %arg0: memref<?x512x1x1xf32> to memref<?x?x?x?xf32>
%collapsed = memref.collapse_shape %dynamic [[0], [1, 2, 3]] : memref<?x?x?x?xf32> into memref<?x?xf32>
return %collapsed : memref<?x?xf32>
@@ -431,7 +431,7 @@ func @collapse_after_memref_cast_type_change(%arg0 : memref<?x512x1x1xf32>) -> m
// CHECK: %[[COLLAPSED:.*]] = memref.collapse_shape %[[INPUT]]
// CHECK_SAME: {{\[\[}}0], [1, 2, 3]] : memref<?x512x1x?xf32> into memref<?x?xf32>
// CHECK: return %[[COLLAPSED]] : memref<?x?xf32>
-func @collapse_after_memref_cast(%arg0 : memref<?x512x1x?xf32>) -> memref<?x?xf32> {
+func.func @collapse_after_memref_cast(%arg0 : memref<?x512x1x?xf32>) -> memref<?x?xf32> {
%dynamic = memref.cast %arg0: memref<?x512x1x?xf32> to memref<?x?x?x?xf32>
%collapsed = memref.collapse_shape %dynamic [[0], [1, 2, 3]] : memref<?x?x?x?xf32> into memref<?x?xf32>
return %collapsed : memref<?x?xf32>
@@ -446,7 +446,7 @@ func @collapse_after_memref_cast(%arg0 : memref<?x512x1x?xf32>) -> memref<?x?xf3
// CHECK: %[[DYNAMIC:.*]] = memref.cast %[[COLLAPSED]] :
// CHECK-SAME: memref<1x?xi64> to memref<?x?xi64>
// CHECK: return %[[DYNAMIC]] : memref<?x?xi64>
-func @collapse_after_memref_cast_type_change_dynamic(%arg0: memref<1x1x1x?xi64>) -> memref<?x?xi64> {
+func.func @collapse_after_memref_cast_type_change_dynamic(%arg0: memref<1x1x1x?xi64>) -> memref<?x?xi64> {
%casted = memref.cast %arg0 : memref<1x1x1x?xi64> to memref<1x1x?x?xi64>
%collapsed = memref.collapse_shape %casted [[0, 1, 2], [3]] : memref<1x1x?x?xi64> into memref<?x?xi64>
return %collapsed : memref<?x?xi64>
@@ -454,7 +454,7 @@ func @collapse_after_memref_cast_type_change_dynamic(%arg0: memref<1x1x1x?xi64>)
// -----
-func @reduced_memref(%arg0: memref<2x5x7x1xf32>, %arg1 :index)
+func.func @reduced_memref(%arg0: memref<2x5x7x1xf32>, %arg1 :index)
-> memref<1x4x1xf32, affine_map<(d0, d1, d2)[s0] -> (d0 * 35 + s0 + d1 * 7 + d2)>> {
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
@@ -477,7 +477,7 @@ func @reduced_memref(%arg0: memref<2x5x7x1xf32>, %arg1 :index)
// -----
// CHECK-LABEL: func @fold_rank_memref
-func @fold_rank_memref(%arg0 : memref<?x?xf32>) -> (index) {
+func.func @fold_rank_memref(%arg0 : memref<?x?xf32>) -> (index) {
// Fold a rank into a constant
// CHECK-NEXT: [[C2:%.+]] = arith.constant 2 : index
%rank_0 = memref.rank %arg0 : memref<?x?xf32>
@@ -489,7 +489,7 @@ func @fold_rank_memref(%arg0 : memref<?x?xf32>) -> (index) {
// -----
#map = affine_map<(d0, d1) -> (d0 * 42 + d1)>
-func @fold_no_op_subview(%arg0 : memref<20x42xf32>) -> memref<20x42xf32, #map> {
+func.func @fold_no_op_subview(%arg0 : memref<20x42xf32>) -> memref<20x42xf32, #map> {
%0 = memref.subview %arg0[0, 0] [20, 42] [1, 1] : memref<20x42xf32> to memref<20x42xf32, #map>
return %0 : memref<20x42xf32, #map>
}
@@ -501,7 +501,7 @@ func @fold_no_op_subview(%arg0 : memref<20x42xf32>) -> memref<20x42xf32, #map> {
// -----
#map = affine_map<(d0, d1) -> (d0 * 42 + d1 + 1)>
-func @no_fold_subview_with_non_zero_offset(%arg0 : memref<20x42xf32>) -> memref<20x42xf32, #map> {
+func.func @no_fold_subview_with_non_zero_offset(%arg0 : memref<20x42xf32>) -> memref<20x42xf32, #map> {
%0 = memref.subview %arg0[0, 1] [20, 42] [1, 1] : memref<20x42xf32> to memref<20x42xf32, #map>
return %0 : memref<20x42xf32, #map>
}
@@ -512,7 +512,7 @@ func @no_fold_subview_with_non_zero_offset(%arg0 : memref<20x42xf32>) -> memref<
// -----
#map = affine_map<(d0, d1) -> (d0 * 42 + d1 * 2)>
-func @no_fold_subview_with_non_unit_stride(%arg0 : memref<20x42xf32>) -> memref<20x42xf32, #map> {
+func.func @no_fold_subview_with_non_unit_stride(%arg0 : memref<20x42xf32>) -> memref<20x42xf32, #map> {
%0 = memref.subview %arg0[0, 0] [20, 42] [1, 2] : memref<20x42xf32> to memref<20x42xf32, #map>
return %0 : memref<20x42xf32, #map>
}
@@ -523,7 +523,7 @@ func @no_fold_subview_with_non_unit_stride(%arg0 : memref<20x42xf32>) -> memref<
// -----
#map = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + d1 + s0)>
-func @no_fold_dynamic_no_op_subview(%arg0 : memref<?x?xf32>) -> memref<?x?xf32, #map> {
+func.func @no_fold_dynamic_no_op_subview(%arg0 : memref<?x?xf32>) -> memref<?x?xf32, #map> {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%0 = memref.dim %arg0, %c0 : memref<?x?xf32>
@@ -537,7 +537,7 @@ func @no_fold_dynamic_no_op_subview(%arg0 : memref<?x?xf32>) -> memref<?x?xf32,
// -----
-func @atomicrmw_cast_fold(%arg0 : f32, %arg1 : memref<4xf32>, %c : index) {
+func.func @atomicrmw_cast_fold(%arg0 : f32, %arg1 : memref<4xf32>, %c : index) {
%v = memref.cast %arg1 : memref<4xf32> to memref<?xf32>
%a = memref.atomic_rmw addf %arg0, %v[%c] : (f32, memref<?xf32>) -> f32
return
@@ -549,7 +549,7 @@ func @atomicrmw_cast_fold(%arg0 : f32, %arg1 : memref<4xf32>, %c : index) {
// -----
#map = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
-func @copy_of_cast(%m1: memref<?xf32>, %m2: memref<*xf32>) {
+func.func @copy_of_cast(%m1: memref<?xf32>, %m2: memref<*xf32>) {
%casted1 = memref.cast %m1 : memref<?xf32> to memref<?xf32, #map>
%casted2 = memref.cast %m2 : memref<*xf32> to memref<?xf32, #map>
memref.copy %casted1, %casted2 : memref<?xf32, #map> to memref<?xf32, #map>
@@ -563,7 +563,7 @@ func @copy_of_cast(%m1: memref<?xf32>, %m2: memref<*xf32>) {
// -----
-func @self_copy(%m1: memref<?xf32>) {
+func.func @self_copy(%m1: memref<?xf32>) {
memref.copy %m1, %m1 : memref<?xf32> to memref<?xf32>
return
}
@@ -573,7 +573,7 @@ func @self_copy(%m1: memref<?xf32>) {
// -----
-func @scopeMerge() {
+func.func @scopeMerge() {
memref.alloca_scope {
%cnt = "test.count"() : () -> index
%a = memref.alloca(%cnt) : memref<?xi64>
@@ -588,7 +588,7 @@ func @scopeMerge() {
// CHECK: "test.use"(%[[alloc]]) : (memref<?xi64>) -> ()
// CHECK: return
-func @scopeMerge2() {
+func.func @scopeMerge2() {
"test.region"() ({
memref.alloca_scope {
%cnt = "test.count"() : () -> index
@@ -612,7 +612,7 @@ func @scopeMerge2() {
// CHECK: return
// CHECK: }
-func @scopeMerge3() {
+func.func @scopeMerge3() {
%cnt = "test.count"() : () -> index
"test.region"() ({
memref.alloca_scope {
@@ -636,7 +636,7 @@ func @scopeMerge3() {
// CHECK: return
// CHECK: }
-func @scopeMerge4() {
+func.func @scopeMerge4() {
%cnt = "test.count"() : () -> index
"test.region"() ({
memref.alloca_scope {
@@ -662,7 +662,7 @@ func @scopeMerge4() {
// CHECK: return
// CHECK: }
-func @scopeMerge5() {
+func.func @scopeMerge5() {
"test.region"() ({
memref.alloca_scope {
affine.parallel (%arg) = (0) to (64) {
@@ -688,7 +688,7 @@ func @scopeMerge5() {
// CHECK: return
// CHECK: }
-func @scopeInline(%arg : memref<index>) {
+func.func @scopeInline(%arg : memref<index>) {
%cnt = "test.count"() : () -> index
"test.region"() ({
memref.alloca_scope {
@@ -708,7 +708,7 @@ func @scopeInline(%arg : memref<index>) {
// CHECK-SAME: (%[[ARG:.*]]: memref<?xi8>, %[[SIZE1:.*]]: index, %[[SIZE2:.*]]: index)
// CHECK: %[[RES:.*]] = memref.reinterpret_cast %[[ARG]] to offset: [0], sizes: [%[[SIZE2]]], strides: [1]
// CHECK: return %[[RES]]
-func @reinterpret_of_reinterpret(%arg : memref<?xi8>, %size1: index, %size2: index) -> memref<?xi8> {
+func.func @reinterpret_of_reinterpret(%arg : memref<?xi8>, %size1: index, %size2: index) -> memref<?xi8> {
%0 = memref.reinterpret_cast %arg to offset: [0], sizes: [%size1], strides: [1] : memref<?xi8> to memref<?xi8>
%1 = memref.reinterpret_cast %0 to offset: [0], sizes: [%size2], strides: [1] : memref<?xi8> to memref<?xi8>
return %1 : memref<?xi8>
@@ -720,7 +720,7 @@ func @reinterpret_of_reinterpret(%arg : memref<?xi8>, %size1: index, %size2: ind
// CHECK-SAME: (%[[ARG:.*]]: memref<?xi8>, %[[SIZE:.*]]: index)
// CHECK: %[[RES:.*]] = memref.reinterpret_cast %[[ARG]] to offset: [0], sizes: [%[[SIZE]]], strides: [1]
// CHECK: return %[[RES]]
-func @reinterpret_of_cast(%arg : memref<?xi8>, %size: index) -> memref<?xi8> {
+func.func @reinterpret_of_cast(%arg : memref<?xi8>, %size: index) -> memref<?xi8> {
%0 = memref.cast %arg : memref<?xi8> to memref<5xi8>
%1 = memref.reinterpret_cast %0 to offset: [0], sizes: [%size], strides: [1] : memref<5xi8> to memref<?xi8>
return %1 : memref<?xi8>
@@ -732,7 +732,7 @@ func @reinterpret_of_cast(%arg : memref<?xi8>, %size: index) -> memref<?xi8> {
// CHECK-SAME: (%[[ARG:.*]]: memref<?xi8>, %[[SIZE1:.*]]: index, %[[SIZE2:.*]]: index)
// CHECK: %[[RES:.*]] = memref.reinterpret_cast %[[ARG]] to offset: [0], sizes: [%[[SIZE2]]], strides: [1]
// CHECK: return %[[RES]]
-func @reinterpret_of_subview(%arg : memref<?xi8>, %size1: index, %size2: index) -> memref<?xi8> {
+func.func @reinterpret_of_subview(%arg : memref<?xi8>, %size1: index, %size2: index) -> memref<?xi8> {
%0 = memref.subview %arg[0] [%size1] [1] : memref<?xi8> to memref<?xi8>
%1 = memref.reinterpret_cast %0 to offset: [0], sizes: [%size2], strides: [1] : memref<?xi8> to memref<?xi8>
return %1 : memref<?xi8>
@@ -740,7 +740,7 @@ func @reinterpret_of_subview(%arg : memref<?xi8>, %size1: index, %size2: index)
// -----
-func @canonicalize_rank_reduced_subview(%arg0 : memref<8x?xf32>,
+func.func @canonicalize_rank_reduced_subview(%arg0 : memref<8x?xf32>,
%arg1 : index) -> memref<?xf32, offset : ?, strides : [?]> {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
diff --git a/mlir/test/Dialect/MemRef/expand-ops.mlir b/mlir/test/Dialect/MemRef/expand-ops.mlir
index 261e95f881ba2..a0d8e52d6e7e2 100644
--- a/mlir/test/Dialect/MemRef/expand-ops.mlir
+++ b/mlir/test/Dialect/MemRef/expand-ops.mlir
@@ -2,7 +2,7 @@
// CHECK-LABEL: func @atomic_rmw_to_generic
// CHECK-SAME: ([[F:%.*]]: memref<10xf32>, [[f:%.*]]: f32, [[i:%.*]]: index)
-func @atomic_rmw_to_generic(%F: memref<10xf32>, %f: f32, %i: index) -> f32 {
+func.func @atomic_rmw_to_generic(%F: memref<10xf32>, %f: f32, %i: index) -> f32 {
%x = memref.atomic_rmw maxf %f, %F[%i] : (f32, memref<10xf32>) -> f32
return %x : f32
}
@@ -17,7 +17,7 @@ func @atomic_rmw_to_generic(%F: memref<10xf32>, %f: f32, %i: index) -> f32 {
// -----
// CHECK-LABEL: func @atomic_rmw_no_conversion
-func @atomic_rmw_no_conversion(%F: memref<10xf32>, %f: f32, %i: index) -> f32 {
+func.func @atomic_rmw_no_conversion(%F: memref<10xf32>, %f: f32, %i: index) -> f32 {
%x = memref.atomic_rmw addf %f, %F[%i] : (f32, memref<10xf32>) -> f32
return %x : f32
}
@@ -26,7 +26,7 @@ func @atomic_rmw_no_conversion(%F: memref<10xf32>, %f: f32, %i: index) -> f32 {
// -----
// CHECK-LABEL: func @memref_reshape(
-func @memref_reshape(%input: memref<*xf32>,
+func.func @memref_reshape(%input: memref<*xf32>,
%shape: memref<3xi32>) -> memref<?x?x8xf32> {
%result = memref.reshape %input(%shape)
: (memref<*xf32>, memref<3xi32>) -> memref<?x?x8xf32>
diff --git a/mlir/test/Dialect/MemRef/fold-subview-ops.mlir b/mlir/test/Dialect/MemRef/fold-subview-ops.mlir
index 6f8e46fab4442..28138e93aec9e 100644
--- a/mlir/test/Dialect/MemRef/fold-subview-ops.mlir
+++ b/mlir/test/Dialect/MemRef/fold-subview-ops.mlir
@@ -1,6 +1,6 @@
// RUN: mlir-opt -fold-memref-subview-ops -split-input-file %s -o - | FileCheck %s
-func @fold_static_stride_subview_with_load(%arg0 : memref<12x32xf32>, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : index) -> f32 {
+func.func @fold_static_stride_subview_with_load(%arg0 : memref<12x32xf32>, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : index) -> f32 {
%0 = memref.subview %arg0[%arg1, %arg2][4, 4][2, 3] : memref<12x32xf32> to memref<4x4xf32, offset:?, strides: [64, 3]>
%1 = memref.load %0[%arg3, %arg4] : memref<4x4xf32, offset:?, strides: [64, 3]>
return %1 : f32
@@ -19,7 +19,7 @@ func @fold_static_stride_subview_with_load(%arg0 : memref<12x32xf32>, %arg1 : in
// -----
-func @fold_dynamic_stride_subview_with_load(%arg0 : memref<12x32xf32>, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : index, %arg5 : index, %arg6 : index) -> f32 {
+func.func @fold_dynamic_stride_subview_with_load(%arg0 : memref<12x32xf32>, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : index, %arg5 : index, %arg6 : index) -> f32 {
%0 = memref.subview %arg0[%arg1, %arg2][4, 4][%arg5, %arg6] :
memref<12x32xf32> to memref<4x4xf32, offset:?, strides: [?, ?]>
%1 = memref.load %0[%arg3, %arg4] : memref<4x4xf32, offset:?, strides: [?, ?]>
@@ -40,7 +40,7 @@ func @fold_dynamic_stride_subview_with_load(%arg0 : memref<12x32xf32>, %arg1 : i
// -----
-func @fold_static_stride_subview_with_store(%arg0 : memref<12x32xf32>, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : index, %arg5 : f32) {
+func.func @fold_static_stride_subview_with_store(%arg0 : memref<12x32xf32>, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : index, %arg5 : f32) {
%0 = memref.subview %arg0[%arg1, %arg2][4, 4][2, 3] :
memref<12x32xf32> to memref<4x4xf32, offset:?, strides: [64, 3]>
memref.store %arg5, %0[%arg3, %arg4] : memref<4x4xf32, offset:?, strides: [64, 3]>
@@ -60,7 +60,7 @@ func @fold_static_stride_subview_with_store(%arg0 : memref<12x32xf32>, %arg1 : i
// -----
-func @fold_dynamic_stride_subview_with_store(%arg0 : memref<12x32xf32>, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : index, %arg5 : index, %arg6 : index, %arg7 : f32) {
+func.func @fold_dynamic_stride_subview_with_store(%arg0 : memref<12x32xf32>, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : index, %arg5 : index, %arg6 : index, %arg7 : f32) {
%0 = memref.subview %arg0[%arg1, %arg2][4, 4][%arg5, %arg6] :
memref<12x32xf32> to memref<4x4xf32, offset:?, strides: [?, ?]>
memref.store %arg7, %0[%arg3, %arg4] : memref<4x4xf32, offset:?, strides: [?, ?]>
@@ -81,7 +81,7 @@ func @fold_dynamic_stride_subview_with_store(%arg0 : memref<12x32xf32>, %arg1 :
// -----
-func @fold_subview_with_transfer_read(%arg0 : memref<12x32xf32>, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : index, %arg5 : index, %arg6 : index) -> vector<4xf32> {
+func.func @fold_subview_with_transfer_read(%arg0 : memref<12x32xf32>, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : index, %arg5 : index, %arg6 : index) -> vector<4xf32> {
%f1 = arith.constant 1.0 : f32
%0 = memref.subview %arg0[%arg1, %arg2][4, 4][%arg5, %arg6] : memref<12x32xf32> to memref<4x4xf32, offset:?, strides: [?, ?]>
%1 = vector.transfer_read %0[%arg3, %arg4], %f1 {in_bounds = [true]} : memref<4x4xf32, offset:?, strides: [?, ?]>, vector<4xf32>
@@ -102,7 +102,7 @@ func @fold_subview_with_transfer_read(%arg0 : memref<12x32xf32>, %arg1 : index,
// -----
-func @fold_static_stride_subview_with_transfer_write(%arg0 : memref<12x32xf32>, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : index, %arg5: index, %arg6 : index, %arg7 : vector<4xf32>) {
+func.func @fold_static_stride_subview_with_transfer_write(%arg0 : memref<12x32xf32>, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : index, %arg5: index, %arg6 : index, %arg7 : vector<4xf32>) {
%0 = memref.subview %arg0[%arg1, %arg2][4, 4][%arg5, %arg6] :
memref<12x32xf32> to memref<4x4xf32, offset:?, strides: [?, ?]>
vector.transfer_write %arg7, %0[%arg3, %arg4] {in_bounds = [true]} : vector<4xf32>, memref<4x4xf32, offset:?, strides: [?, ?]>
@@ -123,7 +123,7 @@ func @fold_static_stride_subview_with_transfer_write(%arg0 : memref<12x32xf32>,
// -----
-func @fold_rank_reducing_subview_with_load
+func.func @fold_rank_reducing_subview_with_load
(%arg0 : memref<?x?x?x?x?x?xf32>, %arg1 : index, %arg2 : index,
%arg3 : index, %arg4 : index, %arg5 : index, %arg6 : index,
%arg7 : index, %arg8 : index, %arg9 : index, %arg10: index,
@@ -163,7 +163,7 @@ func @fold_rank_reducing_subview_with_load
// -----
-func @fold_vector_transfer_read_with_rank_reduced_subview(
+func.func @fold_vector_transfer_read_with_rank_reduced_subview(
%arg0 : memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>,
%arg1: index, %arg2 : index, %arg3 : index, %arg4: index, %arg5 : index,
%arg6 : index) -> vector<4xf32> {
@@ -192,7 +192,7 @@ func @fold_vector_transfer_read_with_rank_reduced_subview(
// -----
-func @fold_vector_transfer_write_with_rank_reduced_subview(
+func.func @fold_vector_transfer_write_with_rank_reduced_subview(
%arg0 : memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>,
%arg1 : vector<4xf32>, %arg2: index, %arg3 : index, %arg4 : index,
%arg5: index, %arg6 : index, %arg7 : index) {
@@ -222,7 +222,7 @@ func @fold_vector_transfer_write_with_rank_reduced_subview(
// -----
-func @fold_vector_transfer_write_with_inner_rank_reduced_subview(
+func.func @fold_vector_transfer_write_with_inner_rank_reduced_subview(
%arg0 : memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>,
%arg1 : vector<4xf32>, %arg2: index, %arg3 : index, %arg4 : index,
%arg5: index, %arg6 : index, %arg7 : index) {
@@ -259,7 +259,7 @@ func @fold_vector_transfer_write_with_inner_rank_reduced_subview(
// ops would be generated.
// CHECK-LABEL: func @fold_static_stride_subview_with_affine_load_store
-func @fold_static_stride_subview_with_affine_load_store(%arg0 : memref<12x32xf32>, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : index) -> f32 {
+func.func @fold_static_stride_subview_with_affine_load_store(%arg0 : memref<12x32xf32>, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : index) -> f32 {
%0 = memref.subview %arg0[%arg1, %arg2][4, 4][2, 3] : memref<12x32xf32> to memref<4x4xf32, offset:?, strides: [64, 3]>
%1 = affine.load %0[%arg3, %arg4] : memref<4x4xf32, offset:?, strides: [64, 3]>
// CHECK-NEXT: affine.apply
diff --git a/mlir/test/Dialect/MemRef/invalid.mlir b/mlir/test/Dialect/MemRef/invalid.mlir
index 64e6d9b013f59..5b15b36b6a2df 100644
--- a/mlir/test/Dialect/MemRef/invalid.mlir
+++ b/mlir/test/Dialect/MemRef/invalid.mlir
@@ -1,20 +1,20 @@
// RUN: mlir-opt -allow-unregistered-dialect -split-input-file %s -verify-diagnostics
-func @dma_start_not_enough_operands() {
+func.func @dma_start_not_enough_operands() {
// expected-error at +1 {{expected at least 4 operands}}
"memref.dma_start"() : () -> ()
}
// -----
-func @dma_no_src_memref(%m : f32, %tag : f32, %c0 : index) {
+func.func @dma_no_src_memref(%m : f32, %tag : f32, %c0 : index) {
// expected-error at +1 {{expected source to be of memref type}}
memref.dma_start %m[%c0], %m[%c0], %c0, %tag[%c0] : f32, f32, f32
}
// -----
-func @dma_start_not_enough_operands_for_src(
+func.func @dma_start_not_enough_operands_for_src(
%src: memref<2x2x2xf32>, %idx: index) {
// expected-error at +1 {{expected at least 7 operands}}
"memref.dma_start"(%src, %idx, %idx, %idx) : (memref<2x2x2xf32>, index, index, index) -> ()
@@ -22,7 +22,7 @@ func @dma_start_not_enough_operands_for_src(
// -----
-func @dma_start_src_index_wrong_type(
+func.func @dma_start_src_index_wrong_type(
%src: memref<2x2xf32>, %idx: index, %dst: memref<2xf32,1>,
%tag: memref<i32,2>, %flt: f32) {
// expected-error at +1 {{expected source indices to be of index type}}
@@ -32,7 +32,7 @@ func @dma_start_src_index_wrong_type(
// -----
-func @dma_no_dst_memref(%m : f32, %tag : f32, %c0 : index) {
+func.func @dma_no_dst_memref(%m : f32, %tag : f32, %c0 : index) {
%mref = memref.alloc() : memref<8 x f32>
// expected-error at +1 {{expected destination to be of memref type}}
memref.dma_start %mref[%c0], %m[%c0], %c0, %tag[%c0] : memref<8 x f32>, f32, f32
@@ -40,7 +40,7 @@ func @dma_no_dst_memref(%m : f32, %tag : f32, %c0 : index) {
// -----
-func @dma_start_not_enough_operands_for_dst(
+func.func @dma_start_not_enough_operands_for_dst(
%src: memref<2x2xf32>, %idx: index, %dst: memref<2xf32,1>,
%tag: memref<i32,2>) {
// expected-error at +1 {{expected at least 7 operands}}
@@ -50,7 +50,7 @@ func @dma_start_not_enough_operands_for_dst(
// -----
-func @dma_start_dst_index_wrong_type(
+func.func @dma_start_dst_index_wrong_type(
%src: memref<2x2xf32>, %idx: index, %dst: memref<2xf32,1>,
%tag: memref<i32,2>, %flt: f32) {
// expected-error at +1 {{expected destination indices to be of index type}}
@@ -60,7 +60,7 @@ func @dma_start_dst_index_wrong_type(
// -----
-func @dma_start_dst_index_wrong_type(
+func.func @dma_start_dst_index_wrong_type(
%src: memref<2x2xf32>, %idx: index, %dst: memref<2xf32,1>,
%tag: memref<i32,2>, %flt: f32) {
// expected-error at +1 {{expected num elements to be of index type}}
@@ -70,7 +70,7 @@ func @dma_start_dst_index_wrong_type(
// -----
-func @dma_no_tag_memref(%tag : f32, %c0 : index) {
+func.func @dma_no_tag_memref(%tag : f32, %c0 : index) {
%mref = memref.alloc() : memref<8 x f32>
// expected-error at +1 {{expected tag to be of memref type}}
memref.dma_start %mref[%c0], %mref[%c0], %c0, %tag[%c0] : memref<8 x f32>, memref<8 x f32>, f32
@@ -78,7 +78,7 @@ func @dma_no_tag_memref(%tag : f32, %c0 : index) {
// -----
-func @dma_start_not_enough_operands_for_tag(
+func.func @dma_start_not_enough_operands_for_tag(
%src: memref<2x2xf32>, %idx: index, %dst: memref<2xf32,1>,
%tag: memref<2xi32,2>) {
// expected-error at +1 {{expected at least 8 operands}}
@@ -88,7 +88,7 @@ func @dma_start_not_enough_operands_for_tag(
// -----
-func @dma_start_dst_index_wrong_type(
+func.func @dma_start_dst_index_wrong_type(
%src: memref<2x2xf32>, %idx: index, %dst: memref<2xf32,1>,
%tag: memref<2xi32,2>, %flt: f32) {
// expected-error at +1 {{expected tag indices to be of index type}}
@@ -98,7 +98,7 @@ func @dma_start_dst_index_wrong_type(
// -----
-func @dma_start_too_many_operands(
+func.func @dma_start_too_many_operands(
%src: memref<2x2xf32>, %idx: index, %dst: memref<2xf32,1>,
%tag: memref<i32,2>) {
// expected-error at +1 {{incorrect number of operands}}
@@ -109,7 +109,7 @@ func @dma_start_too_many_operands(
// -----
-func @dma_start_wrong_stride_type(
+func.func @dma_start_wrong_stride_type(
%src: memref<2x2xf32>, %idx: index, %dst: memref<2xf32,1>,
%tag: memref<i32,2>, %flt: f32) {
// expected-error at +1 {{expected stride and num elements per stride to be of type index}}
@@ -119,7 +119,7 @@ func @dma_start_wrong_stride_type(
// -----
-func @dma_wait_wrong_index_type(%tag : memref<2x2xi32>, %idx: index, %flt: index) {
+func.func @dma_wait_wrong_index_type(%tag : memref<2x2xi32>, %idx: index, %flt: index) {
// expected-error at +1 {{expected tagIndices to have the same number of elements as the tagMemRef rank, expected 2, but got 1}}
"memref.dma_wait"(%tag, %flt, %idx) : (memref<2x2xi32>, index, index) -> ()
return
@@ -127,28 +127,28 @@ func @dma_wait_wrong_index_type(%tag : memref<2x2xi32>, %idx: index, %flt: index
// -----
-func @transpose_not_permutation(%v : memref<?x?xf32, affine_map<(i, j)[off, M]->(off + M * i + j)>>) {
+func.func @transpose_not_permutation(%v : memref<?x?xf32, affine_map<(i, j)[off, M]->(off + M * i + j)>>) {
// expected-error @+1 {{expected a permutation map}}
memref.transpose %v (i, j) -> (i, i) : memref<?x?xf32, affine_map<(i, j)[off, M]->(off + M * i + j)>> to memref<?x?xf32, affine_map<(i, j)[off, M]->(off + M * i + j)>>
}
// -----
-func @transpose_bad_rank(%v : memref<?x?xf32, affine_map<(i, j)[off, M]->(off + M * i + j)>>) {
+func.func @transpose_bad_rank(%v : memref<?x?xf32, affine_map<(i, j)[off, M]->(off + M * i + j)>>) {
// expected-error @+1 {{expected a permutation map of same rank as the input}}
memref.transpose %v (i) -> (i) : memref<?x?xf32, affine_map<(i, j)[off, M]->(off + M * i + j)>> to memref<?x?xf32, affine_map<(i, j)[off, M]->(off + M * i + j)>>
}
// -----
-func @transpose_wrong_type(%v : memref<?x?xf32, affine_map<(i, j)[off, M]->(off + M * i + j)>>) {
+func.func @transpose_wrong_type(%v : memref<?x?xf32, affine_map<(i, j)[off, M]->(off + M * i + j)>>) {
// expected-error @+1 {{output type 'memref<?x?xf32, affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>>' does not match transposed input type 'memref<?x?xf32, affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>>'}}
memref.transpose %v (i, j) -> (j, i) : memref<?x?xf32, affine_map<(i, j)[off, M]->(off + M * i + j)>> to memref<?x?xf32, affine_map<(i, j)[off, M]->(off + M * i + j)>>
}
// -----
-func @memref_reinterpret_cast_too_many_offsets(%in: memref<?xf32>) {
+func.func @memref_reinterpret_cast_too_many_offsets(%in: memref<?xf32>) {
// expected-error @+1 {{expected 1 offset values}}
%out = memref.reinterpret_cast %in to
offset: [0, 0], sizes: [10, 10], strides: [10, 1]
@@ -158,7 +158,7 @@ func @memref_reinterpret_cast_too_many_offsets(%in: memref<?xf32>) {
// -----
-func @memref_reinterpret_cast_incompatible_element_types(%in: memref<*xf32>) {
+func.func @memref_reinterpret_cast_incompatible_element_types(%in: memref<*xf32>) {
// expected-error @+1 {{different element types specified}}
%out = memref.reinterpret_cast %in to
offset: [0], sizes: [10], strides: [1]
@@ -168,7 +168,7 @@ func @memref_reinterpret_cast_incompatible_element_types(%in: memref<*xf32>) {
// -----
-func @memref_reinterpret_cast_incompatible_memory_space(%in: memref<*xf32>) {
+func.func @memref_reinterpret_cast_incompatible_memory_space(%in: memref<*xf32>) {
// expected-error @+1 {{different memory spaces specified}}
%out = memref.reinterpret_cast %in to
offset: [0], sizes: [10], strides: [1]
@@ -178,7 +178,7 @@ func @memref_reinterpret_cast_incompatible_memory_space(%in: memref<*xf32>) {
// -----
-func @memref_reinterpret_cast_offset_mismatch(%in: memref<?xf32>) {
+func.func @memref_reinterpret_cast_offset_mismatch(%in: memref<?xf32>) {
// expected-error @+1 {{expected result type with offset = 2 instead of 1}}
%out = memref.reinterpret_cast %in to
offset: [1], sizes: [10], strides: [1]
@@ -188,7 +188,7 @@ func @memref_reinterpret_cast_offset_mismatch(%in: memref<?xf32>) {
// -----
-func @memref_reinterpret_cast_size_mismatch(%in: memref<*xf32>) {
+func.func @memref_reinterpret_cast_size_mismatch(%in: memref<*xf32>) {
// expected-error @+1 {{expected result type with size = 10 instead of 1 in dim = 0}}
%out = memref.reinterpret_cast %in to
offset: [0], sizes: [10], strides: [1]
@@ -198,7 +198,7 @@ func @memref_reinterpret_cast_size_mismatch(%in: memref<*xf32>) {
// -----
-func @memref_reinterpret_cast_offset_mismatch(%in: memref<?xf32>) {
+func.func @memref_reinterpret_cast_offset_mismatch(%in: memref<?xf32>) {
// expected-error @+1 {{expected result type with stride = 2 instead of 1 in dim = 0}}
%out = memref.reinterpret_cast %in to
offset: [2], sizes: [10], strides: [2]
@@ -208,7 +208,7 @@ func @memref_reinterpret_cast_offset_mismatch(%in: memref<?xf32>) {
// -----
-func @memref_reinterpret_cast_no_map_but_offset(%in: memref<?xf32>) {
+func.func @memref_reinterpret_cast_no_map_but_offset(%in: memref<?xf32>) {
// expected-error @+1 {{expected result type with offset = 0 instead of 2}}
%out = memref.reinterpret_cast %in to offset: [2], sizes: [10], strides: [1]
: memref<?xf32> to memref<10xf32>
@@ -217,7 +217,7 @@ func @memref_reinterpret_cast_no_map_but_offset(%in: memref<?xf32>) {
// -----
-func @memref_reinterpret_cast_no_map_but_stride(%in: memref<?xf32>) {
+func.func @memref_reinterpret_cast_no_map_but_stride(%in: memref<?xf32>) {
// expected-error @+1 {{expected result type with stride = 10 instead of 1 in dim = 0}}
%out = memref.reinterpret_cast %in to offset: [0], sizes: [10], strides: [10]
: memref<?xf32> to memref<10xf32>
@@ -226,7 +226,7 @@ func @memref_reinterpret_cast_no_map_but_stride(%in: memref<?xf32>) {
// -----
-func @memref_reinterpret_cast_no_map_but_strides(%in: memref<?x?xf32>) {
+func.func @memref_reinterpret_cast_no_map_but_strides(%in: memref<?x?xf32>) {
// expected-error @+1 {{expected result type with stride = 42 instead of 10 in dim = 0}}
%out = memref.reinterpret_cast %in to
offset: [0], sizes: [9, 10], strides: [42, 1]
@@ -236,7 +236,7 @@ func @memref_reinterpret_cast_no_map_but_strides(%in: memref<?x?xf32>) {
// -----
-func @memref_reinterpret_cast_non_strided_layout(%in: memref<?x?xf32>) {
+func.func @memref_reinterpret_cast_non_strided_layout(%in: memref<?x?xf32>) {
// expected-error @+1 {{expected result type to have strided layout but found 'memref<9x10xf32, affine_map<(d0, d1) -> (d0)>>}}
%out = memref.reinterpret_cast %in to
offset: [0], sizes: [9, 10], strides: [42, 1]
@@ -246,7 +246,7 @@ func @memref_reinterpret_cast_non_strided_layout(%in: memref<?x?xf32>) {
// -----
-func @memref_reshape_element_type_mismatch(
+func.func @memref_reshape_element_type_mismatch(
%buf: memref<*xf32>, %shape: memref<1xi32>) {
// expected-error @+1 {{element types of source and destination memref types should be the same}}
memref.reshape %buf(%shape) : (memref<*xf32>, memref<1xi32>) -> memref<?xi32>
@@ -254,7 +254,7 @@ func @memref_reshape_element_type_mismatch(
// -----
-func @memref_reshape_dst_ranked_shape_unranked(
+func.func @memref_reshape_dst_ranked_shape_unranked(
%buf: memref<*xf32>, %shape: memref<?xi32>) {
// expected-error @+1 {{cannot use shape operand with dynamic length to reshape to statically-ranked memref type}}
memref.reshape %buf(%shape) : (memref<*xf32>, memref<?xi32>) -> memref<?xf32>
@@ -262,7 +262,7 @@ func @memref_reshape_dst_ranked_shape_unranked(
// -----
-func @memref_reshape_dst_shape_rank_mismatch(
+func.func @memref_reshape_dst_shape_rank_mismatch(
%buf: memref<*xf32>, %shape: memref<1xi32>) {
// expected-error @+1 {{length of shape operand differs from the result's memref rank}}
memref.reshape %buf(%shape)
@@ -271,7 +271,7 @@ func @memref_reshape_dst_shape_rank_mismatch(
// -----
-func @memref_reshape_src_affine_map_is_not_identity(
+func.func @memref_reshape_src_affine_map_is_not_identity(
%buf: memref<4x4xf32, offset: 0, strides: [3, 2]>,
%shape: memref<1xi32>) {
// expected-error @+1 {{source memref type should have identity affine map}}
@@ -282,7 +282,7 @@ func @memref_reshape_src_affine_map_is_not_identity(
// -----
-func @memref_reshape_result_affine_map_is_not_identity(
+func.func @memref_reshape_result_affine_map_is_not_identity(
%buf: memref<4x4xf32>, %shape: memref<1xi32>) {
// expected-error @+1 {{result memref type should have identity affine map}}
memref.reshape %buf(%shape)
@@ -343,7 +343,7 @@ memref.global "priate" constant @memref5 : memref<2xf32> = uninitialized
// -----
-func @nonexistent_global_memref() {
+func.func @nonexistent_global_memref() {
// expected-error @+1 {{'gv' does not reference a valid global memref}}
%0 = memref.get_global @gv : memref<3xf32>
return
@@ -351,9 +351,9 @@ func @nonexistent_global_memref() {
// -----
-func private @foo()
+func.func private @foo()
-func @nonexistent_global_memref() {
+func.func @nonexistent_global_memref() {
// expected-error @+1 {{'foo' does not reference a valid global memref}}
%0 = memref.get_global @foo : memref<3xf32>
return
@@ -363,7 +363,7 @@ func @nonexistent_global_memref() {
memref.global @gv : memref<3xi32>
-func @mismatched_types() {
+func.func @mismatched_types() {
// expected-error @+1 {{result type 'memref<3xf32>' does not match type 'memref<3xi32>' of the global memref @gv}}
%0 = memref.get_global @gv : memref<3xf32>
return
@@ -376,7 +376,7 @@ memref.global "private" @gv : memref<4xf32> = dense<1.0> { alignment = 63 }
// -----
-func @copy_different_shape(%arg0: memref<2xf32>, %arg1: memref<3xf32>) {
+func.func @copy_different_shape(%arg0: memref<2xf32>, %arg1: memref<3xf32>) {
// expected-error @+1 {{op requires the same shape for all operands}}
memref.copy %arg0, %arg1 : memref<2xf32> to memref<3xf32>
return
@@ -384,7 +384,7 @@ func @copy_different_shape(%arg0: memref<2xf32>, %arg1: memref<3xf32>) {
// -----
-func @copy_different_eltype(%arg0: memref<2xf32>, %arg1: memref<2xf16>) {
+func.func @copy_different_eltype(%arg0: memref<2xf32>, %arg1: memref<2xf16>) {
// expected-error @+1 {{op requires the same element type for all operands}}
memref.copy %arg0, %arg1 : memref<2xf32> to memref<2xf16>
return
@@ -392,7 +392,7 @@ func @copy_different_eltype(%arg0: memref<2xf32>, %arg1: memref<2xf16>) {
// -----
-func @expand_shape(%arg0: memref<f32>) {
+func.func @expand_shape(%arg0: memref<f32>) {
// expected-error @+1 {{invalid number of reassociation groups: found 1, expected 0}}
%0 = memref.expand_shape %arg0 [[0]] : memref<f32> into memref<f32>
return
@@ -400,7 +400,7 @@ func @expand_shape(%arg0: memref<f32>) {
// -----
-func @expand_shape(%arg0: memref<f32>) {
+func.func @expand_shape(%arg0: memref<f32>) {
// expected-error @+1 {{rank 0 memrefs can only be extended/collapsed with/from ones}}
%0 = memref.expand_shape %arg0 [] : memref<f32> into memref<1x2xf32>
return
@@ -408,21 +408,21 @@ func @expand_shape(%arg0: memref<f32>) {
// -----
-func @collapse_shape_to_higher_rank(%arg0: memref<f32>) {
+func.func @collapse_shape_to_higher_rank(%arg0: memref<f32>) {
// expected-error @+1 {{op reassociation index 0 is out of bounds}}
%0 = memref.collapse_shape %arg0 [[0]] : memref<f32> into memref<1xf32>
}
// -----
-func @expand_shape_to_smaller_rank(%arg0: memref<1xf32>) {
+func.func @expand_shape_to_smaller_rank(%arg0: memref<1xf32>) {
// expected-error @+1 {{op reassociation index 0 is out of bounds}}
%0 = memref.expand_shape %arg0 [[0]] : memref<1xf32> into memref<f32>
}
// -----
-func @expand_shape_invalid_result_layout(
+func.func @expand_shape_invalid_result_layout(
%arg0: memref<30x20xf32, offset : 100, strides : [4000, 2]>) {
// expected-error @+1 {{expected expanded type to be 'memref<2x15x20xf32, affine_map<(d0, d1, d2) -> (d0 * 60000 + d1 * 4000 + d2 * 2 + 100)>>' but found 'memref<2x15x20xf32, affine_map<(d0, d1, d2) -> (d0 * 5000 + d1 * 4000 + d2 * 2 + 100)>>'}}
%0 = memref.expand_shape %arg0 [[0, 1], [2]] :
@@ -432,7 +432,7 @@ func @expand_shape_invalid_result_layout(
// -----
-func @collapse_shape_mismatch_indices_num(%arg0: memref<?x?x?xf32>) {
+func.func @collapse_shape_mismatch_indices_num(%arg0: memref<?x?x?xf32>) {
// expected-error @+1 {{invalid number of reassociation groups: found 1, expected 2}}
%0 = memref.collapse_shape %arg0 [[0, 1]] :
memref<?x?x?xf32> into memref<?x?xf32, offset: 0, strides: [?, 1]>
@@ -440,7 +440,7 @@ func @collapse_shape_mismatch_indices_num(%arg0: memref<?x?x?xf32>) {
// -----
-func @collapse_shape_invalid_reassociation(%arg0: memref<?x?x?xf32>) {
+func.func @collapse_shape_invalid_reassociation(%arg0: memref<?x?x?xf32>) {
// expected-error @+1 {{reassociation indices must be contiguous}}
%0 = memref.collapse_shape %arg0 [[0, 1], [1, 2]] :
memref<?x?x?xf32> into memref<?x?xf32, offset: 0, strides: [?, 1]>
@@ -448,7 +448,7 @@ func @collapse_shape_invalid_reassociation(%arg0: memref<?x?x?xf32>) {
// -----
-func @collapse_shape_reshaping_non_contiguous(
+func.func @collapse_shape_reshaping_non_contiguous(
%arg0: memref<3x4x5xf32, offset: 0, strides: [270, 50, 10]>) {
// expected-error @+1 {{invalid source layout map or collapsing non-contiguous dims}}
%0 = memref.collapse_shape %arg0 [[0, 1], [2]] :
@@ -459,7 +459,7 @@ func @collapse_shape_reshaping_non_contiguous(
// -----
-func @collapse_shape_wrong_collapsed_type(%arg0: memref<?x?x?xf32>) {
+func.func @collapse_shape_wrong_collapsed_type(%arg0: memref<?x?x?xf32>) {
// expected-error @+1 {{expected collapsed type to be 'memref<?x?xf32>' but found 'memref<?x?xf32, affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)>>'}}
%0 = memref.collapse_shape %arg0 [[0, 1], [2]] :
memref<?x?x?xf32> into memref<?x?xf32, affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)>>
@@ -467,7 +467,7 @@ func @collapse_shape_wrong_collapsed_type(%arg0: memref<?x?x?xf32>) {
// -----
-func @expand_shape_illegal_dynamic_memref
+func.func @expand_shape_illegal_dynamic_memref
(%arg0: memref<?x?x?xf32>) -> memref<?x?x?x4x?xf32> {
// expected-error @+1 {{at most one dimension in a reassociation group may be dynamic}}
%0 = memref.expand_shape %arg0 [[0], [1], [2, 3, 4]]
@@ -477,7 +477,7 @@ func @expand_shape_illegal_dynamic_memref
// -----
-func @expand_shape_illegal_static_memref
+func.func @expand_shape_illegal_static_memref
(%arg0: memref<2x3x20xf32>) -> memref<2x3x2x4x5xf32> {
// expected-error @+1 {{collapsed dim size (20) must equal reassociation group size (40)}}
%0 = memref.expand_shape %arg0 [[0], [1], [2, 3, 4]]
@@ -487,7 +487,7 @@ func @expand_shape_illegal_static_memref
// -----
-func @collapse_shape_illegal_static_memref
+func.func @collapse_shape_illegal_static_memref
(%arg0: memref<2x3x2x4x5xf32>) -> memref<2x3x20xf32> {
// expected-error @+1 {{collapsed dim size (20) must equal reassociation group size (40)}}
%0 = memref.collapse_shape %arg0 [[0], [1], [2, 3, 4]]
@@ -497,7 +497,7 @@ func @collapse_shape_illegal_static_memref
// -----
-func @expand_shape_illegal_mixed_memref(%arg0 : memref<?x?xf32>)
+func.func @expand_shape_illegal_mixed_memref(%arg0 : memref<?x?xf32>)
-> memref<?x4x5xf32> {
// expected-error @+1 {{collapsed dim (1) must be dynamic if and only if reassociation group is dynamic}}
%0 = memref.expand_shape %arg0 [[0, 1], [2]]
@@ -507,7 +507,7 @@ func @expand_shape_illegal_mixed_memref(%arg0 : memref<?x?xf32>)
// -----
-func @expand_shape_illegal_mixed_memref_2(%arg0 : memref<?x?xf32>)
+func.func @expand_shape_illegal_mixed_memref_2(%arg0 : memref<?x?xf32>)
-> memref<?x4x5xf32> {
// expected-error @+1 {{collapsed dim (1) must be dynamic if and only if reassociation group is dynamic}}
%0 = memref.expand_shape %arg0 [[0], [1, 2]]
@@ -517,7 +517,7 @@ func @expand_shape_illegal_mixed_memref_2(%arg0 : memref<?x?xf32>)
// -----
-func @expand_shape_invalid_static_dim_size(%arg0 : memref<?x21xf32>)
+func.func @expand_shape_invalid_static_dim_size(%arg0 : memref<?x21xf32>)
-> memref<?x4x5xf32> {
// expected-error @+1 {{collapsed dim size (21) must equal reassociation group size (20)}}
%0 = memref.expand_shape %arg0 [[0], [1, 2]]
@@ -527,7 +527,7 @@ func @expand_shape_invalid_static_dim_size(%arg0 : memref<?x21xf32>)
// -----
-func @collapse_shape_illegal_mixed_memref(%arg0 : memref<?x4x5xf32>)
+func.func @collapse_shape_illegal_mixed_memref(%arg0 : memref<?x4x5xf32>)
-> memref<?x?xf32> {
// expected-error @+1 {{collapsed dim (1) must be dynamic if and only if reassociation group is dynamic}}
%0 = memref.collapse_shape %arg0 [[0, 1], [2]]
@@ -537,7 +537,7 @@ func @collapse_shape_illegal_mixed_memref(%arg0 : memref<?x4x5xf32>)
// -----
-func @collapse_shape_illegal_mixed_memref_2(%arg0 : memref<?x4x5xf32>)
+func.func @collapse_shape_illegal_mixed_memref_2(%arg0 : memref<?x4x5xf32>)
-> memref<?x?xf32> {
// expected-error @+1 {{collapsed dim (1) must be dynamic if and only if reassociation group is dynamic}}
%0 = memref.collapse_shape %arg0 [[0], [1, 2]]
@@ -547,7 +547,7 @@ func @collapse_shape_illegal_mixed_memref_2(%arg0 : memref<?x4x5xf32>)
// -----
-func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
%0 = memref.alloc() : memref<2048xi8>
// expected-error at +1 {{expected SSA operand}}
%1 = memref.view %0[][%arg0, %arg1]
@@ -557,7 +557,7 @@ func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
// -----
-func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
%0 = memref.alloc() : memref<2048xi8, affine_map<(d0) -> (d0 floordiv 8, d0 mod 8)>>
// expected-error at +1 {{unsupported map for base memref type}}
%1 = memref.view %0[%arg2][%arg0, %arg1]
@@ -568,7 +568,7 @@ func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
// -----
-func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
%0 = memref.alloc() : memref<2048xi8>
// expected-error at +1 {{unsupported map for result memref type}}
%1 = memref.view %0[%arg2][%arg0, %arg1]
@@ -578,7 +578,7 @@ func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
// -----
-func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
%0 = memref.alloc() : memref<2048xi8, 2>
// expected-error at +1 {{different memory spaces}}
%1 = memref.view %0[%arg2][%arg0, %arg1] : memref<2048xi8, 2> to memref<?x?xf32, 1>
@@ -587,7 +587,7 @@ func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
// -----
-func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
%0 = memref.alloc() : memref<2048xi8>
// expected-error at +1 {{incorrect number of size operands for type}}
%1 = memref.view %0[%arg2][%arg0]
@@ -597,7 +597,7 @@ func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
// -----
-func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
%0 = memref.alloc() : memref<8x16x4xf32>
// expected-error at +1 {{expected mixed offsets rank to match mixed sizes rank (2 vs 3) so the rank of the result type is well-formed}}
%1 = memref.subview %0[0, 0][2, 2, 2][1, 1, 1]
@@ -607,7 +607,7 @@ func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
// -----
-func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
%0 = memref.alloc() : memref<8x16x4xf32>
// expected-error at +1 {{expected mixed sizes rank to match mixed strides rank (3 vs 2) so the rank of the result type is well-formed}}
%1 = memref.subview %0[0, 0, 0][2, 2, 2][1, 1]
@@ -617,7 +617,7 @@ func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
// -----
-func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
%0 = memref.alloc() : memref<8x16x4xf32>
// expected-error at +1 {{expected mixed sizes rank to match mixed strides rank (3 vs 2) so the rank of the result type is well-formed}}
%1 = memref.reinterpret_cast %0 to offset: [0], sizes: [2, 2, 2], strides:[1, 1]
@@ -627,7 +627,7 @@ func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
// -----
-func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
%0 = memref.alloc() : memref<8x16x4xf32, offset: 0, strides: [64, 4, 1], 2>
// expected-error at +1 {{different memory spaces}}
%1 = memref.subview %0[0, 0, 0][%arg2, %arg2, %arg2][1, 1, 1]
@@ -638,7 +638,7 @@ func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
// -----
-func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
%0 = memref.alloc() : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 + d1, d1 + d2, d2)>>
// expected-error at +1 {{is not strided}}
%1 = memref.subview %0[0, 0, 0][%arg2, %arg2, %arg2][1, 1, 1]
@@ -649,7 +649,7 @@ func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
// -----
-func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
%0 = memref.alloc() : memref<8x16x4xf32>
// expected-error at +1 {{expected 3 offset values}}
%1 = memref.subview %0[%arg0, %arg1, 0, 0][%arg2, 0, 0, 0][1, 1, 1, 1]
@@ -660,7 +660,7 @@ func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
// -----
-func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
%0 = memref.alloc() : memref<8x16x4xf32>
// expected-error at +1 {{expected result element type to be 'f32'}}
%1 = memref.subview %0[0, 0, 0][8, 16, 4][1, 1, 1]
@@ -671,7 +671,7 @@ func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
// -----
-func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
%0 = memref.alloc() : memref<8x16x4xf32>
// expected-error at +1 {{expected result rank to be smaller or equal to the source rank.}}
%1 = memref.subview %0[0, 0, 0][8, 16, 4][1, 1, 1]
@@ -682,7 +682,7 @@ func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
// -----
-func @invalid_rank_reducing_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @invalid_rank_reducing_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
%0 = memref.alloc() : memref<8x16x4xf32>
// expected-error at +1 {{expected result type to be 'memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>>' or a rank-reduced version. (mismatch of result sizes)}}
%1 = memref.subview %0[0, 0, 0][8, 16, 4][1, 1, 1]
@@ -692,7 +692,7 @@ func @invalid_rank_reducing_subview(%arg0 : index, %arg1 : index, %arg2 : index)
// -----
-func @invalid_rank_reducing_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @invalid_rank_reducing_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
%0 = memref.alloc() : memref<8x16x4xf32>
// expected-error at +1 {{expected result type to be 'memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2 + 8)>>' or a rank-reduced version. (mismatch of result sizes)}}
%1 = memref.subview %0[0, 2, 0][8, 16, 4][1, 1, 1]
@@ -702,7 +702,7 @@ func @invalid_rank_reducing_subview(%arg0 : index, %arg1 : index, %arg2 : index)
// -----
-func @invalid_rank_reducing_subview(%arg0 : memref<?x?xf32>, %arg1 : index, %arg2 : index) {
+func.func @invalid_rank_reducing_subview(%arg0 : memref<?x?xf32>, %arg1 : index, %arg2 : index) {
// expected-error at +1 {{expected result type to be 'memref<?x1xf32, affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>>' or a rank-reduced version. (mismatch of result layout)}}
%0 = memref.subview %arg0[0, %arg1][%arg2, 1][1, 1] : memref<?x?xf32> to memref<?xf32>
return
@@ -712,7 +712,7 @@ func @invalid_rank_reducing_subview(%arg0 : memref<?x?xf32>, %arg1 : index, %arg
#map0 = affine_map<(d0, d1)[s0] -> (d0 * 16 + d1)>
-func @subview_bad_offset_1(%arg0: memref<16x16xf32>) {
+func.func @subview_bad_offset_1(%arg0: memref<16x16xf32>) {
%c0 = arith.constant 0 : index
%c8 = arith.constant 8 : index
// expected-error @+1 {{expected result type to be 'memref<8x8xf32, affine_map<(d0, d1)[s0] -> (d0 * 16 + s0 + d1)>>' or a rank-reduced version}}
@@ -724,7 +724,7 @@ func @subview_bad_offset_1(%arg0: memref<16x16xf32>) {
#map0 = affine_map<(d0, d1)[s0] -> (d0 * 16 + d1 + 136)>
-func @subview_bad_offset_2(%arg0: memref<16x16xf32>) {
+func.func @subview_bad_offset_2(%arg0: memref<16x16xf32>) {
%c0 = arith.constant 0 : index
%c8 = arith.constant 8 : index
// expected-error @+1 {{expected result type to be 'memref<8x8xf32, affine_map<(d0, d1)[s0] -> (d0 * 16 + s0 + d1)>>' or a rank-reduced version}}
@@ -736,7 +736,7 @@ func @subview_bad_offset_2(%arg0: memref<16x16xf32>) {
#map0 = affine_map<(d0, d1)[s0] -> (d0 * 16 + d1 + s0 * 437)>
-func @subview_bad_offset_3(%arg0: memref<16x16xf32>) {
+func.func @subview_bad_offset_3(%arg0: memref<16x16xf32>) {
%c0 = arith.constant 0 : index
%c8 = arith.constant 8 : index
// expected-error @+1 {{expected result type to be 'memref<8x8xf32, affine_map<(d0, d1)[s0] -> (d0 * 16 + s0 + d1)>>' or a rank-reduced version}}
@@ -746,7 +746,7 @@ func @subview_bad_offset_3(%arg0: memref<16x16xf32>) {
// -----
-func @invalid_memref_cast(%arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]>) {
+func.func @invalid_memref_cast(%arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]>) {
// expected-error at +1{{operand type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 16 + d2)>>' and result type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 128 + d1 * 32 + d2 * 2)>>' are cast incompatible}}
%0 = memref.cast %arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]> to memref<12x4x16xf32, offset:0, strides:[128, 32, 2]>
return
@@ -754,7 +754,7 @@ func @invalid_memref_cast(%arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16,
// -----
-func @invalid_memref_cast(%arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]>) {
+func.func @invalid_memref_cast(%arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]>) {
// expected-error at +1{{operand type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 16 + d2)>>' and result type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 16 + d2 + 16)>>' are cast incompatible}}
%0 = memref.cast %arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]> to memref<12x4x16xf32, offset:16, strides:[64, 16, 1]>
return
@@ -763,7 +763,7 @@ func @invalid_memref_cast(%arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16,
// -----
// incompatible element types
-func @invalid_memref_cast() {
+func.func @invalid_memref_cast() {
%0 = memref.alloc() : memref<2x5xf32, 0>
// expected-error at +1 {{operand type 'memref<2x5xf32>' and result type 'memref<*xi32>' are cast incompatible}}
%1 = memref.cast %0 : memref<2x5xf32, 0> to memref<*xi32>
@@ -772,7 +772,7 @@ func @invalid_memref_cast() {
// -----
-func @invalid_prefetch_rw(%i : index) {
+func.func @invalid_prefetch_rw(%i : index) {
%0 = memref.alloc() : memref<10xf32>
// expected-error at +1 {{rw specifier has to be 'read' or 'write'}}
memref.prefetch %0[%i], rw, locality<0>, data : memref<10xf32>
@@ -781,7 +781,7 @@ func @invalid_prefetch_rw(%i : index) {
// -----
-func @invalid_prefetch_cache_type(%i : index) {
+func.func @invalid_prefetch_cache_type(%i : index) {
%0 = memref.alloc() : memref<10xf32>
// expected-error at +1 {{cache type has to be 'data' or 'instr'}}
memref.prefetch %0[%i], read, locality<0>, false : memref<10xf32>
@@ -790,7 +790,7 @@ func @invalid_prefetch_cache_type(%i : index) {
// -----
-func @invalid_prefetch_locality_hint(%i : index) {
+func.func @invalid_prefetch_locality_hint(%i : index) {
%0 = memref.alloc() : memref<10xf32>
// expected-error at +1 {{32-bit signless integer attribute whose minimum value is 0 whose maximum value is 3}}
memref.prefetch %0[%i], read, locality<5>, data : memref<10xf32>
@@ -800,7 +800,7 @@ func @invalid_prefetch_locality_hint(%i : index) {
// -----
// incompatible memory space
-func @invalid_memref_cast() {
+func.func @invalid_memref_cast() {
%0 = memref.alloc() : memref<2x5xf32, 0>
// expected-error at +1 {{operand type 'memref<2x5xf32>' and result type 'memref<*xf32, 1>' are cast incompatible}}
%1 = memref.cast %0 : memref<2x5xf32, 0> to memref<*xf32, 1>
@@ -810,7 +810,7 @@ func @invalid_memref_cast() {
// -----
// unranked to unranked
-func @invalid_memref_cast() {
+func.func @invalid_memref_cast() {
%0 = memref.alloc() : memref<2x5xf32, 0>
%1 = memref.cast %0 : memref<2x5xf32, 0> to memref<*xf32, 0>
// expected-error at +1 {{operand type 'memref<*xf32>' and result type 'memref<*xf32>' are cast incompatible}}
@@ -821,7 +821,7 @@ func @invalid_memref_cast() {
// -----
// alignment is not power of 2.
-func @assume_alignment(%0: memref<4x4xf16>) {
+func.func @assume_alignment(%0: memref<4x4xf16>) {
// expected-error at +1 {{alignment must be power of 2}}
memref.assume_alignment %0, 12 : memref<4x4xf16>
return
@@ -830,7 +830,7 @@ func @assume_alignment(%0: memref<4x4xf16>) {
// -----
// 0 alignment value.
-func @assume_alignment(%0: memref<4x4xf16>) {
+func.func @assume_alignment(%0: memref<4x4xf16>) {
// expected-error at +1 {{attribute 'alignment' failed to satisfy constraint: 32-bit signless integer attribute whose value is positive}}
memref.assume_alignment %0, 0 : memref<4x4xf16>
return
@@ -846,7 +846,7 @@ func @assume_alignment(%0: memref<4x4xf16>) {
// -----
-func @bad_alloc_wrong_dynamic_dim_count() {
+func.func @bad_alloc_wrong_dynamic_dim_count() {
^bb0:
%0 = arith.constant 7 : index
// Test alloc with wrong number of dynamic dimensions.
@@ -857,7 +857,7 @@ func @bad_alloc_wrong_dynamic_dim_count() {
// -----
-func @bad_alloc_wrong_symbol_count() {
+func.func @bad_alloc_wrong_symbol_count() {
^bb0:
%0 = arith.constant 7 : index
// Test alloc with wrong number of symbols
@@ -868,7 +868,7 @@ func @bad_alloc_wrong_symbol_count() {
// -----
-func @test_store_zero_results() {
+func.func @test_store_zero_results() {
^bb0:
%0 = memref.alloc() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
%1 = arith.constant 0 : index
@@ -881,14 +881,14 @@ func @test_store_zero_results() {
// -----
-func @test_store_zero_results2(%x: i32, %p: memref<i32>) {
+func.func @test_store_zero_results2(%x: i32, %p: memref<i32>) {
"memref.store"(%x,%p) : (i32, memref<i32>) -> i32 // expected-error {{'memref.store' op requires zero results}}
return
}
// -----
-func @test_alloc_memref_map_rank_mismatch() {
+func.func @test_alloc_memref_map_rank_mismatch() {
^bb0:
// expected-error at +1 {{memref layout mismatch between rank and affine map: 2 != 1}}
%0 = memref.alloc() : memref<1024x64xf32, affine_map<(d0) -> (d0)>, 1>
@@ -897,7 +897,7 @@ func @test_alloc_memref_map_rank_mismatch() {
// -----
-func @rank(%0: f32) {
+func.func @rank(%0: f32) {
// expected-error at +1 {{'memref.rank' op operand #0 must be unranked.memref of any type values or memref of any type values}}
"memref.rank"(%0): (f32)->index
return
@@ -906,14 +906,14 @@ func @rank(%0: f32) {
// -----
#map = affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (s0 + d0 * s1 + d1 * s2 + d2 * s3)>
-func @illegal_num_offsets(%arg0 : memref<?x?x?xf32>, %arg1 : index, %arg2 : index) {
+func.func @illegal_num_offsets(%arg0 : memref<?x?x?xf32>, %arg1 : index, %arg2 : index) {
// expected-error at +1 {{expected 3 offset values}}
%0 = memref.subview %arg0[0, 0] [%arg1, %arg2] [1, 1] : memref<?x?x?xf32> to memref<?x?x?xf32, #map>
}
// -----
-func @atomic_rmw_idxs_rank_mismatch(%I: memref<16x10xf32>, %i : index, %val : f32) {
+func.func @atomic_rmw_idxs_rank_mismatch(%I: memref<16x10xf32>, %i : index, %val : f32) {
// expected-error at +1 {{expects the number of subscripts to be equal to memref rank}}
%x = memref.atomic_rmw addf %val, %I[%i] : (f32, memref<16x10xf32>) -> f32
return
@@ -921,7 +921,7 @@ func @atomic_rmw_idxs_rank_mismatch(%I: memref<16x10xf32>, %i : index, %val : f3
// -----
-func @atomic_rmw_expects_float(%I: memref<16x10xi32>, %i : index, %val : i32) {
+func.func @atomic_rmw_expects_float(%I: memref<16x10xi32>, %i : index, %val : i32) {
// expected-error at +1 {{expects a floating-point type}}
%x = memref.atomic_rmw addf %val, %I[%i, %i] : (i32, memref<16x10xi32>) -> i32
return
@@ -929,7 +929,7 @@ func @atomic_rmw_expects_float(%I: memref<16x10xi32>, %i : index, %val : i32) {
// -----
-func @atomic_rmw_expects_int(%I: memref<16x10xf32>, %i : index, %val : f32) {
+func.func @atomic_rmw_expects_int(%I: memref<16x10xf32>, %i : index, %val : f32) {
// expected-error at +1 {{expects an integer type}}
%x = memref.atomic_rmw addi %val, %I[%i, %i] : (f32, memref<16x10xf32>) -> f32
return
@@ -937,7 +937,7 @@ func @atomic_rmw_expects_int(%I: memref<16x10xf32>, %i : index, %val : f32) {
// -----
-func @generic_atomic_rmw_wrong_arg_num(%I: memref<10xf32>, %i : index) {
+func.func @generic_atomic_rmw_wrong_arg_num(%I: memref<10xf32>, %i : index) {
// expected-error at +1 {{expected single number of entry block arguments}}
%x = memref.generic_atomic_rmw %I[%i] : memref<10xf32> {
^bb0(%arg0 : f32, %arg1 : f32):
@@ -949,7 +949,7 @@ func @generic_atomic_rmw_wrong_arg_num(%I: memref<10xf32>, %i : index) {
// -----
-func @generic_atomic_rmw_wrong_arg_type(%I: memref<10xf32>, %i : index) {
+func.func @generic_atomic_rmw_wrong_arg_type(%I: memref<10xf32>, %i : index) {
// expected-error at +1 {{expected block argument of the same type result type}}
%x = memref.generic_atomic_rmw %I[%i] : memref<10xf32> {
^bb0(%old_value : i32):
@@ -961,7 +961,7 @@ func @generic_atomic_rmw_wrong_arg_type(%I: memref<10xf32>, %i : index) {
// -----
-func @generic_atomic_rmw_result_type_mismatch(%I: memref<10xf32>, %i : index) {
+func.func @generic_atomic_rmw_result_type_mismatch(%I: memref<10xf32>, %i : index) {
// expected-error at +1 {{failed to verify that result type matches element type of memref}}
%0 = "memref.generic_atomic_rmw"(%I, %i) ({
^bb0(%old_value: f32):
@@ -973,7 +973,7 @@ func @generic_atomic_rmw_result_type_mismatch(%I: memref<10xf32>, %i : index) {
// -----
-func @generic_atomic_rmw_has_side_effects(%I: memref<10xf32>, %i : index) {
+func.func @generic_atomic_rmw_has_side_effects(%I: memref<10xf32>, %i : index) {
// expected-error at +4 {{should contain only operations with no side effects}}
%x = memref.generic_atomic_rmw %I[%i] : memref<10xf32> {
^bb0(%old_value : f32):
@@ -985,7 +985,7 @@ func @generic_atomic_rmw_has_side_effects(%I: memref<10xf32>, %i : index) {
// -----
-func @atomic_yield_type_mismatch(%I: memref<10xf32>, %i : index) {
+func.func @atomic_yield_type_mismatch(%I: memref<10xf32>, %i : index) {
// expected-error at +4 {{op types mismatch between yield op: 'i32' and its parent: 'f32'}}
%x = memref.generic_atomic_rmw %I[%i] : memref<10xf32> {
^bb0(%old_value : f32):
diff --git a/mlir/test/Dialect/MemRef/multibuffer.mlir b/mlir/test/Dialect/MemRef/multibuffer.mlir
index 372210bc38319..d6030af1eb879 100644
--- a/mlir/test/Dialect/MemRef/multibuffer.mlir
+++ b/mlir/test/Dialect/MemRef/multibuffer.mlir
@@ -4,7 +4,7 @@
// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2) -> (((d0 - d1) floordiv d2) mod 5)>
// CHECK-LABEL: func @multi_buffer
-func @multi_buffer(%a: memref<1024x1024xf32>) {
+func.func @multi_buffer(%a: memref<1024x1024xf32>) {
// CHECK-DAG: %[[A:.*]] = memref.alloc() : memref<5x4x128xf32>
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C3:.*]] = arith.constant 3 : index
@@ -31,7 +31,7 @@ func @multi_buffer(%a: memref<1024x1024xf32>) {
// -----
// CHECK-LABEL: func @multi_buffer_affine
-func @multi_buffer_affine(%a: memref<1024x1024xf32>) {
+func.func @multi_buffer_affine(%a: memref<1024x1024xf32>) {
// CHECK-DAG: %[[A:.*]] = memref.alloc() : memref<5x4x128xf32>
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C3:.*]] = arith.constant 3 : index
@@ -61,7 +61,7 @@ func @multi_buffer_affine(%a: memref<1024x1024xf32>) {
// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2) -> (((d0 - d1) floordiv d2) mod 5)>
// CHECK-LABEL: func @multi_buffer_subview_use
-func @multi_buffer_subview_use(%a: memref<1024x1024xf32>) {
+func.func @multi_buffer_subview_use(%a: memref<1024x1024xf32>) {
// CHECK-DAG: %[[A:.*]] = memref.alloc() : memref<5x4x128xf32>
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C3:.*]] = arith.constant 3 : index
@@ -91,7 +91,7 @@ func @multi_buffer_subview_use(%a: memref<1024x1024xf32>) {
// -----
// CHECK-LABEL: func @multi_buffer_negative
-func @multi_buffer_negative(%a: memref<1024x1024xf32>) {
+func.func @multi_buffer_negative(%a: memref<1024x1024xf32>) {
// CHECK-NOT: %{{.*}} = memref.alloc() : memref<5x4x128xf32>
// CHECK: %{{.*}} = memref.alloc() : memref<4x128xf32>
%0 = memref.alloc() : memref<4x128xf32>
diff --git a/mlir/test/Dialect/MemRef/ops.mlir b/mlir/test/Dialect/MemRef/ops.mlir
index bd2a6fc7489c9..39a15b121034c 100644
--- a/mlir/test/Dialect/MemRef/ops.mlir
+++ b/mlir/test/Dialect/MemRef/ops.mlir
@@ -8,7 +8,7 @@
// CHECK-DAG: #[[$strided2D42:.*]] = affine_map<(d0, d1) -> (d0 * 42 + d1)>
// CHECK-LABEL: func @memref_reinterpret_cast
-func @memref_reinterpret_cast(%in: memref<?xf32>)
+func.func @memref_reinterpret_cast(%in: memref<?xf32>)
-> memref<10x?xf32, offset: ?, strides: [?, 1]> {
%c0 = arith.constant 0 : index
%c10 = arith.constant 10 : index
@@ -19,7 +19,7 @@ func @memref_reinterpret_cast(%in: memref<?xf32>)
}
// CHECK-LABEL: func @memref_reinterpret_cast_static_to_dynamic_sizes
-func @memref_reinterpret_cast_static_to_dynamic_sizes(%in: memref<?xf32>)
+func.func @memref_reinterpret_cast_static_to_dynamic_sizes(%in: memref<?xf32>)
-> memref<10x?xf32, offset: ?, strides: [?, 1]> {
%out = memref.reinterpret_cast %in to
offset: [1], sizes: [10, 10], strides: [1, 1]
@@ -28,7 +28,7 @@ func @memref_reinterpret_cast_static_to_dynamic_sizes(%in: memref<?xf32>)
}
// CHECK-LABEL: func @memref_reinterpret_cast_dynamic_offset
-func @memref_reinterpret_cast_dynamic_offset(%in: memref<?xf32>, %offset: index)
+func.func @memref_reinterpret_cast_dynamic_offset(%in: memref<?xf32>, %offset: index)
-> memref<10x?xf32, offset: ?, strides: [?, 1]> {
%out = memref.reinterpret_cast %in to
offset: [%offset], sizes: [10, 10], strides: [1, 1]
@@ -37,7 +37,7 @@ func @memref_reinterpret_cast_dynamic_offset(%in: memref<?xf32>, %offset: index)
}
// CHECK-LABEL: func @memref_reshape(
-func @memref_reshape(%unranked: memref<*xf32>, %shape1: memref<1xi32>,
+func.func @memref_reshape(%unranked: memref<*xf32>, %shape1: memref<1xi32>,
%shape2: memref<2xi32>, %shape3: memref<?xi32>) -> memref<*xf32> {
%dyn_vec = memref.reshape %unranked(%shape1)
: (memref<*xf32>, memref<1xi32>) -> memref<?xf32>
@@ -64,7 +64,7 @@ memref.global "private" @memref3 : memref<2xf32> = uninitialized
memref.global "private" constant @memref4 : memref<2xf32> = uninitialized
// CHECK-LABEL: func @write_global_memref
-func @write_global_memref() {
+func.func @write_global_memref() {
%0 = memref.get_global @memref0 : memref<2xf32>
%1 = arith.constant dense<[1.0, 2.0]> : tensor<2xf32>
memref.tensor_store %1, %0 : memref<2xf32>
@@ -72,13 +72,13 @@ func @write_global_memref() {
}
// CHECK-LABEL: func @read_global_memref
-func @read_global_memref() {
+func.func @read_global_memref() {
%0 = memref.get_global @memref0 : memref<2xf32>
return
}
// CHECK-LABEL: func @memref_copy
-func @memref_copy() {
+func.func @memref_copy() {
%0 = memref.alloc() : memref<2xf32>
%1 = memref.cast %0 : memref<2xf32> to memref<*xf32>
%2 = memref.alloc() : memref<2xf32>
@@ -88,7 +88,7 @@ func @memref_copy() {
}
// CHECK-LABEL: func @memref_dealloc
-func @memref_dealloc() {
+func.func @memref_dealloc() {
%0 = memref.alloc() : memref<2xf32>
%1 = memref.cast %0 : memref<2xf32> to memref<*xf32>
memref.dealloc %1 : memref<*xf32>
@@ -97,7 +97,7 @@ func @memref_dealloc() {
// CHECK-LABEL: func @memref_alloca_scope
-func @memref_alloca_scope() {
+func.func @memref_alloca_scope() {
memref.alloca_scope {
memref.alloca_scope.return
}
@@ -105,7 +105,7 @@ func @memref_alloca_scope() {
}
// CHECK-LABEL: func @expand_collapse_shape_static
-func @expand_collapse_shape_static(
+func.func @expand_collapse_shape_static(
%arg0: memref<3x4x5xf32>,
%arg1: tensor<3x4x5xf32>,
%arg2: tensor<3x?x5xf32>,
@@ -208,7 +208,7 @@ func @expand_collapse_shape_static(
}
// CHECK-LABEL: func @expand_collapse_shape_dynamic
-func @expand_collapse_shape_dynamic(%arg0: memref<?x?x?xf32>,
+func.func @expand_collapse_shape_dynamic(%arg0: memref<?x?x?xf32>,
%arg1: memref<?x?x?xf32, offset : 0, strides : [?, ?, 1]>,
%arg2: memref<?x?x?xf32, offset : ?, strides : [?, ?, 1]>,
%arg3: memref<?x42xf32, offset : 0, strides : [42, 1]>) {
@@ -259,7 +259,7 @@ func @expand_collapse_shape_dynamic(%arg0: memref<?x?x?xf32>,
return
}
-func @expand_collapse_shape_zero_dim(%arg0 : memref<1x1xf32>, %arg1 : memref<f32>)
+func.func @expand_collapse_shape_zero_dim(%arg0 : memref<1x1xf32>, %arg1 : memref<f32>)
-> (memref<f32>, memref<1x1xf32>) {
%0 = memref.collapse_shape %arg0 [] : memref<1x1xf32> into memref<f32>
%1 = memref.expand_shape %0 [] : memref<f32> into memref<1x1xf32>
@@ -269,7 +269,7 @@ func @expand_collapse_shape_zero_dim(%arg0 : memref<1x1xf32>, %arg1 : memref<f32
// CHECK: memref.collapse_shape %{{.*}} [] : memref<1x1xf32> into memref<f32>
// CHECK: memref.expand_shape %{{.*}} [] : memref<f32> into memref<1x1xf32>
-func @collapse_shape_to_dynamic
+func.func @collapse_shape_to_dynamic
(%arg0: memref<?x?x?x4x?xf32>) -> memref<?x?x?xf32> {
%0 = memref.collapse_shape %arg0 [[0], [1], [2, 3, 4]] :
memref<?x?x?x4x?xf32> into memref<?x?x?xf32>
@@ -282,7 +282,7 @@ func @collapse_shape_to_dynamic
// -----
// CHECK-LABEL: func @expand_collapse_shape_transposed_layout
-func @expand_collapse_shape_transposed_layout(
+func.func @expand_collapse_shape_transposed_layout(
%m0: memref<?x?xf32, offset : 0, strides : [1, 10]>,
%m1: memref<4x5x6xf32, offset : 0, strides : [1, ?, 1000]>) {
@@ -304,7 +304,7 @@ func @expand_collapse_shape_transposed_layout(
// -----
-func @rank(%t : memref<4x4x?xf32>) {
+func.func @rank(%t : memref<4x4x?xf32>) {
// CHECK: %{{.*}} = memref.rank %{{.*}} : memref<4x4x?xf32>
%0 = "memref.rank"(%t) : (memref<4x4x?xf32>) -> index
@@ -317,7 +317,7 @@ func @rank(%t : memref<4x4x?xf32>) {
// CHECK-LABEL: func @atomic_rmw
// CHECK-SAME: ([[BUF:%.*]]: memref<10xf32>, [[VAL:%.*]]: f32, [[I:%.*]]: index)
-func @atomic_rmw(%I: memref<10xf32>, %val: f32, %i : index) {
+func.func @atomic_rmw(%I: memref<10xf32>, %val: f32, %i : index) {
%x = memref.atomic_rmw addf %val, %I[%i] : (f32, memref<10xf32>) -> f32
// CHECK: memref.atomic_rmw addf [[VAL]], [[BUF]]{{\[}}[[I]]]
return
@@ -325,7 +325,7 @@ func @atomic_rmw(%I: memref<10xf32>, %val: f32, %i : index) {
// CHECK-LABEL: func @generic_atomic_rmw
// CHECK-SAME: ([[BUF:%.*]]: memref<1x2xf32>, [[I:%.*]]: index, [[J:%.*]]: index)
-func @generic_atomic_rmw(%I: memref<1x2xf32>, %i : index, %j : index) {
+func.func @generic_atomic_rmw(%I: memref<1x2xf32>, %i : index, %j : index) {
%x = memref.generic_atomic_rmw %I[%i, %j] : memref<1x2xf32> {
// CHECK-NEXT: memref.generic_atomic_rmw [[BUF]]{{\[}}[[I]], [[J]]] : memref
^bb0(%old_value : f32):
diff --git a/mlir/test/Dialect/MemRef/subview.mlir b/mlir/test/Dialect/MemRef/subview.mlir
index 3bfc62d346b66..cb34de760f6e6 100644
--- a/mlir/test/Dialect/MemRef/subview.mlir
+++ b/mlir/test/Dialect/MemRef/subview.mlir
@@ -21,7 +21,7 @@
// CHECK-DAG: #[[$SUBVIEW_MAP12:map[0-9]+]] = affine_map<()[s0] -> (s0)>
// CHECK-LABEL: func @memref_subview(%arg0
-func @memref_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @memref_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
diff --git a/mlir/test/Dialect/NVGPU/roundtrip.mlir b/mlir/test/Dialect/NVGPU/roundtrip.mlir
index 5a35d39f1acca..255694003eab0 100644
--- a/mlir/test/Dialect/NVGPU/roundtrip.mlir
+++ b/mlir/test/Dialect/NVGPU/roundtrip.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s | mlir-opt | FileCheck %s
// CHECK-LABEL: func @ldmatrix(
-func @ldmatrix(%arg0: memref<?x?xf16, 3>, %x: index, %y: index) {
+func.func @ldmatrix(%arg0: memref<?x?xf16, 3>, %x: index, %y: index) {
// CHECK: nvgpu.ldmatrix %{{.*}}[%{{.*}}, %{{.*}}]
// CHECK-SAME: {numTiles = 4 : i32, transpose = false} : memref<?x?xf16, 3> -> vector<4x2xf16>
%l = nvgpu.ldmatrix %arg0[%x, %y] {numTiles = 4 : i32, transpose = false} :
@@ -10,7 +10,7 @@ func @ldmatrix(%arg0: memref<?x?xf16, 3>, %x: index, %y: index) {
}
// CHECK-LABEL: func @mma_sync(
-func @mma_sync(%arg0: vector<4x2xf16>,
+func.func @mma_sync(%arg0: vector<4x2xf16>,
%arg1: vector<2x2xf16>,
%arg2: vector<2x2xf16>) -> vector<2x2xf16> {
// CHECK: nvgpu.mma.sync(%{{.*}}, %{{.*}}, %{{.*}}) {mmaShape = [16, 8, 16]} : (vector<4x2xf16>, vector<2x2xf16>, vector<2x2xf16>) -> vector<2x2xf16>
diff --git a/mlir/test/Dialect/OpenACC/canonicalize.mlir b/mlir/test/Dialect/OpenACC/canonicalize.mlir
index 8337f587f973e..71c388caba920 100644
--- a/mlir/test/Dialect/OpenACC/canonicalize.mlir
+++ b/mlir/test/Dialect/OpenACC/canonicalize.mlir
@@ -1,6 +1,6 @@
// RUN: mlir-opt %s -canonicalize -split-input-file | FileCheck %s
-func @testenterdataop(%a: memref<10xf32>) -> () {
+func.func @testenterdataop(%a: memref<10xf32>) -> () {
%ifCond = arith.constant true
acc.enter_data if(%ifCond) create(%a: memref<10xf32>)
return
@@ -10,7 +10,7 @@ func @testenterdataop(%a: memref<10xf32>) -> () {
// -----
-func @testenterdataop(%a: memref<10xf32>) -> () {
+func.func @testenterdataop(%a: memref<10xf32>) -> () {
%ifCond = arith.constant false
acc.enter_data if(%ifCond) create(%a: memref<10xf32>)
return
@@ -21,7 +21,7 @@ func @testenterdataop(%a: memref<10xf32>) -> () {
// -----
-func @testexitdataop(%a: memref<10xf32>) -> () {
+func.func @testexitdataop(%a: memref<10xf32>) -> () {
%ifCond = arith.constant true
acc.exit_data if(%ifCond) delete(%a: memref<10xf32>)
return
@@ -31,7 +31,7 @@ func @testexitdataop(%a: memref<10xf32>) -> () {
// -----
-func @testexitdataop(%a: memref<10xf32>) -> () {
+func.func @testexitdataop(%a: memref<10xf32>) -> () {
%ifCond = arith.constant false
acc.exit_data if(%ifCond) delete(%a: memref<10xf32>)
return
@@ -42,7 +42,7 @@ func @testexitdataop(%a: memref<10xf32>) -> () {
// -----
-func @testupdateop(%a: memref<10xf32>) -> () {
+func.func @testupdateop(%a: memref<10xf32>) -> () {
%ifCond = arith.constant true
acc.update if(%ifCond) host(%a: memref<10xf32>)
return
@@ -52,7 +52,7 @@ func @testupdateop(%a: memref<10xf32>) -> () {
// -----
-func @testupdateop(%a: memref<10xf32>) -> () {
+func.func @testupdateop(%a: memref<10xf32>) -> () {
%ifCond = arith.constant false
acc.update if(%ifCond) host(%a: memref<10xf32>)
return
@@ -63,7 +63,7 @@ func @testupdateop(%a: memref<10xf32>) -> () {
// -----
-func @testenterdataop(%a: memref<10xf32>, %ifCond: i1) -> () {
+func.func @testenterdataop(%a: memref<10xf32>, %ifCond: i1) -> () {
acc.enter_data if(%ifCond) create(%a: memref<10xf32>)
return
}
@@ -73,7 +73,7 @@ func @testenterdataop(%a: memref<10xf32>, %ifCond: i1) -> () {
// -----
-func @testexitdataop(%a: memref<10xf32>, %ifCond: i1) -> () {
+func.func @testexitdataop(%a: memref<10xf32>, %ifCond: i1) -> () {
acc.exit_data if(%ifCond) delete(%a: memref<10xf32>)
return
}
@@ -83,7 +83,7 @@ func @testexitdataop(%a: memref<10xf32>, %ifCond: i1) -> () {
// -----
-func @testupdateop(%a: memref<10xf32>, %ifCond: i1) -> () {
+func.func @testupdateop(%a: memref<10xf32>, %ifCond: i1) -> () {
acc.update if(%ifCond) host(%a: memref<10xf32>)
return
}
diff --git a/mlir/test/Dialect/OpenACC/ops.mlir b/mlir/test/Dialect/OpenACC/ops.mlir
index 05e8026b554ca..9760ee610c81f 100644
--- a/mlir/test/Dialect/OpenACC/ops.mlir
+++ b/mlir/test/Dialect/OpenACC/ops.mlir
@@ -4,7 +4,7 @@
// Verify the generic form can be parsed.
// RUN: mlir-opt -split-input-file -mlir-print-op-generic %s | mlir-opt -split-input-file | FileCheck %s
-func @compute1(%A: memref<10x10xf32>, %B: memref<10x10xf32>, %C: memref<10x10xf32>) -> memref<10x10xf32> {
+func.func @compute1(%A: memref<10x10xf32>, %B: memref<10x10xf32>, %C: memref<10x10xf32>) -> memref<10x10xf32> {
%c0 = arith.constant 0 : index
%c10 = arith.constant 10 : index
%c1 = arith.constant 1 : index
@@ -60,7 +60,7 @@ func @compute1(%A: memref<10x10xf32>, %B: memref<10x10xf32>, %C: memref<10x10xf3
// -----
-func @compute2(%A: memref<10x10xf32>, %B: memref<10x10xf32>, %C: memref<10x10xf32>) -> memref<10x10xf32> {
+func.func @compute2(%A: memref<10x10xf32>, %B: memref<10x10xf32>, %C: memref<10x10xf32>) -> memref<10x10xf32> {
%c0 = arith.constant 0 : index
%c10 = arith.constant 10 : index
%c1 = arith.constant 1 : index
@@ -114,7 +114,7 @@ func @compute2(%A: memref<10x10xf32>, %B: memref<10x10xf32>, %C: memref<10x10xf3
// -----
-func @compute3(%a: memref<10x10xf32>, %b: memref<10x10xf32>, %c: memref<10xf32>, %d: memref<10xf32>) -> memref<10xf32> {
+func.func @compute3(%a: memref<10x10xf32>, %b: memref<10x10xf32>, %c: memref<10xf32>, %d: memref<10xf32>) -> memref<10xf32> {
%lb = arith.constant 0 : index
%st = arith.constant 1 : index
%c10 = arith.constant 10 : index
@@ -197,7 +197,7 @@ func @compute3(%a: memref<10x10xf32>, %b: memref<10x10xf32>, %c: memref<10xf32>,
// -----
-func @testloopop() -> () {
+func.func @testloopop() -> () {
%i64Value = arith.constant 1 : i64
%i32Value = arith.constant 128 : i32
%idxValue = arith.constant 8 : index
@@ -323,7 +323,7 @@ func @testloopop() -> () {
// -----
-func @testparallelop(%a: memref<10xf32>, %b: memref<10xf32>, %c: memref<10x10xf32>) -> () {
+func.func @testparallelop(%a: memref<10xf32>, %b: memref<10xf32>, %c: memref<10x10xf32>) -> () {
%i64value = arith.constant 1 : i64
%i32value = arith.constant 1 : i32
%idxValue = arith.constant 1 : index
@@ -453,7 +453,7 @@ func @testparallelop(%a: memref<10xf32>, %b: memref<10xf32>, %c: memref<10x10xf3
// -----
-func @testdataop(%a: memref<10xf32>, %b: memref<10xf32>, %c: memref<10x10xf32>) -> () {
+func.func @testdataop(%a: memref<10xf32>, %b: memref<10xf32>, %c: memref<10x10xf32>) -> () {
%ifCond = arith.constant true
acc.data if(%ifCond) present(%a : memref<10xf32>) {
}
@@ -527,7 +527,7 @@ func @testdataop(%a: memref<10xf32>, %b: memref<10xf32>, %c: memref<10x10xf32>)
// -----
-func @testupdateop(%a: memref<10xf32>, %b: memref<10xf32>, %c: memref<10x10xf32>) -> () {
+func.func @testupdateop(%a: memref<10xf32>, %b: memref<10xf32>, %c: memref<10x10xf32>) -> () {
%i64Value = arith.constant 1 : i64
%i32Value = arith.constant 1 : i32
%idxValue = arith.constant 1 : index
@@ -657,7 +657,7 @@ acc.shutdown if(%ifCond)
// -----
-func @testexitdataop(%a: memref<10xf32>, %b: memref<10xf32>, %c: memref<10x10xf32>) -> () {
+func.func @testexitdataop(%a: memref<10xf32>, %b: memref<10xf32>, %c: memref<10x10xf32>) -> () {
%ifCond = arith.constant true
%i64Value = arith.constant 1 : i64
%i32Value = arith.constant 1 : i32
@@ -693,7 +693,7 @@ func @testexitdataop(%a: memref<10xf32>, %b: memref<10xf32>, %c: memref<10x10xf3
// -----
-func @testenterdataop(%a: memref<10xf32>, %b: memref<10xf32>, %c: memref<10x10xf32>) -> () {
+func.func @testenterdataop(%a: memref<10xf32>, %b: memref<10xf32>, %c: memref<10x10xf32>) -> () {
%ifCond = arith.constant true
%i64Value = arith.constant 1 : i64
%i32Value = arith.constant 1 : i32
diff --git a/mlir/test/Dialect/OpenMP/invalid.mlir b/mlir/test/Dialect/OpenMP/invalid.mlir
index 1b391815e6de7..e88c7c4b70597 100644
--- a/mlir/test/Dialect/OpenMP/invalid.mlir
+++ b/mlir/test/Dialect/OpenMP/invalid.mlir
@@ -1,6 +1,6 @@
// RUN: mlir-opt -split-input-file -verify-diagnostics %s
-func @unknown_clause() {
+func.func @unknown_clause() {
// expected-error@+1 {{expected '{' to begin a region}}
omp.parallel invalid {
}
@@ -10,7 +10,7 @@ func @unknown_clause() {
// -----
-func @if_once(%n : i1) {
+func.func @if_once(%n : i1) {
// expected-error@+1 {{`if` clause can appear at most once in the expansion of the oilist directive}}
omp.parallel if(%n : i1) if(%n : i1) {
}
@@ -20,7 +20,7 @@ func @if_once(%n : i1) {
// -----
-func @num_threads_once(%n : si32) {
+func.func @num_threads_once(%n : si32) {
// expected-error@+1 {{`num_threads` clause can appear at most once in the expansion of the oilist directive}}
omp.parallel num_threads(%n : si32) num_threads(%n : si32) {
}
@@ -30,7 +30,7 @@ func @num_threads_once(%n : si32) {
// -----
-func @nowait_not_allowed(%n : memref<i32>) {
+func.func @nowait_not_allowed(%n : memref<i32>) {
// expected-error@+1 {{expected '{' to begin a region}}
omp.parallel nowait {}
return
@@ -38,7 +38,7 @@ func @nowait_not_allowed(%n : memref<i32>) {
// -----
-func @linear_not_allowed(%data_var : memref<i32>, %linear_var : i32) {
+func.func @linear_not_allowed(%data_var : memref<i32>, %linear_var : i32) {
// expected-error@+1 {{expected '{' to begin a region}}
omp.parallel linear(%data_var = %linear_var : memref<i32>) {}
return
@@ -46,7 +46,7 @@ func @linear_not_allowed(%data_var : memref<i32>, %linear_var : i32) {
// -----
-func @schedule_not_allowed() {
+func.func @schedule_not_allowed() {
// expected-error@+1 {{expected '{' to begin a region}}
omp.parallel schedule(static) {}
return
@@ -54,7 +54,7 @@ func @schedule_not_allowed() {
// -----
-func @collapse_not_allowed() {
+func.func @collapse_not_allowed() {
// expected-error@+1 {{expected '{' to begin a region}}
omp.parallel collapse(3) {}
return
@@ -62,7 +62,7 @@ func @collapse_not_allowed() {
// -----
-func @order_not_allowed() {
+func.func @order_not_allowed() {
// expected-error@+1 {{expected '{' to begin a region}}
omp.parallel order(concurrent) {}
return
@@ -70,14 +70,14 @@ func @order_not_allowed() {
// -----
-func @ordered_not_allowed() {
+func.func @ordered_not_allowed() {
// expected-error@+1 {{expected '{' to begin a region}}
omp.parallel ordered(2) {}
}
// -----
-func @proc_bind_once() {
+func.func @proc_bind_once() {
// expected-error@+1 {{`proc_bind` clause can appear at most once in the expansion of the oilist directive}}
omp.parallel proc_bind(close) proc_bind(spread) {
}
@@ -87,7 +87,7 @@ func @proc_bind_once() {
// -----
-func @inclusive_not_a_clause(%lb : index, %ub : index, %step : index) {
+func.func @inclusive_not_a_clause(%lb : index, %ub : index, %step : index) {
// expected-error @below {{expected 'for'}}
omp.wsloop nowait inclusive
for (%iv) : index = (%lb) to (%ub) step (%step) {
@@ -97,7 +97,7 @@ func @inclusive_not_a_clause(%lb : index, %ub : index, %step : index) {
// -----
-func @order_value(%lb : index, %ub : index, %step : index) {
+func.func @order_value(%lb : index, %ub : index, %step : index) {
// expected-error @below {{invalid clause value: 'default'}}
omp.wsloop order(default)
for (%iv) : index = (%lb) to (%ub) step (%step) {
@@ -107,7 +107,7 @@ func @order_value(%lb : index, %ub : index, %step : index) {
// -----
-func @if_not_allowed(%lb : index, %ub : index, %step : index, %bool_var : i1) {
+func.func @if_not_allowed(%lb : index, %ub : index, %step : index, %bool_var : i1) {
// expected-error @below {{expected 'for'}}
omp.wsloop if(%bool_var: i1)
for (%iv) : index = (%lb) to (%ub) step (%step) {
@@ -117,7 +117,7 @@ func @if_not_allowed(%lb : index, %ub : index, %step : index, %bool_var : i1) {
// -----
-func @num_threads_not_allowed(%lb : index, %ub : index, %step : index, %int_var : i32) {
+func.func @num_threads_not_allowed(%lb : index, %ub : index, %step : index, %int_var : i32) {
// expected-error @below {{expected 'for'}}
omp.wsloop num_threads(%int_var: i32)
for (%iv) : index = (%lb) to (%ub) step (%step) {
@@ -127,7 +127,7 @@ func @num_threads_not_allowed(%lb : index, %ub : index, %step : index, %int_var
// -----
-func @proc_bind_not_allowed(%lb : index, %ub : index, %step : index) {
+func.func @proc_bind_not_allowed(%lb : index, %ub : index, %step : index) {
// expected-error @below {{expected 'for'}}
omp.wsloop proc_bind(close)
for (%iv) : index = (%lb) to (%ub) step (%step) {
@@ -192,7 +192,7 @@ llvm.func @test_omp_wsloop_dynamic_wrong_modifier3(%lb : i64, %ub : i64, %step :
// -----
-func @omp_simdloop(%lb : index, %ub : index, %step : i32) -> () {
+func.func @omp_simdloop(%lb : index, %ub : index, %step : i32) -> () {
// expected-error @below {{op failed to verify that all of {lowerBound, upperBound, step} have same type}}
"omp.simdloop" (%lb, %ub, %step) ({
^bb0(%iv: index):
@@ -316,7 +316,7 @@ combiner {
omp.yield (%1 : f32)
}
-func @foo(%lb : index, %ub : index, %step : index) {
+func.func @foo(%lb : index, %ub : index, %step : index) {
%c1 = arith.constant 1 : i32
%0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr<f32>
%1 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr<f32>
@@ -333,7 +333,7 @@ func @foo(%lb : index, %ub : index, %step : index) {
// -----
-func @foo(%lb : index, %ub : index, %step : index) {
+func.func @foo(%lb : index, %ub : index, %step : index) {
%c1 = arith.constant 1 : i32
%0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr<f32>
%1 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr<f32>
@@ -362,7 +362,7 @@ combiner {
omp.yield (%1 : f32)
}
-func @foo(%lb : index, %ub : index, %step : index) {
+func.func @foo(%lb : index, %ub : index, %step : index) {
%c1 = arith.constant 1 : i32
%0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr<f32>
@@ -396,7 +396,7 @@ atomic {
omp.yield
}
-func @foo(%lb : index, %ub : index, %step : index, %mem : memref<1xf32>) {
+func.func @foo(%lb : index, %ub : index, %step : index, %mem : memref<1xf32>) {
%c1 = arith.constant 1 : i32
// expected-error @below {{expected accumulator ('memref<1xf32>') to be the same type as reduction declaration ('!llvm.ptr<f32>')}}
@@ -411,7 +411,7 @@ func @foo(%lb : index, %ub : index, %step : index, %mem : memref<1xf32>) {
// -----
-func @omp_critical2() -> () {
+func.func @omp_critical2() -> () {
// expected-error @below {{expected symbol reference @excl to point to a critical declaration}}
omp.critical(@excl) {
omp.terminator
@@ -436,7 +436,7 @@ omp.critical.declare @mutex hint(invalid_hint)
// -----
-func @omp_ordered1(%arg1 : i32, %arg2 : i32, %arg3 : i32) -> () {
+func.func @omp_ordered1(%arg1 : i32, %arg2 : i32, %arg3 : i32) -> () {
omp.wsloop ordered(1)
for (%0) : i32 = (%arg1) to (%arg2) step (%arg3) {
// expected-error @below {{ordered region must be closely nested inside a worksharing-loop region with an ordered clause without parameter present}}
@@ -450,7 +450,7 @@ func @omp_ordered1(%arg1 : i32, %arg2 : i32, %arg3 : i32) -> () {
// -----
-func @omp_ordered2(%arg1 : i32, %arg2 : i32, %arg3 : i32) -> () {
+func.func @omp_ordered2(%arg1 : i32, %arg2 : i32, %arg3 : i32) -> () {
omp.wsloop for (%0) : i32 = (%arg1) to (%arg2) step (%arg3) {
// expected-error @below {{ordered region must be closely nested inside a worksharing-loop region with an ordered clause without parameter present}}
omp.ordered_region {
@@ -463,7 +463,7 @@ func @omp_ordered2(%arg1 : i32, %arg2 : i32, %arg3 : i32) -> () {
// -----
-func @omp_ordered3(%vec0 : i64) -> () {
+func.func @omp_ordered3(%vec0 : i64) -> () {
// expected-error @below {{ordered depend directive must be closely nested inside a worksharing-loop with ordered clause with parameter present}}
omp.ordered depend_type(dependsink) depend_vec(%vec0 : i64) {num_loops_val = 1 : i64}
return
@@ -471,7 +471,7 @@ func @omp_ordered3(%vec0 : i64) -> () {
// -----
-func @omp_ordered4(%arg1 : i32, %arg2 : i32, %arg3 : i32, %vec0 : i64) -> () {
+func.func @omp_ordered4(%arg1 : i32, %arg2 : i32, %arg3 : i32, %vec0 : i64) -> () {
omp.wsloop ordered(0)
for (%0) : i32 = (%arg1) to (%arg2) step (%arg3) {
// expected-error @below {{ordered depend directive must be closely nested inside a worksharing-loop with ordered clause with parameter present}}
@@ -483,7 +483,7 @@ func @omp_ordered4(%arg1 : i32, %arg2 : i32, %arg3 : i32, %vec0 : i64) -> () {
}
// -----
-func @omp_ordered5(%arg1 : i32, %arg2 : i32, %arg3 : i32, %vec0 : i64, %vec1 : i64) -> () {
+func.func @omp_ordered5(%arg1 : i32, %arg2 : i32, %arg3 : i32, %vec0 : i64, %vec1 : i64) -> () {
omp.wsloop ordered(1)
for (%0) : i32 = (%arg1) to (%arg2) step (%arg3) {
// expected-error @below {{number of variables in depend clause does not match number of iteration variables in the doacross loop}}
@@ -496,7 +496,7 @@ func @omp_ordered5(%arg1 : i32, %arg2 : i32, %arg3 : i32, %vec0 : i64, %vec1 : i
// -----
-func @omp_atomic_read1(%x: memref<i32>, %v: memref<i32>) {
+func.func @omp_atomic_read1(%x: memref<i32>, %v: memref<i32>) {
// expected-error @below {{the hints omp_sync_hint_nonspeculative and omp_sync_hint_speculative cannot be combined.}}
omp.atomic.read %v = %x hint(speculative, nonspeculative) : memref<i32>
return
@@ -504,7 +504,7 @@ func @omp_atomic_read1(%x: memref<i32>, %v: memref<i32>) {
// -----
-func @omp_atomic_read2(%x: memref<i32>, %v: memref<i32>) {
+func.func @omp_atomic_read2(%x: memref<i32>, %v: memref<i32>) {
// expected-error @below {{invalid clause value: 'xyz'}}
omp.atomic.read %v = %x memory_order(xyz) : memref<i32>
return
@@ -512,7 +512,7 @@ func @omp_atomic_read2(%x: memref<i32>, %v: memref<i32>) {
// -----
-func @omp_atomic_read3(%x: memref<i32>, %v: memref<i32>) {
+func.func @omp_atomic_read3(%x: memref<i32>, %v: memref<i32>) {
// expected-error @below {{memory-order must not be acq_rel or release for atomic reads}}
omp.atomic.read %v = %x memory_order(acq_rel) : memref<i32>
return
@@ -520,7 +520,7 @@ func @omp_atomic_read3(%x: memref<i32>, %v: memref<i32>) {
// -----
-func @omp_atomic_read4(%x: memref<i32>, %v: memref<i32>) {
+func.func @omp_atomic_read4(%x: memref<i32>, %v: memref<i32>) {
// expected-error @below {{memory-order must not be acq_rel or release for atomic reads}}
omp.atomic.read %v = %x memory_order(release) : memref<i32>
return
@@ -528,7 +528,7 @@ func @omp_atomic_read4(%x: memref<i32>, %v: memref<i32>) {
// -----
-func @omp_atomic_read5(%x: memref<i32>, %v: memref<i32>) {
+func.func @omp_atomic_read5(%x: memref<i32>, %v: memref<i32>) {
// expected-error @below {{`memory_order` clause can appear at most once in the expansion of the oilist directive}}
omp.atomic.read %v = %x memory_order(acquire) memory_order(relaxed) : memref<i32>
return
@@ -536,7 +536,7 @@ func @omp_atomic_read5(%x: memref<i32>, %v: memref<i32>) {
// -----
-func @omp_atomic_read6(%x: memref<i32>, %v: memref<i32>) {
+func.func @omp_atomic_read6(%x: memref<i32>, %v: memref<i32>) {
// expected-error @below {{`hint` clause can appear at most once in the expansion of the oilist directive}}
omp.atomic.read %v = %x hint(speculative) hint(contended) : memref<i32>
return
@@ -544,7 +544,7 @@ func @omp_atomic_read6(%x: memref<i32>, %v: memref<i32>) {
// -----
-func @omp_atomic_read6(%x: memref<i32>, %v: memref<i32>) {
+func.func @omp_atomic_read6(%x: memref<i32>, %v: memref<i32>) {
// expected-error @below {{read and write must not be to the same location for atomic reads}}
omp.atomic.read %x = %x hint(speculative) : memref<i32>
return
@@ -552,7 +552,7 @@ func @omp_atomic_read6(%x: memref<i32>, %v: memref<i32>) {
// -----
-func @omp_atomic_write1(%addr : memref<i32>, %val : i32) {
+func.func @omp_atomic_write1(%addr : memref<i32>, %val : i32) {
// expected-error @below {{the hints omp_sync_hint_uncontended and omp_sync_hint_contended cannot be combined}}
omp.atomic.write %addr = %val hint(contended, uncontended) : memref<i32>, i32
return
@@ -560,7 +560,7 @@ func @omp_atomic_write1(%addr : memref<i32>, %val : i32) {
// -----
-func @omp_atomic_write2(%addr : memref<i32>, %val : i32) {
+func.func @omp_atomic_write2(%addr : memref<i32>, %val : i32) {
// expected-error @below {{memory-order must not be acq_rel or acquire for atomic writes}}
omp.atomic.write %addr = %val memory_order(acq_rel) : memref<i32>, i32
return
@@ -568,7 +568,7 @@ func @omp_atomic_write2(%addr : memref<i32>, %val : i32) {
// -----
-func @omp_atomic_write3(%addr : memref<i32>, %val : i32) {
+func.func @omp_atomic_write3(%addr : memref<i32>, %val : i32) {
// expected-error @below {{memory-order must not be acq_rel or acquire for atomic writes}}
omp.atomic.write %addr = %val memory_order(acquire) : memref<i32>, i32
return
@@ -576,7 +576,7 @@ func @omp_atomic_write3(%addr : memref<i32>, %val : i32) {
// -----
-func @omp_atomic_write4(%addr : memref<i32>, %val : i32) {
+func.func @omp_atomic_write4(%addr : memref<i32>, %val : i32) {
// expected-error @below {{`memory_order` clause can appear at most once in the expansion of the oilist directive}}
omp.atomic.write %addr = %val memory_order(release) memory_order(seq_cst) : memref<i32>, i32
return
@@ -584,7 +584,7 @@ func @omp_atomic_write4(%addr : memref<i32>, %val : i32) {
// -----
-func @omp_atomic_write5(%addr : memref<i32>, %val : i32) {
+func.func @omp_atomic_write5(%addr : memref<i32>, %val : i32) {
// expected-error @below {{`hint` clause can appear at most once in the expansion of the oilist directive}}
omp.atomic.write %addr = %val hint(contended) hint(speculative) : memref<i32>, i32
return
@@ -592,7 +592,7 @@ func @omp_atomic_write5(%addr : memref<i32>, %val : i32) {
// -----
-func @omp_atomic_write6(%addr : memref<i32>, %val : i32) {
+func.func @omp_atomic_write6(%addr : memref<i32>, %val : i32) {
// expected-error @below {{invalid clause value: 'xyz'}}
omp.atomic.write %addr = %val memory_order(xyz) : memref<i32>, i32
return
@@ -600,7 +600,7 @@ func @omp_atomic_write6(%addr : memref<i32>, %val : i32) {
// -----
-func @omp_atomic_update1(%x: memref<i32>, %expr: f32) {
+func.func @omp_atomic_update1(%x: memref<i32>, %expr: f32) {
// expected-error @below {{the type of the operand must be a pointer type whose element type is the same as that of the region argument}}
omp.atomic.update %x : memref<i32> {
^bb0(%xval: f32):
@@ -612,7 +612,7 @@ func @omp_atomic_update1(%x: memref<i32>, %expr: f32) {
// -----
-func @omp_atomic_update2(%x: memref<i32>, %expr: i32) {
+func.func @omp_atomic_update2(%x: memref<i32>, %expr: i32) {
// expected-error @+2 {{op expects regions to end with 'omp.yield', found 'omp.terminator'}}
// expected-note @below {{in custom textual format, the absence of terminator implies 'omp.yield'}}
omp.atomic.update %x : memref<i32> {
@@ -625,7 +625,7 @@ func @omp_atomic_update2(%x: memref<i32>, %expr: i32) {
// -----
-func @omp_atomic_update3(%x: memref<i32>, %expr: i32) {
+func.func @omp_atomic_update3(%x: memref<i32>, %expr: i32) {
// expected-error @below {{memory-order must not be acq_rel or acquire for atomic updates}}
omp.atomic.update memory_order(acq_rel) %x : memref<i32> {
^bb0(%xval: i32):
@@ -637,7 +637,7 @@ func @omp_atomic_update3(%x: memref<i32>, %expr: i32) {
// -----
-func @omp_atomic_update4(%x: memref<i32>, %expr: i32) {
+func.func @omp_atomic_update4(%x: memref<i32>, %expr: i32) {
// expected-error @below {{memory-order must not be acq_rel or acquire for atomic updates}}
omp.atomic.update memory_order(acquire) %x : memref<i32> {
^bb0(%xval: i32):
@@ -649,7 +649,7 @@ func @omp_atomic_update4(%x: memref<i32>, %expr: i32) {
// -----
-func @omp_atomic_update5(%x: memref<i32>, %expr: i32) {
+func.func @omp_atomic_update5(%x: memref<i32>, %expr: i32) {
// expected-error @below {{invalid kind of type specified}}
omp.atomic.update %x : i32 {
^bb0(%xval: i32):
@@ -661,7 +661,7 @@ func @omp_atomic_update5(%x: memref<i32>, %expr: i32) {
// -----
-func @omp_atomic_update6(%x: memref<i32>, %expr: i32) {
+func.func @omp_atomic_update6(%x: memref<i32>, %expr: i32) {
// expected-error @below {{only updated value must be returned}}
omp.atomic.update %x : memref<i32> {
^bb0(%xval: i32):
@@ -673,7 +673,7 @@ func @omp_atomic_update6(%x: memref<i32>, %expr: i32) {
// -----
-func @omp_atomic_update7(%x: memref<i32>, %expr: i32, %y: f32) {
+func.func @omp_atomic_update7(%x: memref<i32>, %expr: i32, %y: f32) {
// expected-error @below {{input and yielded value must have the same type}}
omp.atomic.update %x : memref<i32> {
^bb0(%xval: i32):
@@ -685,7 +685,7 @@ func @omp_atomic_update7(%x: memref<i32>, %expr: i32, %y: f32) {
// -----
-func @omp_atomic_update8(%x: memref<i32>, %expr: i32) {
+func.func @omp_atomic_update8(%x: memref<i32>, %expr: i32) {
// expected-error @below {{the region must accept exactly one argument}}
omp.atomic.update %x : memref<i32> {
^bb0(%xval: i32, %tmp: i32):
@@ -697,7 +697,7 @@ func @omp_atomic_update8(%x: memref<i32>, %expr: i32) {
// -----
-func @omp_atomic_update9(%x: memref<i32>, %expr: i32) {
+func.func @omp_atomic_update9(%x: memref<i32>, %expr: i32) {
// expected-error @below {{the update region must have at least two operations (binop and terminator)}}
omp.atomic.update %x : memref<i32> {
^bb0(%xval: i32):
@@ -744,7 +744,7 @@ func @omp_atomic_update(%x: memref<i32>, %expr: i32) {
// -----
-func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) {
+func.func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) {
// expected-error @below {{expected three operations in omp.atomic.capture region}}
omp.atomic.capture {
omp.atomic.read %v = %x : memref<i32>
@@ -755,7 +755,7 @@ func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) {
// -----
-func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) {
+func.func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) {
omp.atomic.capture {
// expected-error @below {{invalid sequence of operations in the capture region}}
omp.atomic.read %v = %x : memref<i32>
@@ -767,7 +767,7 @@ func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) {
// -----
-func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) {
+func.func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) {
omp.atomic.capture {
// expected-error @below {{invalid sequence of operations in the capture region}}
omp.atomic.update %x : memref<i32> {
@@ -787,7 +787,7 @@ func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) {
// -----
-func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) {
+func.func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) {
omp.atomic.capture {
// expected-error @below {{invalid sequence of operations in the capture region}}
omp.atomic.write %x = %expr : memref<i32>, i32
@@ -799,7 +799,7 @@ func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) {
// -----
-func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) {
+func.func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) {
omp.atomic.capture {
// expected-error @below {{invalid sequence of operations in the capture region}}
omp.atomic.write %x = %expr : memref<i32>, i32
@@ -815,7 +815,7 @@ func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) {
// -----
-func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) {
+func.func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) {
omp.atomic.capture {
// expected-error @below {{invalid sequence of operations in the capture region}}
omp.atomic.update %x : memref<i32> {
@@ -831,7 +831,7 @@ func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) {
// -----
-func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) {
+func.func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) {
omp.atomic.capture {
// expected-error @below {{invalid sequence of operations in the capture region}}
omp.atomic.write %x = %expr : memref<i32>, i32
@@ -843,7 +843,7 @@ func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) {
// -----
-func @omp_atomic_capture(%x: memref<i32>, %y: memref<i32>, %v: memref<i32>, %expr: i32) {
+func.func @omp_atomic_capture(%x: memref<i32>, %y: memref<i32>, %v: memref<i32>, %expr: i32) {
omp.atomic.capture {
// expected-error @below {{updated variable in omp.atomic.update must be captured in second operation}}
omp.atomic.update %x : memref<i32> {
@@ -858,7 +858,7 @@ func @omp_atomic_capture(%x: memref<i32>, %y: memref<i32>, %v: memref<i32>, %exp
// -----
-func @omp_atomic_capture(%x: memref<i32>, %y: memref<i32>, %v: memref<i32>, %expr: i32) {
+func.func @omp_atomic_capture(%x: memref<i32>, %y: memref<i32>, %v: memref<i32>, %expr: i32) {
omp.atomic.capture {
// expected-error @below {{captured variable in omp.atomic.read must be updated in second operation}}
omp.atomic.read %v = %y : memref<i32>
@@ -873,7 +873,7 @@ func @omp_atomic_capture(%x: memref<i32>, %y: memref<i32>, %v: memref<i32>, %exp
// -----
-func @omp_atomic_capture(%x: memref<i32>, %y: memref<i32>, %v: memref<i32>, %expr: i32) {
+func.func @omp_atomic_capture(%x: memref<i32>, %y: memref<i32>, %v: memref<i32>, %expr: i32) {
omp.atomic.capture {
// expected-error @below {{captured variable in omp.atomic.read must be updated in second operation}}
omp.atomic.read %v = %x : memref<i32>
@@ -944,7 +944,7 @@ func @omp_atomic_capture(%x: memref<i32>, %v: memref<i32>, %expr: i32) {
// -----
-func @omp_sections(%data_var : memref<i32>) -> () {
+func.func @omp_sections(%data_var : memref<i32>) -> () {
// expected-error @below {{expected equal sizes for allocate and allocator variables}}
"omp.sections" (%data_var) ({
omp.terminator
@@ -954,7 +954,7 @@ func @omp_sections(%data_var : memref<i32>) -> () {
// -----
-func @omp_sections(%data_var : memref<i32>) -> () {
+func.func @omp_sections(%data_var : memref<i32>) -> () {
// expected-error @below {{expected as many reduction symbol references as reduction variables}}
"omp.sections" (%data_var) ({
omp.terminator
@@ -964,7 +964,7 @@ func @omp_sections(%data_var : memref<i32>) -> () {
// -----
-func @omp_sections(%data_var : memref<i32>) -> () {
+func.func @omp_sections(%data_var : memref<i32>) -> () {
// expected-error @below {{expected omp.section op or terminator op inside region}}
omp.sections {
"test.payload" () : () -> ()
@@ -974,7 +974,7 @@ func @omp_sections(%data_var : memref<i32>) -> () {
// -----
-func @omp_sections(%cond : i1) {
+func.func @omp_sections(%cond : i1) {
// expected-error @below {{expected '{' to begin a region}}
omp.sections if(%cond) {
omp.terminator
@@ -984,7 +984,7 @@ func @omp_sections(%cond : i1) {
// -----
-func @omp_sections() {
+func.func @omp_sections() {
// expected-error @below {{expected '{' to begin a region}}
omp.sections num_threads(10) {
omp.terminator
@@ -994,7 +994,7 @@ func @omp_sections() {
// -----
-func @omp_sections() {
+func.func @omp_sections() {
// expected-error @below {{expected '{' to begin a region}}
omp.sections proc_bind(close) {
omp.terminator
@@ -1004,7 +1004,7 @@ func @omp_sections() {
// -----
-func @omp_sections(%data_var : memref<i32>, %linear_var : i32) {
+func.func @omp_sections(%data_var : memref<i32>, %linear_var : i32) {
// expected-error @below {{expected '{' to begin a region}}
omp.sections linear(%data_var = %linear_var : memref<i32>) {
omp.terminator
@@ -1014,7 +1014,7 @@ func @omp_sections(%data_var : memref<i32>, %linear_var : i32) {
// -----
-func @omp_sections() {
+func.func @omp_sections() {
// expected-error @below {{expected '{' to begin a region}}
omp.sections schedule(static, none) {
omp.terminator
@@ -1024,7 +1024,7 @@ func @omp_sections() {
// -----
-func @omp_sections() {
+func.func @omp_sections() {
// expected-error @below {{expected '{' to begin a region}}
omp.sections collapse(3) {
omp.terminator
@@ -1034,7 +1034,7 @@ func @omp_sections() {
// -----
-func @omp_sections() {
+func.func @omp_sections() {
// expected-error @below {{expected '{' to begin a region}}
omp.sections ordered(2) {
omp.terminator
@@ -1044,7 +1044,7 @@ func @omp_sections() {
// -----
-func @omp_sections() {
+func.func @omp_sections() {
// expected-error @below {{expected '{' to begin a region}}
omp.sections order(concurrent) {
omp.terminator
@@ -1054,7 +1054,7 @@ func @omp_sections() {
// -----
-func @omp_sections() {
+func.func @omp_sections() {
// expected-error @below {{failed to verify constraint: region with 1 blocks}}
omp.sections {
omp.section {
@@ -1069,7 +1069,7 @@ func @omp_sections() {
// -----
-func @omp_single(%data_var : memref<i32>) -> () {
+func.func @omp_single(%data_var : memref<i32>) -> () {
// expected-error @below {{expected equal sizes for allocate and allocator variables}}
"omp.single" (%data_var) ({
omp.barrier
@@ -1079,7 +1079,7 @@ func @omp_single(%data_var : memref<i32>) -> () {
// -----
-func @omp_task(%ptr: !llvm.ptr<f32>) {
+func.func @omp_task(%ptr: !llvm.ptr<f32>) {
// expected-error @below {{op expected symbol reference @add_f32 to point to a reduction declaration}}
omp.task in_reduction(@add_f32 -> %ptr : !llvm.ptr<f32>) {
// CHECK: "test.foo"() : () -> ()
@@ -1103,7 +1103,7 @@ combiner {
omp.yield (%1 : f32)
}
-func @omp_task(%ptr: !llvm.ptr<f32>) {
+func.func @omp_task(%ptr: !llvm.ptr<f32>) {
// expected-error @below {{op accumulator variable used more than once}}
omp.task in_reduction(@add_f32 -> %ptr : !llvm.ptr<f32>, @add_f32 -> %ptr : !llvm.ptr<f32>) {
// CHECK: "test.foo"() : () -> ()
@@ -1133,7 +1133,7 @@ atomic {
omp.yield
}
-func @omp_task(%mem: memref<1xf32>) {
+func.func @omp_task(%mem: memref<1xf32>) {
// expected-error @below {{op expected accumulator ('memref<1xf32>') to be the same type as reduction declaration ('!llvm.ptr<i32>')}}
omp.task in_reduction(@add_i32 -> %mem : memref<1xf32>) {
// CHECK: "test.foo"() : () -> ()
diff --git a/mlir/test/Dialect/OpenMP/ops.mlir b/mlir/test/Dialect/OpenMP/ops.mlir
index 21b1c232a2b96..57387b35abd04 100644
--- a/mlir/test/Dialect/OpenMP/ops.mlir
+++ b/mlir/test/Dialect/OpenMP/ops.mlir
@@ -1,12 +1,12 @@
// RUN: mlir-opt -split-input-file %s | mlir-opt | FileCheck %s
-func @omp_barrier() -> () {
+func.func @omp_barrier() -> () {
// CHECK: omp.barrier
omp.barrier
return
}
-func @omp_master() -> () {
+func.func @omp_master() -> () {
// CHECK: omp.master
omp.master {
// CHECK: omp.terminator
@@ -16,13 +16,13 @@ func @omp_master() -> () {
return
}
-func @omp_taskwait() -> () {
+func.func @omp_taskwait() -> () {
// CHECK: omp.taskwait
omp.taskwait
return
}
-func @omp_taskyield() -> () {
+func.func @omp_taskyield() -> () {
// CHECK: omp.taskyield
omp.taskyield
return
@@ -30,7 +30,7 @@ func @omp_taskyield() -> () {
// CHECK-LABEL: func @omp_flush
// CHECK-SAME: ([[ARG0:%.*]]: i32) {
-func @omp_flush(%arg0 : i32) -> () {
+func.func @omp_flush(%arg0 : i32) -> () {
// Test without data var
// CHECK: omp.flush
omp.flush
@@ -46,12 +46,12 @@ func @omp_flush(%arg0 : i32) -> () {
return
}
-func @omp_terminator() -> () {
+func.func @omp_terminator() -> () {
// CHECK: omp.terminator
omp.terminator
}
-func @omp_parallel(%data_var : memref<i32>, %if_cond : i1, %num_threads : si32) -> () {
+func.func @omp_parallel(%data_var : memref<i32>, %if_cond : i1, %num_threads : si32) -> () {
// CHECK: omp.parallel if(%{{.*}}) num_threads(%{{.*}} : si32) allocate(%{{.*}} : memref<i32> -> %{{.*}} : memref<i32>)
"omp.parallel" (%if_cond, %num_threads, %data_var, %data_var) ({
@@ -88,7 +88,7 @@ func @omp_parallel(%data_var : memref<i32>, %if_cond : i1, %num_threads : si32)
return
}
-func @omp_parallel_pretty(%data_var : memref<i32>, %if_cond : i1, %num_threads : si32, %allocator : si32) -> () {
+func.func @omp_parallel_pretty(%data_var : memref<i32>, %if_cond : i1, %num_threads : si32, %allocator : si32) -> () {
// CHECK: omp.parallel
omp.parallel {
omp.terminator
@@ -122,7 +122,7 @@ func @omp_parallel_pretty(%data_var : memref<i32>, %if_cond : i1, %num_threads :
}
// CHECK-LABEL: omp_wsloop
-func @omp_wsloop(%lb : index, %ub : index, %step : index, %data_var : memref<i32>, %linear_var : i32, %chunk_var : i32) -> () {
+func.func @omp_wsloop(%lb : index, %ub : index, %step : index, %data_var : memref<i32>, %linear_var : i32, %chunk_var : i32) -> () {
// CHECK: omp.wsloop collapse(2) ordered(1)
// CHECK-SAME: for (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}})
@@ -168,7 +168,7 @@ func @omp_wsloop(%lb : index, %ub : index, %step : index, %data_var : memref<i32
}
// CHECK-LABEL: omp_wsloop_pretty
-func @omp_wsloop_pretty(%lb : index, %ub : index, %step : index, %data_var : memref<i32>, %linear_var : i32, %chunk_var : i32, %chunk_var2 : i16) -> () {
+func.func @omp_wsloop_pretty(%lb : index, %ub : index, %step : index, %data_var : memref<i32>, %linear_var : i32, %chunk_var : i32, %chunk_var2 : i16) -> () {
// CHECK: omp.wsloop collapse(2) ordered(2)
// CHECK-SAME: for (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}})
@@ -233,7 +233,7 @@ func @omp_wsloop_pretty(%lb : index, %ub : index, %step : index, %data_var : mem
}
// CHECK-LABEL: omp_wsloop_pretty_multi_block
-func @omp_wsloop_pretty_multi_block(%lb : index, %ub : index, %step : index, %data1 : memref<?xi32>, %data2 : memref<?xi32>) -> () {
+func.func @omp_wsloop_pretty_multi_block(%lb : index, %ub : index, %step : index, %data1 : memref<?xi32>, %data2 : memref<?xi32>) -> () {
// CHECK: omp.wsloop for (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}})
omp.wsloop for (%iv) : index = (%lb) to (%ub) step (%step) {
@@ -276,7 +276,7 @@ func @omp_wsloop_pretty_multi_block(%lb : index, %ub : index, %step : index, %da
}
// CHECK-LABEL: omp_wsloop_pretty_non_index
-func @omp_wsloop_pretty_non_index(%lb1 : i32, %ub1 : i32, %step1 : i32, %lb2 : i64, %ub2 : i64, %step2 : i64,
+func.func @omp_wsloop_pretty_non_index(%lb1 : i32, %ub1 : i32, %step1 : i32, %lb2 : i64, %ub2 : i64, %step2 : i64,
%data1 : memref<?xi32>, %data2 : memref<?xi64>) -> () {
// CHECK: omp.wsloop for (%{{.*}}) : i32 = (%{{.*}}) to (%{{.*}}) step (%{{.*}})
@@ -301,7 +301,7 @@ func @omp_wsloop_pretty_non_index(%lb1 : i32, %ub1 : i32, %step1 : i32, %lb2 : i
}
// CHECK-LABEL: omp_wsloop_pretty_multiple
-func @omp_wsloop_pretty_multiple(%lb1 : i32, %ub1 : i32, %step1 : i32, %lb2 : i32, %ub2 : i32, %step2 : i32, %data1 : memref<?xi32>) -> () {
+func.func @omp_wsloop_pretty_multiple(%lb1 : i32, %ub1 : i32, %step1 : i32, %lb2 : i32, %ub2 : i32, %step2 : i32, %data1 : memref<?xi32>) -> () {
// CHECK: omp.wsloop for (%{{.*}}, %{{.*}}) : i32 = (%{{.*}}, %{{.*}}) to (%{{.*}}, %{{.*}}) step (%{{.*}}, %{{.*}})
omp.wsloop for (%iv1, %iv2) : i32 = (%lb1, %lb2) to (%ub1, %ub2) step (%step1, %step2) {
@@ -316,7 +316,7 @@ func @omp_wsloop_pretty_multiple(%lb1 : i32, %ub1 : i32, %step1 : i32, %lb2 : i3
}
// CHECK-LABEL: omp_simdloop
-func @omp_simdloop(%lb : index, %ub : index, %step : index) -> () {
+func.func @omp_simdloop(%lb : index, %ub : index, %step : index) -> () {
// CHECK: omp.simdloop (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}})
"omp.simdloop" (%lb, %ub, %step) ({
^bb0(%iv: index):
@@ -329,7 +329,7 @@ func @omp_simdloop(%lb : index, %ub : index, %step : index) -> () {
// CHECK-LABEL: omp_simdloop_pretty
-func @omp_simdloop_pretty(%lb : index, %ub : index, %step : index) -> () {
+func.func @omp_simdloop_pretty(%lb : index, %ub : index, %step : index) -> () {
// CHECK: omp.simdloop (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}})
omp.simdloop (%iv) : index = (%lb) to (%ub) step (%step) {
omp.yield
@@ -338,7 +338,7 @@ func @omp_simdloop_pretty(%lb : index, %ub : index, %step : index) -> () {
}
// CHECK-LABEL: omp_simdloop_pretty_multiple
-func @omp_simdloop_pretty_multiple(%lb1 : index, %ub1 : index, %step1 : index, %lb2 : index, %ub2 : index, %step2 : index) -> () {
+func.func @omp_simdloop_pretty_multiple(%lb1 : index, %ub1 : index, %step1 : index, %lb2 : index, %ub2 : index, %step2 : index) -> () {
// CHECK: omp.simdloop (%{{.*}}, %{{.*}}) : index = (%{{.*}}, %{{.*}}) to (%{{.*}}, %{{.*}}) step (%{{.*}}, %{{.*}})
omp.simdloop (%iv1, %iv2) : index = (%lb1, %lb2) to (%ub1, %ub2) step (%step1, %step2) {
omp.yield
@@ -347,7 +347,7 @@ func @omp_simdloop_pretty_multiple(%lb1 : index, %ub1 : index, %step1 : index, %
}
// CHECK-LABEL: omp_target
-func @omp_target(%if_cond : i1, %device : si32, %num_threads : si32) -> () {
+func.func @omp_target(%if_cond : i1, %device : si32, %num_threads : si32) -> () {
// Test with optional operands; if_expr, device, thread_limit, private, firstprivate and nowait.
// CHECK: omp.target if({{.*}}) device({{.*}}) thread_limit({{.*}}) nowait
@@ -363,7 +363,7 @@ func @omp_target(%if_cond : i1, %device : si32, %num_threads : si32) -> () {
}
// CHECK-LABEL: omp_target_pretty
-func @omp_target_pretty(%if_cond : i1, %device : si32, %num_threads : si32) -> () {
+func.func @omp_target_pretty(%if_cond : i1, %device : si32, %num_threads : si32) -> () {
// CHECK: omp.target if({{.*}}) device({{.*}})
omp.target if(%if_cond) device(%device : si32) {
omp.terminator
@@ -408,7 +408,7 @@ atomic {
}
// CHECK-LABEL: func @wsloop_reduction
-func @wsloop_reduction(%lb : index, %ub : index, %step : index) {
+func.func @wsloop_reduction(%lb : index, %ub : index, %step : index) {
%c1 = arith.constant 1 : i32
%0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr<f32>
// CHECK: reduction(@add_f32 -> %{{.+}} : !llvm.ptr<f32>)
@@ -423,7 +423,7 @@ func @wsloop_reduction(%lb : index, %ub : index, %step : index) {
}
// CHECK-LABEL: func @parallel_reduction
-func @parallel_reduction() {
+func.func @parallel_reduction() {
%c1 = arith.constant 1 : i32
%0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr<f32>
// CHECK: omp.parallel reduction(@add_f32 -> {{.+}} : !llvm.ptr<f32>)
@@ -437,7 +437,7 @@ func @parallel_reduction() {
}
// CHECK: func @parallel_wsloop_reduction
-func @parallel_wsloop_reduction(%lb : index, %ub : index, %step : index) {
+func.func @parallel_wsloop_reduction(%lb : index, %ub : index, %step : index) {
%c1 = arith.constant 1 : i32
%0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr<f32>
// CHECK: omp.parallel reduction(@add_f32 -> %{{.+}} : !llvm.ptr<f32>) {
@@ -457,7 +457,7 @@ func @parallel_wsloop_reduction(%lb : index, %ub : index, %step : index) {
}
// CHECK-LABEL: func @sections_reduction
-func @sections_reduction() {
+func.func @sections_reduction() {
%c1 = arith.constant 1 : i32
%0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr<f32>
// CHECK: omp.sections reduction(@add_f32 -> {{.+}} : !llvm.ptr<f32>)
@@ -499,7 +499,7 @@ combiner {
// CHECK-NOT: atomic
// CHECK-LABEL: func @wsloop_reduction2
-func @wsloop_reduction2(%lb : index, %ub : index, %step : index) {
+func.func @wsloop_reduction2(%lb : index, %ub : index, %step : index) {
%0 = memref.alloca() : memref<1xf32>
// CHECK: omp.wsloop reduction(@add2_f32 -> %{{.+}} : memref<1xf32>)
omp.wsloop reduction(@add2_f32 -> %0 : memref<1xf32>)
@@ -513,7 +513,7 @@ func @wsloop_reduction2(%lb : index, %ub : index, %step : index) {
}
// CHECK-LABEL: func @parallel_reduction2
-func @parallel_reduction2() {
+func.func @parallel_reduction2() {
%0 = memref.alloca() : memref<1xf32>
// CHECK: omp.parallel reduction(@add2_f32 -> %{{.+}} : memref<1xf32>)
omp.parallel reduction(@add2_f32 -> %0 : memref<1xf32>) {
@@ -526,7 +526,7 @@ func @parallel_reduction2() {
}
// CHECK: func @parallel_wsloop_reduction2
-func @parallel_wsloop_reduction2(%lb : index, %ub : index, %step : index) {
+func.func @parallel_wsloop_reduction2(%lb : index, %ub : index, %step : index) {
%c1 = arith.constant 1 : i32
%0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr<f32>
// CHECK: omp.parallel reduction(@add2_f32 -> %{{.+}} : !llvm.ptr<f32>) {
@@ -546,7 +546,7 @@ func @parallel_wsloop_reduction2(%lb : index, %ub : index, %step : index) {
}
// CHECK-LABEL: func @sections_reduction2
-func @sections_reduction2() {
+func.func @sections_reduction2() {
%0 = memref.alloca() : memref<1xf32>
// CHECK: omp.sections reduction(@add2_f32 -> %{{.+}} : memref<1xf32>)
omp.sections reduction(@add2_f32 -> %0 : memref<1xf32>) {
@@ -590,7 +590,7 @@ omp.critical.declare @mutex10
// CHECK-LABEL: omp_critical
-func @omp_critical() -> () {
+func.func @omp_critical() -> () {
// CHECK: omp.critical
omp.critical {
omp.terminator
@@ -603,7 +603,7 @@ func @omp_critical() -> () {
return
}
-func @omp_ordered(%arg1 : i32, %arg2 : i32, %arg3 : i32,
+func.func @omp_ordered(%arg1 : i32, %arg2 : i32, %arg3 : i32,
%vec0 : i64, %vec1 : i64, %vec2 : i64, %vec3 : i64) -> () {
// CHECK: omp.ordered_region
omp.ordered_region {
@@ -648,7 +648,7 @@ func @omp_ordered(%arg1 : i32, %arg2 : i32, %arg3 : i32,
// CHECK-LABEL: omp_atomic_read
// CHECK-SAME: (%[[v:.*]]: memref<i32>, %[[x:.*]]: memref<i32>)
-func @omp_atomic_read(%v: memref<i32>, %x: memref<i32>) {
+func.func @omp_atomic_read(%v: memref<i32>, %x: memref<i32>) {
// CHECK: omp.atomic.read %[[v]] = %[[x]] : memref<i32>
omp.atomic.read %v = %x : memref<i32>
// CHECK: omp.atomic.read %[[v]] = %[[x]] memory_order(seq_cst) : memref<i32>
@@ -668,7 +668,7 @@ func @omp_atomic_read(%v: memref<i32>, %x: memref<i32>) {
// CHECK-LABEL: omp_atomic_write
// CHECK-SAME: (%[[ADDR:.*]]: memref<i32>, %[[VAL:.*]]: i32)
-func @omp_atomic_write(%addr : memref<i32>, %val : i32) {
+func.func @omp_atomic_write(%addr : memref<i32>, %val : i32) {
// CHECK: omp.atomic.write %[[ADDR]] = %[[VAL]] : memref<i32>, i32
omp.atomic.write %addr = %val : memref<i32>, i32
// CHECK: omp.atomic.write %[[ADDR]] = %[[VAL]] memory_order(seq_cst) : memref<i32>, i32
@@ -686,7 +686,7 @@ func @omp_atomic_write(%addr : memref<i32>, %val : i32) {
// CHECK-LABEL: omp_atomic_update
// CHECK-SAME: (%[[X:.*]]: memref<i32>, %[[EXPR:.*]]: i32, %[[XBOOL:.*]]: memref<i1>, %[[EXPRBOOL:.*]]: i1)
-func @omp_atomic_update(%x : memref<i32>, %expr : i32, %xBool : memref<i1>, %exprBool : i1) {
+func.func @omp_atomic_update(%x : memref<i32>, %expr : i32, %xBool : memref<i1>, %exprBool : i1) {
// CHECK: omp.atomic.update %[[X]] : memref<i32>
// CHECK-NEXT: (%[[XVAL:.*]]: i32):
// CHECK-NEXT: %[[NEWVAL:.*]] = llvm.add %[[XVAL]], %[[EXPR]] : i32
@@ -832,7 +832,7 @@ func @omp_atomic_update(%x : memref<i32>, %expr : i32, %xBool : memref<i1>, %exp
// CHECK-LABEL: omp_atomic_capture
// CHECK-SAME: (%[[v:.*]]: memref<i32>, %[[x:.*]]: memref<i32>, %[[expr:.*]]: i32)
-func @omp_atomic_capture(%v: memref<i32>, %x: memref<i32>, %expr: i32) {
+func.func @omp_atomic_capture(%v: memref<i32>, %x: memref<i32>, %expr: i32) {
// CHECK: omp.atomic.capture {
// CHECK-NEXT: omp.atomic.update %[[x]] : memref<i32>
// CHECK-NEXT: (%[[xval:.*]]: i32):
@@ -1030,7 +1030,7 @@ func @omp_atomic_capture(%v: memref<i32>, %x: memref<i32>, %expr: i32) {
}
// CHECK-LABEL: omp_sectionsop
-func @omp_sectionsop(%data_var1 : memref<i32>, %data_var2 : memref<i32>,
+func.func @omp_sectionsop(%data_var1 : memref<i32>, %data_var2 : memref<i32>,
%data_var3 : memref<i32>, %redn_var : !llvm.ptr<f32>) {
// CHECK: omp.sections allocate(%{{.*}} : memref<i32> -> %{{.*}} : memref<i32>)
"omp.sections" (%data_var1, %data_var1) ({
@@ -1090,7 +1090,7 @@ func @omp_sectionsop(%data_var1 : memref<i32>, %data_var2 : memref<i32>,
}
// CHECK-LABEL: func @omp_single
-func @omp_single() {
+func.func @omp_single() {
omp.parallel {
// CHECK: omp.single {
omp.single {
@@ -1105,7 +1105,7 @@ func @omp_single() {
}
// CHECK-LABEL: func @omp_single_nowait
-func @omp_single_nowait() {
+func.func @omp_single_nowait() {
omp.parallel {
// CHECK: omp.single nowait {
omp.single nowait {
@@ -1120,7 +1120,7 @@ func @omp_single_nowait() {
}
// CHECK-LABEL: func @omp_single_allocate
-func @omp_single_allocate(%data_var: memref<i32>) {
+func.func @omp_single_allocate(%data_var: memref<i32>) {
omp.parallel {
// CHECK: omp.single allocate(%{{.*}} : memref<i32> -> %{{.*}} : memref<i32>) {
omp.single allocate(%data_var : memref<i32> -> %data_var : memref<i32>) {
@@ -1135,7 +1135,7 @@ func @omp_single_allocate(%data_var: memref<i32>) {
}
// CHECK-LABEL: func @omp_single_allocate_nowait
-func @omp_single_allocate_nowait(%data_var: memref<i32>) {
+func.func @omp_single_allocate_nowait(%data_var: memref<i32>) {
omp.parallel {
// CHECK: omp.single allocate(%{{.*}} : memref<i32> -> %{{.*}} : memref<i32>) nowait {
omp.single allocate(%data_var : memref<i32> -> %data_var : memref<i32>) nowait {
@@ -1151,7 +1151,7 @@ func @omp_single_allocate_nowait(%data_var: memref<i32>) {
// CHECK-LABEL: @omp_task
// CHECK-SAME: (%[[bool_var:.*]]: i1, %[[i64_var:.*]]: i64, %[[i32_var:.*]]: i32, %[[data_var:.*]]: memref<i32>)
-func @omp_task(%bool_var: i1, %i64_var: i64, %i32_var: i32, %data_var: memref<i32>) {
+func.func @omp_task(%bool_var: i1, %i64_var: i64, %i32_var: i32, %data_var: memref<i32>) {
// Checking simple task
// CHECK: omp.task {
@@ -1241,7 +1241,7 @@ func @omp_task(%bool_var: i1, %i64_var: i64, %i32_var: i32, %data_var: memref<i3
// -----
-func @omp_threadprivate() {
+func.func @omp_threadprivate() {
%0 = arith.constant 1 : i32
%1 = arith.constant 2 : i32
%2 = arith.constant 3 : i32
diff --git a/mlir/test/Dialect/PDLInterp/ops.mlir b/mlir/test/Dialect/PDLInterp/ops.mlir
index 8bf559b3aa606..52b711a9419ae 100644
--- a/mlir/test/Dialect/PDLInterp/ops.mlir
+++ b/mlir/test/Dialect/PDLInterp/ops.mlir
@@ -6,7 +6,7 @@
// -----
-func @operations(%attribute: !pdl.attribute,
+func.func @operations(%attribute: !pdl.attribute,
%input: !pdl.value,
%type: !pdl.type) {
// attributes, operands, and results
@@ -26,7 +26,7 @@ func @operations(%attribute: !pdl.attribute,
// -----
-func @extract(%attrs : !pdl.range<attribute>, %ops : !pdl.range<operation>, %types : !pdl.range<type>, %vals: !pdl.range<value>) {
+func.func @extract(%attrs : !pdl.range<attribute>, %ops : !pdl.range<operation>, %types : !pdl.range<type>, %vals: !pdl.range<value>) {
// attribute at index 0
%attr = pdl_interp.extract 0 of %attrs : !pdl.attribute
@@ -44,7 +44,7 @@ func @extract(%attrs : !pdl.range<attribute>, %ops : !pdl.range<operation>, %typ
// -----
-func @foreach(%ops: !pdl.range<operation>) {
+func.func @foreach(%ops: !pdl.range<operation>) {
// iterate over a range of operations
pdl_interp.foreach %op : !pdl.operation in %ops {
%val = pdl_interp.get_result 0 of %op
@@ -57,7 +57,7 @@ func @foreach(%ops: !pdl.range<operation>) {
// -----
-func @users(%value: !pdl.value, %values: !pdl.range<value>) {
+func.func @users(%value: !pdl.value, %values: !pdl.range<value>) {
// all the users of a single value
%ops1 = pdl_interp.get_users of %value : !pdl.value
diff --git a/mlir/test/Dialect/Quant/canonicalize.mlir b/mlir/test/Dialect/Quant/canonicalize.mlir
index 8bca8abe92017..fca8116d40e76 100644
--- a/mlir/test/Dialect/Quant/canonicalize.mlir
+++ b/mlir/test/Dialect/Quant/canonicalize.mlir
@@ -2,7 +2,7 @@
// -----
// CHECK-LABEL: redundant_scast
-func @redundant_scast() -> tensor<4xi8> {
+func.func @redundant_scast() -> tensor<4xi8> {
// CHECK-NEXT: arith.constant dense<10> : tensor<4xi8>
// CHECK-NEXT: return
%cst = arith.constant dense<5> : tensor<4xi8>
@@ -14,7 +14,7 @@ func @redundant_scast() -> tensor<4xi8> {
// -----
// CHECK-LABEL: non_redundant_scast
-func @non_redundant_scast() -> tensor<4x!quant.uniform<u8:f32, 7.812500e-03:128>> {
+func.func @non_redundant_scast() -> tensor<4x!quant.uniform<u8:f32, 7.812500e-03:128>> {
// CHECK-NEXT: arith.constant dense<5> : tensor<4xi8>
// CHECK-NEXT: scast
// CHECK-NEXT: return
diff --git a/mlir/test/Dialect/Quant/convert-const.mlir b/mlir/test/Dialect/Quant/convert-const.mlir
index 49c2d9524d94a..78fe85d561000 100644
--- a/mlir/test/Dialect/Quant/convert-const.mlir
+++ b/mlir/test/Dialect/Quant/convert-const.mlir
@@ -10,7 +10,7 @@
// quantized, is the signed printed version of an unsigned quantity
// (-64 signed == 192 unsigned).
// CHECK-LABEL: constant_splat_tensor_u8_affine
-func @constant_splat_tensor_u8_affine() -> tensor<4xf32> {
+func.func @constant_splat_tensor_u8_affine() -> tensor<4xf32> {
// CHECK: %cst = arith.constant dense<-64> : tensor<4xi8>
// CHECK-NEXT: %0 = "quant.scast"(%cst) : (tensor<4xi8>) -> tensor<4x!quant.uniform<u8:f32, 7.812500e-03:128>>
%cst = arith.constant dense<0.5> : tensor<4xf32>
@@ -22,7 +22,7 @@ func @constant_splat_tensor_u8_affine() -> tensor<4xf32> {
// -----
// Verifies i8 affine quantization on a splat tensor.
// CHECK-LABEL: constant_splat_tensor_i8_affine
-func @constant_splat_tensor_i8_affine() -> tensor<4xf32> {
+func.func @constant_splat_tensor_i8_affine() -> tensor<4xf32> {
// CHECK: %cst = arith.constant dense<63> : tensor<4xi8>
// CHECK-NEXT: %0 = "quant.scast"(%cst) : (tensor<4xi8>) -> tensor<4x!quant.uniform<i8:f32, 7.812500e-03:-1>>
%cst = arith.constant dense<0.5> : tensor<4xf32>
@@ -34,7 +34,7 @@ func @constant_splat_tensor_i8_affine() -> tensor<4xf32> {
// -----
// Verifies i8 fixedpoint quantization on a splat tensor.
// CHECK-LABEL: const_splat_tensor_i8_fixedpoint
-func @const_splat_tensor_i8_fixedpoint() -> tensor<4xf32> {
+func.func @const_splat_tensor_i8_fixedpoint() -> tensor<4xf32> {
// CHECK: %cst = arith.constant dense<64> : tensor<4xi8>
// CHECK-NEXT: %0 = "quant.scast"(%cst) : (tensor<4xi8>) -> tensor<4x!quant.uniform<i8:f32, 7.812500e-03>>
%cst = arith.constant dense<0.5> : tensor<4xf32>
@@ -46,7 +46,7 @@ func @const_splat_tensor_i8_fixedpoint() -> tensor<4xf32> {
// -----
// Verifies i8 fixedpoint quantization on a splat tensor resulting in a negative storage value.
// CHECK-LABEL: const_splat_tensor_i8_fixedpoint_neg
-func @const_splat_tensor_i8_fixedpoint_neg() -> tensor<4xf32> {
+func.func @const_splat_tensor_i8_fixedpoint_neg() -> tensor<4xf32> {
// CHECK: %cst = arith.constant dense<-64> : tensor<4xi8>
%cst = arith.constant dense<-0.5> : tensor<4xf32>
%1 = "quant.qcast"(%cst) : (tensor<4xf32>) -> tensor<4x!quant.uniform<i8:f32, 7.812500e-03>>
@@ -57,7 +57,7 @@ func @const_splat_tensor_i8_fixedpoint_neg() -> tensor<4xf32> {
// -----
// Verifies i8 fixedpoint quantization on a dense tensor, sweeping values.
// CHECK-LABEL: const_dense_tensor_i8_fixedpoint
-func @const_dense_tensor_i8_fixedpoint() -> tensor<7xf32> {
+func.func @const_dense_tensor_i8_fixedpoint() -> tensor<7xf32> {
// CHECK: %cst = arith.constant dense<[-128, -128, -64, 0, 64, 127, 127]> : tensor<7xi8>
%cst = arith.constant dense<[-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]> : tensor<7xf32>
%1 = "quant.qcast"(%cst) : (tensor<7xf32>) -> tensor<7x!quant.uniform<i8:f32, 7.812500e-03>>
@@ -68,7 +68,7 @@ func @const_dense_tensor_i8_fixedpoint() -> tensor<7xf32> {
// -----
// Verifies i8 fixedpoint quantization on a sparse tensor, sweeping values.
// CHECK-LABEL: const_sparse_tensor_i8_fixedpoint
-func @const_sparse_tensor_i8_fixedpoint() -> tensor<2x7xf32> {
+func.func @const_sparse_tensor_i8_fixedpoint() -> tensor<2x7xf32> {
// NOTE: Ugly regex match pattern for opening "[[" of indices tensor.
// CHECK: %cst = arith.constant sparse<{{\[}}[0, 0], [0, 1], [0, 2], [0, 3], [0, 4], [0, 5], [0, 6]], [-128, -128, -64, 0, 64, 127, 127]> : tensor<2x7xi8>
%cst = arith.constant sparse<
@@ -82,7 +82,7 @@ func @const_sparse_tensor_i8_fixedpoint() -> tensor<2x7xf32> {
// -----
// Verifies i8 fixedpoint quantization on a primitive const.
// CHECK-LABEL: const_primitive_float_i8_fixedpoint
-func @const_primitive_float_i8_fixedpoint() -> f32 {
+func.func @const_primitive_float_i8_fixedpoint() -> f32 {
// CHECK: %c64_i8 = arith.constant 64 : i8
// CHECK-NEXT: %0 = "quant.scast"(%c64_i8) : (i8) -> !quant.uniform<i8:f32, 7.812500e-03>
%cst = arith.constant 0.5 : f32
@@ -94,7 +94,7 @@ func @const_primitive_float_i8_fixedpoint() -> f32 {
// -----
// Verifies u4 affine quantization on a dense tensor, sweeping values.
// CHECK-LABEL: const_dense_tensor_u4_affine
-func @const_dense_tensor_u4_affine() -> tensor<7xf32> {
+func.func @const_dense_tensor_u4_affine() -> tensor<7xf32> {
// NOTE: Unsigned quantities printed by MLIR as signed.
// CHECK: %cst = arith.constant dense<[0, 0, 4, -8, -4, -1, -1]> : tensor<7xi4>
%cst = arith.constant dense<[-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]> : tensor<7xf32>
@@ -106,7 +106,7 @@ func @const_dense_tensor_u4_affine() -> tensor<7xf32> {
// -----
// Verifies i4 affine quantization on a dense tensor, sweeping values.
// CHECK-LABEL: const_dense_tensor_i4_affine
-func @const_dense_tensor_i4_affine() -> tensor<7xf32> {
+func.func @const_dense_tensor_i4_affine() -> tensor<7xf32> {
// NOTE: Unsigned quantities printed by MLIR as signed.
// CHECK: %cst = arith.constant dense<[-8, -8, -5, -1, 3, 7, 7]> : tensor<7xi4>
%cst = arith.constant dense<[-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]> : tensor<7xf32>
@@ -118,7 +118,7 @@ func @const_dense_tensor_i4_affine() -> tensor<7xf32> {
// -----
// Verifies i4 fixed point quantization on a dense tensor, sweeping values.
// CHECK-LABEL: const_dense_tensor_i4_fixedpoint
-func @const_dense_tensor_i4_fixedpoint() -> tensor<7xf32> {
+func.func @const_dense_tensor_i4_fixedpoint() -> tensor<7xf32> {
// CHECK: %cst = arith.constant dense<[-8, -8, -4, 0, 4, 7, 7]> : tensor<7xi4>
%cst = arith.constant dense<[-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]> : tensor<7xf32>
%1 = "quant.qcast"(%cst) : (tensor<7xf32>) -> tensor<7x!quant.uniform<i4:f32, 1.250000e-01>>
@@ -131,7 +131,7 @@ func @const_dense_tensor_i4_fixedpoint() -> tensor<7xf32> {
// custom storage range. (the -128 should be clamped to -100, and the 127 should
// be clamped to 100).
// CHECK-LABEL: const_custom_storage_range_i8_fixedpoint
-func @const_custom_storage_range_i8_fixedpoint() -> tensor<7xf32> {
+func.func @const_custom_storage_range_i8_fixedpoint() -> tensor<7xf32> {
// CHECK: %cst = arith.constant dense<[-100, -100, -64, 0, 64, 100, 100]> : tensor<7xi8>
%cst = arith.constant dense<[-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]> : tensor<7xf32>
%1 = "quant.qcast"(%cst) : (tensor<7xf32>) -> tensor<7x!quant.uniform<i8<-100:100>:f32, 7.812500e-03>>
@@ -142,7 +142,7 @@ func @const_custom_storage_range_i8_fixedpoint() -> tensor<7xf32> {
// -----
// Verifies quantization results of all-0.0 tensors are quantized to zero points.
// CHECK-LABEL: zero_tensors_to_zero_points
-func @zero_tensors_to_zero_points() -> (tensor<7xf32>, tensor<7xf32>, tensor<7xf32>, tensor<7xf32>) {
+func.func @zero_tensors_to_zero_points() -> (tensor<7xf32>, tensor<7xf32>, tensor<7xf32>, tensor<7xf32>) {
// CHECK-DAG: %[[cst1:.*]] = arith.constant dense<1> : tensor<7xi8>
// CHECK-DAG: %[[cst:.*]] = arith.constant dense<-127> : tensor<7xi8>
@@ -174,7 +174,7 @@ func @zero_tensors_to_zero_points() -> (tensor<7xf32>, tensor<7xf32>, tensor<7xf
// -----
// Verifies per-axis quantization results for dense.
// CHECK-LABEL: per_axis_dense_quantization
-func @per_axis_dense_quantization() -> (tensor<2x3xf32>, tensor<2x3xf32>) {
+func.func @per_axis_dense_quantization() -> (tensor<2x3xf32>, tensor<2x3xf32>) {
// CHECK-DAG: %[[cst0:.*]] = arith.constant dense<{{\[}}[-128, -1, 1], [127, 1, 3]]> : tensor<2x3xi8>
// CHECK-DAG: %[[cst:.*]] = arith.constant dense<{{\[}}[-128, 64, 127], [0, 1, 2]]> : tensor<2x3xi8>
diff --git a/mlir/test/Dialect/Quant/convert-fakequant-invalid.mlir b/mlir/test/Dialect/Quant/convert-fakequant-invalid.mlir
index d6b6a524e593c..bd4a0f96ababa 100644
--- a/mlir/test/Dialect/Quant/convert-fakequant-invalid.mlir
+++ b/mlir/test/Dialect/Quant/convert-fakequant-invalid.mlir
@@ -2,7 +2,7 @@
// -----
// Unsupported quantizable type (i1 is currently not a supported element type).
-func @fakeQuantArgs(tensor<8x4x3xi1>) -> tensor<8x4x3xi1> {
+func.func @fakeQuantArgs(tensor<8x4x3xi1>) -> tensor<8x4x3xi1> {
^bb0(%arg0: tensor<8x4x3xi1>):
// expected-error at +1 {{op operand #0 must be tensor of 32-bit float values}}
%0 = "quant.const_fake_quant"(%arg0) {
diff --git a/mlir/test/Dialect/Quant/convert-fakequant.mlir b/mlir/test/Dialect/Quant/convert-fakequant.mlir
index 0fa665eaad84a..14983591fac7c 100644
--- a/mlir/test/Dialect/Quant/convert-fakequant.mlir
+++ b/mlir/test/Dialect/Quant/convert-fakequant.mlir
@@ -3,7 +3,7 @@
// -----
// Verifies a quint8 single point.
// CHECK-LABEL: fakeQuantArgs_Quint8_0
-func @fakeQuantArgs_Quint8_0(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
+func.func @fakeQuantArgs_Quint8_0(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
^bb0(%arg0: tensor<8x4x3xf32>):
// CHECK: %[[qc:.*]] = "quant.qcast"(%arg0) : (tensor<8x4x3xf32>)
// CHECK-SAME: -> tensor<8x4x3x!quant.uniform<u8:f32, 1.000000e+00>>
@@ -18,7 +18,7 @@ func @fakeQuantArgs_Quint8_0(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
// -----
// Verifies a quint8 single point (with narrow_range = true).
// CHECK-LABEL: fakeQuantArgs_Quint8_0_NarrowRange
-func @fakeQuantArgs_Quint8_0_NarrowRange(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
+func.func @fakeQuantArgs_Quint8_0_NarrowRange(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
^bb0(%arg0: tensor<8x4x3xf32>):
// CHECK: %[[qc:.*]] = "quant.qcast"(%arg0) : (tensor<8x4x3xf32>)
// CHECK-SAME: -> tensor<8x4x3x!quant.uniform<u8<1:255>:f32, 1.000000e+00:1>>
@@ -33,7 +33,7 @@ func @fakeQuantArgs_Quint8_0_NarrowRange(tensor<8x4x3xf32>) -> tensor<8x4x3xf32>
// -----
// Verifies a quint8 asymmetric 0..1 range.
// CHECK-LABEL: fakeQuantArgs_Quint8_0_1
-func @fakeQuantArgs_Quint8_0_1(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
+func.func @fakeQuantArgs_Quint8_0_1(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
^bb0(%arg0: tensor<8x4x3xf32>):
// CHECK: %0 = "quant.qcast"(%arg0) : (tensor<8x4x3xf32>)
// CHECK-SAME: -> tensor<8x4x3x!quant.uniform<u8:f32, 0.0039215686274509803>>
@@ -48,7 +48,7 @@ func @fakeQuantArgs_Quint8_0_1(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
// -----
// Verifies a quint8 asymmetric 0..1 range (with narrow_range = true).
// CHECK-LABEL: fakeQuantArgs_Quint8_NarrowRange
-func @fakeQuantArgs_Quint8_NarrowRange(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
+func.func @fakeQuantArgs_Quint8_NarrowRange(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
^bb0(%arg0: tensor<8x4x3xf32>):
// CHECK: %0 = "quant.qcast"(%arg0) : (tensor<8x4x3xf32>)
// CHECK-SAME: -> tensor<8x4x3x!quant.uniform<u8<1:255>:f32, 0.003937007874015748:1>>
@@ -63,7 +63,7 @@ func @fakeQuantArgs_Quint8_NarrowRange(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
// -----
// Verifies a quint8 symmetric range of -1..127/128.
// CHECK-LABEL: fakeQuantArgs_Quint8_SymmetricRange
-func @fakeQuantArgs_Quint8_SymmetricRange(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
+func.func @fakeQuantArgs_Quint8_SymmetricRange(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
^bb0(%arg0: tensor<8x4x3xf32>):
// CHECK: %0 = "quant.qcast"(%arg0) : (tensor<8x4x3xf32>)
// CHECK-SAME: -> tensor<8x4x3x!quant.uniform<u8:f32, 7.812500e-03:128>>
@@ -78,7 +78,7 @@ func @fakeQuantArgs_Quint8_SymmetricRange(tensor<8x4x3xf32>) -> tensor<8x4x3xf32
// -----
// Verifies a qint8 single point.
// CHECK-LABEL: fakeQuantArgs_Qint8_0
-func @fakeQuantArgs_Qint8_0(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
+func.func @fakeQuantArgs_Qint8_0(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
^bb0(%arg0: tensor<8x4x3xf32>):
// CHECK: %[[qc:.*]] = "quant.qcast"(%arg0) : (tensor<8x4x3xf32>)
// CHECK-SAME: -> tensor<8x4x3x!quant.uniform<i8:f32, 1.000000e+00:-128>>
@@ -93,7 +93,7 @@ func @fakeQuantArgs_Qint8_0(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
// -----
// Verifies a qint8 single point (with narrow_range = true).
// CHECK-LABEL: fakeQuantArgs_Qint8_0_NarrowRange
-func @fakeQuantArgs_Qint8_0_NarrowRange(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
+func.func @fakeQuantArgs_Qint8_0_NarrowRange(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
^bb0(%arg0: tensor<8x4x3xf32>):
// CHECK: %[[qc:.*]] = "quant.qcast"(%arg0) : (tensor<8x4x3xf32>)
// CHECK-SAME: -> tensor<8x4x3x!quant.uniform<i8<-127:127>:f32, 1.000000e+00:-127>>
@@ -108,7 +108,7 @@ func @fakeQuantArgs_Qint8_0_NarrowRange(tensor<8x4x3xf32>) -> tensor<8x4x3xf32>
// -----
// Verifies a qint8 asymmetric 0..1 range.
// CHECK-LABEL: fakeQuantArgs_Qint8_0_1
-func @fakeQuantArgs_Qint8_0_1(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
+func.func @fakeQuantArgs_Qint8_0_1(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
^bb0(%arg0: tensor<8x4x3xf32>):
// CHECK: %0 = "quant.qcast"(%arg0) : (tensor<8x4x3xf32>)
// CHECK-SAME: -> tensor<8x4x3x!quant.uniform<i8:f32, 0.0039215686274509803:-128>>
@@ -123,7 +123,7 @@ func @fakeQuantArgs_Qint8_0_1(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
// -----
// Verifies a qint8 asymmetric 0..1 range (with narrow_range = true).
// CHECK-LABEL: fakeQuantArgs_Qint8_NarrowRange
-func @fakeQuantArgs_Qint8_NarrowRange(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
+func.func @fakeQuantArgs_Qint8_NarrowRange(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
^bb0(%arg0: tensor<8x4x3xf32>):
// CHECK: %0 = "quant.qcast"(%arg0) : (tensor<8x4x3xf32>)
// CHECK-SAME: -> tensor<8x4x3x!quant.uniform<i8<-127:127>:f32, 0.003937007874015748:-127>>
@@ -138,7 +138,7 @@ func @fakeQuantArgs_Qint8_NarrowRange(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
// -----
// Verifies a qint8 symmetric range of -1..127/128.
// CHECK-LABEL: fakeQuantArgs_Qint8_SymmetricRange
-func @fakeQuantArgs_Qint8_SymmetricRange(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
+func.func @fakeQuantArgs_Qint8_SymmetricRange(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
^bb0(%arg0: tensor<8x4x3xf32>):
// CHECK: %0 = "quant.qcast"(%arg0) : (tensor<8x4x3xf32>)
// CHECK-SAME: -> tensor<8x4x3x!quant.uniform<i8:f32, 7.812500e-03>>
@@ -154,7 +154,7 @@ func @fakeQuantArgs_Qint8_SymmetricRange(tensor<8x4x3xf32>) -> tensor<8x4x3xf32>
// Verifies a commonly used -1..1 symmetric 16bit range with a zero point of
// 0 and range -1.0 .. 32767/32768.
// CHECK-LABEL: fakeQuantArgs_Qint16_Symmetric
-func @fakeQuantArgs_Qint16_Symmetric(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
+func.func @fakeQuantArgs_Qint16_Symmetric(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
^bb0(%arg0: tensor<8x4x3xf32>):
// CHECK: %0 = "quant.qcast"(%arg0) : (tensor<8x4x3xf32>)
// CHECK-SAME: -> tensor<8x4x3x!quant.uniform<i16:f32, 3.0517578125E-5>>
@@ -169,7 +169,7 @@ func @fakeQuantArgs_Qint16_Symmetric(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
// -----
// Verify that lowering to barriers of unranked tensors functions.
// CHECK-LABEL: fakeQuantArgs_UnrankedTensor
-func @fakeQuantArgs_UnrankedTensor(tensor<f32>) -> tensor<f32> {
+func.func @fakeQuantArgs_UnrankedTensor(tensor<f32>) -> tensor<f32> {
^bb0(%arg0: tensor<f32>):
// CHECK: %0 = "quant.qcast"(%arg0) : (tensor<f32>)
// CHECK-SAME: -> tensor<!quant.uniform<u8:f32, 0.0039215686274509803>>
@@ -183,7 +183,7 @@ func @fakeQuantArgs_UnrankedTensor(tensor<f32>) -> tensor<f32> {
// -----
// CHECK-LABEL: fakeQuantArgs_all_positive
-func @fakeQuantArgs_all_positive(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
+func.func @fakeQuantArgs_all_positive(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
^bb0(%arg0: tensor<8x4x3xf32>):
// CHECK: %[[qc:.*]] = "quant.qcast"(%arg0) : (tensor<8x4x3xf32>)
@@ -199,7 +199,7 @@ func @fakeQuantArgs_all_positive(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
// -----
// CHECK-LABEL: fakeQuantArgs_all_negative
-func @fakeQuantArgs_all_negative(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
+func.func @fakeQuantArgs_all_negative(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
^bb0(%arg0: tensor<8x4x3xf32>):
// CHECK: %[[qc:.*]] = "quant.qcast"(%arg0) : (tensor<8x4x3xf32>)
@@ -216,7 +216,7 @@ func @fakeQuantArgs_all_negative(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
// -----
// Verifies a qint8 per axis
// CHECK-LABEL: fakeQuantPerAxis
-func @fakeQuantPerAxis(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
+func.func @fakeQuantPerAxis(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
^bb0(%arg0: tensor<8x4x3xf32>):
// CHECK: %[[q:.*]] = "quant.qcast"(%arg0) : (tensor<8x4x3xf32>)
diff --git a/mlir/test/Dialect/Quant/parse-any.mlir b/mlir/test/Dialect/Quant/parse-any.mlir
index 8dbd62c3a2312..74438b52cb9a6 100644
--- a/mlir/test/Dialect/Quant/parse-any.mlir
+++ b/mlir/test/Dialect/Quant/parse-any.mlir
@@ -4,7 +4,7 @@
// CHECK-LABEL: parseFullySpecified
// CHECK: !quant.any<i8<-8:7>:f32>
!qalias = type !quant.any<i8<-8:7>:f32>
-func @parseFullySpecified() -> !qalias {
+func.func @parseFullySpecified() -> !qalias {
%0 = "foo"() : () -> !qalias
return %0 : !qalias
}
@@ -13,7 +13,7 @@ func @parseFullySpecified() -> !qalias {
// CHECK-LABEL: parseNoExpressedType
// CHECK: !quant.any<i8<-8:7>>
!qalias = type !quant.any<i8<-8:7>>
-func @parseNoExpressedType() -> !qalias {
+func.func @parseNoExpressedType() -> !qalias {
%0 = "foo"() : () -> !qalias
return %0 : !qalias
}
@@ -22,7 +22,7 @@ func @parseNoExpressedType() -> !qalias {
// CHECK-LABEL: parseOnlyStorageType
// CHECK: !quant.any<i8>
!qalias = type !quant.any<i8>
-func @parseOnlyStorageType() -> !qalias {
+func.func @parseOnlyStorageType() -> !qalias {
%0 = "foo"() : () -> !qalias
return %0 : !qalias
}
diff --git a/mlir/test/Dialect/Quant/parse-calibrated.mlir b/mlir/test/Dialect/Quant/parse-calibrated.mlir
index 87f5b7ade21a6..d1caa37194d98 100644
--- a/mlir/test/Dialect/Quant/parse-calibrated.mlir
+++ b/mlir/test/Dialect/Quant/parse-calibrated.mlir
@@ -4,7 +4,7 @@
// CHECK-LABEL: parseCalibrated
// CHECK: !quant.calibrated<f32<-0.998:1.232100e+00>
!qalias = type !quant.calibrated<f32<-0.998:1.2321>>
-func @parseCalibrated() -> !qalias {
+func.func @parseCalibrated() -> !qalias {
%0 = "foo"() : () -> !qalias
return %0 : !qalias
}
diff --git a/mlir/test/Dialect/Quant/parse-ops-invalid.mlir b/mlir/test/Dialect/Quant/parse-ops-invalid.mlir
index ead636fb19852..2b2a9eed84806 100644
--- a/mlir/test/Dialect/Quant/parse-ops-invalid.mlir
+++ b/mlir/test/Dialect/Quant/parse-ops-invalid.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -verify-diagnostics
// -----
-func @invalidStatisticsMismatchedLayerType(%arg0: tensor<8x4x3xf32>) ->
+func.func @invalidStatisticsMismatchedLayerType(%arg0: tensor<8x4x3xf32>) ->
tensor<8x4x3xf32> {
// expected-error@+1 {{layerStats must have a floating point element type}}
%0 = "quant.stats"(%arg0) {
@@ -11,7 +11,7 @@ func @invalidStatisticsMismatchedLayerType(%arg0: tensor<8x4x3xf32>) ->
}
// -----
-func @invalidStatisticsMismatchedLayerRank(%arg0: tensor<8x4x3xf32>) ->
+func.func @invalidStatisticsMismatchedLayerRank(%arg0: tensor<8x4x3xf32>) ->
tensor<8x4x3xf32> {
// expected-error@+1 {{layerStats must have shape [2]}}
%0 = "quant.stats"(%arg0) {
@@ -21,7 +21,7 @@ func @invalidStatisticsMismatchedLayerRank(%arg0: tensor<8x4x3xf32>) ->
}
// -----
-func @invalidStatisticsMismatchedLayerShape(%arg0: tensor<8x4x3xf32>) ->
+func.func @invalidStatisticsMismatchedLayerShape(%arg0: tensor<8x4x3xf32>) ->
tensor<8x4x3xf32> {
// expected-error@+1 {{layerStats must have shape [2]}}
%0 = "quant.stats"(%arg0) {
@@ -32,7 +32,7 @@ func @invalidStatisticsMismatchedLayerShape(%arg0: tensor<8x4x3xf32>) ->
// -----
// CHECK-LABEL: validStatistics
-func @invalidStatisticsMismatchedAxisType(%arg0: tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
+func.func @invalidStatisticsMismatchedAxisType(%arg0: tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
// expected-error@+1 {{axisStats must have a floating point element type}}
%0 = "quant.stats"(%0) {
layerStats = dense<[-1.0, 1.0]> : tensor<2xf32>,
@@ -46,7 +46,7 @@ func @invalidStatisticsMismatchedAxisType(%arg0: tensor<8x4x3xf32>) -> tensor<8x
}
// -----
-func @invalidStatisticsMismatchedAxisSize(%arg0: tensor<8x4x3xf32>) ->
+func.func @invalidStatisticsMismatchedAxisSize(%arg0: tensor<8x4x3xf32>) ->
tensor<8x4x3xf32> {
// expected-error@+1 {{axisStats must have shape [N,2] where N = the slice size defined by the axis dim}}
%0 = "quant.stats"(%arg0) {
@@ -62,7 +62,7 @@ func @invalidStatisticsMismatchedAxisSize(%arg0: tensor<8x4x3xf32>) ->
}
// -----
-func @invalidStatisticsMismatchedAxisShape(%arg0: tensor<8x4x3xf32>) ->
+func.func @invalidStatisticsMismatchedAxisShape(%arg0: tensor<8x4x3xf32>) ->
tensor<8x4x3xf32> {
// expected-error@+1 {{axisStats must have shape [N,2] where N = the slice size defined by the axis dim}}
%0 = "quant.stats"(%arg0) {
@@ -77,7 +77,7 @@ func @invalidStatisticsMismatchedAxisShape(%arg0: tensor<8x4x3xf32>) ->
}
// -----
-func @axisIsRequiredForAxisStats(%arg0: tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
+func.func @axisIsRequiredForAxisStats(%arg0: tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
// expected-error@+1 {{axis must be specified for axisStats}}
%1 = "quant.stats"(%arg0) {
layerStats = dense<[-1.0, 1.0]> : tensor<2xf32>,
diff --git a/mlir/test/Dialect/Quant/parse-ops.mlir b/mlir/test/Dialect/Quant/parse-ops.mlir
index bdcd751a969d0..c20b0deb49865 100644
--- a/mlir/test/Dialect/Quant/parse-ops.mlir
+++ b/mlir/test/Dialect/Quant/parse-ops.mlir
@@ -2,7 +2,7 @@
// -----
// CHECK-LABEL: validConstFakeQuant
-func @validConstFakeQuant(%arg0: tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
+func.func @validConstFakeQuant(%arg0: tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
%0 = "quant.const_fake_quant"(%arg0) {
min = 0.0 : f32, max = 1.0 : f32, num_bits = 8, narrow_range = true
} : (tensor<8x4x3xf32>) -> tensor<8x4x3xf32>
@@ -17,7 +17,7 @@ func @validConstFakeQuant(%arg0: tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
// -----
// CHECK-LABEL: validConstFakeQuantPerAxis
-func @validConstFakeQuantPerAxis(%arg0: tensor<8x4x2xf32>) -> tensor<8x4x2xf32> {
+func.func @validConstFakeQuantPerAxis(%arg0: tensor<8x4x2xf32>) -> tensor<8x4x2xf32> {
%0 = "quant.const_fake_quant_per_axis"(%arg0) {
min = [0.0 : f32, 1.0 : f32], max = [2.0 : f32, 3.0 : f32], axis = 2, num_bits = 8, narrow_range = true
} : (tensor<8x4x2xf32>) -> tensor<8x4x2xf32>
@@ -32,7 +32,7 @@ func @validConstFakeQuantPerAxis(%arg0: tensor<8x4x2xf32>) -> tensor<8x4x2xf32>
// -----
// CHECK-LABEL: validStatisticsRef
-func @validStatisticsRef(%arg0: tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
+func.func @validStatisticsRef(%arg0: tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
%0 = "quant.stats_ref"(%arg0) { statsKey = "foobar" } :
(tensor<8x4x3xf32>) -> tensor<8x4x3xf32>
return %0 : tensor<8x4x3xf32>
@@ -40,7 +40,7 @@ func @validStatisticsRef(%arg0: tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
// -----
// CHECK-LABEL: validStatistics
-func @validStatistics(%arg0: tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
+func.func @validStatistics(%arg0: tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
%0 = "quant.stats"(%arg0) {
layerStats = dense<[-1.0, 1.0]> : tensor<2xf32>
} : (tensor<8x4x3xf32>) -> tensor<8x4x3xf32>
@@ -57,7 +57,7 @@ func @validStatistics(%arg0: tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
// -----
// CHECK-LABEL: validCoupledRef
-func @validCoupledRef(%arg0: tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
+func.func @validCoupledRef(%arg0: tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
%0 = "quant.coupled_ref"(%arg0) { coupledKey = "foobar" } :
(tensor<8x4x3xf32>) -> tensor<8x4x3xf32>
return %0 : tensor<8x4x3xf32>
diff --git a/mlir/test/Dialect/Quant/parse-uniform.mlir b/mlir/test/Dialect/Quant/parse-uniform.mlir
index 3f9dad898361e..3a3449e658de2 100644
--- a/mlir/test/Dialect/Quant/parse-uniform.mlir
+++ b/mlir/test/Dialect/Quant/parse-uniform.mlir
@@ -5,7 +5,7 @@
// [signed] storageType, storageTypeMin, storageTypeMax, expressedType, scale, zeroPoint
// CHECK: !quant.uniform<i8<-8:7>:f32, 9.987200e-01:127>
!qalias = type !quant.uniform<i8<-8:7>:f32, 0.99872:127>
-func @parse() -> !qalias {
+func.func @parse() -> !qalias {
%0 = "foo"() : () -> !qalias
return %0 : !qalias
}
@@ -14,7 +14,7 @@ func @parse() -> !qalias {
// Trailing whitespace.
// CHECK: !quant.uniform<i8<-8:7>:f32, 9.987200e-01:127>
!qalias = type !quant.uniform<i8<-8:7>:f32, 0.99872:127 >
-func @parse() -> !qalias {
+func.func @parse() -> !qalias {
%0 = "foo"() : () -> !qalias
return %0 : !qalias
}
@@ -24,7 +24,7 @@ func @parse() -> !qalias {
// [unsigned] storageType, expressedType, scale
// CHECK: !quant.uniform<u8:f32, 9.987200e-01>
!qalias = type !quant.uniform<u8:f32, 0.99872>
-func @parse() -> !qalias {
+func.func @parse() -> !qalias {
%0 = "foo"() : () -> !qalias
return %0 : !qalias
}
@@ -33,7 +33,7 @@ func @parse() -> !qalias {
// Exponential scale (-)
// CHECK: !quant.uniform<u8:f32, 2.000000e-02>
!qalias = type !quant.uniform<u8:f32, 2.0e-2>
-func @parse() -> !qalias {
+func.func @parse() -> !qalias {
%0 = "foo"() : () -> !qalias
return %0 : !qalias
}
@@ -42,7 +42,7 @@ func @parse() -> !qalias {
// Exponential scale (+)
// CHECK: !quant.uniform<u8:f32, 2.000000e+02>
!qalias = type !quant.uniform<u8:f32, 2.0e+2>
-func @parse() -> !qalias {
+func.func @parse() -> !qalias {
%0 = "foo"() : () -> !qalias
return %0 : !qalias
}
@@ -51,7 +51,7 @@ func @parse() -> !qalias {
// Storage type: i16
// CHECK: !quant.uniform<i16:f32, 2.000000e+02>
!qalias = type !quant.uniform<i16:f32, 2.0e+2>
-func @parse() -> !qalias {
+func.func @parse() -> !qalias {
%0 = "foo"() : () -> !qalias
return %0 : !qalias
}
@@ -60,7 +60,7 @@ func @parse() -> !qalias {
// Storage type: u16
// CHECK: !quant.uniform<u16:f32, 2.000000e+02>
!qalias = type !quant.uniform<u16:f32, 2.0e+2>
-func @parse() -> !qalias {
+func.func @parse() -> !qalias {
%0 = "foo"() : () -> !qalias
return %0 : !qalias
}
@@ -69,7 +69,7 @@ func @parse() -> !qalias {
// Storage type: i32
// CHECK: !quant.uniform<i32:f32, 2.000000e+02>
!qalias = type !quant.uniform<i32:f32, 2.0e+2>
-func @parse() -> !qalias {
+func.func @parse() -> !qalias {
%0 = "foo"() : () -> !qalias
return %0 : !qalias
}
@@ -78,7 +78,7 @@ func @parse() -> !qalias {
// Storage type: u32
// CHECK: !quant.uniform<u32:f32, 2.000000e+02>
!qalias = type !quant.uniform<u32:f32, 2.0e+2>
-func @parse() -> !qalias {
+func.func @parse() -> !qalias {
%0 = "foo"() : () -> !qalias
return %0 : !qalias
}
@@ -87,7 +87,7 @@ func @parse() -> !qalias {
// Expressed type: f32
// CHECK: !quant.uniform<u8:f32, 2.000000e+02>
!qalias = type !quant.uniform<u8:f32, 2.0e+2>
-func @parse() -> !qalias {
+func.func @parse() -> !qalias {
%0 = "foo"() : () -> !qalias
return %0 : !qalias
}
@@ -96,7 +96,7 @@ func @parse() -> !qalias {
// Expressed type: f32
// CHECK: !quant.uniform<u8:f32, 0x41646ABBA0000000:128>
!qalias = type !quant.uniform<u8:f32, 0x41646ABBA0000000:128>
-func @parse() -> !qalias {
+func.func @parse() -> !qalias {
%0 = "foo"() : () -> !qalias
return %0 : !qalias
}
@@ -105,7 +105,7 @@ func @parse() -> !qalias {
// Expressed type: f16
// CHECK: !quant.uniform<u8:f16, 2.000000e+02>
!qalias = type !quant.uniform<u8:f16, 2.0e+2>
-func @parse() -> !qalias {
+func.func @parse() -> !qalias {
%0 = "foo"() : () -> !qalias
return %0 : !qalias
}
@@ -114,7 +114,7 @@ func @parse() -> !qalias {
// Expressed type: f64
// CHECK: !quant.uniform<u8:f64, 2.000000e+02>
!qalias = type !quant.uniform<u8:f64, 2.0e+2>
-func @parse() -> !qalias {
+func.func @parse() -> !qalias {
%0 = "foo"() : () -> !qalias
return %0 : !qalias
}
@@ -123,7 +123,7 @@ func @parse() -> !qalias {
// Expressed type: bf16
// CHECK: !quant.uniform<u8:bf16, 2.000000e+02>
!qalias = type !quant.uniform<u8:bf16, 2.0e+2>
-func @parse() -> !qalias {
+func.func @parse() -> !qalias {
%0 = "foo"() : () -> !qalias
return %0 : !qalias
}
@@ -132,7 +132,7 @@ func @parse() -> !qalias {
// Per-axis scales and zero points (affine)
// CHECK: !quant.uniform<u8:f32:1, {2.000000e+02:-120,9.987200e-01:127}>
!qalias = type !quant.uniform<u8:f32:1, {2.0e+2:-120,0.99872:127}>
-func @parse() -> !qalias {
+func.func @parse() -> !qalias {
%0 = "foo"() : () -> !qalias
return %0 : !qalias
}
@@ -141,7 +141,7 @@ func @parse() -> !qalias {
// Per-axis scales and no zero points (fixedpoint)
// CHECK: !quant.uniform<i8:f32:1, {2.000000e+02,9.987200e-01}>
!qalias = type !quant.uniform<i8:f32:1, {2.0e+2,0.99872}>
-func @parse() -> !qalias {
+func.func @parse() -> !qalias {
%0 = "foo"() : () -> !qalias
return %0 : !qalias
}
@@ -150,7 +150,7 @@ func @parse() -> !qalias {
// Per-axis scales and zero points (mixed affine and fixedpoint)
// CHECK: !quant.uniform<i8:f32:1, {2.000000e+02,9.987200e-01:120}>
!qalias = type !quant.uniform<i8:f32:1, {2.0e+2,0.99872:120}>
-func @parse() -> !qalias {
+func.func @parse() -> !qalias {
%0 = "foo"() : () -> !qalias
return %0 : !qalias
}
diff --git a/mlir/test/Dialect/Quant/quant_region.mlir b/mlir/test/Dialect/Quant/quant_region.mlir
index 7a7b6440e94a0..8c7d6c40ef40d 100644
--- a/mlir/test/Dialect/Quant/quant_region.mlir
+++ b/mlir/test/Dialect/Quant/quant_region.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt -allow-unregistered-dialect -split-input-file -verify-diagnostics %s | FileCheck %s
// CHECK-LABEL: @source
-func @source(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>, %arg2: tensor<4xf32>) -> (tensor<4xf32>) {
+func.func @source(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>, %arg2: tensor<4xf32>) -> (tensor<4xf32>) {
%0 = "quant.region"(%arg0, %arg1, %arg2) ({
^bb0(%10: tensor<4xf32>, %11: tensor<4xf32>, %12: tensor<4xf32>):
%13 = "foo"(%10, %11) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
@@ -13,7 +13,7 @@ func @source(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>, %arg2: tensor<4xf32>) -
}
// CHECK-LABEL: @annotated
-func @annotated(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>, %arg2: tensor<4xf32>) -> (tensor<4xf32>) {
+func.func @annotated(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>, %arg2: tensor<4xf32>) -> (tensor<4xf32>) {
%0 = "quant.region"(%arg0, %arg1, %arg2) ({
^bb0(%10: tensor<4xf32>, %11: tensor<4xf32>, %12: tensor<4xf32>):
%13 = "foo"(%10, %11) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
@@ -26,7 +26,7 @@ func @annotated(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>, %arg2: tensor<4xf32>
}
// CHECK-LABEL: @quantized
-func @quantized(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>, %arg2: tensor<4xf32>) -> (tensor<4xf32>) {
+func.func @quantized(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>, %arg2: tensor<4xf32>) -> (tensor<4xf32>) {
%0 = "quant.region"(%arg0, %arg1, %arg2) ({
^bb0(%10: tensor<4xf32>, %11: tensor<4xf32>, %12: tensor<4xf32>):
%13 = "foo"(%10, %11) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
@@ -40,7 +40,7 @@ func @quantized(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>, %arg2: tensor<4xf32>
// -----
-func @unmatched_quantize(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>, %arg2: tensor<4xf32>) -> (tensor<4xf32>) {
+func.func @unmatched_quantize(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>, %arg2: tensor<4xf32>) -> (tensor<4xf32>) {
// @expected-error @+1 {{'quant.region' op has incompatible specification !quant.uniform<i32:f16, 3.000000e+00> and input type 'tensor<4xf32>'}}
%0 = "quant.region"(%arg0, %arg1, %arg2) ({
^bb0(%10: tensor<4xf32>, %11: tensor<4xf32>, %12: tensor<4xf32>):
@@ -55,7 +55,7 @@ func @unmatched_quantize(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>, %arg2: tens
// -----
-func @unmatched_primitive(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>, %arg2: tensor<4xf32>) -> (tensor<4xf32>) {
+func.func @unmatched_primitive(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>, %arg2: tensor<4xf32>) -> (tensor<4xf32>) {
// @expected-error @+1 {{'quant.region' op has incompatible specification i32 and input type 'tensor<4xf32>'}}
%0 = "quant.region"(%arg0, %arg1, %arg2) ({
^bb0(%10: tensor<4xf32>, %11: tensor<4xf32>, %12: tensor<4xf32>):
@@ -70,7 +70,7 @@ func @unmatched_primitive(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>, %arg2: ten
// -----
-func @unmatched_number(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>, %arg2: tensor<4xf32>) -> (tensor<4xf32>) {
+func.func @unmatched_number(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>, %arg2: tensor<4xf32>) -> (tensor<4xf32>) {
// @expected-error @+1 {{'quant.region' op has unmatched operands/results number and spec attributes number}}
%0 = "quant.region"(%arg0, %arg1, %arg2) ({
^bb0(%10: tensor<4xf32>, %11: tensor<4xf32>, %12: tensor<4xf32>):
@@ -85,7 +85,7 @@ func @unmatched_number(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>, %arg2: tensor
// -----
-func @isolated(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>, %arg2: tensor<4xf32>) -> (tensor<4xf32>) {
+func.func @isolated(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>, %arg2: tensor<4xf32>) -> (tensor<4xf32>) {
// @expected-note @+1 {{required by region isolation constraints}}
%0 = "quant.region"(%arg0, %arg1) ({
^bb0(%10: tensor<4xf32>, %11: tensor<4xf32>):
diff --git a/mlir/test/Dialect/SCF/bufferize.mlir b/mlir/test/Dialect/SCF/bufferize.mlir
index 05dc51802602e..eb795aeb5eb1f 100644
--- a/mlir/test/Dialect/SCF/bufferize.mlir
+++ b/mlir/test/Dialect/SCF/bufferize.mlir
@@ -14,7 +14,7 @@
// CHECK: %[[RESULT_TENSOR:.*]] = bufferization.to_tensor %[[RESULT_MEMREF:.*]] : memref<?xf32>
// CHECK: return %[[RESULT_TENSOR]] : tensor<?xf32>
// CHECK: }
-func @if(%pred: i1, %true_val: tensor<?xf32>, %false_val: tensor<?xf32>) -> tensor<?xf32> {
+func.func @if(%pred: i1, %true_val: tensor<?xf32>, %false_val: tensor<?xf32>) -> tensor<?xf32> {
%0 = scf.if %pred -> (tensor<?xf32>) {
scf.yield %true_val : tensor<?xf32>
} else {
@@ -34,7 +34,7 @@ func @if(%pred: i1, %true_val: tensor<?xf32>, %false_val: tensor<?xf32>) -> tens
// CHECK: %[[VAL_8:.*]] = bufferization.to_tensor %[[VAL_9:.*]] : memref<f32>
// CHECK: return %[[VAL_8]] : tensor<f32>
// CHECK: }
-func @for(%arg0: tensor<f32>, %lb: index, %ub: index, %step: index) -> tensor<f32> {
+func.func @for(%arg0: tensor<f32>, %lb: index, %ub: index, %step: index) -> tensor<f32> {
%ret = scf.for %iv = %lb to %ub step %step iter_args(%iter = %arg0) -> tensor<f32> {
scf.yield %iter : tensor<f32>
}
@@ -46,7 +46,7 @@ func @for(%arg0: tensor<f32>, %lb: index, %ub: index, %step: index) -> tensor<f3
// It would previously fail altogether.
// CHECK-LABEL: func @if_correct_recursive_legalization_behavior
// CHECK: "test.munge_tensor"
-func @if_correct_recursive_legalization_behavior(%pred: i1, %tensor: tensor<f32>) -> tensor<f32> {
+func.func @if_correct_recursive_legalization_behavior(%pred: i1, %tensor: tensor<f32>) -> tensor<f32> {
%0 = scf.if %pred -> (tensor<f32>) {
%1 = "test.munge_tensor"(%tensor) : (tensor<f32>) -> (tensor<f32>)
scf.yield %1: tensor<f32>
@@ -70,7 +70,7 @@ func @if_correct_recursive_legalization_behavior(%pred: i1, %tensor: tensor<f32>
// CHECK: %[[TENSOR:.*]] = bufferization.to_tensor %[[RESULT:.*]] : memref<f32>
// CHECK: return %[[TENSOR]] : tensor<f32>
// CHECK: }
-func @for_correct_recursive_legalization_behavior(%arg0: tensor<f32>, %index: index) -> tensor<f32> {
+func.func @for_correct_recursive_legalization_behavior(%arg0: tensor<f32>, %index: index) -> tensor<f32> {
%ret = scf.for %iv = %index to %index step %index iter_args(%iter = %arg0) -> tensor<f32> {
%0 = "test.munge_tensor"(%iter) : (tensor<f32>) -> (tensor<f32>)
scf.yield %0 : tensor<f32>
@@ -87,7 +87,7 @@ func @for_correct_recursive_legalization_behavior(%arg0: tensor<f32>, %index: in
// CHECK: scf.yield %{{.*}}, %{{.*}} : i64, memref<f32>
// CHECK: %[[RES2:.*]] = bufferization.to_tensor %[[RES1]]#2 : memref<f32>
// CHECK: return %[[RES1]]#1, %[[RES2]] : i64, tensor<f32>
-func @bufferize_while(%arg0: i64, %arg1: i64, %arg2: tensor<f32>) -> (i64, tensor<f32>) {
+func.func @bufferize_while(%arg0: i64, %arg1: i64, %arg2: tensor<f32>) -> (i64, tensor<f32>) {
%c2_i64 = arith.constant 2 : i64
%0:3 = scf.while (%arg3 = %arg0, %arg4 = %arg2) : (i64, tensor<f32>) -> (i64, i64, tensor<f32>) {
%1 = arith.cmpi slt, %arg3, %arg1 : i64
diff --git a/mlir/test/Dialect/SCF/canonicalize.mlir b/mlir/test/Dialect/SCF/canonicalize.mlir
index de176fb588d87..1b7adfeb778ca 100644
--- a/mlir/test/Dialect/SCF/canonicalize.mlir
+++ b/mlir/test/Dialect/SCF/canonicalize.mlir
@@ -3,7 +3,7 @@
// -----
-func @single_iteration_some(%A: memref<?x?x?xi32>) {
+func.func @single_iteration_some(%A: memref<?x?x?xi32>) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@@ -35,7 +35,7 @@ func @single_iteration_some(%A: memref<?x?x?xi32>) {
// -----
-func @single_iteration_all(%A: memref<?x?x?xi32>) {
+func.func @single_iteration_all(%A: memref<?x?x?xi32>) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c3 = arith.constant 3 : index
@@ -63,7 +63,7 @@ func @single_iteration_all(%A: memref<?x?x?xi32>) {
// -----
-func @single_iteration_reduce(%A: index, %B: index) -> (index, index) {
+func.func @single_iteration_reduce(%A: index, %B: index) -> (index, index) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@@ -99,7 +99,7 @@ func @single_iteration_reduce(%A: index, %B: index) -> (index, index) {
// -----
-func @nested_parallel(%0: memref<?x?x?xf64>) -> memref<?x?x?xf64> {
+func.func @nested_parallel(%0: memref<?x?x?xf64>) -> memref<?x?x?xf64> {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@@ -134,8 +134,8 @@ func @nested_parallel(%0: memref<?x?x?xf64>) -> memref<?x?x?xf64> {
// -----
-func private @side_effect()
-func @one_unused(%cond: i1) -> (index) {
+func.func private @side_effect()
+func.func @one_unused(%cond: i1) -> (index) {
%0, %1 = scf.if %cond -> (index, index) {
call @side_effect() : () -> ()
%c0 = "test.value0"() : () -> (index)
@@ -162,8 +162,8 @@ func @one_unused(%cond: i1) -> (index) {
// -----
-func private @side_effect()
-func @nested_unused(%cond1: i1, %cond2: i1) -> (index) {
+func.func private @side_effect()
+func.func @nested_unused(%cond1: i1, %cond2: i1) -> (index) {
%0, %1 = scf.if %cond1 -> (index, index) {
%2, %3 = scf.if %cond2 -> (index, index) {
call @side_effect() : () -> ()
@@ -203,8 +203,8 @@ func @nested_unused(%cond1: i1, %cond2: i1) -> (index) {
// -----
-func private @side_effect()
-func @all_unused(%cond: i1) {
+func.func private @side_effect()
+func.func @all_unused(%cond: i1) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%0, %1 = scf.if %cond -> (index, index) {
@@ -227,7 +227,7 @@ func @all_unused(%cond: i1) {
// -----
-func @empty_if1(%cond: i1) {
+func.func @empty_if1(%cond: i1) {
scf.if %cond {
scf.yield
}
@@ -240,7 +240,7 @@ func @empty_if1(%cond: i1) {
// -----
-func @empty_if2(%cond: i1) {
+func.func @empty_if2(%cond: i1) {
scf.if %cond {
scf.yield
} else {
@@ -255,7 +255,7 @@ func @empty_if2(%cond: i1) {
// -----
-func @empty_else(%cond: i1, %v : memref<i1>) {
+func.func @empty_else(%cond: i1, %v : memref<i1>) {
scf.if %cond {
memref.store %cond, %v[] : memref<i1>
} else {
@@ -269,7 +269,7 @@ func @empty_else(%cond: i1, %v : memref<i1>) {
// -----
-func @to_select1(%cond: i1) -> index {
+func.func @to_select1(%cond: i1) -> index {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%0 = scf.if %cond -> index {
@@ -288,7 +288,7 @@ func @to_select1(%cond: i1) -> index {
// -----
-func @to_select_same_val(%cond: i1) -> (index, index) {
+func.func @to_select_same_val(%cond: i1) -> (index, index) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%0, %1 = scf.if %cond -> (index, index) {
@@ -306,7 +306,7 @@ func @to_select_same_val(%cond: i1) -> (index, index) {
// CHECK: return [[V0]], [[C1]] : index, index
-func @to_select_with_body(%cond: i1) -> index {
+func.func @to_select_with_body(%cond: i1) -> index {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%0 = scf.if %cond -> index {
@@ -328,7 +328,7 @@ func @to_select_with_body(%cond: i1) -> index {
// CHECK: return [[V0]] : index
// -----
-func @to_select2(%cond: i1) -> (index, index) {
+func.func @to_select2(%cond: i1) -> (index, index) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@@ -352,9 +352,9 @@ func @to_select2(%cond: i1) -> (index, index) {
// -----
-func private @make_i32() -> i32
+func.func private @make_i32() -> i32
-func @for_yields_2(%lb : index, %ub : index, %step : index) -> i32 {
+func.func @for_yields_2(%lb : index, %ub : index, %step : index) -> i32 {
%a = call @make_i32() : () -> (i32)
%b = scf.for %i = %lb to %ub step %step iter_args(%0 = %a) -> i32 {
scf.yield %0 : i32
@@ -366,7 +366,7 @@ func @for_yields_2(%lb : index, %ub : index, %step : index) -> i32 {
// CHECK-NEXT: %[[R:.*]] = call @make_i32() : () -> i32
// CHECK-NEXT: return %[[R]] : i32
-func @for_yields_3(%lb : index, %ub : index, %step : index) -> (i32, i32, i32) {
+func.func @for_yields_3(%lb : index, %ub : index, %step : index) -> (i32, i32, i32) {
%a = call @make_i32() : () -> (i32)
%b = call @make_i32() : () -> (i32)
%r:3 = scf.for %i = %lb to %ub step %step iter_args(%0 = %a, %1 = %a, %2 = %b) -> (i32, i32, i32) {
@@ -389,7 +389,7 @@ func @for_yields_3(%lb : index, %ub : index, %step : index) -> (i32, i32, i32) {
// Test that an empty loop which iterates at least once and only returns
// values defined outside of the loop is folded away.
-func @for_yields_4() -> i32 {
+func.func @for_yields_4() -> i32 {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@@ -408,7 +408,7 @@ func @for_yields_4() -> i32 {
// -----
// CHECK-LABEL: @replace_true_if
-func @replace_true_if() {
+func.func @replace_true_if() {
%true = arith.constant true
// CHECK-NOT: scf.if
// CHECK: "test.op"
@@ -422,7 +422,7 @@ func @replace_true_if() {
// -----
// CHECK-LABEL: @remove_false_if
-func @remove_false_if() {
+func.func @remove_false_if() {
%false = arith.constant false
// CHECK-NOT: scf.if
// CHECK-NOT: "test.op"
@@ -436,7 +436,7 @@ func @remove_false_if() {
// -----
// CHECK-LABEL: @replace_true_if_with_values
-func @replace_true_if_with_values() {
+func.func @replace_true_if_with_values() {
%true = arith.constant true
// CHECK-NOT: scf.if
// CHECK: %[[VAL:.*]] = "test.op"
@@ -455,7 +455,7 @@ func @replace_true_if_with_values() {
// -----
// CHECK-LABEL: @replace_false_if_with_values
-func @replace_false_if_with_values() {
+func.func @replace_false_if_with_values() {
%false = arith.constant false
// CHECK-NOT: scf.if
// CHECK: %[[VAL:.*]] = "test.other_op"
@@ -475,7 +475,7 @@ func @replace_false_if_with_values() {
// CHECK-LABEL: @merge_nested_if
// CHECK-SAME: (%[[ARG0:.*]]: i1, %[[ARG1:.*]]: i1)
-func @merge_nested_if(%arg0: i1, %arg1: i1) {
+func.func @merge_nested_if(%arg0: i1, %arg1: i1) {
// CHECK: %[[COND:.*]] = arith.andi %[[ARG0]], %[[ARG1]]
// CHECK: scf.if %[[COND]] {
// CHECK-NEXT: "test.op"()
@@ -493,7 +493,7 @@ func @merge_nested_if(%arg0: i1, %arg1: i1) {
// CHECK-LABEL: @merge_yielding_nested_if
// CHECK-SAME: (%[[ARG0:.*]]: i1, %[[ARG1:.*]]: i1)
-func @merge_yielding_nested_if(%arg0: i1, %arg1: i1) -> (i32, f32, i32, i8) {
+func.func @merge_yielding_nested_if(%arg0: i1, %arg1: i1) -> (i32, f32, i32, i8) {
// CHECK: %[[PRE0:.*]] = "test.op"() : () -> i32
// CHECK: %[[PRE1:.*]] = "test.op1"() : () -> f32
// CHECK: %[[PRE2:.*]] = "test.op2"() : () -> i32
@@ -528,7 +528,7 @@ func @merge_yielding_nested_if(%arg0: i1, %arg1: i1) -> (i32, f32, i32, i8) {
// CHECK-LABEL: @merge_yielding_nested_if_nv1
// CHECK-SAME: (%[[ARG0:.*]]: i1, %[[ARG1:.*]]: i1)
-func @merge_yielding_nested_if_nv1(%arg0: i1, %arg1: i1) {
+func.func @merge_yielding_nested_if_nv1(%arg0: i1, %arg1: i1) {
// CHECK: %[[PRE0:.*]] = "test.op"() : () -> i32
// CHECK: %[[PRE1:.*]] = "test.op1"() : () -> f32
// CHECK: %[[COND:.*]] = arith.andi %[[ARG0]], %[[ARG1]]
@@ -552,7 +552,7 @@ func @merge_yielding_nested_if_nv1(%arg0: i1, %arg1: i1) {
// CHECK-LABEL: @merge_yielding_nested_if_nv2
// CHECK-SAME: (%[[ARG0:.*]]: i1, %[[ARG1:.*]]: i1)
-func @merge_yielding_nested_if_nv2(%arg0: i1, %arg1: i1) -> i32 {
+func.func @merge_yielding_nested_if_nv2(%arg0: i1, %arg1: i1) -> i32 {
// CHECK: %[[PRE0:.*]] = "test.op"() : () -> i32
// CHECK: %[[PRE1:.*]] = "test.op1"() : () -> i32
// CHECK: %[[COND:.*]] = arith.andi %[[ARG0]], %[[ARG1]]
@@ -576,7 +576,7 @@ func @merge_yielding_nested_if_nv2(%arg0: i1, %arg1: i1) -> i32 {
// CHECK-LABEL: @merge_fail_yielding_nested_if
// CHECK-SAME: (%[[ARG0:.*]]: i1, %[[ARG1:.*]]: i1)
-func @merge_fail_yielding_nested_if(%arg0: i1, %arg1: i1) -> (i32, f32, i32, i8) {
+func.func @merge_fail_yielding_nested_if(%arg0: i1, %arg1: i1) -> (i32, f32, i32, i8) {
// CHECK-NOT: andi
%0 = "test.op"() : () -> (i32)
%1 = "test.op1"() : () -> (f32)
@@ -607,7 +607,7 @@ func @merge_fail_yielding_nested_if(%arg0: i1, %arg1: i1) -> (i32, f32, i32, i8)
// CHECK-NEXT: %[[i2:.+]] = "test.origTrue"() : () -> index
// CHECK-NEXT: scf.yield %[[i2]] : index
// CHECK-NEXT: }
-func @if_condition_swap(%cond: i1) -> index {
+func.func @if_condition_swap(%cond: i1) -> index {
%true = arith.constant true
%not = arith.xori %cond, %true : i1
%0 = scf.if %not -> (index) {
@@ -623,7 +623,7 @@ func @if_condition_swap(%cond: i1) -> index {
// -----
// CHECK-LABEL: @remove_zero_iteration_loop
-func @remove_zero_iteration_loop() {
+func.func @remove_zero_iteration_loop() {
%c42 = arith.constant 42 : index
%c1 = arith.constant 1 : index
// CHECK: %[[INIT:.*]] = "test.init"
@@ -641,7 +641,7 @@ func @remove_zero_iteration_loop() {
// -----
// CHECK-LABEL: @remove_zero_iteration_loop_vals
-func @remove_zero_iteration_loop_vals(%arg0: index) {
+func.func @remove_zero_iteration_loop_vals(%arg0: index) {
%c2 = arith.constant 2 : index
// CHECK: %[[INIT:.*]] = "test.init"
%init = "test.init"() : () -> i32
@@ -659,7 +659,7 @@ func @remove_zero_iteration_loop_vals(%arg0: index) {
// -----
// CHECK-LABEL: @replace_single_iteration_loop_1
-func @replace_single_iteration_loop_1() {
+func.func @replace_single_iteration_loop_1() {
// CHECK: %[[LB:.*]] = arith.constant 42
%c42 = arith.constant 42 : index
%c43 = arith.constant 43 : index
@@ -680,7 +680,7 @@ func @replace_single_iteration_loop_1() {
// -----
// CHECK-LABEL: @replace_single_iteration_loop_2
-func @replace_single_iteration_loop_2() {
+func.func @replace_single_iteration_loop_2() {
// CHECK: %[[LB:.*]] = arith.constant 5
%c5 = arith.constant 5 : index
%c6 = arith.constant 6 : index
@@ -701,7 +701,7 @@ func @replace_single_iteration_loop_2() {
// -----
// CHECK-LABEL: @replace_single_iteration_loop_non_unit_step
-func @replace_single_iteration_loop_non_unit_step() {
+func.func @replace_single_iteration_loop_non_unit_step() {
// CHECK: %[[LB:.*]] = arith.constant 42
%c42 = arith.constant 42 : index
%c47 = arith.constant 47 : index
@@ -722,7 +722,7 @@ func @replace_single_iteration_loop_non_unit_step() {
// -----
// CHECK-LABEL: @remove_empty_parallel_loop
-func @remove_empty_parallel_loop(%lb: index, %ub: index, %s: index) {
+func.func @remove_empty_parallel_loop(%lb: index, %ub: index, %s: index) {
// CHECK: %[[INIT:.*]] = "test.init"
%init = "test.init"() : () -> f32
// CHECK-NOT: scf.parallel
@@ -744,15 +744,15 @@ func @remove_empty_parallel_loop(%lb: index, %ub: index, %s: index) {
// -----
-func private @process(%0 : memref<128x128xf32>)
-func private @process_tensor(%0 : tensor<128x128xf32>) -> memref<128x128xf32>
+func.func private @process(%0 : memref<128x128xf32>)
+func.func private @process_tensor(%0 : tensor<128x128xf32>) -> memref<128x128xf32>
// CHECK-LABEL: last_value
// CHECK-SAME: %[[T0:[0-9a-z]*]]: tensor<128x128xf32>
// CHECK-SAME: %[[T1:[0-9a-z]*]]: tensor<128x128xf32>
// CHECK-SAME: %[[T2:[0-9a-z]*]]: tensor<128x128xf32>
// CHECK-SAME: %[[M0:[0-9a-z]*]]: memref<128x128xf32>
-func @last_value(%t0: tensor<128x128xf32>, %t1: tensor<128x128xf32>,
+func.func @last_value(%t0: tensor<128x128xf32>, %t1: tensor<128x128xf32>,
%t2: tensor<128x128xf32>, %m0: memref<128x128xf32>,
%lb : index, %ub : index, %step : index)
-> (tensor<128x128xf32>, tensor<128x128xf32>, tensor<128x128xf32>)
@@ -796,7 +796,7 @@ func @last_value(%t0: tensor<128x128xf32>, %t1: tensor<128x128xf32>,
// CHECK-LABEL: fold_away_iter_with_no_use_and_yielded_input
// CHECK-SAME: %[[A0:[0-9a-z]*]]: i32
-func @fold_away_iter_with_no_use_and_yielded_input(%arg0 : i32,
+func.func @fold_away_iter_with_no_use_and_yielded_input(%arg0 : i32,
%ub : index, %lb : index, %step : index) -> (i32, i32) {
// CHECK-NEXT: %[[C32:.*]] = arith.constant 32 : i32
%cst = arith.constant 32 : i32
@@ -815,7 +815,7 @@ func @fold_away_iter_with_no_use_and_yielded_input(%arg0 : i32,
// CHECK-LABEL: fold_away_iter_and_result_with_no_use
// CHECK-SAME: %[[A0:[0-9a-z]*]]: i32
-func @fold_away_iter_and_result_with_no_use(%arg0 : i32,
+func.func @fold_away_iter_and_result_with_no_use(%arg0 : i32,
%ub : index, %lb : index, %step : index) -> (i32) {
%cst = arith.constant 32 : i32
// CHECK: %[[FOR_RES:.*]] = scf.for {{.*}} iter_args({{.*}} = %[[A0]]) -> (i32) {
@@ -831,12 +831,12 @@ func @fold_away_iter_and_result_with_no_use(%arg0 : i32,
// -----
-func private @do(%arg0: tensor<?x?xf32>) -> tensor<?x?xf32>
+func.func private @do(%arg0: tensor<?x?xf32>) -> tensor<?x?xf32>
// CHECK-LABEL: matmul_on_tensors
// CHECK-SAME: %[[T0:[0-9a-z]*]]: tensor<32x1024xf32>
// CHECK-SAME: %[[T1:[0-9a-z]*]]: tensor<1024x1024xf32>
-func @matmul_on_tensors(%t0: tensor<32x1024xf32>, %t1: tensor<1024x1024xf32>) -> tensor<1024x1024xf32> {
+func.func @matmul_on_tensors(%t0: tensor<32x1024xf32>, %t1: tensor<1024x1024xf32>) -> tensor<1024x1024xf32> {
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c1024 = arith.constant 1024 : index
@@ -862,7 +862,7 @@ func @matmul_on_tensors(%t0: tensor<32x1024xf32>, %t1: tensor<1024x1024xf32>) ->
// -----
// CHECK-LABEL: @cond_prop
-func @cond_prop(%arg0 : i1) -> index {
+func.func @cond_prop(%arg0 : i1) -> index {
%res = scf.if %arg0 -> index {
%res1 = scf.if %arg0 -> index {
%v1 = "test.get_some_value1"() : () -> index
@@ -897,7 +897,7 @@ func @cond_prop(%arg0 : i1) -> index {
// -----
// CHECK-LABEL: @replace_if_with_cond1
-func @replace_if_with_cond1(%arg0 : i1) -> (i32, i1) {
+func.func @replace_if_with_cond1(%arg0 : i1) -> (i32, i1) {
%true = arith.constant true
%false = arith.constant false
%res:2 = scf.if %arg0 -> (i32, i1) {
@@ -921,7 +921,7 @@ func @replace_if_with_cond1(%arg0 : i1) -> (i32, i1) {
// -----
// CHECK-LABEL: @replace_if_with_cond2
-func @replace_if_with_cond2(%arg0 : i1) -> (i32, i1) {
+func.func @replace_if_with_cond2(%arg0 : i1) -> (i32, i1) {
%true = arith.constant true
%false = arith.constant false
%res:2 = scf.if %arg0 -> (i32, i1) {
@@ -947,7 +947,7 @@ func @replace_if_with_cond2(%arg0 : i1) -> (i32, i1) {
// -----
// CHECK-LABEL: @replace_if_with_cond3
-func @replace_if_with_cond3(%arg0 : i1, %arg2: i64) -> (i32, i64) {
+func.func @replace_if_with_cond3(%arg0 : i1, %arg2: i64) -> (i32, i64) {
%res:2 = scf.if %arg0 -> (i32, i64) {
%v = "test.get_some_value"() : () -> i32
scf.yield %v, %arg2 : i32, i64
@@ -969,7 +969,7 @@ func @replace_if_with_cond3(%arg0 : i1, %arg2: i64) -> (i32, i64) {
// -----
// CHECK-LABEL: @while_cond_true
-func @while_cond_true() -> i1 {
+func.func @while_cond_true() -> i1 {
%0 = scf.while () : () -> i1 {
%condition = "test.condition"() : () -> i1
scf.condition(%condition) %condition : i1
@@ -993,7 +993,7 @@ func @while_cond_true() -> i1 {
// -----
// CHECK-LABEL: @while_unused_arg
-func @while_unused_arg(%x : i32, %y : f64) -> i32 {
+func.func @while_unused_arg(%x : i32, %y : f64) -> i32 {
%0 = scf.while (%arg1 = %x, %arg2 = %y) : (i32, f64) -> (i32) {
%condition = "test.condition"(%arg1) : (i32) -> i1
scf.condition(%condition) %arg1 : i32
@@ -1018,7 +1018,7 @@ func @while_unused_arg(%x : i32, %y : f64) -> i32 {
// CHECK-LABEL: @invariant_loop_args_in_same_order
// CHECK-SAME: (%[[FUNC_ARG0:.*]]: tensor<i32>)
-func @invariant_loop_args_in_same_order(%f_arg0: tensor<i32>) -> (tensor<i32>, tensor<i32>, tensor<i32>, tensor<i32>, tensor<i32>) {
+func.func @invariant_loop_args_in_same_order(%f_arg0: tensor<i32>) -> (tensor<i32>, tensor<i32>, tensor<i32>, tensor<i32>, tensor<i32>) {
%cst_0 = arith.constant dense<0> : tensor<i32>
%cst_1 = arith.constant dense<1> : tensor<i32>
%cst_42 = arith.constant dense<42> : tensor<i32>
@@ -1052,7 +1052,7 @@ func @invariant_loop_args_in_same_order(%f_arg0: tensor<i32>) -> (tensor<i32>, t
// CHECK: return %[[WHILE]]#0, %[[FUNC_ARG0]], %[[WHILE]]#1, %[[WHILE]]#2, %[[ZERO]]
// CHECK-LABEL: @while_loop_invariant_argument_different_order
-func @while_loop_invariant_argument_different_order() -> (tensor<i32>, tensor<i32>, tensor<i32>, tensor<i32>, tensor<i32>, tensor<i32>) {
+func.func @while_loop_invariant_argument_different_order() -> (tensor<i32>, tensor<i32>, tensor<i32>, tensor<i32>, tensor<i32>, tensor<i32>) {
%cst_0 = arith.constant dense<0> : tensor<i32>
%cst_1 = arith.constant dense<1> : tensor<i32>
%cst_42 = arith.constant dense<42> : tensor<i32>
@@ -1085,7 +1085,7 @@ func @while_loop_invariant_argument_different_order() -> (tensor<i32>, tensor<i3
// -----
// CHECK-LABEL: @while_unused_result
-func @while_unused_result() -> i32 {
+func.func @while_unused_result() -> i32 {
%0:2 = scf.while () : () -> (i32, i64) {
%condition = "test.condition"() : () -> i1
%v1 = "test.get_some_value"() : () -> i32
@@ -1111,7 +1111,7 @@ func @while_unused_result() -> i32 {
// CHECK-NEXT: return %[[res]] : i32
// CHECK-LABEL: @while_cmp_lhs
-func @while_cmp_lhs(%arg0 : i32) {
+func.func @while_cmp_lhs(%arg0 : i32) {
%0 = scf.while () : () -> i32 {
%val = "test.val"() : () -> i32
%condition = arith.cmpi ne, %val, %arg0 : i32
@@ -1138,7 +1138,7 @@ func @while_cmp_lhs(%arg0 : i32) {
// CHECK-NEXT: }
// CHECK-LABEL: @while_cmp_rhs
-func @while_cmp_rhs(%arg0 : i32) {
+func.func @while_cmp_rhs(%arg0 : i32) {
%0 = scf.while () : () -> i32 {
%val = "test.val"() : () -> i32
%condition = arith.cmpi ne, %arg0, %val : i32
@@ -1167,7 +1167,7 @@ func @while_cmp_rhs(%arg0 : i32) {
// -----
// CHECK-LABEL: @combineIfs
-func @combineIfs(%arg0 : i1, %arg2: i64) -> (i32, i32) {
+func.func @combineIfs(%arg0 : i1, %arg2: i64) -> (i32, i32) {
%res = scf.if %arg0 -> i32 {
%v = "test.firstCodeTrue"() : () -> i32
scf.yield %v : i32
@@ -1197,7 +1197,7 @@ func @combineIfs(%arg0 : i1, %arg2: i64) -> (i32, i32) {
// CHECK-LABEL: @combineIfs2
-func @combineIfs2(%arg0 : i1, %arg2: i64) -> i32 {
+func.func @combineIfs2(%arg0 : i1, %arg2: i64) -> i32 {
scf.if %arg0 {
"test.firstCodeTrue"() : () -> ()
scf.yield
@@ -1223,7 +1223,7 @@ func @combineIfs2(%arg0 : i1, %arg2: i64) -> i32 {
// CHECK-LABEL: @combineIfs3
-func @combineIfs3(%arg0 : i1, %arg2: i64) -> i32 {
+func.func @combineIfs3(%arg0 : i1, %arg2: i64) -> i32 {
%res = scf.if %arg0 -> i32 {
%v = "test.firstCodeTrue"() : () -> i32
scf.yield %v : i32
@@ -1248,7 +1248,7 @@ func @combineIfs3(%arg0 : i1, %arg2: i64) -> i32 {
// CHECK-NEXT: return %[[res]] : i32
// CHECK-LABEL: @combineIfs4
-func @combineIfs4(%arg0 : i1, %arg2: i64) {
+func.func @combineIfs4(%arg0 : i1, %arg2: i64) {
scf.if %arg0 {
"test.firstCodeTrue"() : () -> ()
scf.yield
@@ -1267,7 +1267,7 @@ func @combineIfs4(%arg0 : i1, %arg2: i64) {
// CHECK-LABEL: @combineIfsUsed
// CHECK-SAME: %[[arg0:.+]]: i1
-func @combineIfsUsed(%arg0 : i1, %arg2: i64) -> (i32, i32) {
+func.func @combineIfsUsed(%arg0 : i1, %arg2: i64) -> (i32, i32) {
%res = scf.if %arg0 -> i32 {
%v = "test.firstCodeTrue"() : () -> i32
scf.yield %v : i32
@@ -1297,7 +1297,7 @@ func @combineIfsUsed(%arg0 : i1, %arg2: i64) -> (i32, i32) {
// CHECK-LABEL: @combineIfsNot
// CHECK-SAME: %[[arg0:.+]]: i1
-func @combineIfsNot(%arg0 : i1, %arg2: i64) {
+func.func @combineIfsNot(%arg0 : i1, %arg2: i64) {
%true = arith.constant true
%not = arith.xori %arg0, %true : i1
scf.if %arg0 {
@@ -1319,7 +1319,7 @@ func @combineIfsNot(%arg0 : i1, %arg2: i64) {
// CHECK-LABEL: @combineIfsNot2
// CHECK-SAME: %[[arg0:.+]]: i1
-func @combineIfsNot2(%arg0 : i1, %arg2: i64) {
+func.func @combineIfsNot2(%arg0 : i1, %arg2: i64) {
%true = arith.constant true
%not = arith.xori %arg0, %true : i1
scf.if %not {
@@ -1341,7 +1341,7 @@ func @combineIfsNot2(%arg0 : i1, %arg2: i64) {
// -----
// CHECK-LABEL: func @propagate_into_execute_region
-func @propagate_into_execute_region() {
+func.func @propagate_into_execute_region() {
%cond = arith.constant 0 : i1
affine.for %i = 0 to 100 {
"test.foo"() : () -> ()
@@ -1370,7 +1370,7 @@ func @propagate_into_execute_region() {
// -----
// CHECK-LABEL: func @execute_region_elim
-func @execute_region_elim() {
+func.func @execute_region_elim() {
affine.for %i = 0 to 100 {
"test.foo"() : () -> ()
%v = scf.execute_region -> i64 {
@@ -1392,7 +1392,7 @@ func @execute_region_elim() {
// -----
// CHECK-LABEL: func @func_execute_region_elim
-func @func_execute_region_elim() {
+func.func @func_execute_region_elim() {
"test.foo"() : () -> ()
%v = scf.execute_region -> i64 {
%c = "test.cmp"() : () -> i1
@@ -1428,7 +1428,7 @@ func @func_execute_region_elim() {
// -----
// CHECK-LABEL: func @func_execute_region_elim_multi_yield
-func @func_execute_region_elim_multi_yield() {
+func.func @func_execute_region_elim_multi_yield() {
"test.foo"() : () -> ()
%v = scf.execute_region -> i64 {
%c = "test.cmp"() : () -> i1
diff --git a/mlir/test/Dialect/SCF/control-flow-sink.mlir b/mlir/test/Dialect/SCF/control-flow-sink.mlir
index 787c8d0c2914a..8fe2a23750c27 100644
--- a/mlir/test/Dialect/SCF/control-flow-sink.mlir
+++ b/mlir/test/Dialect/SCF/control-flow-sink.mlir
@@ -9,7 +9,7 @@
// CHECK: %[[V1:.*]] = arith.muli %[[ARG1]], %[[ARG1]]
// CHECK: scf.yield %[[V1]]
// CHECK: return %[[V0]]
-func @test_scf_if_sink(%arg0: i1, %arg1: i32) -> i32 {
+func.func @test_scf_if_sink(%arg0: i1, %arg1: i32) -> i32 {
%0 = arith.addi %arg1, %arg1 : i32
%1 = arith.muli %arg1, %arg1 : i32
%result = scf.if %arg0 -> i32 {
@@ -22,14 +22,14 @@ func @test_scf_if_sink(%arg0: i1, %arg1: i32) -> i32 {
// -----
-func private @consume(i32) -> ()
+func.func private @consume(i32) -> ()
// CHECK-LABEL: @test_scf_if_then_only_sink
// CHECK-SAME: (%[[ARG0:.*]]: i1, %[[ARG1:.*]]: i32)
// CHECK: scf.if %[[ARG0]]
// CHECK: %[[V0:.*]] = arith.addi %[[ARG1]], %[[ARG1]]
// CHECK: call @consume(%[[V0]])
-func @test_scf_if_then_only_sink(%arg0: i1, %arg1: i32) {
+func.func @test_scf_if_then_only_sink(%arg0: i1, %arg1: i32) {
%0 = arith.addi %arg1, %arg1 : i32
scf.if %arg0 {
call @consume(%0) : (i32) -> ()
@@ -40,7 +40,7 @@ func @test_scf_if_then_only_sink(%arg0: i1, %arg1: i32) {
// -----
-func private @consume(i32) -> ()
+func.func private @consume(i32) -> ()
// CHECK-LABEL: @test_scf_if_double_sink
// CHECK-SAME: (%[[ARG0:.*]]: i1, %[[ARG1:.*]]: i32)
@@ -48,7 +48,7 @@ func private @consume(i32) -> ()
// CHECK: scf.if %[[ARG0]]
// CHECK: %[[V0:.*]] = arith.addi %[[ARG1]], %[[ARG1]]
// CHECK: call @consume(%[[V0]])
-func @test_scf_if_double_sink(%arg0: i1, %arg1: i32) {
+func.func @test_scf_if_double_sink(%arg0: i1, %arg1: i32) {
%0 = arith.addi %arg1, %arg1 : i32
scf.if %arg0 {
scf.if %arg0 {
diff --git a/mlir/test/Dialect/SCF/for-loop-canonicalization.mlir b/mlir/test/Dialect/SCF/for-loop-canonicalization.mlir
index da66be5ae5cca..0497429152550 100644
--- a/mlir/test/Dialect/SCF/for-loop-canonicalization.mlir
+++ b/mlir/test/Dialect/SCF/for-loop-canonicalization.mlir
@@ -4,7 +4,7 @@
// CHECK: %[[C2:.*]] = arith.constant 2 : i64
// CHECK: scf.for
// CHECK: memref.store %[[C2]], %{{.*}}[] : memref<i64>
-func @scf_for_canonicalize_min(%A : memref<i64>) {
+func.func @scf_for_canonicalize_min(%A : memref<i64>) {
%c0 = arith.constant 0 : index
%c2 = arith.constant 2 : index
%c4 = arith.constant 4 : index
@@ -23,7 +23,7 @@ func @scf_for_canonicalize_min(%A : memref<i64>) {
// CHECK: %[[Cneg2:.*]] = arith.constant -2 : i64
// CHECK: scf.for
// CHECK: memref.store %[[Cneg2]], %{{.*}}[] : memref<i64>
-func @scf_for_canonicalize_max(%A : memref<i64>) {
+func.func @scf_for_canonicalize_max(%A : memref<i64>) {
%c0 = arith.constant 0 : index
%c2 = arith.constant 2 : index
%c4 = arith.constant 4 : index
@@ -42,7 +42,7 @@ func @scf_for_canonicalize_max(%A : memref<i64>) {
// CHECK: scf.for
// CHECK: affine.max
// CHECK: arith.index_cast
-func @scf_for_max_not_canonicalizable(%A : memref<i64>) {
+func.func @scf_for_max_not_canonicalizable(%A : memref<i64>) {
%c0 = arith.constant 0 : index
%c2 = arith.constant 2 : index
%c3 = arith.constant 3 : index
@@ -63,7 +63,7 @@ func @scf_for_max_not_canonicalizable(%A : memref<i64>) {
// CHECK: scf.for
// CHECK: scf.for
// CHECK: memref.store %[[C5]], %{{.*}}[] : memref<i64>
-func @scf_for_loop_nest_canonicalize_min(%A : memref<i64>) {
+func.func @scf_for_loop_nest_canonicalize_min(%A : memref<i64>) {
%c0 = arith.constant 0 : index
%c2 = arith.constant 2 : index
%c3 = arith.constant 3 : index
@@ -86,7 +86,7 @@ func @scf_for_loop_nest_canonicalize_min(%A : memref<i64>) {
// CHECK: scf.for
// CHECK: affine.min
// CHECK: arith.index_cast
-func @scf_for_not_canonicalizable_1(%A : memref<i64>) {
+func.func @scf_for_not_canonicalizable_1(%A : memref<i64>) {
// This should not canonicalize because: 4 - %i may take the value 1 < 2.
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@@ -106,7 +106,7 @@ func @scf_for_not_canonicalizable_1(%A : memref<i64>) {
// CHECK: scf.for
// CHECK: affine.apply
// CHECK: arith.index_cast
-func @scf_for_canonicalize_partly(%A : memref<i64>) {
+func.func @scf_for_canonicalize_partly(%A : memref<i64>) {
// This should canonicalize only partly: 256 - %i <= 256.
%c1 = arith.constant 1 : index
%c16 = arith.constant 16 : index
@@ -126,7 +126,7 @@ func @scf_for_canonicalize_partly(%A : memref<i64>) {
// CHECK: scf.for
// CHECK: affine.min
// CHECK: arith.index_cast
-func @scf_for_not_canonicalizable_2(%A : memref<i64>, %step : index) {
+func.func @scf_for_not_canonicalizable_2(%A : memref<i64>, %step : index) {
// This example should simplify but affine_map is currently missing
// semi-affine canonicalizations: `((s0 * 42 - 1) floordiv s0) * s0`
// should evaluate to 41 * s0.
@@ -149,7 +149,7 @@ func @scf_for_not_canonicalizable_2(%A : memref<i64>, %step : index) {
// CHECK: scf.for
// CHECK: affine.min
// CHECK: arith.index_cast
-func @scf_for_not_canonicalizable_3(%A : memref<i64>, %step : index) {
+func.func @scf_for_not_canonicalizable_3(%A : memref<i64>, %step : index) {
// This example should simplify but affine_map is currently missing
// semi-affine canonicalizations: `-(((s0 * s0 - 1) floordiv s0) * s0)`
// should evaluate to (s0 - 1) * s0.
@@ -172,7 +172,7 @@ func @scf_for_not_canonicalizable_3(%A : memref<i64>, %step : index) {
// CHECK: scf.for
// CHECK: affine.min
// CHECK: arith.index_cast
-func @scf_for_invalid_loop(%A : memref<i64>, %step : index) {
+func.func @scf_for_invalid_loop(%A : memref<i64>, %step : index) {
// This is an invalid loop. It should not be touched by the canonicalization
// pattern.
%c1 = arith.constant 1 : index
@@ -193,7 +193,7 @@ func @scf_for_invalid_loop(%A : memref<i64>, %step : index) {
// CHECK: %[[C2:.*]] = arith.constant 2 : i64
// CHECK: scf.parallel
// CHECK-NEXT: memref.store %[[C2]], %{{.*}}[] : memref<i64>
-func @scf_parallel_canonicalize_min_1(%A : memref<i64>) {
+func.func @scf_parallel_canonicalize_min_1(%A : memref<i64>) {
%c0 = arith.constant 0 : index
%c2 = arith.constant 2 : index
%c4 = arith.constant 4 : index
@@ -212,7 +212,7 @@ func @scf_parallel_canonicalize_min_1(%A : memref<i64>) {
// CHECK: %[[C2:.*]] = arith.constant 2 : i64
// CHECK: scf.parallel
// CHECK-NEXT: memref.store %[[C2]], %{{.*}}[] : memref<i64>
-func @scf_parallel_canonicalize_min_2(%A : memref<i64>) {
+func.func @scf_parallel_canonicalize_min_2(%A : memref<i64>) {
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
%c7 = arith.constant 7 : index
@@ -231,7 +231,7 @@ func @scf_parallel_canonicalize_min_2(%A : memref<i64>) {
// CHECK-SAME: %[[t:.*]]: tensor<?x?xf32>
// CHECK: scf.for
// CHECK: tensor.dim %[[t]]
-func @tensor_dim_of_iter_arg(%t : tensor<?x?xf32>) -> index {
+func.func @tensor_dim_of_iter_arg(%t : tensor<?x?xf32>) -> index {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c10 = arith.constant 10 : index
@@ -249,7 +249,7 @@ func @tensor_dim_of_iter_arg(%t : tensor<?x?xf32>) -> index {
// CHECK-SAME: %[[t:.*]]: tensor<?x?xf32>,
// CHECK: scf.for
// CHECK: tensor.dim %[[t]]
-func @tensor_dim_of_iter_arg_insertslice(%t : tensor<?x?xf32>,
+func.func @tensor_dim_of_iter_arg_insertslice(%t : tensor<?x?xf32>,
%t2 : tensor<10x10xf32>) -> index {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
@@ -273,7 +273,7 @@ func @tensor_dim_of_iter_arg_insertslice(%t : tensor<?x?xf32>,
// CHECK: scf.for
// CHECK: scf.for
// CHECK: tensor.dim %[[t]]
-func @tensor_dim_of_iter_arg_nested_for(%t : tensor<?x?xf32>,
+func.func @tensor_dim_of_iter_arg_nested_for(%t : tensor<?x?xf32>,
%t2 : tensor<10x10xf32>) -> index {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
@@ -302,7 +302,7 @@ func @tensor_dim_of_iter_arg_nested_for(%t : tensor<?x?xf32>,
// CHECK-SAME: %[[t:.*]]: tensor<?x?xf32>,
// CHECK: scf.for {{.*}} iter_args(%[[arg0:.*]] = %[[t]]
// CHECK: tensor.dim %[[arg0]]
-func @tensor_dim_of_iter_arg_no_canonicalize(%t : tensor<?x?xf32>,
+func.func @tensor_dim_of_iter_arg_no_canonicalize(%t : tensor<?x?xf32>,
%t2 : tensor<?x?xf32>) -> index {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
@@ -320,7 +320,7 @@ func @tensor_dim_of_iter_arg_no_canonicalize(%t : tensor<?x?xf32>,
// CHECK-LABEL: func @tensor_dim_of_loop_result(
// CHECK-SAME: %[[t:.*]]: tensor<?x?xf32>
// CHECK: tensor.dim %[[t]]
-func @tensor_dim_of_loop_result(%t : tensor<?x?xf32>) -> index {
+func.func @tensor_dim_of_loop_result(%t : tensor<?x?xf32>) -> index {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c10 = arith.constant 10 : index
@@ -337,7 +337,7 @@ func @tensor_dim_of_loop_result(%t : tensor<?x?xf32>) -> index {
// CHECK-LABEL: func @tensor_dim_of_loop_result_no_canonicalize(
// CHECK: %[[loop:.*]]:2 = scf.for
// CHECK: tensor.dim %[[loop]]#1
-func @tensor_dim_of_loop_result_no_canonicalize(%t : tensor<?x?xf32>,
+func.func @tensor_dim_of_loop_result_no_canonicalize(%t : tensor<?x?xf32>,
%u : tensor<?x?xf32>) -> index {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
@@ -356,7 +356,7 @@ func @tensor_dim_of_loop_result_no_canonicalize(%t : tensor<?x?xf32>,
// CHECK: %[[C4:.*]] = arith.constant 4 : i64
// CHECK: scf.for
// CHECK: memref.store %[[C4]], %{{.*}}[] : memref<i64>
-func @one_trip_scf_for_canonicalize_min(%A : memref<i64>) {
+func.func @one_trip_scf_for_canonicalize_min(%A : memref<i64>) {
%c0 = arith.constant 0 : index
%c2 = arith.constant 2 : index
%c4 = arith.constant 4 : index
diff --git a/mlir/test/Dialect/SCF/for-loop-peeling.mlir b/mlir/test/Dialect/SCF/for-loop-peeling.mlir
index 50c0ef24a953a..b1cf1f2414de0 100644
--- a/mlir/test/Dialect/SCF/for-loop-peeling.mlir
+++ b/mlir/test/Dialect/SCF/for-loop-peeling.mlir
@@ -22,7 +22,7 @@
// CHECK: }
// CHECK: return %[[RESULT]]
#map = affine_map<(d0, d1)[s0] -> (s0, d0 - d1)>
-func @fully_dynamic_bounds(%lb : index, %ub: index, %step: index) -> i32 {
+func.func @fully_dynamic_bounds(%lb : index, %ub: index, %step: index) -> i32 {
%c0 = arith.constant 0 : i32
%r = scf.for %iv = %lb to %ub step %step iter_args(%arg = %c0) -> i32 {
%s = affine.min #map(%ub, %iv)[%step]
@@ -50,7 +50,7 @@ func @fully_dynamic_bounds(%lb : index, %ub: index, %step: index) -> i32 {
// CHECK: %[[RESULT:.*]] = arith.addi %[[LOOP]], %[[C1_I32]] : i32
// CHECK: return %[[RESULT]]
#map = affine_map<(d0, d1)[s0] -> (s0, d0 - d1)>
-func @fully_static_bounds() -> i32 {
+func.func @fully_static_bounds() -> i32 {
%c0_i32 = arith.constant 0 : i32
%lb = arith.constant 0 : index
%step = arith.constant 4 : index
@@ -90,7 +90,7 @@ func @fully_static_bounds() -> i32 {
// CHECK: }
// CHECK: return %[[RESULT]]
#map = affine_map<(d0, d1)[s0] -> (s0, d0 - d1)>
-func @dynamic_upper_bound(%ub : index) -> i32 {
+func.func @dynamic_upper_bound(%ub : index) -> i32 {
%c0_i32 = arith.constant 0 : i32
%lb = arith.constant 0 : index
%step = arith.constant 4 : index
@@ -128,7 +128,7 @@ func @dynamic_upper_bound(%ub : index) -> i32 {
// CHECK: }
// CHECK: return
#map = affine_map<(d0, d1)[s0] -> (s0, d0 - d1)>
-func @no_loop_results(%ub : index, %d : memref<i32>) {
+func.func @no_loop_results(%ub : index, %d : memref<i32>) {
%c0_i32 = arith.constant 0 : i32
%lb = arith.constant 0 : index
%step = arith.constant 4 : index
@@ -192,7 +192,7 @@ func @no_loop_results(%ub : index, %d : memref<i32>) {
#map3 = affine_map<(d0, d1)[s0] -> (s0, d0 - d1 - 1)>
#map4 = affine_map<(d0, d1, d2)[s0] -> (s0, d0 - d1, d2)>
#map5 = affine_map<(d0, d1)[s0] -> (-s0, -d0 + d1)>
-func @test_affine_op_rewrite(%lb : index, %ub: index,
+func.func @test_affine_op_rewrite(%lb : index, %ub: index,
%step: index, %d : memref<?xindex>,
%some_val: index) {
%c0 = arith.constant 0 : index
@@ -260,7 +260,7 @@ func @test_affine_op_rewrite(%lb : index, %ub: index,
// CHECK-NO-SKIP: }
// CHECK-NO-SKIP: }
#map = affine_map<(d0, d1)[s0] -> (s0, d0 - d1)>
-func @nested_loops(%lb0: index, %lb1 : index, %ub0: index, %ub1: index,
+func.func @nested_loops(%lb0: index, %lb1 : index, %ub0: index, %ub1: index,
%step: index) -> i32 {
%c0 = arith.constant 0 : i32
%r0 = scf.for %iv0 = %lb0 to %ub0 step %step iter_args(%arg0 = %c0) -> i32 {
diff --git a/mlir/test/Dialect/SCF/for-loop-specialization.mlir b/mlir/test/Dialect/SCF/for-loop-specialization.mlir
index d0e1c286b6381..40e8d7dfe4571 100644
--- a/mlir/test/Dialect/SCF/for-loop-specialization.mlir
+++ b/mlir/test/Dialect/SCF/for-loop-specialization.mlir
@@ -3,7 +3,7 @@
#map0 = affine_map<()[s0, s1] -> (1024, s0 - s1)>
#map1 = affine_map<()[s0, s1] -> (64, s0 - s1)>
-func @for(%outer: index, %A: memref<?xf32>, %B: memref<?xf32>,
+func.func @for(%outer: index, %A: memref<?xf32>, %B: memref<?xf32>,
%C: memref<?xf32>, %result: memref<?xf32>) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
diff --git a/mlir/test/Dialect/SCF/for-loop-to-while-loop.mlir b/mlir/test/Dialect/SCF/for-loop-to-while-loop.mlir
index 4187ae591a5f4..40a12ac5afb17 100644
--- a/mlir/test/Dialect/SCF/for-loop-to-while-loop.mlir
+++ b/mlir/test/Dialect/SCF/for-loop-to-while-loop.mlir
@@ -19,7 +19,7 @@
// CHECK: }
// CHECK: return
// CHECK: }
-func @single_loop(%arg0: memref<?xi32>, %arg1: index, %arg2: i32) {
+func.func @single_loop(%arg0: memref<?xi32>, %arg1: index, %arg2: i32) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
scf.for %i = %c0 to %arg1 step %c1 {
@@ -58,7 +58,7 @@ func @single_loop(%arg0: memref<?xi32>, %arg1: index, %arg2: i32) {
// CHECK: }
// CHECK: return
// CHECK: }
-func @nested_loop(%arg0: memref<?xi32>, %arg1: index, %arg2: i32) {
+func.func @nested_loop(%arg0: memref<?xi32>, %arg1: index, %arg2: i32) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
scf.for %i = %c0 to %arg1 step %c1 {
@@ -88,7 +88,7 @@ func @nested_loop(%arg0: memref<?xi32>, %arg1: index, %arg2: i32) {
// CHECK: }
// CHECK: return %[[VAL_14:.*]]#2 : f32
// CHECK: }
-func @for_iter_args(%arg0 : index, %arg1: index, %arg2: index) -> f32 {
+func.func @for_iter_args(%arg0 : index, %arg1: index, %arg2: index) -> f32 {
%s0 = arith.constant 0.0 : f32
%result:2 = scf.for %i0 = %arg0 to %arg1 step %arg2 iter_args(%iarg0 = %s0, %iarg1 = %s0) -> (f32, f32) {
%sn = arith.addf %iarg0, %iarg1 : f32
@@ -125,7 +125,7 @@ func @for_iter_args(%arg0 : index, %arg1: index, %arg2: index) -> f32 {
// CHECK: }
// CHECK: return %[[VAL_17:.*]]#1 : i32
// CHECK: }
-func @exec_region_multiple_yields(%arg0: i32, %arg1: index, %arg2: i32) -> i32 {
+func.func @exec_region_multiple_yields(%arg0: i32, %arg1: index, %arg2: i32) -> i32 {
%c1_i32 = arith.constant 1 : i32
%c2_i32 = arith.constant 2 : i32
%c0 = arith.constant 0 : index
diff --git a/mlir/test/Dialect/SCF/invalid.mlir b/mlir/test/Dialect/SCF/invalid.mlir
index 941a2a2994406..6e887047e26cc 100644
--- a/mlir/test/Dialect/SCF/invalid.mlir
+++ b/mlir/test/Dialect/SCF/invalid.mlir
@@ -1,6 +1,6 @@
// RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -verify-diagnostics
-func @loop_for_lb(%arg0: f32, %arg1: index) {
+func.func @loop_for_lb(%arg0: f32, %arg1: index) {
// expected-error@+1 {{operand #0 must be index}}
"scf.for"(%arg0, %arg1, %arg1) ({}) : (f32, index, index) -> ()
return
@@ -8,7 +8,7 @@ func @loop_for_lb(%arg0: f32, %arg1: index) {
// -----
-func @loop_for_ub(%arg0: f32, %arg1: index) {
+func.func @loop_for_ub(%arg0: f32, %arg1: index) {
// expected-error@+1 {{operand #1 must be index}}
"scf.for"(%arg1, %arg0, %arg1) ({}) : (index, f32, index) -> ()
return
@@ -16,7 +16,7 @@ func @loop_for_ub(%arg0: f32, %arg1: index) {
// -----
-func @loop_for_step(%arg0: f32, %arg1: index) {
+func.func @loop_for_step(%arg0: f32, %arg1: index) {
// expected-error@+1 {{operand #2 must be index}}
"scf.for"(%arg1, %arg1, %arg0) ({}) : (index, index, f32) -> ()
return
@@ -24,7 +24,7 @@ func @loop_for_step(%arg0: f32, %arg1: index) {
// -----
-func @loop_for_step_positive(%arg0: index) {
+func.func @loop_for_step_positive(%arg0: index) {
// expected-error@+2 {{constant step operand must be positive}}
%c0 = arith.constant 0 : index
"scf.for"(%arg0, %arg0, %c0) ({
@@ -36,7 +36,7 @@ func @loop_for_step_positive(%arg0: index) {
// -----
-func @loop_for_one_region(%arg0: index) {
+func.func @loop_for_one_region(%arg0: index) {
// expected-error@+1 {{requires one region}}
"scf.for"(%arg0, %arg0, %arg0) (
{scf.yield},
@@ -47,7 +47,7 @@ func @loop_for_one_region(%arg0: index) {
// -----
-func @loop_for_single_block(%arg0: index) {
+func.func @loop_for_single_block(%arg0: index) {
// expected-error@+1 {{expects region #0 to have 0 or 1 blocks}}
"scf.for"(%arg0, %arg0, %arg0) (
{
@@ -62,7 +62,7 @@ func @loop_for_single_block(%arg0: index) {
// -----
-func @loop_for_single_index_argument(%arg0: index) {
+func.func @loop_for_single_index_argument(%arg0: index) {
// expected-error@+1 {{op expected body first argument to be an index argument for the induction variable}}
"scf.for"(%arg0, %arg0, %arg0) (
{
@@ -75,7 +75,7 @@ func @loop_for_single_index_argument(%arg0: index) {
// -----
-func @loop_if_not_i1(%arg0: index) {
+func.func @loop_if_not_i1(%arg0: index) {
// expected-error@+1 {{operand #0 must be 1-bit signless integer}}
"scf.if"(%arg0) ({}, {}) : (index) -> ()
return
@@ -83,7 +83,7 @@ func @loop_if_not_i1(%arg0: index) {
// -----
-func @loop_if_more_than_2_regions(%arg0: i1) {
+func.func @loop_if_more_than_2_regions(%arg0: i1) {
// expected-error@+1 {{expected 2 regions}}
"scf.if"(%arg0) ({}, {}, {}): (i1) -> ()
return
@@ -91,7 +91,7 @@ func @loop_if_more_than_2_regions(%arg0: i1) {
// -----
-func @loop_if_not_one_block_per_region(%arg0: i1) {
+func.func @loop_if_not_one_block_per_region(%arg0: i1) {
// expected-error@+1 {{expects region #0 to have 0 or 1 blocks}}
"scf.if"(%arg0) ({
^bb0:
@@ -104,7 +104,7 @@ func @loop_if_not_one_block_per_region(%arg0: i1) {
// -----
-func @loop_if_illegal_block_argument(%arg0: i1) {
+func.func @loop_if_illegal_block_argument(%arg0: i1) {
// expected-error@+1 {{region #0 should have no arguments}}
"scf.if"(%arg0) ({
^bb0(%0 : index):
@@ -115,7 +115,7 @@ func @loop_if_illegal_block_argument(%arg0: i1) {
// -----
-func @parallel_arguments_different_tuple_size(
+func.func @parallel_arguments_different_tuple_size(
%arg0: index, %arg1: index, %arg2: index) {
// expected-error@+1 {{custom op 'scf.parallel' expected 1 operands}}
scf.parallel (%i0) = (%arg0) to (%arg1, %arg2) step () {
@@ -125,7 +125,7 @@ func @parallel_arguments_different_tuple_size(
// -----
-func @parallel_body_arguments_wrong_type(
+func.func @parallel_body_arguments_wrong_type(
%arg0: index, %arg1: index, %arg2: index) {
// expected-error@+1 {{'scf.parallel' op expects arguments for the induction variable to be of index type}}
"scf.parallel"(%arg0, %arg1, %arg2) ({
@@ -137,7 +137,7 @@ func @parallel_body_arguments_wrong_type(
// -----
-func @parallel_body_wrong_number_of_arguments(
+func.func @parallel_body_wrong_number_of_arguments(
%arg0: index, %arg1: index, %arg2: index) {
// expected-error@+1 {{'scf.parallel' op expects the same number of induction variables: 2 as bound and step values: 1}}
"scf.parallel"(%arg0, %arg1, %arg2) ({
@@ -149,7 +149,7 @@ func @parallel_body_wrong_number_of_arguments(
// -----
-func @parallel_no_tuple_elements() {
+func.func @parallel_no_tuple_elements() {
// expected-error@+1 {{'scf.parallel' op needs at least one tuple element for lowerBound, upperBound and step}}
scf.parallel () = () to () step () {
}
@@ -158,7 +158,7 @@ func @parallel_no_tuple_elements() {
// -----
-func @parallel_step_not_positive(
+func.func @parallel_step_not_positive(
%arg0: index, %arg1: index, %arg2: index, %arg3: index) {
// expected-error@+3 {{constant step operand must be positive}}
%c0 = arith.constant 1 : index
@@ -170,7 +170,7 @@ func @parallel_step_not_positive(
// -----
-func @parallel_fewer_results_than_reduces(
+func.func @parallel_fewer_results_than_reduces(
%arg0 : index, %arg1: index, %arg2: index) {
// expected-error@+1 {{expects number of results: 0 to be the same as number of reductions: 1}}
scf.parallel (%i0) = (%arg0) to (%arg1) step (%arg2) {
@@ -185,7 +185,7 @@ func @parallel_fewer_results_than_reduces(
// -----
-func @parallel_more_results_than_reduces(
+func.func @parallel_more_results_than_reduces(
%arg0 : index, %arg1 : index, %arg2 : index) {
// expected-error@+2 {{expects number of results: 1 to be the same as number of reductions: 0}}
%zero = arith.constant 1.0 : f32
@@ -197,7 +197,7 @@ func @parallel_more_results_than_reduces(
// -----
-func @parallel_more_results_than_initial_values(
+func.func @parallel_more_results_than_initial_values(
%arg0 : index, %arg1: index, %arg2: index) {
// expected-error@+1 {{expects number of results: 1 to be the same as number of initial values: 0}}
%res = scf.parallel (%i0) = (%arg0) to (%arg1) step (%arg2) -> f32 {
@@ -210,7 +210,7 @@ func @parallel_more_results_than_initial_values(
// -----
-func @parallel_different_types_of_results_and_reduces(
+func.func @parallel_different_types_of_results_and_reduces(
%arg0 : index, %arg1: index, %arg2: index) {
%zero = arith.constant 0.0 : f32
%res = scf.parallel (%i0) = (%arg0) to (%arg1)
@@ -226,7 +226,7 @@ func @parallel_different_types_of_results_and_reduces(
// -----
-func @top_level_reduce(%arg0 : f32) {
+func.func @top_level_reduce(%arg0 : f32) {
// expected-error@+1 {{expects parent op 'scf.parallel'}}
scf.reduce(%arg0) : f32 {
^bb0(%lhs : f32, %rhs : f32):
@@ -237,7 +237,7 @@ func @top_level_reduce(%arg0 : f32) {
// -----
-func @reduce_empty_block(%arg0 : index, %arg1 : f32) {
+func.func @reduce_empty_block(%arg0 : index, %arg1 : f32) {
%zero = arith.constant 0.0 : f32
%res = scf.parallel (%i0) = (%arg0) to (%arg0)
step (%arg0) init (%zero) -> f32 {
@@ -251,7 +251,7 @@ func @reduce_empty_block(%arg0 : index, %arg1 : f32) {
// -----
-func @reduce_too_many_args(%arg0 : index, %arg1 : f32) {
+func.func @reduce_too_many_args(%arg0 : index, %arg1 : f32) {
%zero = arith.constant 0.0 : f32
%res = scf.parallel (%i0) = (%arg0) to (%arg0)
step (%arg0) init (%zero) -> f32 {
@@ -266,7 +266,7 @@ func @reduce_too_many_args(%arg0 : index, %arg1 : f32) {
// -----
-func @reduce_wrong_args(%arg0 : index, %arg1 : f32) {
+func.func @reduce_wrong_args(%arg0 : index, %arg1 : f32) {
%zero = arith.constant 0.0 : f32
%res = scf.parallel (%i0) = (%arg0) to (%arg0)
step (%arg0) init (%zero) -> f32 {
@@ -282,7 +282,7 @@ func @reduce_wrong_args(%arg0 : index, %arg1 : f32) {
// -----
-func @reduce_wrong_terminator(%arg0 : index, %arg1 : f32) {
+func.func @reduce_wrong_terminator(%arg0 : index, %arg1 : f32) {
%zero = arith.constant 0.0 : f32
%res = scf.parallel (%i0) = (%arg0) to (%arg0)
step (%arg0) init (%zero) -> f32 {
@@ -297,7 +297,7 @@ func @reduce_wrong_terminator(%arg0 : index, %arg1 : f32) {
// -----
-func @reduceReturn_wrong_type(%arg0 : index, %arg1: f32) {
+func.func @reduceReturn_wrong_type(%arg0 : index, %arg1: f32) {
%zero = arith.constant 0.0 : f32
%res = scf.parallel (%i0) = (%arg0) to (%arg0)
step (%arg0) init (%zero) -> f32 {
@@ -313,7 +313,7 @@ func @reduceReturn_wrong_type(%arg0 : index, %arg1: f32) {
// -----
-func @reduceReturn_not_inside_reduce(%arg0 : f32) {
+func.func @reduceReturn_not_inside_reduce(%arg0 : f32) {
"foo.region"() ({
// expected-error@+1 {{expects parent op 'scf.reduce'}}
scf.reduce.return %arg0 : f32
@@ -323,7 +323,7 @@ func @reduceReturn_not_inside_reduce(%arg0 : f32) {
// -----
-func @std_if_incorrect_yield(%arg0: i1, %arg1: f32)
+func.func @std_if_incorrect_yield(%arg0: i1, %arg1: f32)
{
// expected-error@+1 {{region control flow edge from Region #0 to parent results: source has 1 operands, but target successor needs 2}}
%x, %y = scf.if %arg0 -> (f32, f32) {
@@ -338,7 +338,7 @@ func @std_if_incorrect_yield(%arg0: i1, %arg1: f32)
// -----
-func @std_if_missing_else(%arg0: i1, %arg1: f32)
+func.func @std_if_missing_else(%arg0: i1, %arg1: f32)
{
// expected-error@+1 {{must have an else block if defining values}}
%x = scf.if %arg0 -> (f32) {
@@ -350,7 +350,7 @@ func @std_if_missing_else(%arg0: i1, %arg1: f32)
// -----
-func @std_for_operands_mismatch(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @std_for_operands_mismatch(%arg0 : index, %arg1 : index, %arg2 : index) {
%s0 = arith.constant 0.0 : f32
%t0 = arith.constant 1 : i32
// expected-error@+1 {{mismatch in number of loop-carried values and defined values}}
@@ -365,7 +365,7 @@ func @std_for_operands_mismatch(%arg0 : index, %arg1 : index, %arg2 : index) {
// -----
-func @std_for_operands_mismatch_2(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @std_for_operands_mismatch_2(%arg0 : index, %arg1 : index, %arg2 : index) {
%s0 = arith.constant 0.0 : f32
%t0 = arith.constant 1 : i32
%u0 = arith.constant 1.0 : f32
@@ -382,7 +382,7 @@ func @std_for_operands_mismatch_2(%arg0 : index, %arg1 : index, %arg2 : index) {
// -----
-func @std_for_operands_mismatch_3(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @std_for_operands_mismatch_3(%arg0 : index, %arg1 : index, %arg2 : index) {
// expected-note@+1 {{prior use here}}
%s0 = arith.constant 0.0 : f32
%t0 = arith.constant 1.0 : f32
@@ -398,7 +398,7 @@ func @std_for_operands_mismatch_3(%arg0 : index, %arg1 : index, %arg2 : index) {
// -----
-func @std_for_operands_mismatch_4(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @std_for_operands_mismatch_4(%arg0 : index, %arg1 : index, %arg2 : index) {
%s0 = arith.constant 0.0 : f32
%t0 = arith.constant 1.0 : f32
// expected-error @+1 {{along control flow edge from Region #0 to Region #0: source type #1 'i32' should match input type #1 'f32'}}
@@ -414,7 +414,7 @@ func @std_for_operands_mismatch_4(%arg0 : index, %arg1 : index, %arg2 : index) {
// -----
-func @parallel_invalid_yield(
+func.func @parallel_invalid_yield(
%arg0: index, %arg1: index, %arg2: index) {
scf.parallel (%i0) = (%arg0) to (%arg1) step (%arg2) {
%c0 = arith.constant 1.0 : f32
@@ -426,7 +426,7 @@ func @parallel_invalid_yield(
// -----
-func @yield_invalid_parent_op() {
+func.func @yield_invalid_parent_op() {
"my.op"() ({
// expected-error@+1 {{'scf.yield' op expects parent op to be one of 'scf.execute_region, scf.for, scf.if, scf.parallel, scf.while'}}
scf.yield
@@ -436,7 +436,7 @@ func @yield_invalid_parent_op() {
// -----
-func @while_parser_type_mismatch() {
+func.func @while_parser_type_mismatch() {
%true = arith.constant true
// expected-error@+1 {{expected as many input types as operands (expected 0 got 1)}}
scf.while : (i32) -> () {
@@ -448,7 +448,7 @@ func @while_parser_type_mismatch() {
// -----
-func @while_bad_terminator() {
+func.func @while_bad_terminator() {
// expected-error@+1 {{expects the 'before' region to terminate with 'scf.condition'}}
scf.while : () -> () {
// expected-note@+1 {{terminator here}}
@@ -460,7 +460,7 @@ func @while_bad_terminator() {
// -----
-func @while_cross_region_type_mismatch() {
+func.func @while_cross_region_type_mismatch() {
%true = arith.constant true
// expected-error@+1 {{'scf.while' op region control flow edge from Region #0 to Region #1: source has 0 operands, but target successor needs 1}}
scf.while : () -> () {
@@ -473,7 +473,7 @@ func @while_cross_region_type_mismatch() {
// -----
-func @while_cross_region_type_mismatch() {
+func.func @while_cross_region_type_mismatch() {
%true = arith.constant true
// expected-error@+1 {{'scf.while' op along control flow edge from Region #0 to Region #1: source type #0 'i1' should match input type #0 'i32'}}
scf.while : () -> () {
@@ -486,7 +486,7 @@ func @while_cross_region_type_mismatch() {
// -----
-func @while_result_type_mismatch() {
+func.func @while_result_type_mismatch() {
%true = arith.constant true
// expected-error@+1 {{'scf.while' op region control flow edge from Region #0 to parent results: source has 1 operands, but target successor needs 0}}
scf.while : () -> () {
@@ -499,7 +499,7 @@ func @while_result_type_mismatch() {
// -----
-func @while_bad_terminator() {
+func.func @while_bad_terminator() {
%true = arith.constant true
// expected-error@+1 {{expects the 'after' region to terminate with 'scf.yield'}}
scf.while : () -> () {
@@ -512,7 +512,7 @@ func @while_bad_terminator() {
// -----
-func @execute_region() {
+func.func @execute_region() {
// expected-error @+1 {{region cannot have any arguments}}
"scf.execute_region"() ({
^bb0(%i : i32):
diff --git a/mlir/test/Dialect/SCF/loop-pipelining.mlir b/mlir/test/Dialect/SCF/loop-pipelining.mlir
index e2c740d1e56a2..d7e8827c46a3d 100644
--- a/mlir/test/Dialect/SCF/loop-pipelining.mlir
+++ b/mlir/test/Dialect/SCF/loop-pipelining.mlir
@@ -20,7 +20,7 @@
// Epilogue:
// CHECK-NEXT: %[[ADD1:.*]] = arith.addf %[[L1]], %{{.*}} : f32
// CHECK-NEXT: memref.store %[[ADD1]], %[[R]][%[[C3]]] : memref<?xf32>
-func @simple_pipeline(%A: memref<?xf32>, %result: memref<?xf32>) {
+func.func @simple_pipeline(%A: memref<?xf32>, %result: memref<?xf32>) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c4 = arith.constant 4 : index
@@ -59,7 +59,7 @@ func @simple_pipeline(%A: memref<?xf32>, %result: memref<?xf32>) {
// CHECK-NEXT: memref.store %[[ADD1]], %[[R]][%[[C6]]] : memref<?xf32>
// CHECK-NEXT: %[[ADD2:.*]] = arith.addf %[[L2]]#1, %{{.*}} : f32
// CHECK-NEXT: memref.store %[[ADD2]], %[[R]][%[[C9]]] : memref<?xf32>
-func @simple_pipeline_step(%A: memref<?xf32>, %result: memref<?xf32>) {
+func.func @simple_pipeline_step(%A: memref<?xf32>, %result: memref<?xf32>) {
%c0 = arith.constant 0 : index
%c3 = arith.constant 3 : index
%c11 = arith.constant 11 : index
@@ -114,7 +114,7 @@ func @simple_pipeline_step(%A: memref<?xf32>, %result: memref<?xf32>) {
// ANNOTATE: arith.addf {{.*}} {__test_pipelining_iteration = 0 : i32, __test_pipelining_part = "epilogue"}
// ANNOTATE: memref.store {{.*}} {__test_pipelining_iteration = 1 : i32, __test_pipelining_part = "epilogue"}
-func @three_stage(%A: memref<?xf32>, %result: memref<?xf32>) {
+func.func @three_stage(%A: memref<?xf32>, %result: memref<?xf32>) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c4 = arith.constant 4 : index
@@ -164,7 +164,7 @@ func @three_stage(%A: memref<?xf32>, %result: memref<?xf32>) {
// CHECK-NEXT: memref.store %[[ADD3]], %[[R]][%[[C8]]] : memref<?xf32>
// CHECK-NEXT: %[[ADD4:.*]] = arith.addf %[[LR]]#3, %{{.*}} : f32
// CHECK-NEXT: memref.store %[[ADD4]], %[[R]][%[[C9]]] : memref<?xf32>
-func @long_liverange(%A: memref<?xf32>, %result: memref<?xf32>) {
+func.func @long_liverange(%A: memref<?xf32>, %result: memref<?xf32>) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c10 = arith.constant 10 : index
@@ -214,7 +214,7 @@ func @long_liverange(%A: memref<?xf32>, %result: memref<?xf32>) {
// CHECK-NEXT: %[[MUL3:.*]] = arith.mulf %[[ADD3]], %[[LR]]#1 : f32
// CHECK-NEXT: memref.store %[[MUL2]], %[[R]][%[[C8]]] : memref<?xf32>
// CHECK-NEXT: memref.store %[[MUL3]], %[[R]][%[[C9]]] : memref<?xf32>
-func @multiple_uses(%A: memref<?xf32>, %result: memref<?xf32>) {
+func.func @multiple_uses(%A: memref<?xf32>, %result: memref<?xf32>) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c10 = arith.constant 10 : index
@@ -250,7 +250,7 @@ func @multiple_uses(%A: memref<?xf32>, %result: memref<?xf32>) {
// Epilogue:
// CHECK-NEXT: %[[ADD1:.*]] = arith.addf %[[LR]]#1, %[[LR]]#0 : f32
// CHECK-NEXT: memref.store %[[ADD1]], %[[R]][%[[C0]]] : memref<?xf32>
-func @loop_carried(%A: memref<?xf32>, %result: memref<?xf32>) {
+func.func @loop_carried(%A: memref<?xf32>, %result: memref<?xf32>) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c4 = arith.constant 4 : index
@@ -288,7 +288,7 @@ func @loop_carried(%A: memref<?xf32>, %result: memref<?xf32>) {
// Epilogue:
// CHECK-NEXT: %[[ADD2:.*]] = arith.addf %[[R]]#2, %[[R]]#1 : f32
// CHECK-NEXT: return %[[ADD2]] : f32
-func @backedge_different_stage(%A: memref<?xf32>) -> f32 {
+func.func @backedge_different_stage(%A: memref<?xf32>) -> f32 {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c4 = arith.constant 4 : index
@@ -324,7 +324,7 @@ func @backedge_different_stage(%A: memref<?xf32>) -> f32 {
// Epilogue:
// CHECK-NEXT: %[[ADD1:.*]] = arith.addf %[[R]]#1, %[[R]]#0 : f32
// CHECK-NEXT: return %[[ADD1]] : f32
-func @backedge_same_stage(%A: memref<?xf32>) -> f32 {
+func.func @backedge_same_stage(%A: memref<?xf32>) -> f32 {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c4 = arith.constant 4 : index
diff --git a/mlir/test/Dialect/SCF/loop-range.mlir b/mlir/test/Dialect/SCF/loop-range.mlir
index ab344d04ffae3..3494621fb92bc 100644
--- a/mlir/test/Dialect/SCF/loop-range.mlir
+++ b/mlir/test/Dialect/SCF/loop-range.mlir
@@ -1,6 +1,6 @@
// RUN: mlir-opt %s -pass-pipeline='func.func(scf-for-loop-range-folding)' -split-input-file | FileCheck %s
-func @fold_one_loop(%arg0: memref<?xi32>, %arg1: index, %arg2: index) {
+func.func @fold_one_loop(%arg0: memref<?xi32>, %arg1: index, %arg2: index) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c4 = arith.constant 4 : index
@@ -28,7 +28,7 @@ func @fold_one_loop(%arg0: memref<?xi32>, %arg1: index, %arg2: index) {
// CHECK: %[[I5:.*]] = arith.muli %[[I4]], %[[I4]] : i32
// CHECK: memref.store %[[I5]], %[[ARG0]]{{\[}}%[[I]]
-func @fold_one_loop2(%arg0: memref<?xi32>, %arg1: index, %arg2: index) {
+func.func @fold_one_loop2(%arg0: memref<?xi32>, %arg1: index, %arg2: index) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c4 = arith.constant 4 : index
@@ -61,7 +61,7 @@ func @fold_one_loop2(%arg0: memref<?xi32>, %arg1: index, %arg2: index) {
// CHECK: %[[I5:.*]] = arith.muli %[[I4]], %[[I4]] : i32
// CHECK: memref.store %[[I5]], %[[ARG0]]{{\[}}%[[I]]
-func @fold_two_loops(%arg0: memref<?xi32>, %arg1: index, %arg2: index) {
+func.func @fold_two_loops(%arg0: memref<?xi32>, %arg1: index, %arg2: index) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c4 = arith.constant 4 : index
@@ -98,7 +98,7 @@ func @fold_two_loops(%arg0: memref<?xi32>, %arg1: index, %arg2: index) {
// If an instruction's operands are not defined outside the loop, we cannot
// perform the optimization, as is the case with the arith.muli below. (If
// paired with loop invariant code motion we can continue.)
-func @fold_only_first_add(%arg0: memref<?xi32>, %arg1: index, %arg2: index) {
+func.func @fold_only_first_add(%arg0: memref<?xi32>, %arg1: index, %arg2: index) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c4 = arith.constant 4 : index
diff --git a/mlir/test/Dialect/SCF/loop-unroll.mlir b/mlir/test/Dialect/SCF/loop-unroll.mlir
index 533ad9ecdac84..6a832578d581b 100644
--- a/mlir/test/Dialect/SCF/loop-unroll.mlir
+++ b/mlir/test/Dialect/SCF/loop-unroll.mlir
@@ -5,7 +5,7 @@
// RUN: mlir-opt %s -test-loop-unrolling='unroll-factor=2 annotate=true' | FileCheck %s --check-prefix UNROLL-BY-2-ANNOTATE
// RUN: mlir-opt %s --affine-loop-unroll='unroll-factor=6 unroll-up-to-factor=true' | FileCheck %s --check-prefix UNROLL-UP-TO
-func @dynamic_loop_unroll(%arg0 : index, %arg1 : index, %arg2 : index,
+func.func @dynamic_loop_unroll(%arg0 : index, %arg1 : index, %arg2 : index,
%arg3: memref<?xf32>) {
%0 = arith.constant 7.0 : f32
scf.for %i0 = %arg0 to %arg1 step %arg2 {
@@ -83,7 +83,7 @@ func @dynamic_loop_unroll(%arg0 : index, %arg1 : index, %arg2 : index,
// UNROLL-BY-3-NEXT: }
// UNROLL-BY-3-NEXT: return
-func @dynamic_loop_unroll_outer_by_2(
+func.func @dynamic_loop_unroll_outer_by_2(
%arg0 : index, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : index,
%arg5 : index, %arg6: memref<?xf32>) {
%0 = arith.constant 7.0 : f32
@@ -118,7 +118,7 @@ func @dynamic_loop_unroll_outer_by_2(
// UNROLL-OUTER-BY-2-NEXT: }
// UNROLL-OUTER-BY-2-NEXT: return
-func @dynamic_loop_unroll_inner_by_2(
+func.func @dynamic_loop_unroll_inner_by_2(
%arg0 : index, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : index,
%arg5 : index, %arg6: memref<?xf32>) {
%0 = arith.constant 7.0 : f32
@@ -154,7 +154,7 @@ func @dynamic_loop_unroll_inner_by_2(
// Test that no epilogue clean-up loop is generated because the trip count is
// a multiple of the unroll factor.
-func @static_loop_unroll_by_2(%arg0 : memref<?xf32>) {
+func.func @static_loop_unroll_by_2(%arg0 : memref<?xf32>) {
%0 = arith.constant 7.0 : f32
%lb = arith.constant 0 : index
%ub = arith.constant 20 : index
@@ -186,7 +186,7 @@ func @static_loop_unroll_by_2(%arg0 : memref<?xf32>) {
// Test that epilogue clean up loop is generated (trip count is not
// a multiple of unroll factor).
-func @static_loop_unroll_by_3(%arg0 : memref<?xf32>) {
+func.func @static_loop_unroll_by_3(%arg0 : memref<?xf32>) {
%0 = arith.constant 7.0 : f32
%lb = arith.constant 0 : index
%ub = arith.constant 20 : index
@@ -223,7 +223,7 @@ func @static_loop_unroll_by_3(%arg0 : memref<?xf32>) {
// Test that the single iteration epilogue loop body is promoted to the loops
// containing block.
-func @static_loop_unroll_by_3_promote_epilogue(%arg0 : memref<?xf32>) {
+func.func @static_loop_unroll_by_3_promote_epilogue(%arg0 : memref<?xf32>) {
%0 = arith.constant 7.0 : f32
%lb = arith.constant 0 : index
%ub = arith.constant 10 : index
@@ -256,7 +256,7 @@ func @static_loop_unroll_by_3_promote_epilogue(%arg0 : memref<?xf32>) {
// UNROLL-BY-3-NEXT: return
// Test unroll-up-to functionality.
-func @static_loop_unroll_up_to_factor(%arg0 : memref<?xf32>) {
+func.func @static_loop_unroll_up_to_factor(%arg0 : memref<?xf32>) {
%0 = arith.constant 7.0 : f32
%lb = arith.constant 0 : index
%ub = arith.constant 2 : index
diff --git a/mlir/test/Dialect/SCF/ops.mlir b/mlir/test/Dialect/SCF/ops.mlir
index a3c3391c852ba..b732b1ede38de 100644
--- a/mlir/test/Dialect/SCF/ops.mlir
+++ b/mlir/test/Dialect/SCF/ops.mlir
@@ -4,7 +4,7 @@
// Verify the generic form can be parsed.
// RUN: mlir-opt -mlir-print-op-generic %s | mlir-opt | FileCheck %s
-func @std_for(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @std_for(%arg0 : index, %arg1 : index, %arg2 : index) {
scf.for %i0 = %arg0 to %arg1 step %arg2 {
scf.for %i1 = %arg0 to %arg1 step %arg2 {
%min_cmp = arith.cmpi slt, %i0, %i1 : index
@@ -26,7 +26,7 @@ func @std_for(%arg0 : index, %arg1 : index, %arg2 : index) {
// CHECK-NEXT: %{{.*}} = arith.select %{{.*}}, %{{.*}}, %{{.*}} : index
// CHECK-NEXT: scf.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} {
-func @std_if(%arg0: i1, %arg1: f32) {
+func.func @std_if(%arg0: i1, %arg1: f32) {
scf.if %arg0 {
%0 = arith.addf %arg1, %arg1 : f32
}
@@ -36,7 +36,7 @@ func @std_if(%arg0: i1, %arg1: f32) {
// CHECK-NEXT: scf.if %{{.*}} {
// CHECK-NEXT: %{{.*}} = arith.addf %{{.*}}, %{{.*}} : f32
-func @std_if_else(%arg0: i1, %arg1: f32) {
+func.func @std_if_else(%arg0: i1, %arg1: f32) {
scf.if %arg0 {
%0 = arith.addf %arg1, %arg1 : f32
} else {
@@ -50,7 +50,7 @@ func @std_if_else(%arg0: i1, %arg1: f32) {
// CHECK-NEXT: } else {
// CHECK-NEXT: %{{.*}} = arith.addf %{{.*}}, %{{.*}} : f32
-func @std_parallel_loop(%arg0 : index, %arg1 : index, %arg2 : index,
+func.func @std_parallel_loop(%arg0 : index, %arg1 : index, %arg2 : index,
%arg3 : index, %arg4 : index) {
%step = arith.constant 1 : index
scf.parallel (%i0, %i1) = (%arg0, %arg1) to (%arg2, %arg3)
@@ -113,7 +113,7 @@ func @std_parallel_loop(%arg0 : index, %arg1 : index, %arg2 : index,
// CHECK-NEXT: }
// CHECK-NEXT: scf.yield
-func @parallel_explicit_yield(
+func.func @parallel_explicit_yield(
%arg0: index, %arg1: index, %arg2: index) {
scf.parallel (%i0) = (%arg0) to (%arg1) step (%arg2) {
scf.yield
@@ -131,7 +131,7 @@ func @parallel_explicit_yield(
// CHECK-NEXT: return
// CHECK-NEXT: }
-func @std_if_yield(%arg0: i1, %arg1: f32)
+func.func @std_if_yield(%arg0: i1, %arg1: f32)
{
%x, %y = scf.if %arg0 -> (f32, f32) {
%0 = arith.addf %arg1, %arg1 : f32
@@ -157,7 +157,7 @@ func @std_if_yield(%arg0: i1, %arg1: f32)
// CHECK-NEXT: scf.yield %[[T3]], %[[T4]] : f32, f32
// CHECK-NEXT: }
-func @std_for_yield(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @std_for_yield(%arg0 : index, %arg1 : index, %arg2 : index) {
%s0 = arith.constant 0.0 : f32
%result = scf.for %i0 = %arg0 to %arg1 step %arg2 iter_args(%si = %s0) -> (f32) {
%sn = arith.addf %si, %si : f32
@@ -177,7 +177,7 @@ func @std_for_yield(%arg0 : index, %arg1 : index, %arg2 : index) {
// CHECK-NEXT: }
-func @std_for_yield_multi(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @std_for_yield_multi(%arg0 : index, %arg1 : index, %arg2 : index) {
%s0 = arith.constant 0.0 : f32
%t0 = arith.constant 1 : i32
%u0 = arith.constant 1.0 : f32
@@ -204,7 +204,7 @@ func @std_for_yield_multi(%arg0 : index, %arg1 : index, %arg2 : index) {
// CHECK-NEXT: scf.yield %[[NEXT1]], %[[NEXT2]], %[[NEXT3]] : f32, i32, f32
-func @conditional_reduce(%buffer: memref<1024xf32>, %lb: index, %ub: index, %step: index) -> (f32) {
+func.func @conditional_reduce(%buffer: memref<1024xf32>, %lb: index, %ub: index, %step: index) -> (f32) {
%sum_0 = arith.constant 0.0 : f32
%c0 = arith.constant 0.0 : f32
%sum = scf.for %iv = %lb to %ub step %step iter_args(%sum_iter = %sum_0) -> (f32) {
@@ -242,7 +242,7 @@ func @conditional_reduce(%buffer: memref<1024xf32>, %lb: index, %ub: index, %ste
// CHECK-NEXT: return %[[RESULT]]
// CHECK-LABEL: @while
-func @while() {
+func.func @while() {
%0 = "test.get_some_value"() : () -> i32
%1 = "test.get_some_value"() : () -> f32
@@ -265,7 +265,7 @@ func @while() {
}
// CHECK-LABEL: @infinite_while
-func @infinite_while() {
+func.func @infinite_while() {
%true = arith.constant true
// CHECK: scf.while : () -> () {
@@ -281,7 +281,7 @@ func @infinite_while() {
}
// CHECK-LABEL: func @execute_region
-func @execute_region() -> i64 {
+func.func @execute_region() -> i64 {
// CHECK: scf.execute_region -> i64 {
// CHECK-NEXT: arith.constant
// CHECK-NEXT: scf.yield
diff --git a/mlir/test/Dialect/SCF/parallel-loop-fusion.mlir b/mlir/test/Dialect/SCF/parallel-loop-fusion.mlir
index e082b85750056..eb6ad8a809fdf 100644
--- a/mlir/test/Dialect/SCF/parallel-loop-fusion.mlir
+++ b/mlir/test/Dialect/SCF/parallel-loop-fusion.mlir
@@ -1,6 +1,6 @@
// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='func.func(scf-parallel-loop-fusion)' -split-input-file | FileCheck %s
-func @fuse_empty_loops() {
+func.func @fuse_empty_loops() {
%c2 = arith.constant 2 : index
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
@@ -24,7 +24,7 @@ func @fuse_empty_loops() {
// -----
-func @fuse_two(%A: memref<2x2xf32>, %B: memref<2x2xf32>,
+func.func @fuse_two(%A: memref<2x2xf32>, %B: memref<2x2xf32>,
%C: memref<2x2xf32>, %result: memref<2x2xf32>) {
%c2 = arith.constant 2 : index
%c0 = arith.constant 0 : index
@@ -70,7 +70,7 @@ func @fuse_two(%A: memref<2x2xf32>, %B: memref<2x2xf32>,
// -----
-func @fuse_three(%lhs: memref<100x10xf32>, %rhs: memref<100xf32>,
+func.func @fuse_three(%lhs: memref<100x10xf32>, %rhs: memref<100xf32>,
%result: memref<100x10xf32>) {
%c100 = arith.constant 100 : index
%c10 = arith.constant 10 : index
@@ -127,7 +127,7 @@ func @fuse_three(%lhs: memref<100x10xf32>, %rhs: memref<100xf32>,
// -----
-func @do_not_fuse_nested_ploop1() {
+func.func @do_not_fuse_nested_ploop1() {
%c2 = arith.constant 2 : index
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
@@ -149,7 +149,7 @@ func @do_not_fuse_nested_ploop1() {
// -----
-func @do_not_fuse_nested_ploop2() {
+func.func @do_not_fuse_nested_ploop2() {
%c2 = arith.constant 2 : index
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
@@ -171,7 +171,7 @@ func @do_not_fuse_nested_ploop2() {
// -----
-func @do_not_fuse_loops_unmatching_num_loops() {
+func.func @do_not_fuse_loops_unmatching_num_loops() {
%c2 = arith.constant 2 : index
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
@@ -189,7 +189,7 @@ func @do_not_fuse_loops_unmatching_num_loops() {
// -----
-func @do_not_fuse_loops_with_side_effecting_ops_in_between() {
+func.func @do_not_fuse_loops_with_side_effecting_ops_in_between() {
%c2 = arith.constant 2 : index
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
@@ -208,7 +208,7 @@ func @do_not_fuse_loops_with_side_effecting_ops_in_between() {
// -----
-func @do_not_fuse_loops_unmatching_iteration_space() {
+func.func @do_not_fuse_loops_unmatching_iteration_space() {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@@ -227,7 +227,7 @@ func @do_not_fuse_loops_unmatching_iteration_space() {
// -----
-func @do_not_fuse_unmatching_write_read_patterns(
+func.func @do_not_fuse_unmatching_write_read_patterns(
%A: memref<2x2xf32>, %B: memref<2x2xf32>,
%C: memref<2x2xf32>, %result: memref<2x2xf32>) {
%c2 = arith.constant 2 : index
@@ -258,7 +258,7 @@ func @do_not_fuse_unmatching_write_read_patterns(
// -----
-func @do_not_fuse_unmatching_read_write_patterns(
+func.func @do_not_fuse_unmatching_read_write_patterns(
%A: memref<2x2xf32>, %B: memref<2x2xf32>, %common_buf: memref<2x2xf32>) {
%c2 = arith.constant 2 : index
%c0 = arith.constant 0 : index
@@ -288,7 +288,7 @@ func @do_not_fuse_unmatching_read_write_patterns(
// -----
-func @do_not_fuse_loops_with_memref_defined_in_loop_bodies() {
+func.func @do_not_fuse_loops_with_memref_defined_in_loop_bodies() {
%c2 = arith.constant 2 : index
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
@@ -310,7 +310,7 @@ func @do_not_fuse_loops_with_memref_defined_in_loop_bodies() {
// -----
-func @nested_fuse(%A: memref<2x2xf32>, %B: memref<2x2xf32>,
+func.func @nested_fuse(%A: memref<2x2xf32>, %B: memref<2x2xf32>,
%C: memref<2x2xf32>, %result: memref<2x2xf32>) {
%c2 = arith.constant 2 : index
%c0 = arith.constant 0 : index
diff --git a/mlir/test/Dialect/SCF/parallel-loop-specialization.mlir b/mlir/test/Dialect/SCF/parallel-loop-specialization.mlir
index bbdcbd13947f0..0a472b59a9205 100644
--- a/mlir/test/Dialect/SCF/parallel-loop-specialization.mlir
+++ b/mlir/test/Dialect/SCF/parallel-loop-specialization.mlir
@@ -3,7 +3,7 @@
#map0 = affine_map<()[s0, s1] -> (1024, s0 - s1)>
#map1 = affine_map<()[s0, s1] -> (64, s0 - s1)>
-func @parallel_loop(%outer_i0: index, %outer_i1: index, %A: memref<?x?xf32>, %B: memref<?x?xf32>,
+func.func @parallel_loop(%outer_i0: index, %outer_i1: index, %A: memref<?x?xf32>, %B: memref<?x?xf32>,
%C: memref<?x?xf32>, %result: memref<?x?xf32>) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
diff --git a/mlir/test/Dialect/SCF/parallel-loop-tiling-inbound-check.mlir b/mlir/test/Dialect/SCF/parallel-loop-tiling-inbound-check.mlir
index 6c4cef3d4fbde..75cade55aef3d 100644
--- a/mlir/test/Dialect/SCF/parallel-loop-tiling-inbound-check.mlir
+++ b/mlir/test/Dialect/SCF/parallel-loop-tiling-inbound-check.mlir
@@ -1,6 +1,6 @@
// RUN: mlir-opt %s -pass-pipeline='func.func(scf-parallel-loop-tiling{parallel-loop-tile-sizes=1,4 no-min-max-bounds=true})' -split-input-file | FileCheck %s
-func @parallel_loop(%arg0 : index, %arg1 : index, %arg2 : index,
+func.func @parallel_loop(%arg0 : index, %arg1 : index, %arg2 : index,
%arg3 : index, %arg4 : index, %arg5 : index,
%A: memref<?x?xf32>, %B: memref<?x?xf32>,
%C: memref<?x?xf32>, %result: memref<?x?xf32>) {
@@ -45,7 +45,7 @@ func @parallel_loop(%arg0 : index, %arg1 : index, %arg2 : index,
// -----
-func @static_loop_with_step() {
+func.func @static_loop_with_step() {
%c0 = arith.constant 0 : index
%c3 = arith.constant 3 : index
%c22 = arith.constant 22 : index
@@ -76,7 +76,7 @@ func @static_loop_with_step() {
// -----
-func @tile_nested_innermost() {
+func.func @tile_nested_innermost() {
%c2 = arith.constant 2 : index
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
@@ -124,7 +124,7 @@ func @tile_nested_innermost() {
// -----
-func @tile_nested_in_non_ploop() {
+func.func @tile_nested_in_non_ploop() {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
diff --git a/mlir/test/Dialect/SCF/parallel-loop-tiling.mlir b/mlir/test/Dialect/SCF/parallel-loop-tiling.mlir
index e3e21cf68e355..af2b567fb7c3f 100644
--- a/mlir/test/Dialect/SCF/parallel-loop-tiling.mlir
+++ b/mlir/test/Dialect/SCF/parallel-loop-tiling.mlir
@@ -1,6 +1,6 @@
// RUN: mlir-opt %s -pass-pipeline='func.func(scf-parallel-loop-tiling{parallel-loop-tile-sizes=1,4})' -split-input-file | FileCheck %s
-func @parallel_loop(%arg0 : index, %arg1 : index, %arg2 : index,
+func.func @parallel_loop(%arg0 : index, %arg1 : index, %arg2 : index,
%arg3 : index, %arg4 : index, %arg5 : index,
%A: memref<?x?xf32>, %B: memref<?x?xf32>,
%C: memref<?x?xf32>, %result: memref<?x?xf32>) {
@@ -37,7 +37,7 @@ func @parallel_loop(%arg0 : index, %arg1 : index, %arg2 : index,
// -----
-func @static_loop_with_step() {
+func.func @static_loop_with_step() {
%c0 = arith.constant 0 : index
%c3 = arith.constant 3 : index
%c22 = arith.constant 22 : index
@@ -67,7 +67,7 @@ func @static_loop_with_step() {
// -----
-func @tile_nested_innermost() {
+func.func @tile_nested_innermost() {
%c2 = arith.constant 2 : index
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
@@ -115,7 +115,7 @@ func @tile_nested_innermost() {
// -----
-func @tile_nested_in_non_ploop() {
+func.func @tile_nested_in_non_ploop() {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
diff --git a/mlir/test/Dialect/Shape/bufferize.mlir b/mlir/test/Dialect/Shape/bufferize.mlir
index 46546c4786608..963a5e8bcf578 100644
--- a/mlir/test/Dialect/Shape/bufferize.mlir
+++ b/mlir/test/Dialect/Shape/bufferize.mlir
@@ -13,7 +13,7 @@
// CHECK: "test.sink"(%[[TENSOR]]) : (tensor<2xf16>) -> ()
// CHECK: return
// CHECK: }
-func @shape_assuming() {
+func.func @shape_assuming() {
%0 = shape.const_witness true
%1 = shape.assuming %0 -> (tensor<2xf16>) {
%2 = "test.source"() : () -> (tensor<2xf16>)
diff --git a/mlir/test/Dialect/Shape/canonicalize.mlir b/mlir/test/Dialect/Shape/canonicalize.mlir
index 470f22190f738..456f27fbb126a 100644
--- a/mlir/test/Dialect/Shape/canonicalize.mlir
+++ b/mlir/test/Dialect/Shape/canonicalize.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt -split-input-file -allow-unregistered-dialect -canonicalize %s | FileCheck %s
// CHECK-LABEL: func @f
-func @f(%arg0: tensor<2x3x4xf32>) -> tensor<3xindex> {
+func.func @f(%arg0: tensor<2x3x4xf32>) -> tensor<3xindex> {
// CHECK: shape.const_shape [2, 3, 4] : tensor<3xindex>
%0 = shape.shape_of %arg0 : tensor<2x3x4xf32> -> tensor<3xindex>
return %0 : tensor<3xindex>
@@ -11,7 +11,7 @@ func @f(%arg0: tensor<2x3x4xf32>) -> tensor<3xindex> {
// Basic case.
// CHECK-LABEL: func @f
-func @f() -> (!shape.shape, !shape.shape) {
+func.func @f() -> (!shape.shape, !shape.shape) {
// CHECK-DAG: shape.const_shape [2, 3] : !shape.shape
// CHECK-DAG: shape.const_shape [4, 5] : !shape.shape
%c2 = arith.constant 2 : index
@@ -25,7 +25,7 @@ func @f() -> (!shape.shape, !shape.shape) {
// Negative split point.
// CHECK-LABEL: func @f
-func @f() -> (!shape.shape, !shape.shape) {
+func.func @f() -> (!shape.shape, !shape.shape) {
// CHECK-DAG: shape.const_shape [2, 3, 4] : !shape.shape
// CHECK-DAG: shape.const_shape [5] : !shape.shape
%c-1 = arith.constant -1 : index
@@ -38,7 +38,7 @@ func @f() -> (!shape.shape, !shape.shape) {
// Out of range split point. No folding.
// CHECK-LABEL: func @f
-func @f() -> (!shape.shape, !shape.shape) {
+func.func @f() -> (!shape.shape, !shape.shape) {
// CHECK: shape.split_at
%c5 = arith.constant 5 : index
%0 = shape.const_shape [2, 3, 4, 5] : !shape.shape
@@ -50,7 +50,7 @@ func @f() -> (!shape.shape, !shape.shape) {
// Basic case.
// CHECK-LABEL: func @f
-func @f() -> !shape.shape {
+func.func @f() -> !shape.shape {
// CHECK: shape.const_shape [7, 2] : !shape.shape
%0 = shape.const_shape [1, 2] : !shape.shape
%1 = shape.const_shape [7, 1] : !shape.shape
@@ -62,7 +62,7 @@ func @f() -> !shape.shape {
// Basic case including extent tensors.
// CHECK-LABEL: @broadcast
-func @broadcast() -> tensor<2xindex> {
+func.func @broadcast() -> tensor<2xindex> {
// CHECK: shape.const_shape [7, 2] : tensor<2xindex>
%0 = shape.const_shape [1, 2] : tensor<2xindex>
%1 = shape.const_shape [7, 1] : tensor<2xindex>
@@ -75,7 +75,7 @@ func @broadcast() -> tensor<2xindex> {
// Basic case including extent tensors.
// CHECK-LABEL: @broadcast
-func @broadcast() -> !shape.shape {
+func.func @broadcast() -> !shape.shape {
// CHECK: shape.const_shape [7, 2] : !shape.shape
%0 = shape.const_shape [1, 2] : tensor<2xindex>
%1 = shape.const_shape [7, 1] : tensor<2xindex>
@@ -87,7 +87,7 @@ func @broadcast() -> !shape.shape {
// Rhs is a scalar.
// CHECK-LABEL: func @f
-func @f(%arg0 : !shape.shape) -> !shape.shape {
+func.func @f(%arg0 : !shape.shape) -> !shape.shape {
// CHECK: return %arg0
%0 = shape.const_shape [] : !shape.shape
%1 = shape.broadcast %arg0, %0 : !shape.shape, !shape.shape -> !shape.shape
@@ -98,7 +98,7 @@ func @f(%arg0 : !shape.shape) -> !shape.shape {
// Lhs is a scalar.
// CHECK-LABEL: func @f
-func @f(%arg0 : !shape.shape) -> !shape.shape {
+func.func @f(%arg0 : !shape.shape) -> !shape.shape {
// CHECK: return %arg0
%0 = shape.const_shape [] : !shape.shape
%1 = shape.broadcast %0, %arg0 : !shape.shape, !shape.shape -> !shape.shape
@@ -109,7 +109,7 @@ func @f(%arg0 : !shape.shape) -> !shape.shape {
// Lhs is a scalar and rhs is constant.
// CHECK-LABEL: func @f
-func @f() -> !shape.shape {
+func.func @f() -> !shape.shape {
// CHECK: %[[CST:.*]] = shape.const_shape [1, 2, 3] : !shape.shape
// CHECK: return %[[CST]]
%0 = shape.const_shape [] : !shape.shape
@@ -123,7 +123,7 @@ func @f() -> !shape.shape {
// All but one operands are known empty shapes.
// CHECK-LABEL: @all_but_one_empty
// CHECK-SAME: (%[[ARG:.*]]: !shape.shape)
-func @all_but_one_empty(%arg0 : !shape.shape) -> !shape.shape {
+func.func @all_but_one_empty(%arg0 : !shape.shape) -> !shape.shape {
// CHECK: return %[[ARG]]
%0 = shape.const_shape [] : !shape.shape
%1 = shape.const_shape [] : tensor<0xindex>
@@ -137,7 +137,7 @@ func @all_but_one_empty(%arg0 : !shape.shape) -> !shape.shape {
// Partial folding.
// CHECK-LABEL: @partial_folding
// CHECK-SAME: (%[[ARG:.*]]: !shape.shape)
-func @partial_folding(%arg0 : !shape.shape) -> !shape.shape {
+func.func @partial_folding(%arg0 : !shape.shape) -> !shape.shape {
// CHECK: %[[CST_SHAPE:.*]] = shape.const_shape [1, 2, 3] : tensor<3xindex>
// CHECK: %[[RESULT:.*]] = shape.broadcast %[[ARG]], %[[CST_SHAPE]] : !shape.shape, tensor<3xindex> -> !shape.shape
// CHECK: return %[[RESULT]]
@@ -152,7 +152,7 @@ func @partial_folding(%arg0 : !shape.shape) -> !shape.shape {
// Incompatible shapes. No folding.
// CHECK-LABEL: func @f
-func @f() -> !shape.shape {
+func.func @f() -> !shape.shape {
// CHECK: shape.broadcast
%0 = shape.const_shape [2] : !shape.shape
%1 = shape.const_shape [7] : !shape.shape
@@ -164,7 +164,7 @@ func @f() -> !shape.shape {
// Dead code
// CHECK-LABEL: @broadcast
-func @broadcast(%arg0 : !shape.shape, %arg1 : !shape.shape) {
+func.func @broadcast(%arg0 : !shape.shape, %arg1 : !shape.shape) {
// CHECK-NEXT: return
%0 = shape.broadcast %arg0, %arg1
: !shape.shape, !shape.shape -> !shape.shape
@@ -175,7 +175,7 @@ func @broadcast(%arg0 : !shape.shape, %arg1 : !shape.shape) {
// Basic case.
// CHECK-LABEL: func @f
-func @f() -> !shape.shape {
+func.func @f() -> !shape.shape {
// CHECK: shape.const_shape [0, 1, 2, 3] : !shape.shape
%lhs = shape.const_shape [0, 1] : !shape.shape
%rhs = shape.const_shape [2, 3] : !shape.shape
@@ -187,7 +187,7 @@ func @f() -> !shape.shape {
// Basic case.
// CHECK-LABEL: func @f
-func @f() -> tensor<2xindex> {
+func.func @f() -> tensor<2xindex> {
// CHECK: shape.const_shape [0, 1] : tensor<2xindex>
%cs = shape.const_shape [0, 1] : !shape.shape
%0 = shape.to_extent_tensor %cs : !shape.shape -> tensor<2xindex>
@@ -198,7 +198,7 @@ func @f() -> tensor<2xindex> {
// Basic case.
// CHECK-LABEL: func @f()
-func @f() -> !shape.shape {
+func.func @f() -> !shape.shape {
// CHECK: shape.const_shape [3, 5, 11] : !shape.shape
%e0 = arith.constant 3 : index
%e1 = arith.constant 5 : index
@@ -211,7 +211,7 @@ func @f() -> !shape.shape {
// fold_const_size
// CHECK-LABEL: func @fold_const_size()
-func @fold_const_size() -> !shape.shape {
+func.func @fold_const_size() -> !shape.shape {
// CHECK: shape.const_shape [3, 5] : !shape.shape
%e0 = shape.const_size 3
%e1 = shape.const_size 5
@@ -222,7 +222,7 @@ func @fold_const_size() -> !shape.shape {
// -----
// CHECK-LABEL: func @no_fold
-func @no_fold(%arg0: index) -> !shape.shape {
+func.func @no_fold(%arg0: index) -> !shape.shape {
// CHECK-NOT: shape.const_shape
%e0 = arith.constant 3 : index
%ret = shape.from_extents %e0, %arg0 : index, index
@@ -233,7 +233,7 @@ func @no_fold(%arg0: index) -> !shape.shape {
// Cast constant size to index and fold it away.
// CHECK-LABEL: func @const_size_to_index
-func @const_size_to_index() -> index {
+func.func @const_size_to_index() -> index {
// CHECK-NOT: shape.index_cast
%cs = shape.const_size 123
// CHECK: arith.constant 123 : index
@@ -245,7 +245,7 @@ func @const_size_to_index() -> index {
// Cast constant index to size and fold it away.
// CHECK-LABEL: func @const_index_to_size
-func @const_index_to_size() -> !shape.size {
+func.func @const_index_to_size() -> !shape.size {
// CHECK-NOT: arith.index_cast
%ci = arith.constant 123 : index
// CHECK: shape.const_size 123
@@ -257,7 +257,7 @@ func @const_index_to_size() -> !shape.size {
// Cast constant index to size, then back, and fold it away.
// CHECK-LABEL: func @const_index_to_size_to_index
-func @const_index_to_size_to_index() -> index {
+func.func @const_index_to_size_to_index() -> index {
// CHECK-NOT: shape.index_cast
%ci0 = arith.constant 123 : index
%cs0 = shape.index_to_size %ci0
@@ -271,7 +271,7 @@ func @const_index_to_size_to_index() -> index {
// No folding.
// CHECK-LABEL: func @nonfoldable_size_to_index
-func @nonfoldable_size_to_index(%cs : !shape.size) -> index {
+func.func @nonfoldable_size_to_index(%cs : !shape.size) -> index {
// CHECK: shape.size_to_index
%ci = shape.size_to_index %cs : !shape.size
return %ci : index
@@ -281,7 +281,7 @@ func @nonfoldable_size_to_index(%cs : !shape.size) -> index {
// No folding.
// CHECK-LABEL: func @nonfoldable_index_to_size
-func @nonfoldable_index_to_size(%ci : index) -> !shape.size {
+func.func @nonfoldable_index_to_size(%ci : index) -> !shape.size {
// CHECK: shape.index_to_size
%cs = shape.index_to_size %ci
return %cs : !shape.size
@@ -291,7 +291,7 @@ func @nonfoldable_index_to_size(%ci : index) -> !shape.size {
// Fold number of elements computation.
// CHECK-LABEL: func @num_elements
-func @num_elements() -> !shape.size {
+func.func @num_elements() -> !shape.size {
// CHECK-NOT: shape.const_shape
%shape = shape.const_shape [4, 5, 6] : !shape.shape
// CHECK-NOT: shape.num_elements
@@ -305,7 +305,7 @@ func @num_elements() -> !shape.size {
// No folding.
// CHECK-LABEL: func @nonfoldable_num_elements
-func @nonfoldable_num_elements(%shape : !shape.shape) -> !shape.size {
+func.func @nonfoldable_num_elements(%shape : !shape.shape) -> !shape.size {
// CHECK-NOT: shape.const_{{.*}}
%num_elements = shape.num_elements %shape : !shape.shape -> !shape.size
return %num_elements : !shape.size
@@ -315,7 +315,7 @@ func @nonfoldable_num_elements(%shape : !shape.shape) -> !shape.size {
// Basic folding.
// CHECK-LABEL: func @basic
-func @basic() -> index {
+func.func @basic() -> index {
// CHECK: constant 2 : index
%0 = shape.const_shape [0, 1, 2] : tensor<3xindex>
%c2 = arith.constant 2 : index
@@ -327,7 +327,7 @@ func @basic() -> index {
// Should not fold.
// CHECK-LABEL: func @out_of_bounds
-func @out_of_bounds() -> index {
+func.func @out_of_bounds() -> index {
// CHECK: shape.const_shape
// CHECK: shape.get_extent
%0 = shape.const_shape [0, 1, 2] : tensor<3xindex>
@@ -340,7 +340,7 @@ func @out_of_bounds() -> index {
// Should not fold.
// CHECK-LABEL: func @not_const
-func @not_const(%arg0: tensor<?xindex>) -> index {
+func.func @not_const(%arg0: tensor<?xindex>) -> index {
// CHECK: shape.get_extent
%c3 = arith.constant 3 : index
%0 = shape.get_extent %arg0, %c3 : tensor<?xindex>, index -> index
@@ -351,7 +351,7 @@ func @not_const(%arg0: tensor<?xindex>) -> index {
// Basic folding.
// CHECK-LABEL: func @basic
-func @basic() -> !shape.size {
+func.func @basic() -> !shape.size {
// CHECK: shape.const_size 2
%0 = shape.const_shape [0, 1, 2] : !shape.shape
%c2 = shape.const_size 2
@@ -363,7 +363,7 @@ func @basic() -> !shape.size {
// Should not fold.
// CHECK-LABEL: func @out_of_bounds
-func @out_of_bounds() -> !shape.size {
+func.func @out_of_bounds() -> !shape.size {
// CHECK: shape.const_shape
// CHECK: shape.get_extent
%0 = shape.const_shape [0, 1, 2] : !shape.shape
@@ -376,7 +376,7 @@ func @out_of_bounds() -> !shape.size {
// Should not fold.
// CHECK-LABEL: func @not_const
-func @not_const(%arg0 : !shape.shape) -> !shape.size {
+func.func @not_const(%arg0 : !shape.shape) -> !shape.size {
// CHECK: shape.get_extent
%c3 = shape.const_size 3
%0 = shape.get_extent %arg0, %c3 : !shape.shape, !shape.size -> !shape.size
@@ -386,7 +386,7 @@ func @not_const(%arg0 : !shape.shape) -> !shape.size {
// -----
// cstr_eq with non-constant but known equal shapes can be removed.
// CHECK-LABEL: func @f
-func @f(%arg0 : !shape.shape) {
+func.func @f(%arg0 : !shape.shape) {
// CHECK-NEXT: shape.const_witness true
// CHECK-NEXT: consume.witness
// CHECK-NEXT: return
@@ -398,7 +398,7 @@ func @f(%arg0 : !shape.shape) {
// -----
// cstr_eq with equal const_shapes can be folded
// CHECK-LABEL: func @f
-func @f() {
+func.func @f() {
// CHECK-NEXT: shape.const_witness true
// CHECK-NEXT: consume.witness
// CHECK-NEXT: return
@@ -413,7 +413,7 @@ func @f() {
// -----
// cstr_eq with unequal const_shapes cannot be folded
// CHECK-LABEL: func @f
-func @f() {
+func.func @f() {
// CHECK-NEXT: shape.const_shape
// CHECK-NEXT: shape.const_shape
// CHECK-NEXT: shape.cstr_eq
@@ -429,7 +429,7 @@ func @f() {
// -----
// cstr_eq without const_shapes cannot be folded
// CHECK-LABEL: func @f
-func @f(%arg0: !shape.shape, %arg1: !shape.shape) {
+func.func @f(%arg0: !shape.shape, %arg1: !shape.shape) {
// CHECK-NEXT: shape.cstr_eq
// CHECK-NEXT: consume.witness
// CHECK-NEXT: return
@@ -441,7 +441,7 @@ func @f(%arg0: !shape.shape, %arg1: !shape.shape) {
// -----
// cstr_require with constant can be folded
// CHECK-LABEL: func @cstr_require_fold
-func @cstr_require_fold() {
+func.func @cstr_require_fold() {
// CHECK-NEXT: shape.const_witness true
// CHECK-NEXT: consume.witness
// CHECK-NEXT: return
@@ -454,7 +454,7 @@ func @cstr_require_fold() {
// -----
// cstr_require without constant cannot be folded
// CHECK-LABEL: func @cstr_require_no_fold
-func @cstr_require_no_fold(%arg0: i1) {
+func.func @cstr_require_no_fold(%arg0: i1) {
// CHECK-NEXT: shape.cstr_require
// CHECK-NEXT: consume.witness
// CHECK-NEXT: return
@@ -467,7 +467,7 @@ func @cstr_require_no_fold(%arg0: i1) {
// merge assuming_all operations
// CHECK-LABEL: func @f
-func @f() {
+func.func @f() {
// CHECK-NEXT: %[[W0:.*]] = "test.source"
// CHECK-NEXT: %[[W1:.*]] = "test.source"
// CHECK-NEXT: %[[W2:.*]] = "test.source"
@@ -487,7 +487,7 @@ func @f() {
// `assuming_all` with all `cstr_eq` and shared operands can be collapsed.
// CHECK-LABEL: func @assuming_all_to_cstr_eq
// CHECK-SAME: (%[[A:.*]]: !shape.shape, %[[B:.*]]: tensor<?xindex>, %[[C:.*]]: tensor<3xindex>)
-func @assuming_all_to_cstr_eq(%a : !shape.shape, %b : tensor<?xindex>,
+func.func @assuming_all_to_cstr_eq(%a : !shape.shape, %b : tensor<?xindex>,
%c : tensor<3xindex>) -> !shape.witness {
// CHECK: %[[RESULT:.*]] = shape.cstr_eq %[[A]], %[[B]], %[[B]], %[[C]]
// CHECK: return %[[RESULT]]
@@ -501,7 +501,7 @@ func @assuming_all_to_cstr_eq(%a : !shape.shape, %b : tensor<?xindex>,
// `assuming_all` with duplicate operands.
// CHECK-LABEL: func @assuming_all_duplicate_operands
// CHECK-SAME: (%[[ARG0:.*]]: tensor<?xindex>, %[[ARG1:.*]]: tensor<?xindex>)
-func @assuming_all_duplicate_operands(%arg0 : tensor<?xindex>,
+func.func @assuming_all_duplicate_operands(%arg0 : tensor<?xindex>,
%arg1 : tensor<?xindex>) -> !shape.witness {
// CHECK: %[[RES:.*]] = shape.cstr_broadcastable %[[ARG0]], %[[ARG1]]
// CHECK: return %[[RES]]
@@ -514,7 +514,7 @@ func @assuming_all_duplicate_operands(%arg0 : tensor<?xindex>,
// `assuming_all` with all `cstr_eq` but disjoint operands cannot be collapsed.
// CHECK-LABEL: func @assuming_all_to_cstr_eq
// CHECK-SAME: (%[[A:.*]]: !shape.shape, %[[B:.*]]: tensor<?xindex>, %[[C:.*]]: tensor<3xindex>, %[[D:.*]]: tensor<3xindex>)
-func @assuming_all_to_cstr_eq(%a : !shape.shape, %b : tensor<?xindex>,
+func.func @assuming_all_to_cstr_eq(%a : !shape.shape, %b : tensor<?xindex>,
%c : tensor<3xindex>, %d : tensor<3xindex>) -> !shape.witness {
// CHECK: %[[EQ0:.*]] = shape.cstr_eq %[[A]], %[[B]]
// CHECK: %[[EQ1:.*]] = shape.cstr_eq %[[C]], %[[D]]
@@ -529,7 +529,7 @@ func @assuming_all_to_cstr_eq(%a : !shape.shape, %b : tensor<?xindex>,
// -----
// assuming_all with known passing witnesses can be folded
// CHECK-LABEL: func @f
-func @f() {
+func.func @f() {
// CHECK-NEXT: shape.const_witness true
// CHECK-NEXT: consume.witness
// CHECK-NEXT: return
@@ -549,7 +549,7 @@ func @f() {
// Additionally check that the attribute is moved to the end as this op is
// commutative.
// CHECK-LABEL: func @f
-func @f() {
+func.func @f() {
// CHECK-NEXT: %[[UNKNOWN1:.*]] = "test.source"
// CHECK-NEXT: %[[UNKNOWN2:.*]] = "test.source"
// CHECK-NEXT: shape.assuming_all %[[UNKNOWN1]], %[[UNKNOWN2]]
@@ -571,7 +571,7 @@ func @f() {
// CHECK: %[[ARG0:[a-z0-9]*]]: !shape.shape
// CHECK-SAME: %[[ARG1:[a-z0-9]*]]: !shape.shape
// CHECK-SAME: %[[ARG2:[a-z0-9]*]]: !shape.shape
-func @f(%arg0 : !shape.shape, %arg1 : !shape.shape, %arg2 : !shape.shape) {
+func.func @f(%arg0 : !shape.shape, %arg1 : !shape.shape, %arg2 : !shape.shape) {
// CHECK-NEXT: %[[W:.*]] = shape.cstr_broadcastable %[[ARG0]], %[[ARG1]], %[[ARG2]]
// CHECK-NEXT: "consume.witness"(%[[W]])
// CHECK-NEXT: return
@@ -590,7 +590,7 @@ func @f(%arg0 : !shape.shape, %arg1 : !shape.shape, %arg2 : !shape.shape) {
// CHECK: %[[ARG0:[a-z0-9]*]]: !shape.shape
// CHECK-SAME: %[[ARG1:[a-z0-9]*]]: !shape.shape
// CHECK-SAME: %[[ARG2:[a-z0-9]*]]: !shape.shape
-func @f(%arg0 : !shape.shape, %arg1 : !shape.shape, %arg2 : !shape.shape) {
+func.func @f(%arg0 : !shape.shape, %arg1 : !shape.shape, %arg2 : !shape.shape) {
// CHECK-NEXT: %[[W0:.*]] = shape.cstr_broadcastable %[[ARG0]], %[[ARG1]]
// CHECK-NEXT: %[[W1:.*]] = shape.cstr_broadcastable %[[ARG1]], %[[ARG2]]
// CHECK-NEXT: %[[W2:.*]] = shape.assuming_all %[[W0]], %[[W1]]
@@ -607,7 +607,7 @@ func @f(%arg0 : !shape.shape, %arg1 : !shape.shape, %arg2 : !shape.shape) {
// any can be replaced with a constant input if it has one.
// CHECK-LABEL: func @f
-func @f(%arg : !shape.shape) -> !shape.shape {
+func.func @f(%arg : !shape.shape) -> !shape.shape {
// CHECK-NEXT: %[[CS:.*]] = shape.const_shape
// CHECK-NEXT: return %[[CS]]
%0 = shape.const_shape [2, 3, 4] : !shape.shape
@@ -619,7 +619,7 @@ func @f(%arg : !shape.shape) -> !shape.shape {
// any can be replaced with a constant input if it has one.
// CHECK-LABEL: func @f
-func @f(%arg : tensor<?xindex>) -> tensor<3xindex> {
+func.func @f(%arg : tensor<?xindex>) -> tensor<3xindex> {
// CHECK-NEXT: %[[CS:.*]] = shape.const_shape [2, 3, 4] : tensor<3xindex>
// CHECK-NEXT: return %[[CS]] : tensor<3xindex>
%0 = shape.const_shape [2, 3, 4] : tensor<3xindex>
@@ -631,7 +631,7 @@ func @f(%arg : tensor<?xindex>) -> tensor<3xindex> {
// Folding of any with partially constant operands is not yet implemented.
// CHECK-LABEL: func @f
-func @f(%arg0 : !shape.shape, %arg1 : !shape.shape) -> !shape.shape {
+func.func @f(%arg0 : !shape.shape, %arg1 : !shape.shape) -> !shape.shape {
// CHECK-NEXT: %[[CS:.*]] = shape.any
// CHECK-NEXT: return %[[CS]]
%1 = shape.any %arg0, %arg1 : !shape.shape, !shape.shape -> !shape.shape
@@ -642,7 +642,7 @@ func @f(%arg0 : !shape.shape, %arg1 : !shape.shape) -> !shape.shape {
// assuming with a known passing witness can be removed
// CHECK-LABEL: func @f
-func @f() {
+func.func @f() {
// CHECK-NEXT: source
// CHECK-NEXT: sink
// CHECK-NEXT: return
@@ -659,7 +659,7 @@ func @f() {
// assuming without a known passing witness cannot be removed
// CHECK-LABEL: func @f
-func @f() {
+func.func @f() {
// CHECK-NEXT: test.source
// CHECK-NEXT: shape.assuming
// CHECK-NEXT: test.source
@@ -680,7 +680,7 @@ func @f() {
// Remove unused results from assuming ops.
// CHECK-LABEL: func @unused_assuming_results
-func @unused_assuming_results() {
+func.func @unused_assuming_results() {
// CHECK: %[[ASSUMING_RESULT:.*]] = shape.assuming %0 -> (f32) {
// CHECK: %{{.*}} = "produce.redundant"
// CHECK: %[[MEANINGFUL:.*]] = "produce.meaningful"
@@ -700,7 +700,7 @@ func @unused_assuming_results() {
// -----
// Broadcastable with broadcastable constant shapes can be removed.
// CHECK-LABEL: func @f
-func @f() {
+func.func @f() {
// CHECK-NEXT: shape.const_witness true
// CHECK-NEXT: consume.witness
// CHECK-NEXT: return
@@ -715,7 +715,7 @@ func @f() {
// Empty shape arguments can be removed from broadcastable ops.
// CHECK-LABEL: func @f
// CHECK-SAME: (%[[ARG0:.*]]: tensor<?xindex>, %[[ARG1:.*]]: tensor<?xindex>, %{{.*}}: tensor<0xindex>)
-func @f(%arg0 : tensor<?xindex>, %arg1 : tensor<?xindex>, %arg2 : tensor<0xindex>) {
+func.func @f(%arg0 : tensor<?xindex>, %arg1 : tensor<?xindex>, %arg2 : tensor<0xindex>) {
// CHECK-NOT: const_shape
// CHECK: cstr_broadcastable %[[ARG0]], %[[ARG1]] : tensor<?xindex>, tensor<?xindex>
%0 = shape.const_shape [] : !shape.shape
@@ -728,7 +728,7 @@ func @f(%arg0 : tensor<?xindex>, %arg1 : tensor<?xindex>, %arg2 : tensor<0xindex
// -----
// Broadcastable with non-broadcastable constant shapes is always false
// CHECK-LABEL: func @static_non_broadcastable
-func @static_non_broadcastable() {
+func.func @static_non_broadcastable() {
// CHECK-NEXT: shape.const_shape
// CHECK-NEXT: shape.const_shape
// CHECK-NEXT: shape.cstr_broadcastable
@@ -744,7 +744,7 @@ func @static_non_broadcastable() {
// -----
// Broadcastable without guaranteed broadcastable shapes cannot be removed.
// CHECK-LABEL: func @f
-func @f(%arg0 : !shape.shape) {
+func.func @f(%arg0 : !shape.shape) {
// CHECK-NEXT: shape.const_shape
// CHECK-NEXT: shape.cstr_broadcastable
// CHECK-NEXT: consume.witness
@@ -758,7 +758,7 @@ func @f(%arg0 : !shape.shape) {
// -----
// Broadcastable with non-constant but known equal shapes can be removed.
// CHECK-LABEL: func @f
-func @f(%arg0 : !shape.shape) {
+func.func @f(%arg0 : !shape.shape) {
// CHECK-NEXT: shape.const_witness true
// CHECK-NEXT: consume.witness
// CHECK-NEXT: return
@@ -771,7 +771,7 @@ func @f(%arg0 : !shape.shape) {
// Broadcastable canonicalization also works on extent tensors.
// CHECK-LABEL: func @broadcastable_on_extent_tensors
-func @broadcastable_on_extent_tensors(%arg : tensor<?xindex>) {
+func.func @broadcastable_on_extent_tensors(%arg : tensor<?xindex>) {
// CHECK-NEXT: shape.const_witness true
// CHECK-NEXT: consume.witness
// CHECK-NEXT: return
@@ -783,7 +783,7 @@ func @broadcastable_on_extent_tensors(%arg : tensor<?xindex>) {
// -----
// Fold ternary broadcastable
// CHECK-LABEL: func @f
-func @f() {
+func.func @f() {
// CHECK-NEXT: shape.const_witness true
// CHECK-NEXT: consume.witness
// CHECK-NEXT: return
@@ -798,7 +798,7 @@ func @f() {
// -----
// Fold ternary broadcastable with dynamic ranks
// CHECK-LABEL: func @f
-func @f() {
+func.func @f() {
// CHECK-NEXT: shape.const_witness true
// CHECK-NEXT: consume.witness
// CHECK-NEXT: return
@@ -812,7 +812,7 @@ func @f() {
// -----
// One scalar and one non-scalar and one unknown cannot be broadcasted at compile time
// CHECK-LABEL: func @f
-func @f() {
+func.func @f() {
// CHECK: shape.cstr_broadcastable
// CHECK-NEXT: consume.witness
// CHECK-NEXT: return
@@ -827,7 +827,7 @@ func @f() {
// -----
// One scalar and two unknowns cannot be broadcasted at compile time
// CHECK-LABEL: func @f
-func @f() {
+func.func @f() {
// CHECK: shape.cstr_broadcastable
// CHECK-NEXT: consume.witness
// CHECK-NEXT: return
@@ -842,7 +842,7 @@ func @f() {
// -----
// Broadcastable with scalars and a non-scalar can be constant folded
// CHECK-LABEL: func @f
-func @f(%arg0 : !shape.shape) {
+func.func @f(%arg0 : !shape.shape) {
// CHECK-NEXT: shape.const_witness true
// CHECK-NEXT: consume.witness
// CHECK-NEXT: return
@@ -855,7 +855,7 @@ func @f(%arg0 : !shape.shape) {
// -----
// One scalar and one non-scalar and one unknown cannot be folded.
// CHECK-LABEL: func @f
-func @f(%arg0 : !shape.shape) {
+func.func @f(%arg0 : !shape.shape) {
// CHECK: shape.cstr_broadcastable
// CHECK-NEXT: consume.witness
// CHECK-NEXT: return
@@ -870,7 +870,7 @@ func @f(%arg0 : !shape.shape) {
// Fold `rank` based on constant shape.
// CHECK-LABEL: @fold_rank
-func @fold_rank() -> !shape.size {
+func.func @fold_rank() -> !shape.size {
// CHECK: %[[RESULT:.*]] = shape.const_size 5
// CHECK: return %[[RESULT]] : !shape.size
%shape = shape.const_shape [3, 4, 5, 6, 7] : !shape.shape
@@ -883,7 +883,7 @@ func @fold_rank() -> !shape.size {
// Do not fold `rank` if shape is dynamic.
// CHECK-LABEL: @dont_fold_rank
// CHECK-SAME: (%[[SHAPE:.*]]: !shape.shape) -> !shape.size
-func @dont_fold_rank(%shape : !shape.shape) -> !shape.size {
+func.func @dont_fold_rank(%shape : !shape.shape) -> !shape.size {
// CHECK: %[[RESULT:.*]] = shape.rank %[[SHAPE]]
// CHECK: return %[[RESULT]] : !shape.size
%rank = shape.rank %shape : !shape.shape -> !shape.size
@@ -894,7 +894,7 @@ func @dont_fold_rank(%shape : !shape.shape) -> !shape.size {
// Fold `rank` based on constant extent tensor.
// CHECK-LABEL: @fold_rank
-func @fold_rank() -> index {
+func.func @fold_rank() -> index {
// CHECK: %[[RESULT:.*]] = arith.constant 5 : index
// CHECK: return %[[RESULT]] : index
%shape = shape.const_shape [3, 4, 5, 6, 7] : tensor<5xindex>
@@ -907,7 +907,7 @@ func @fold_rank() -> index {
// Do not fold `rank` for non-constant extent tensors.
// CHECK-LABEL: @dont_fold_rank
// CHECK-SAME: (%[[SHAPE:.*]]: tensor<?xindex>) -> index
-func @dont_fold_rank(%shape : tensor<?xindex>) -> index {
+func.func @dont_fold_rank(%shape : tensor<?xindex>) -> index {
// CHECK: %[[RESULT:.*]] = shape.rank %[[SHAPE]] : tensor<?xindex> -> index
// CHECK: return %[[RESULT]] : index
%rank = shape.rank %shape : tensor<?xindex> -> index
@@ -918,7 +918,7 @@ func @dont_fold_rank(%shape : tensor<?xindex>) -> index {
// Canonicalize `rank` when shape is derived from ranked tensor.
// CHECK-LABEL: @canonicalize_rank
-func @canonicalize_rank(%arg : tensor<1x2x?xf32>) -> index {
+func.func @canonicalize_rank(%arg : tensor<1x2x?xf32>) -> index {
// CHECK: %[[RESULT:.*]] = arith.constant 3 : index
// CHECK: return %[[RESULT]] : index
%shape = shape.shape_of %arg : tensor<1x2x?xf32> -> tensor<?xindex>
@@ -930,7 +930,7 @@ func @canonicalize_rank(%arg : tensor<1x2x?xf32>) -> index {
// Canonicalize `rank` when shape is derived from ranked tensor.
// CHECK-LABEL: @canonicalize_rank
-func @canonicalize_rank_size(%arg : tensor<1x2x?xf32>) -> !shape.size {
+func.func @canonicalize_rank_size(%arg : tensor<1x2x?xf32>) -> !shape.size {
// CHECK: %[[RESULT:.*]] = shape.const_size 3
// CHECK: return %[[RESULT]] : !shape.size
%shape = shape.shape_of %arg : tensor<1x2x?xf32> -> !shape.shape
@@ -943,7 +943,7 @@ func @canonicalize_rank_size(%arg : tensor<1x2x?xf32>) -> !shape.size {
// Do not canonicalize `rank` when shape is derived from unranked tensor.
// CHECK-LABEL: @dont_canonicalize_rank
// CHECK-SAME: (%[[ARG:.*]]: tensor<*xf32>) -> index
-func @dont_canonicalize_rank(%arg : tensor<*xf32>) -> index {
+func.func @dont_canonicalize_rank(%arg : tensor<*xf32>) -> index {
// CHECK: %[[SHAPE:.*]] = shape.shape_of %[[ARG]] : tensor<*xf32> -> tensor<?xindex>
// CHECK: %[[SIZE:.*]] = shape.rank %[[SHAPE]]
// CHECK: return %[[SIZE]] : index
@@ -957,7 +957,7 @@ func @dont_canonicalize_rank(%arg : tensor<*xf32>) -> index {
// Canonicalize redundant conversion from `index` to `size` and back.
// CHECK-LABEL: @index_to_size_to_index
// CHECK-SAME: (%[[IDX:.*]]: index) -> index
-func @index_to_size_to_index(%index : index) -> index {
+func.func @index_to_size_to_index(%index : index) -> index {
// CHECK: return %[[IDX]] : index
%size = shape.index_to_size %index
%result = shape.size_to_index %size : !shape.size
@@ -969,7 +969,7 @@ func @index_to_size_to_index(%index : index) -> index {
// Canonicalize redundant conversion from `size` to `index` and back.
// CHECK-LABEL: @size_to_index_to_size
// CHECK-SAME: (%[[SIZE:.*]]: !shape.size) -> !shape.size
-func @size_to_index_to_size(%size : !shape.size) -> !shape.size {
+func.func @size_to_index_to_size(%size : !shape.size) -> !shape.size {
// CHECK: return %[[SIZE]] : !shape.size
%idx = shape.size_to_index %size : !shape.size
%result = shape.index_to_size %idx
@@ -980,7 +980,7 @@ func @size_to_index_to_size(%size : !shape.size) -> !shape.size {
// Canonicalize scalar cstr_broadcastable checks
// CHECK-LABEL: @cstr_broadcastable_scalar
-func @cstr_broadcastable_scalar(%arg0 : tensor<?xf32>) {
+func.func @cstr_broadcastable_scalar(%arg0 : tensor<?xf32>) {
// CHECK-NEXT: shape.const_witness true
// CHECK-NEXT: consume.witness
// CHECK-NEXT: return
@@ -995,7 +995,7 @@ func @cstr_broadcastable_scalar(%arg0 : tensor<?xf32>) {
// Do not canonicalize cstr_broadcastable checks with 2 unknowns
// CHECK-LABEL: @cstr_broadcastable_unknown
-func @cstr_broadcastable_unknown(%arg0 : tensor<?xf32>, %arg1 : tensor<?xf32>) {
+func.func @cstr_broadcastable_unknown(%arg0 : tensor<?xf32>, %arg1 : tensor<?xf32>) {
// CHECK-NEXT: shape.shape_of %arg0
// CHECK-NEXT: shape.shape_of %arg1
// CHECK-NEXT: shape.cstr_broadcastable
@@ -1012,7 +1012,7 @@ func @cstr_broadcastable_unknown(%arg0 : tensor<?xf32>, %arg1 : tensor<?xf32>) {
// Scalars are safe to broadcast to unranked sizes.
// CHECK-LABEL: @cstr_broadcastable_scalar_unranked
-func @cstr_broadcastable_scalar_unranked(%arg0 : tensor<*xf32>, %arg1 : tensor<index>) {
+func.func @cstr_broadcastable_scalar_unranked(%arg0 : tensor<*xf32>, %arg1 : tensor<index>) {
// CHECK-NEXT: shape.const_witness true
// CHECK-NEXT: consume.witness
// CHECK-NEXT: return
@@ -1027,7 +1027,7 @@ func @cstr_broadcastable_scalar_unranked(%arg0 : tensor<*xf32>, %arg1 : tensor<i
// Fold `shape_eq` for equal and constant shapes.
// CHECK-LABEL: @shape_eq_fold_1
-func @shape_eq_fold_1() -> i1 {
+func.func @shape_eq_fold_1() -> i1 {
// CHECK: %[[RESULT:.*]] = arith.constant true
// CHECK: return %[[RESULT]] : i1
%a = shape.const_shape [1, 2, 3] : !shape.shape
@@ -1041,7 +1041,7 @@ func @shape_eq_fold_1() -> i1 {
// Fold `shape_eq` for different but constant shapes of same length.
// CHECK-LABEL: @shape_eq_fold_0
-func @shape_eq_fold_0() -> i1 {
+func.func @shape_eq_fold_0() -> i1 {
// CHECK: %[[RESULT:.*]] = arith.constant false
// CHECK: return %[[RESULT]] : i1
%a = shape.const_shape [1, 2, 3] : tensor<3xindex>
@@ -1055,7 +1055,7 @@ func @shape_eq_fold_0() -> i1 {
// Fold `shape_eq` for different but constant shapes of different length.
// CHECK-LABEL: @shape_eq_fold_0
-func @shape_eq_fold_0() -> i1 {
+func.func @shape_eq_fold_0() -> i1 {
// CHECK: %[[RESULT:.*]] = arith.constant false
// CHECK: return %[[RESULT]] : i1
%a = shape.const_shape [1, 2, 3, 4, 5, 6] : !shape.shape
@@ -1069,7 +1069,7 @@ func @shape_eq_fold_0() -> i1 {
// Do not fold `shape_eq` for non-constant different shapes.
// CHECK-LABEL: @shape_eq_do_not_fold
// CHECK-SAME: (%[[A:.*]]: !shape.shape) -> i1
-func @shape_eq_do_not_fold(%a : !shape.shape) -> i1 {
+func.func @shape_eq_do_not_fold(%a : !shape.shape) -> i1 {
// CHECK: %[[B:.*]] = shape.const_shape [4, 5, 6]
// CHECK: %[[RESULT:.*]] = shape.shape_eq %[[A]], %[[B]] : !shape.shape, !shape.shape
// CHECK: return %[[RESULT]] : i1
@@ -1082,7 +1082,7 @@ func @shape_eq_do_not_fold(%a : !shape.shape) -> i1 {
// Fold `add` for constant sizes.
// CHECK-LABEL: @fold_add_size
-func @fold_add_size() -> !shape.size {
+func.func @fold_add_size() -> !shape.size {
// CHECK: %[[RESULT:.*]] = shape.const_size 5
// CHECK: return %[[RESULT]] : !shape.size
%c2 = shape.const_size 2
@@ -1095,7 +1095,7 @@ func @fold_add_size() -> !shape.size {
// Fold `mul` for constant sizes.
// CHECK-LABEL: @fold_mul_size
-func @fold_mul_size() -> !shape.size {
+func.func @fold_mul_size() -> !shape.size {
// CHECK: %[[RESULT:.*]] = shape.const_size 6
// CHECK: return %[[RESULT]] : !shape.size
%c2 = shape.const_size 2
@@ -1108,7 +1108,7 @@ func @fold_mul_size() -> !shape.size {
// Fold `mul` for constant indices.
// CHECK-LABEL: @fold_mul_index
-func @fold_mul_index() -> index {
+func.func @fold_mul_index() -> index {
// CHECK: %[[RESULT:.*]] = arith.constant 6 : index
// CHECK: return %[[RESULT]] : index
%c2 = arith.constant 2 : index
@@ -1121,7 +1121,7 @@ func @fold_mul_index() -> index {
// Fold `mul` for mixed constants.
// CHECK-LABEL: @fold_mul_mixed
-func @fold_mul_mixed() -> !shape.size {
+func.func @fold_mul_mixed() -> !shape.size {
// CHECK: %[[RESULT:.*]] = shape.const_size 6
// CHECK: return %[[RESULT]] : !shape.size
%c2 = shape.const_size 2
@@ -1134,7 +1134,7 @@ func @fold_mul_mixed() -> !shape.size {
// Fold `div` for constant sizes.
// CHECK-LABEL: @fold_div_size
-func @fold_div_size() -> !shape.size {
+func.func @fold_div_size() -> !shape.size {
// CHECK: %[[RESULT:.*]] = shape.const_size 3
// CHECK: return %[[RESULT]] : !shape.size
%c2 = shape.const_size 10
@@ -1147,7 +1147,7 @@ func @fold_div_size() -> !shape.size {
// Fold `div` for constant indices.
// CHECK-LABEL: @fold_div_index
-func @fold_div_index() -> index {
+func.func @fold_div_index() -> index {
// CHECK: %[[RESULT:.*]] = arith.constant 2 : index
// CHECK: return %[[RESULT]] : index
%c2 = arith.constant 10 : index
@@ -1160,7 +1160,7 @@ func @fold_div_index() -> index {
// Fold `div` for constant indices and lhs is negative.
// CHECK-LABEL: @fold_div_index_neg_lhs
-func @fold_div_index_neg_lhs() -> index {
+func.func @fold_div_index_neg_lhs() -> index {
// CHECK: %[[RESULT:.*]] = arith.constant -3 : index
// CHECK: return %[[RESULT]] : index
%c2 = arith.constant -10 : index
@@ -1173,7 +1173,7 @@ func @fold_div_index_neg_lhs() -> index {
// Fold `div` for constant indices and rhs is negative.
// CHECK-LABEL: @fold_div_index_neg_rhs
-func @fold_div_index_neg_rhs() -> index {
+func.func @fold_div_index_neg_rhs() -> index {
// CHECK: %[[RESULT:.*]] = arith.constant -3 : index
// CHECK: return %[[RESULT]] : index
%c2 = arith.constant 10 : index
@@ -1186,7 +1186,7 @@ func @fold_div_index_neg_rhs() -> index {
// Fold `div` for mixed constants.
// CHECK-LABEL: @fold_div_mixed
-func @fold_div_mixed() -> !shape.size {
+func.func @fold_div_mixed() -> !shape.size {
// CHECK: %[[RESULT:.*]] = shape.const_size 4
// CHECK: return %[[RESULT]] : !shape.size
%c2 = shape.const_size 12
@@ -1199,7 +1199,7 @@ func @fold_div_mixed() -> !shape.size {
// Fold index_cast when already on index.
// CHECK-LABEL: @fold_index_cast_on_index
-func @fold_index_cast_on_index(%arg: index) -> index {
+func.func @fold_index_cast_on_index(%arg: index) -> index {
// CHECK-NOT: size_to_index
%0 = shape.size_to_index %arg : index
return %0 : index
@@ -1209,7 +1209,7 @@ func @fold_index_cast_on_index(%arg: index) -> index {
// Fold to_extent_tensor when already on tensor.
// CHECK-LABEL: @fold_to_extent_tensor_on_tensor
-func @fold_to_extent_tensor_on_tensor(%arg: tensor<?xindex>) -> tensor<?xindex> {
+func.func @fold_to_extent_tensor_on_tensor(%arg: tensor<?xindex>) -> tensor<?xindex> {
// CHECK-NOT: to_extent_tensor
%0 = shape.to_extent_tensor %arg : tensor<?xindex> -> tensor<?xindex>
return %0 : tensor<?xindex>
@@ -1219,7 +1219,7 @@ func @fold_to_extent_tensor_on_tensor(%arg: tensor<?xindex>) -> tensor<?xindex>
// Fold assuming_all with a single input
// CHECK-LABEL: @fold_assuming_all_single_element
-func @fold_assuming_all_single_element(%arg: tensor<?xindex>) {
+func.func @fold_assuming_all_single_element(%arg: tensor<?xindex>) {
// CHECK-NOT: assuming_all
%0 = "test.source"() : () -> (!shape.witness)
%1 = shape.assuming_all %0
@@ -1231,7 +1231,7 @@ func @fold_assuming_all_single_element(%arg: tensor<?xindex>) {
// Verify that tensor.cast folding uses the correct type
// CHECK-LABEL: @fold_tensor.cast_of_const_shape_returned
-func @fold_tensor.cast_of_const_shape_returned(%arg: i1) -> tensor<1xindex> {
+func.func @fold_tensor.cast_of_const_shape_returned(%arg: i1) -> tensor<1xindex> {
// CHECK: shape.const_shape [2] : tensor<1xindex>
// CHECK-NOT: tensor.cast
%0 = shape.const_shape [2] : tensor<1xindex>
@@ -1242,7 +1242,7 @@ func @fold_tensor.cast_of_const_shape_returned(%arg: i1) -> tensor<1xindex> {
// -----
// CHECK-LABEL: @dont_fold_tensor.cast_of_const_shape_returned_dynamic
-func @dont_fold_tensor.cast_of_const_shape_returned_dynamic(%arg: i1) -> tensor<?xindex> {
+func.func @dont_fold_tensor.cast_of_const_shape_returned_dynamic(%arg: i1) -> tensor<?xindex> {
// CHECK: %[[CONST_SHAPE:.*]] = shape.const_shape [2] : tensor<1xindex>
// CHECK: tensor.cast %[[CONST_SHAPE]] : tensor<1xindex> to tensor<?xindex>
%0 = shape.const_shape [2] : tensor<1xindex>
@@ -1253,7 +1253,7 @@ func @dont_fold_tensor.cast_of_const_shape_returned_dynamic(%arg: i1) -> tensor<
// -----
// CHECK-LABEL: @is_broadcastable_on_same_shape
-func @is_broadcastable_on_same_shape(%shape : !shape.shape) -> i1 {
+func.func @is_broadcastable_on_same_shape(%shape : !shape.shape) -> i1 {
// CHECK-NOT: is_broadcastable
// CHECK: %[[RES:.*]] = arith.constant true
// CHECK: return %[[RES]]
@@ -1266,7 +1266,7 @@ func @is_broadcastable_on_same_shape(%shape : !shape.shape) -> i1 {
// CHECK-LABEL: @is_broadcastable_on_duplicate_shapes
// CHECK-SAME: (%[[A:.*]]: !shape.shape, %[[B:.*]]: !shape.shape)
-func @is_broadcastable_on_duplicate_shapes(%a : !shape.shape, %b : !shape.shape)
+func.func @is_broadcastable_on_duplicate_shapes(%a : !shape.shape, %b : !shape.shape)
-> i1 {
// CHECK: %[[RES:.*]] = shape.is_broadcastable %[[A]], %[[B]] :
// CHECK: return %[[RES]]
@@ -1279,7 +1279,7 @@ func @is_broadcastable_on_duplicate_shapes(%a : !shape.shape, %b : !shape.shape)
// CHECK-LABEL: @cstr_broadcastable_on_duplicate_shapes
// CHECK-SAME: (%[[A:.*]]: !shape.shape, %[[B:.*]]: !shape.shape)
-func @cstr_broadcastable_on_duplicate_shapes(%a : !shape.shape,
+func.func @cstr_broadcastable_on_duplicate_shapes(%a : !shape.shape,
%b : !shape.shape) -> !shape.witness {
// CHECK: %[[RES:.*]] = shape.cstr_broadcastable %[[A]], %[[B]] :
// CHECK: return %[[RES]]
@@ -1292,7 +1292,7 @@ func @cstr_broadcastable_on_duplicate_shapes(%a : !shape.shape,
// CHECK-LABEL: @broadcast_on_same_shape
// CHECK-SAME: (%[[SHAPE:.*]]: !shape.shape)
-func @broadcast_on_same_shape(%shape : !shape.shape) -> !shape.shape {
+func.func @broadcast_on_same_shape(%shape : !shape.shape) -> !shape.shape {
// CHECK-NOT: broadcast
// CHECK: return %[[SHAPE]]
%0 = shape.broadcast %shape, %shape, %shape : !shape.shape, !shape.shape,
@@ -1304,7 +1304,7 @@ func @broadcast_on_same_shape(%shape : !shape.shape) -> !shape.shape {
// CHECK-LABEL: @broadcast_on_duplicate_shapes
// CHECK-SAME: (%[[A:.*]]: !shape.shape, %[[B:.*]]: !shape.shape)
-func @broadcast_on_duplicate_shapes(%a : !shape.shape, %b : !shape.shape)
+func.func @broadcast_on_duplicate_shapes(%a : !shape.shape, %b : !shape.shape)
-> !shape.shape {
// CHECK: %[[RES:.*]] = shape.broadcast %[[A]], %[[B]] :
// CHECK: return %[[RES]]
@@ -1317,7 +1317,7 @@ func @broadcast_on_duplicate_shapes(%a : !shape.shape, %b : !shape.shape)
// CHECK-LABEL: @broadcast_on_single_operand
// CHECK-SAME: (%[[A:.*]]: tensor<?xindex>)
-func @broadcast_on_single_operand(%a : tensor<?xindex>) {
+func.func @broadcast_on_single_operand(%a : tensor<?xindex>) {
// CHECK-NOT: broadcast
// CHECK: "use"(%[[A]])
%0 = shape.broadcast %a : tensor<?xindex> -> tensor<?xindex>
@@ -1329,7 +1329,7 @@ func @broadcast_on_single_operand(%a : tensor<?xindex>) {
// CHECK-LABEL: @broadcast_as_tensor_cast
// CHECK-SAME: (%[[A:.*]]: tensor<3xindex>)
-func @broadcast_as_tensor_cast(%a : tensor<3xindex>) -> tensor<?xindex> {
+func.func @broadcast_as_tensor_cast(%a : tensor<3xindex>) -> tensor<?xindex> {
// CHECK: %[[RESULT:.*]] = tensor.cast %[[A]] : tensor<3xindex> to tensor<?xindex>
// CHECK: return %[[RESULT]] : tensor<?xindex>
%0 = shape.broadcast %a : tensor<3xindex> -> tensor<?xindex>
@@ -1340,7 +1340,7 @@ func @broadcast_as_tensor_cast(%a : tensor<3xindex>) -> tensor<?xindex> {
// CHECK-LABEL: @broadcast_as_from_extent_tensor
// CHECK-SAME: (%[[A:.*]]: tensor<?xindex>)
-func @broadcast_as_from_extent_tensor(%a : tensor<?xindex>) -> !shape.shape {
+func.func @broadcast_as_from_extent_tensor(%a : tensor<?xindex>) -> !shape.shape {
// CHECK: %[[RESULT:.*]] = shape.from_extent_tensor %[[A]] : tensor<?xindex>
// CHECK: return %[[RESULT]] : !shape.shape
%0 = shape.broadcast %a : tensor<?xindex> -> !shape.shape
@@ -1351,7 +1351,7 @@ func @broadcast_as_from_extent_tensor(%a : tensor<?xindex>) -> !shape.shape {
// CHECK-LABEL: @cast_extent_tensor
// CHECK-SAME: (%[[ARG:.*]]: tensor<?x?x?xf32>) -> tensor<?xindex>
-func @cast_extent_tensor(%arg : tensor<?x?x?xf32>) -> tensor<?xindex> {
+func.func @cast_extent_tensor(%arg : tensor<?x?x?xf32>) -> tensor<?xindex> {
// CHECK: %[[RESULT:.*]] = shape.shape_of %[[ARG]] : tensor<?x?x?xf32> -> tensor<?xindex>
// CHECK: return %[[RESULT]] : tensor<?xindex>
%0 = shape.shape_of %arg : tensor<?x?x?xf32> -> tensor<3xindex>
@@ -1363,7 +1363,7 @@ func @cast_extent_tensor(%arg : tensor<?x?x?xf32>) -> tensor<?xindex> {
// CHECK-LABEL: @cast_extent_tensor
// CHECK-SAME: (%[[ARG:.*]]: tensor<?x?x?xf32>) -> tensor<3xindex>
-func @cast_extent_tensor(%arg : tensor<?x?x?xf32>) -> tensor<3xindex> {
+func.func @cast_extent_tensor(%arg : tensor<?x?x?xf32>) -> tensor<3xindex> {
// CHECK: %[[RESULT:.*]] = shape.shape_of %[[ARG]] : tensor<?x?x?xf32> -> tensor<3xindex>
// CHECK: return %[[RESULT]] : tensor<3xindex>
%0 = shape.shape_of %arg : tensor<?x?x?xf32> -> tensor<?xindex>
@@ -1374,7 +1374,7 @@ func @cast_extent_tensor(%arg : tensor<?x?x?xf32>) -> tensor<3xindex> {
// -----
// CHECK-LABEL: @cast_extent_tensor
-func @cast_extent_tensor(%arg : tensor<?x?x?x?xf32>) -> tensor<3xindex> {
+func.func @cast_extent_tensor(%arg : tensor<?x?x?x?xf32>) -> tensor<3xindex> {
// CHECK: tensor.cast %{{.*}} : tensor<?xindex> to tensor<3xindex>
%0 = shape.shape_of %arg : tensor<?x?x?x?xf32> -> tensor<?xindex>
%1 = tensor.cast %0 : tensor<?xindex> to tensor<3xindex>
@@ -1384,7 +1384,7 @@ func @cast_extent_tensor(%arg : tensor<?x?x?x?xf32>) -> tensor<3xindex> {
// -----
// CHECK-LABEL: @cast_extent_tensor
-func @cast_extent_tensor(%arg : tensor<*xf32>) -> tensor<3xindex> {
+func.func @cast_extent_tensor(%arg : tensor<*xf32>) -> tensor<3xindex> {
// CHECK: tensor.cast %{{.*}} : tensor<?xindex> to tensor<3xindex>
%0 = shape.shape_of %arg : tensor<*xf32> -> tensor<?xindex>
%1 = tensor.cast %0 : tensor<?xindex> to tensor<3xindex>
@@ -1395,7 +1395,7 @@ func @cast_extent_tensor(%arg : tensor<*xf32>) -> tensor<3xindex> {
// CHECK-LABEL: max_same_arg
// CHECK-SAME: (%[[SHAPE:.*]]: !shape.shape)
-func @max_same_arg(%a: !shape.shape) -> !shape.shape {
+func.func @max_same_arg(%a: !shape.shape) -> !shape.shape {
%1 = shape.max %a, %a : !shape.shape, !shape.shape -> !shape.shape
// CHECK: return %[[SHAPE]]
return %1 : !shape.shape
@@ -1405,7 +1405,7 @@ func @max_same_arg(%a: !shape.shape) -> !shape.shape {
// CHECK-LABEL: min_same_arg
// CHECK-SAME: (%[[SHAPE:.*]]: !shape.shape)
-func @min_same_arg(%a: !shape.shape) -> !shape.shape {
+func.func @min_same_arg(%a: !shape.shape) -> !shape.shape {
%1 = shape.min %a, %a : !shape.shape, !shape.shape -> !shape.shape
// CHECK: return %[[SHAPE]]
return %1 : !shape.shape
@@ -1413,7 +1413,7 @@ func @min_same_arg(%a: !shape.shape) -> !shape.shape {
// -----
// CHECK-LABEL: @cstr_broadcastable_folding
-func @cstr_broadcastable_folding(%arg : tensor<?x4xf32>) {
+func.func @cstr_broadcastable_folding(%arg : tensor<?x4xf32>) {
// CHECK: const_witness true
%0 = shape.shape_of %arg : tensor<?x4xf32> -> tensor<2xindex>
%1 = shape.const_shape [4] : tensor<1xindex>
@@ -1425,7 +1425,7 @@ func @cstr_broadcastable_folding(%arg : tensor<?x4xf32>) {
// CHECK-LABEL: @cast_extent_tensor_operands
// CHECK-SAME: (%[[ARG0:.*]]: tensor<?xindex>, %[[ARG1:.*]]: tensor<3xindex>)
-func @cast_extent_tensor_operands(%arg0 : tensor<?xindex>,
+func.func @cast_extent_tensor_operands(%arg0 : tensor<?xindex>,
%arg1 : tensor<3xindex>) -> (!shape.witness, tensor<?xindex>) {
// CHECK: %[[CAST_ARG0:.*]] = tensor.cast %[[ARG0]] : tensor<?xindex> to tensor<3xindex>
// CHECK: %[[WIT:.*]] = shape.cstr_broadcastable %[[CAST_ARG0]], %[[ARG1]] : tensor<3xindex>, tensor<3xindex>
@@ -1444,7 +1444,7 @@ func @cast_extent_tensor_operands(%arg0 : tensor<?xindex>,
// CHECK-LABEL: @concretize_broadcast_result_type
// CHECK-SAME: (%[[ARG0:.*]]: tensor<2xindex>, %[[ARG1:.*]]: tensor<3xindex>)
-func @concretize_broadcast_result_type(%arg0 : tensor<2xindex>,
+func.func @concretize_broadcast_result_type(%arg0 : tensor<2xindex>,
%arg1 : tensor<3xindex>) -> tensor<?xindex> {
// CHECK: %[[CONCR:.*]] = shape.broadcast %[[ARG0]], %[[ARG1]] : tensor<2xindex>, tensor<3xindex> -> tensor<3xindex>
// CHECK: %[[RES:.*]] = tensor.cast %[[CONCR]] : tensor<3xindex> to tensor<?xindex>
@@ -1458,7 +1458,7 @@ func @concretize_broadcast_result_type(%arg0 : tensor<2xindex>,
// CHECK-LABEL: func @extract_shapeof
// CHECK-SAME: %[[ARG0:.*]]: tensor<?x?xf64>
-func @extract_shapeof(%arg0 : tensor<?x?xf64>) -> index {
+func.func @extract_shapeof(%arg0 : tensor<?x?xf64>) -> index {
%c1 = arith.constant 1 : index
// CHECK: %[[C1:.*]] = arith.constant 1
%shape = shape.shape_of %arg0 : tensor<?x?xf64> -> tensor<2xindex>
diff --git a/mlir/test/Dialect/Shape/invalid.mlir b/mlir/test/Dialect/Shape/invalid.mlir
index 37a6e87d67bde..3b4059b1d6026 100644
--- a/mlir/test/Dialect/Shape/invalid.mlir
+++ b/mlir/test/Dialect/Shape/invalid.mlir
@@ -1,6 +1,6 @@
// RUN: mlir-opt %s -split-input-file -verify-diagnostics
-func @reduce_op_args_num_mismatch(%shape : !shape.shape, %init : !shape.size) {
+func.func @reduce_op_args_num_mismatch(%shape : !shape.shape, %init : !shape.size) {
// expected-error @+1 {{ReduceOp body is expected to have 3 arguments}}
%num_elements = shape.reduce(%shape, %init) : !shape.shape -> !shape.size {
^bb0(%index: index, %dim: !shape.size):
@@ -11,7 +11,7 @@ func @reduce_op_args_num_mismatch(%shape : !shape.shape, %init : !shape.size) {
// -----
-func @reduce_op_arg0_wrong_type(%shape : !shape.shape, %init : !shape.size) {
+func.func @reduce_op_arg0_wrong_type(%shape : !shape.shape, %init : !shape.size) {
// expected-error @+1 {{argument 0 of ReduceOp body is expected to be of IndexType}}
%num_elements = shape.reduce(%shape, %init) : !shape.shape -> !shape.size {
^bb0(%index: f32, %dim: !shape.size, %acc: !shape.size):
@@ -24,7 +24,7 @@ func @reduce_op_arg0_wrong_type(%shape : !shape.shape, %init : !shape.size) {
// -----
-func @reduce_op_arg1_wrong_type(%shape : !shape.shape, %init : !shape.size) {
+func.func @reduce_op_arg1_wrong_type(%shape : !shape.shape, %init : !shape.size) {
// expected-error @+1 {{argument 1 of ReduceOp body is expected to be of SizeType if the ReduceOp operates on a ShapeType}}
%num_elements = shape.reduce(%shape, %init) : !shape.shape -> !shape.size {
^bb0(%index: index, %dim: f32, %lci: !shape.size):
@@ -35,7 +35,7 @@ func @reduce_op_arg1_wrong_type(%shape : !shape.shape, %init : !shape.size) {
// -----
-func @reduce_op_arg1_wrong_type(%shape : tensor<?xindex>, %init : index) {
+func.func @reduce_op_arg1_wrong_type(%shape : tensor<?xindex>, %init : index) {
// expected-error @+1 {{argument 1 of ReduceOp body is expected to be of IndexType if the ReduceOp operates on an extent tensor}}
%num_elements = shape.reduce(%shape, %init) : tensor<?xindex> -> index {
^bb0(%index: index, %dim: f32, %lci: index):
@@ -46,7 +46,7 @@ func @reduce_op_arg1_wrong_type(%shape : tensor<?xindex>, %init : index) {
// -----
-func @reduce_op_init_type_mismatch(%shape : !shape.shape, %init : f32) {
+func.func @reduce_op_init_type_mismatch(%shape : !shape.shape, %init : f32) {
// expected-error @+1 {{type mismatch between argument 2 of ReduceOp body and initial value 0}}
%num_elements = shape.reduce(%shape, %init) : !shape.shape -> f32 {
^bb0(%index: index, %dim: !shape.size, %lci: !shape.size):
@@ -57,7 +57,7 @@ func @reduce_op_init_type_mismatch(%shape : !shape.shape, %init : f32) {
// -----
-func @yield_op_args_num_mismatch(%shape : !shape.shape, %init : !shape.size) {
+func.func @yield_op_args_num_mismatch(%shape : !shape.shape, %init : !shape.size) {
// expected-error @+3 {{number of operands does not match number of results of its parent}}
%num_elements = shape.reduce(%shape, %init) : !shape.shape -> !shape.size {
^bb0(%index: index, %dim: !shape.size, %lci: !shape.size):
@@ -68,7 +68,7 @@ func @yield_op_args_num_mismatch(%shape : !shape.shape, %init : !shape.size) {
// -----
-func @yield_op_type_mismatch(%shape : !shape.shape, %init : !shape.size) {
+func.func @yield_op_type_mismatch(%shape : !shape.shape, %init : !shape.size) {
// expected-error @+4 {{types mismatch between yield op and its parent}}
%num_elements = shape.reduce(%shape, %init) : !shape.shape -> !shape.size {
^bb0(%index: index, %dim: !shape.size, %lci: !shape.size):
@@ -80,7 +80,7 @@ func @yield_op_type_mismatch(%shape : !shape.shape, %init : !shape.size) {
// -----
-func @assuming_all_op_too_few_operands() {
+func.func @assuming_all_op_too_few_operands() {
// expected-error @+1 {{no operands specified}}
%w0 = shape.assuming_all
return
@@ -88,7 +88,7 @@ func @assuming_all_op_too_few_operands() {
// -----
-func @shape_of(%value_arg : !shape.value_shape,
+func.func @shape_of(%value_arg : !shape.value_shape,
%shaped_arg : tensor<?x3x4xf32>) {
// expected-error @+1 {{if at least one of the operands can hold error values then the result must be of type `shape` to propagate them}}
%0 = shape.shape_of %value_arg : !shape.value_shape -> tensor<?xindex>
@@ -97,7 +97,7 @@ func @shape_of(%value_arg : !shape.value_shape,
// -----
-func @shape_of_incompatible_return_types(%value_arg : tensor<1x2xindex>) {
+func.func @shape_of_incompatible_return_types(%value_arg : tensor<1x2xindex>) {
// expected-error @+1 {{'shape.shape_of' op inferred type(s) 'tensor<2xindex>' are incompatible with return type(s) of operation 'tensor<3xindex>'}}
%0 = shape.shape_of %value_arg : tensor<1x2xindex> -> tensor<3xindex>
return
@@ -105,7 +105,7 @@ func @shape_of_incompatible_return_types(%value_arg : tensor<1x2xindex>) {
// -----
-func @rank(%arg : !shape.shape) {
+func.func @rank(%arg : !shape.shape) {
// expected-error @+1 {{if at least one of the operands can hold error values then the result must be of type `size` to propagate them}}
%0 = shape.rank %arg : !shape.shape -> index
return
@@ -113,7 +113,7 @@ func @rank(%arg : !shape.shape) {
// -----
-func @get_extent(%arg : tensor<?xindex>) -> index {
+func.func @get_extent(%arg : tensor<?xindex>) -> index {
%c0 = shape.const_size 0
// expected-error @+1 {{if at least one of the operands can hold error values then the result must be of type `size` to propagate them}}
%result = shape.get_extent %arg, %c0 : tensor<?xindex>, !shape.size -> index
@@ -122,7 +122,7 @@ func @get_extent(%arg : tensor<?xindex>) -> index {
// -----
-func @mul(%lhs : !shape.size, %rhs : index) -> index {
+func.func @mul(%lhs : !shape.size, %rhs : index) -> index {
// expected-error @+1 {{if at least one of the operands can hold error values then the result must be of type `size` to propagate them}}
%result = shape.mul %lhs, %rhs : !shape.size, index -> index
return %result : index
@@ -130,7 +130,7 @@ func @mul(%lhs : !shape.size, %rhs : index) -> index {
// -----
-func @num_elements(%arg : !shape.shape) -> index {
+func.func @num_elements(%arg : !shape.shape) -> index {
// expected-error @+1 {{if at least one of the operands can hold error values then the result must be of type `size` to propagate them}}
%result = shape.num_elements %arg : !shape.shape -> index
return %result : index
@@ -138,7 +138,7 @@ func @num_elements(%arg : !shape.shape) -> index {
// -----
-func @add(%lhs : !shape.size, %rhs : index) -> index {
+func.func @add(%lhs : !shape.size, %rhs : index) -> index {
// expected-error @+1 {{if at least one of the operands can hold error values then the result must be of type `size` to propagate them}}
%result = shape.add %lhs, %rhs : !shape.size, index -> index
return %result : index
@@ -146,7 +146,7 @@ func @add(%lhs : !shape.size, %rhs : index) -> index {
// -----
-func @broadcast(%arg0 : !shape.shape, %arg1 : !shape.shape) -> tensor<?xindex> {
+func.func @broadcast(%arg0 : !shape.shape, %arg1 : !shape.shape) -> tensor<?xindex> {
// expected-error @+1 {{if at least one of the operands can hold error values then the result must be of type `shape` to propagate them}}
%result = shape.broadcast %arg0, %arg1
: !shape.shape, !shape.shape -> tensor<?xindex>
@@ -156,7 +156,7 @@ func @broadcast(%arg0 : !shape.shape, %arg1 : !shape.shape) -> tensor<?xindex> {
// -----
-func @broadcast(%arg0 : !shape.shape, %arg1 : tensor<?xindex>) -> tensor<?xindex> {
+func.func @broadcast(%arg0 : !shape.shape, %arg1 : tensor<?xindex>) -> tensor<?xindex> {
// expected-error @+1 {{if at least one of the operands can hold error values then the result must be of type `shape` to propagate them}}
%result = shape.broadcast %arg0, %arg1
: !shape.shape, tensor<?xindex> -> tensor<?xindex>
@@ -231,7 +231,7 @@ shape.function_library @shape_lib {
// expected-error @+1 {{required to be shape function library}}
module attributes {shape.lib = @fn} {
-func @fn(%arg: !shape.value_shape) -> !shape.shape {
+func.func @fn(%arg: !shape.value_shape) -> !shape.shape {
%0 = shape.shape_of %arg : !shape.value_shape -> !shape.shape
return %0 : !shape.shape
}
@@ -242,7 +242,7 @@ func @fn(%arg: !shape.value_shape) -> !shape.shape {
// Test that op referred to by shape lib is a shape function library.
-func @fn(%arg: !shape.value_shape) -> !shape.shape {
+func.func @fn(%arg: !shape.value_shape) -> !shape.shape {
// expected-error @+1 {{SymbolTable}}
%0 = shape.shape_of %arg {shape.lib = @fn} : !shape.value_shape -> !shape.shape
return %0 : !shape.shape
@@ -257,7 +257,7 @@ module attributes {shape.lib = @fn} { }
// -----
-func @fn(%arg: !shape.shape) -> !shape.witness {
+func.func @fn(%arg: !shape.shape) -> !shape.witness {
// expected-error @+1 {{required at least 2 input shapes}}
%0 = shape.cstr_broadcastable %arg : !shape.shape
return %0 : !shape.witness
@@ -267,7 +267,7 @@ func @fn(%arg: !shape.shape) -> !shape.witness {
// Test that type inference flags the wrong return type.
-func @const_shape() {
+func.func @const_shape() {
// expected-error @+1 {{'tensor<3xindex>' are incompatible with return type(s) of operation 'tensor<2xindex>'}}
%0 = shape.const_shape [4, 5, 6] : tensor<2xindex>
return
diff --git a/mlir/test/Dialect/Shape/ops.mlir b/mlir/test/Dialect/Shape/ops.mlir
index 8c21878c1ef85..60ee2f4541a7c 100644
--- a/mlir/test/Dialect/Shape/ops.mlir
+++ b/mlir/test/Dialect/Shape/ops.mlir
@@ -4,7 +4,7 @@
// RUN: mlir-opt -mlir-print-op-generic %s | mlir-opt | FileCheck %s
// CHECK-LABEL: shape_num_elements
-func @shape_num_elements(%shape : !shape.shape) -> !shape.size {
+func.func @shape_num_elements(%shape : !shape.shape) -> !shape.size {
%init = shape.const_size 1
%num_elements = shape.reduce(%shape, %init) : !shape.shape -> !shape.size {
^bb0(%index : index, %extent : !shape.size, %acc : !shape.size):
@@ -16,7 +16,7 @@ func @shape_num_elements(%shape : !shape.shape) -> !shape.size {
}
// CHECK-LABEL: extent_tensor_num_elements
-func @extent_tensor_num_elements(%shape : tensor<?xindex>) -> index {
+func.func @extent_tensor_num_elements(%shape : tensor<?xindex>) -> index {
%init = arith.constant 1 : index
%num_elements = shape.reduce(%shape, %init) : tensor<?xindex> -> index {
^bb0(%index : index, %extent : index, %acc : index):
@@ -26,27 +26,27 @@ func @extent_tensor_num_elements(%shape : tensor<?xindex>) -> index {
return %num_elements : index
}
-func @test_shape_num_elements_unknown() {
+func.func @test_shape_num_elements_unknown() {
%0 = "shape.unknown_shape"() : () -> !shape.shape
%1 = call @shape_num_elements(%0) : (!shape.shape) -> (!shape.size)
%2 = "shape.print"(%1) : (!shape.size) -> !shape.size
return
}
-func @const_shape() {
+func.func @const_shape() {
%0 = shape.const_shape [1, 2, 3] : !shape.shape
%2 = shape.const_shape [4, 5, 6] : tensor<3xindex>
return
}
-func @test_shape_num_elements_fixed() {
+func.func @test_shape_num_elements_fixed() {
%0 = shape.const_shape [1, 57, 92] : !shape.shape
%1 = call @shape_num_elements(%0) : (!shape.shape) -> (!shape.size)
%3 = "shape.print"(%1) : (!shape.size) -> !shape.size
return
}
-func @test_broadcast_fixed() {
+func.func @test_broadcast_fixed() {
%0 = shape.const_shape [10, 1, 57, 92] : !shape.shape
%1 = shape.const_shape [4, 57, 92] : !shape.shape
%2 = shape.broadcast %0, %1 : !shape.shape, !shape.shape -> !shape.shape
@@ -54,14 +54,14 @@ func @test_broadcast_fixed() {
return
}
-func @test_broadcast_extents() -> tensor<4xindex> {
+func.func @test_broadcast_extents() -> tensor<4xindex> {
%0 = shape.const_shape [10, 1, 57, 92] : tensor<4xindex>
%1 = shape.const_shape [4, 57, 92] : tensor<3xindex>
%2 = shape.broadcast %0, %1 : tensor<4xindex>, tensor<3xindex> -> tensor<4xindex>
return %2 : tensor<4xindex>
}
-func @test_shape_any_fixed() {
+func.func @test_shape_any_fixed() {
%0 = shape.const_shape [4, 57, 92] : !shape.shape
%1 = shape.const_shape [4, 57, 92] : !shape.shape
%2 = "shape.meet"(%0, %1) : (!shape.shape, !shape.shape) -> !shape.shape
@@ -69,7 +69,7 @@ func @test_shape_any_fixed() {
return
}
-func @test_shape_any_unknown() {
+func.func @test_shape_any_unknown() {
%0 = shape.const_shape [4, -1, 92] : !shape.shape
%1 = shape.const_shape [-1, 57, 92] : !shape.shape
%2 = "shape.meet"(%0, %1) : (!shape.shape, !shape.shape) -> !shape.shape
@@ -77,7 +77,7 @@ func @test_shape_any_unknown() {
return
}
-func @test_shape_any_fixed_mismatch() {
+func.func @test_shape_any_fixed_mismatch() {
%0 = shape.const_shape [4, 57, 92] : !shape.shape
%1 = shape.const_shape [2, 57, 92] : !shape.shape
%2 = "shape.meet"(%0, %1) : (!shape.shape, !shape.shape) -> !shape.shape
@@ -85,19 +85,19 @@ func @test_shape_any_fixed_mismatch() {
return
}
-func @test_parse_const_shape() {
+func.func @test_parse_const_shape() {
%0 = shape.const_shape [] : !shape.shape
%1 = shape.const_shape [1, 2, 3] : !shape.shape
%2 = shape.const_shape [1, 2, 3] : tensor<3xindex>
return
}
-func @test_shape_of(%arg0: tensor<?xf32>) -> tensor<?xindex> {
+func.func @test_shape_of(%arg0: tensor<?xf32>) -> tensor<?xindex> {
%0 = shape.shape_of %arg0 : tensor<?xf32> -> tensor<?xindex>
return %0 : tensor<?xindex>
}
-func @test_constraints() {
+func.func @test_constraints() {
%0 = shape.const_shape [] : !shape.shape
%1 = shape.const_shape [1, 2, 3] : !shape.shape
%true = arith.constant true
@@ -114,19 +114,19 @@ func @test_constraints() {
return
}
-func @eq_on_extent_tensors(%lhs : tensor<?xindex>,
+func.func @eq_on_extent_tensors(%lhs : tensor<?xindex>,
%rhs : tensor<?xindex>) {
%w0 = shape.cstr_eq %lhs, %rhs : tensor<?xindex>, tensor<?xindex>
return
}
-func @broadcastable_on_extent_tensors(%lhs : tensor<?xindex>,
+func.func @broadcastable_on_extent_tensors(%lhs : tensor<?xindex>,
%rhs : tensor<?xindex>) {
%w0 = shape.cstr_broadcastable %lhs, %rhs : tensor<?xindex>, tensor<?xindex>
return
}
-func @mul(%size_arg : !shape.size, %index_arg : index) {
+func.func @mul(%size_arg : !shape.size, %index_arg : index) {
%size_prod = shape.mul %size_arg, %size_arg
: !shape.size, !shape.size -> !shape.size
%index_prod = shape.mul %index_arg, %index_arg : index, index -> index
@@ -135,7 +135,7 @@ func @mul(%size_arg : !shape.size, %index_arg : index) {
return
}
-func @div(%size_arg : !shape.size, %index_arg : index) {
+func.func @div(%size_arg : !shape.size, %index_arg : index) {
%size_div = shape.div %size_arg, %size_arg
: !shape.size, !shape.size -> !shape.size
%index_div = shape.div %index_arg, %index_arg : index, index -> index
@@ -144,7 +144,7 @@ func @div(%size_arg : !shape.size, %index_arg : index) {
return
}
-func @add(%size_arg : !shape.size, %index_arg : index) {
+func.func @add(%size_arg : !shape.size, %index_arg : index) {
%size_sum = shape.add %size_arg, %size_arg
: !shape.size, !shape.size -> !shape.size
%index_sum = shape.add %index_arg, %index_arg : index, index -> index
@@ -153,7 +153,7 @@ func @add(%size_arg : !shape.size, %index_arg : index) {
return
}
-func @const_size() {
+func.func @const_size() {
// CHECK: %c1 = shape.const_size 1
// CHECK: %c2 = shape.const_size 2
// CHECK: %c2_0 = shape.const_size 2
@@ -163,66 +163,66 @@ func @const_size() {
return
}
-func @test_to_extent_tensor(%arg: !shape.shape) -> tensor<3xindex> {
+func.func @test_to_extent_tensor(%arg: !shape.shape) -> tensor<3xindex> {
%0 = shape.to_extent_tensor %arg : !shape.shape -> tensor<3xindex>
return %0 : tensor<3xindex>
}
-func @test_identity_to_extent_tensor(%arg: tensor<3xindex>) -> tensor<3xindex> {
+func.func @test_identity_to_extent_tensor(%arg: tensor<3xindex>) -> tensor<3xindex> {
%0 = shape.to_extent_tensor %arg : tensor<3xindex> -> tensor<3xindex>
return %0 : tensor<3xindex>
}
-func @test_from_extent_tensor(%arg: tensor<?xindex>) -> !shape.shape {
+func.func @test_from_extent_tensor(%arg: tensor<?xindex>) -> !shape.shape {
%0 = shape.from_extent_tensor %arg : tensor<?xindex>
return %0 : !shape.shape
}
-func @rank(%shape : !shape.shape) -> !shape.size {
+func.func @rank(%shape : !shape.shape) -> !shape.size {
%rank = shape.rank %shape : !shape.shape -> !shape.size
return %rank : !shape.size
}
-func @rank_on_extent_tensor(%shape : tensor<?xindex>) -> index {
+func.func @rank_on_extent_tensor(%shape : tensor<?xindex>) -> index {
%rank = shape.rank %shape : tensor<?xindex> -> index
return %rank : index
}
-func @shape_eq_on_shapes(%a : !shape.shape, %b : !shape.shape) -> i1 {
+func.func @shape_eq_on_shapes(%a : !shape.shape, %b : !shape.shape) -> i1 {
%result = shape.shape_eq %a, %b : !shape.shape, !shape.shape
return %result : i1
}
-func @shape_eq_on_tensors(%a : tensor<?xindex>, %b : tensor<?xindex>) -> i1 {
+func.func @shape_eq_on_tensors(%a : tensor<?xindex>, %b : tensor<?xindex>) -> i1 {
%result = shape.shape_eq %a, %b : tensor<?xindex>, tensor<?xindex>
return %result : i1
}
-func @shape_eq_on_mixed(%a : tensor<?xindex>, %b : !shape.shape) -> i1 {
+func.func @shape_eq_on_mixed(%a : tensor<?xindex>, %b : !shape.shape) -> i1 {
%result = shape.shape_eq %a, %b : tensor<?xindex>, !shape.shape
return %result : i1
}
-func @get_extent_on_shape(%arg : !shape.shape) -> !shape.size {
+func.func @get_extent_on_shape(%arg : !shape.shape) -> !shape.size {
%c0 = shape.const_size 0
%result = shape.get_extent %arg, %c0 :
!shape.shape, !shape.size -> !shape.size
return %result : !shape.size
}
-func @get_extent_on_extent_tensor(%arg : tensor<?xindex>) -> index {
+func.func @get_extent_on_extent_tensor(%arg : tensor<?xindex>) -> index {
%c0 = arith.constant 0 : index
%result = shape.get_extent %arg, %c0 : tensor<?xindex>, index -> index
return %result : index
}
-func @get_extent_on_mixed_operands(%arg : tensor<?xindex>) -> !shape.size {
+func.func @get_extent_on_mixed_operands(%arg : tensor<?xindex>) -> !shape.size {
%c0 = shape.const_size 0
%result = shape.get_extent %arg, %c0 : tensor<?xindex>, !shape.size -> !shape.size
return %result : !shape.size
}
-func @any() {
+func.func @any() {
%0 = shape.const_shape [1, 2, 3] : !shape.shape
%1 = shape.const_shape [4, 5, 6] : !shape.shape
%2 = "shape.any"(%0, %1) : (!shape.shape, !shape.shape) -> !shape.shape
@@ -232,39 +232,39 @@ func @any() {
return
}
-func @num_elements_extent_tensor(%arg : tensor<?xindex>) -> index {
+func.func @num_elements_extent_tensor(%arg : tensor<?xindex>) -> index {
%result = shape.num_elements %arg : tensor<?xindex> -> index
return %result : index
}
-func @num_elements_shape(%arg : !shape.shape) -> !shape.size {
+func.func @num_elements_shape(%arg : !shape.shape) -> !shape.size {
%result = shape.num_elements %arg : !shape.shape -> !shape.size
return %result : !shape.size
}
// Testing invoking shape function from another. shape_equal_shapes is merely
// a trivial helper function to invoke elsewhere.
-func @shape_equal_shapes(%a : !shape.value_shape, %b : !shape.value_shape) -> !shape.shape {
+func.func @shape_equal_shapes(%a : !shape.value_shape, %b : !shape.value_shape) -> !shape.shape {
%0 = shape.shape_of %a : !shape.value_shape -> !shape.shape
%1 = shape.shape_of %b : !shape.value_shape -> !shape.shape
%2 = "shape.meet"(%0, %1) : (!shape.shape, !shape.shape) -> !shape.shape
return %2 : !shape.shape
}
-func @shape_with_shape(%a : !shape.value_shape, %b : !shape.value_shape) -> !shape.shape {
+func.func @shape_with_shape(%a : !shape.value_shape, %b : !shape.value_shape) -> !shape.shape {
%0 = shape.shape_of %a : !shape.value_shape -> !shape.shape
%1 = shape.with_shape %b, %0 : !shape.value_shape, !shape.shape
%2 = call @shape_equal_shapes(%a, %1) : (!shape.value_shape, !shape.value_shape) -> !shape.shape
return %2 : !shape.shape
}
-func @any_on_shape(%a : !shape.shape, %b : !shape.shape, %c : !shape.shape)
+func.func @any_on_shape(%a : !shape.shape, %b : !shape.shape, %c : !shape.shape)
-> !shape.shape {
%result = shape.any %a, %b, %c
: !shape.shape, !shape.shape, !shape.shape -> !shape.shape
return %result : !shape.shape
}
-func @any_on_mixed(%a : tensor<?xindex>,
+func.func @any_on_mixed(%a : tensor<?xindex>,
%b : tensor<?xindex>,
%c : !shape.shape) -> !shape.shape {
%result = shape.any %a, %b, %c
@@ -272,7 +272,7 @@ func @any_on_mixed(%a : tensor<?xindex>,
return %result : !shape.shape
}
-func @any_on_extent_tensors(%a : tensor<?xindex>,
+func.func @any_on_extent_tensors(%a : tensor<?xindex>,
%b : tensor<?xindex>,
%c : tensor<?xindex>) -> tensor<?xindex> {
%result = shape.any %a, %b, %c
@@ -280,21 +280,21 @@ func @any_on_extent_tensors(%a : tensor<?xindex>,
return %result : tensor<?xindex>
}
-func @is_broadcastable_on_extent_tensors(%a : tensor<?xindex>,
+func.func @is_broadcastable_on_extent_tensors(%a : tensor<?xindex>,
%b : tensor<?xindex>) -> i1 {
%result = shape.is_broadcastable %a, %b
: tensor<?xindex>, tensor<?xindex>
return %result : i1
}
-func @is_broadcastable_on_shapes(%a : !shape.shape,
+func.func @is_broadcastable_on_shapes(%a : !shape.shape,
%b : !shape.shape) -> i1 {
%result = shape.is_broadcastable %a, %b
: !shape.shape, !shape.shape
return %result : i1
}
-func @shape_upper_bounded_by_constant(%a: !shape.shape) -> !shape.shape {
+func.func @shape_upper_bounded_by_constant(%a: !shape.shape) -> !shape.shape {
%0 = shape.const_shape [4, 57, 92] : !shape.shape
%1 = shape.max %a, %0 : !shape.shape, !shape.shape -> !shape.shape
%2 = shape.meet %0, %1, error="exceeded element-wise upper bound" :
@@ -302,7 +302,7 @@ func @shape_upper_bounded_by_constant(%a: !shape.shape) -> !shape.shape {
return %2 : !shape.shape
}
-func @shape_lower_bounded_by_constant(%a: !shape.shape) -> !shape.shape {
+func.func @shape_lower_bounded_by_constant(%a: !shape.shape) -> !shape.shape {
%0 = shape.const_shape [4, 57, 92] : !shape.shape
%1 = shape.min %a, %0 : !shape.shape, !shape.shape -> !shape.shape
%2 = shape.meet %0, %1, error="lower bound element-wise exceeded" :
@@ -310,7 +310,7 @@ func @shape_lower_bounded_by_constant(%a: !shape.shape) -> !shape.shape {
return %2 : !shape.shape
}
-func @size_upper_bounded_by_constant(%a: !shape.size) -> !shape.size {
+func.func @size_upper_bounded_by_constant(%a: !shape.size) -> !shape.size {
%0 = shape.const_size 5
%1 = shape.max %a, %0 : !shape.size, !shape.size -> !shape.size
%2 = shape.meet %0, %1, error="exceeded element-wise upper bound" :
@@ -318,7 +318,7 @@ func @size_upper_bounded_by_constant(%a: !shape.size) -> !shape.size {
return %2 : !shape.size
}
-func @size_lower_bounded_by_constant(%a: !shape.size) -> !shape.size {
+func.func @size_lower_bounded_by_constant(%a: !shape.size) -> !shape.size {
%0 = shape.const_size 9
%1 = shape.min %a, %0 : !shape.size, !shape.size -> !shape.size
%2 = shape.meet %0, %1, error="lower bound element-wise exceeded" :
diff --git a/mlir/test/Dialect/Shape/remove-shape-constraints.mlir b/mlir/test/Dialect/Shape/remove-shape-constraints.mlir
index e52cd36107efc..425c69fec26ae 100644
--- a/mlir/test/Dialect/Shape/remove-shape-constraints.mlir
+++ b/mlir/test/Dialect/Shape/remove-shape-constraints.mlir
@@ -5,7 +5,7 @@
// Check that cstr_broadcastable is removed.
//
// CHECK-BOTH: func @f
-func @f(%arg0 : !shape.shape, %arg1 : !shape.shape) -> index {
+func.func @f(%arg0 : !shape.shape, %arg1 : !shape.shape) -> index {
// REPLACE-NEXT: %[[WITNESS:.+]] = shape.const_witness true
// REPLACE-NOT: shape.cstr_eq
// REPLACE: shape.assuming %[[WITNESS]]
@@ -23,7 +23,7 @@ func @f(%arg0 : !shape.shape, %arg1 : !shape.shape) -> index {
// Check that cstr_eq is removed.
//
// CHECK-BOTH: func @f
-func @f(%arg0 : !shape.shape, %arg1 : !shape.shape) -> index {
+func.func @f(%arg0 : !shape.shape, %arg1 : !shape.shape) -> index {
// REPLACE-NEXT: %[[WITNESS:.+]] = shape.const_witness true
// REPLACE-NOT: shape.cstr_eq
// REPLACE: shape.assuming %[[WITNESS]]
@@ -42,7 +42,7 @@ func @f(%arg0 : !shape.shape, %arg1 : !shape.shape) -> index {
// should be removed still.
//
// CHECK-BOTH: func @f
-func @f(%arg0 : !shape.shape, %arg1 : !shape.shape) -> index {
+func.func @f(%arg0 : !shape.shape, %arg1 : !shape.shape) -> index {
// CANON-NEXT: test.source
// CANON-NEXT: return
%0 = shape.cstr_broadcastable %arg0, %arg1 : !shape.shape, !shape.shape
diff --git a/mlir/test/Dialect/Shape/shape-to-shape.mlir b/mlir/test/Dialect/Shape/shape-to-shape.mlir
index 4bc58af5f0126..4edf8bdefe084 100644
--- a/mlir/test/Dialect/Shape/shape-to-shape.mlir
+++ b/mlir/test/Dialect/Shape/shape-to-shape.mlir
@@ -2,7 +2,7 @@
// CHECK-LABEL: func @num_elements_to_reduce
// CHECK-SAME: ([[ARG:%.*]]: !shape.shape) -> !shape.size
-func @num_elements_to_reduce(%shape : !shape.shape) -> !shape.size {
+func.func @num_elements_to_reduce(%shape : !shape.shape) -> !shape.size {
%num_elements = shape.num_elements %shape : !shape.shape -> !shape.size
return %num_elements : !shape.size
}
@@ -18,7 +18,7 @@ func @num_elements_to_reduce(%shape : !shape.shape) -> !shape.size {
// CHECK-LABEL: func @num_elements_to_reduce_on_index
// CHECK-SAME: ([[ARG:%.*]]: tensor<?xindex>) -> index
-func @num_elements_to_reduce_on_index(%shape : tensor<?xindex>) -> index {
+func.func @num_elements_to_reduce_on_index(%shape : tensor<?xindex>) -> index {
%num_elements = shape.num_elements %shape : tensor<?xindex> -> index
return %num_elements : index
}