[Mlir-commits] [mlir] 8ae83bb - [mlir][NFC] Update textual references of `func` to `func.func` in ODS documentation
River Riddle
llvmlistbot at llvm.org
Wed Apr 20 22:24:17 PDT 2022
Author: River Riddle
Date: 2022-04-20T22:17:26-07:00
New Revision: 8ae83bb8be3d07855fec425017936a8ba4357a32
URL: https://github.com/llvm/llvm-project/commit/8ae83bb8be3d07855fec425017936a8ba4357a32
DIFF: https://github.com/llvm/llvm-project/commit/8ae83bb8be3d07855fec425017936a8ba4357a32.diff
LOG: [mlir][NFC] Update textual references of `func` to `func.func` in ODS documentation
The special-case parsing of `func` operations is being removed, so textual examples must spell out the full `func.func` operation name.
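For context, the change is purely textual: every documentation example that used the bare `func` keyword now uses the fully qualified `func.func` form. A minimal before/after sketch (the `@example` function below is hypothetical, not taken from the patch):

```mlir
// Before: relied on the special-case parser rule for `func`.
func @example(%arg0: i32) -> i32 {
  return %arg0 : i32
}

// After: the fully qualified `func.func` spelling, which the generic
// parser keeps accepting once the special case is removed.
func.func @example(%arg0: i32) -> i32 {
  return %arg0 : i32
}
```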
Added:
Modified:
mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
mlir/include/mlir/Dialect/Affine/Passes.td
mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
mlir/include/mlir/Dialect/ControlFlow/IR/ControlFlowOps.td
mlir/include/mlir/Dialect/GPU/GPUOps.td
mlir/include/mlir/Dialect/MemRef/Transforms/Passes.td
mlir/include/mlir/Dialect/SCF/SCFOps.td
mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td
mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td
mlir/include/mlir/IR/BuiltinAttributes.td
mlir/include/mlir/IR/BuiltinOps.td
mlir/include/mlir/Transforms/Passes.td
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
index a44f13da4f0fb..2bb19fa8e1182 100644
--- a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
+++ b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
@@ -163,7 +163,7 @@ def AffineForOp : Affine_Op<"for",
```mlir
#map57 = affine_map<(d0)[s0] -> (s0 - d0 - 1)>
- func @simple_example(%A: memref<?x?xf32>, %B: memref<?x?xf32>) {
+ func.func @simple_example(%A: memref<?x?xf32>, %B: memref<?x?xf32>) {
%N = dim %A, 0 : memref<?x?xf32>
affine.for %i = 0 to %N step 1 {
affine.for %j = 0 to %N { // implicitly steps by 1
@@ -191,7 +191,7 @@ def AffineForOp : Affine_Op<"for",
For example, to sum-reduce a memref:
```mlir
- func @reduce(%buffer: memref<1024xf32>) -> (f32) {
+ func.func @reduce(%buffer: memref<1024xf32>) -> (f32) {
// Initial sum set to 0.
%sum_0 = arith.constant 0.0 : f32
// iter_args binds initial values to the loop's region arguments.
@@ -387,7 +387,7 @@ def AffineIfOp : Affine_Op<"if",
```mlir
#set = affine_set<(d0, d1)[s0]: (d0 - 10 >= 0, s0 - d0 - 9 >= 0,
d1 - 10 >= 0, s0 - d1 - 9 >= 0)>
- func @reduced_domain_example(%A, %X, %N) : (memref<10xi32>, i32, i32) {
+ func.func @reduced_domain_example(%A, %X, %N) : (memref<10xi32>, i32, i32) {
affine.for %i = 0 to %N {
affine.for %j = 0 to %N {
%0 = affine.apply #map42(%j)
@@ -406,7 +406,7 @@ def AffineIfOp : Affine_Op<"if",
```mlir
#interior = affine_set<(i, j) : (i - 1 >= 0, j - 1 >= 0, 10 - i >= 0, 10 - j >= 0)> (%i, %j)
- func @pad_edges(%I : memref<10x10xf32>) -> (memref<12x12xf32>) {
+ func.func @pad_edges(%I : memref<10x10xf32>) -> (memref<12x12xf32>) {
%O = alloc memref<12x12xf32>
affine.parallel (%i, %j) = (0, 0) to (12, 12) {
%1 = affine.if #interior (%i, %j) {
@@ -655,7 +655,7 @@ def AffineParallelOp : Affine_Op<"parallel",
Example (3x3 valid convolution):
```mlir
- func @conv_2d(%D : memref<100x100xf32>, %K : memref<3x3xf32>) -> (memref<98x98xf32>) {
+ func.func @conv_2d(%D : memref<100x100xf32>, %K : memref<3x3xf32>) -> (memref<98x98xf32>) {
%O = alloc memref<98x98xf32>
affine.parallel (%x, %y) = (0, 0) to (98, 98) {
%0 = affine.parallel (%kx, %ky) = (0, 0) to (2, 2) reduce ("addf") {
diff --git a/mlir/include/mlir/Dialect/Affine/Passes.td b/mlir/include/mlir/Dialect/Affine/Passes.td
index 3e0f2f98f908a..7f942405cd7e9 100644
--- a/mlir/include/mlir/Dialect/Affine/Passes.td
+++ b/mlir/include/mlir/Dialect/Affine/Passes.td
@@ -63,7 +63,7 @@ def AffineLoopFusion : Pass<"affine-loop-fusion", "func::FuncOp"> {
Example 1: Producer-consumer fusion.
Input:
```mlir
- func @producer_consumer_fusion(%arg0: memref<10xf32>, %arg1: memref<10xf32>) {
+ func.func @producer_consumer_fusion(%arg0: memref<10xf32>, %arg1: memref<10xf32>) {
%0 = memref.alloc() : memref<10xf32>
%1 = memref.alloc() : memref<10xf32>
%cst = arith.constant 0.000000e+00 : f32
@@ -86,7 +86,7 @@ def AffineLoopFusion : Pass<"affine-loop-fusion", "func::FuncOp"> {
```
Output:
```mlir
- func @producer_consumer_fusion(%arg0: memref<10xf32>, %arg1: memref<10xf32>) {
+ func.func @producer_consumer_fusion(%arg0: memref<10xf32>, %arg1: memref<10xf32>) {
%0 = memref.alloc() : memref<1xf32>
%1 = memref.alloc() : memref<1xf32>
%cst = arith.constant 0.000000e+00 : f32
@@ -107,7 +107,7 @@ def AffineLoopFusion : Pass<"affine-loop-fusion", "func::FuncOp"> {
Example 2: Sibling fusion.
Input:
```mlir
- func @sibling_fusion(%arg0: memref<10x10xf32>, %arg1: memref<10x10xf32>,
+ func.func @sibling_fusion(%arg0: memref<10x10xf32>, %arg1: memref<10x10xf32>,
%arg2: memref<10x10xf32>, %arg3: memref<10x10xf32>,
%arg4: memref<10x10xf32>) {
affine.for %arg5 = 0 to 3 {
@@ -131,7 +131,7 @@ def AffineLoopFusion : Pass<"affine-loop-fusion", "func::FuncOp"> {
```
Output:
```mlir
- func @sibling_fusion(%arg0: memref<10x10xf32>, %arg1: memref<10x10xf32>,
+ func.func @sibling_fusion(%arg0: memref<10x10xf32>, %arg1: memref<10x10xf32>,
%arg2: memref<10x10xf32>, %arg3: memref<10x10xf32>,
%arg4: memref<10x10xf32>) {
affine.for %arg5 = 0 to 3 {
@@ -238,7 +238,7 @@ def AffinePipelineDataTransfer
Input
```mlir
- func @pipelinedatatransfer() {
+ func.func @pipelinedatatransfer() {
%0 = memref.alloc() : memref<256xf32>
%1 = memref.alloc() : memref<32xf32, 1>
%2 = memref.alloc() : memref<1xf32>
@@ -259,7 +259,7 @@ def AffinePipelineDataTransfer
```mlir
module {
- func @pipelinedatatransfer() {
+ func.func @pipelinedatatransfer() {
%c8 = arith.constant 8 : index
%c0 = arith.constant 0 : index
%0 = memref.alloc() : memref<256xf32>
@@ -306,7 +306,7 @@ def AffineScalarReplacement : Pass<"affine-scalrep", "func::FuncOp"> {
Input
```mlir
- func @store_load_affine_apply() -> memref<10x10xf32> {
+ func.func @store_load_affine_apply() -> memref<10x10xf32> {
%cf7 = arith.constant 7.0 : f32
%m = memref.alloc() : memref<10x10xf32>
affine.for %i0 = 0 to 10 {
@@ -324,7 +324,7 @@ def AffineScalarReplacement : Pass<"affine-scalrep", "func::FuncOp"> {
```mlir
module {
- func @store_load_affine_apply() -> memref<10x10xf32> {
+ func.func @store_load_affine_apply() -> memref<10x10xf32> {
%cst = arith.constant 7.000000e+00 : f32
%0 = memref.alloc() : memref<10x10xf32>
affine.for %arg0 = 0 to 10 {
diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
index 6d56f47b8a375..d7aa0ffe1c683 100644
--- a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
@@ -25,7 +25,7 @@ def BufferDeallocation : Pass<"buffer-deallocation", "func::FuncOp"> {
```mlir
#map0 = affine_map<(d0) -> (d0)>
module {
- func @condBranch(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
+ func.func @condBranch(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
cf.cond_br %arg0, ^bb1, ^bb2
^bb1:
cf.br ^bb3(%arg1 : memref<2xf32>)
@@ -54,7 +54,7 @@ def BufferDeallocation : Pass<"buffer-deallocation", "func::FuncOp"> {
```mlir
#map0 = affine_map<(d0) -> (d0)>
module {
- func @condBranch(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
+ func.func @condBranch(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
cf.cond_br %arg0, ^bb1, ^bb2
^bb1: // pred: ^bb0
%0 = memref.alloc() : memref<2xf32>
diff --git a/mlir/include/mlir/Dialect/ControlFlow/IR/ControlFlowOps.td b/mlir/include/mlir/Dialect/ControlFlow/IR/ControlFlowOps.td
index e6f9c6dbcd61b..94c53239d12eb 100644
--- a/mlir/include/mlir/Dialect/ControlFlow/IR/ControlFlowOps.td
+++ b/mlir/include/mlir/Dialect/ControlFlow/IR/ControlFlowOps.td
@@ -133,7 +133,7 @@ def CondBranchOp : CF_Op<"cond_br",
Example:
```mlir
- func @select(%a: i32, %b: i32, %flag: i1) -> i32 {
+ func.func @select(%a: i32, %b: i32, %flag: i1) -> i32 {
// Both targets are the same, operands differ
cond_br %flag, ^bb1(%a : i32), ^bb1(%b : i32)
diff --git a/mlir/include/mlir/Dialect/GPU/GPUOps.td b/mlir/include/mlir/Dialect/GPU/GPUOps.td
index b8069897ca142..91111bb8a76bf 100644
--- a/mlir/include/mlir/Dialect/GPU/GPUOps.td
+++ b/mlir/include/mlir/Dialect/GPU/GPUOps.td
@@ -373,7 +373,7 @@ def GPU_LaunchFuncOp : GPU_Op<"launch_func",
// This module creates a separate compilation unit for the GPU compiler.
gpu.module @kernels {
- func @kernel_1(%arg0 : f32, %arg1 : memref<?xf32, 1>)
+ func.func @kernel_1(%arg0 : f32, %arg1 : memref<?xf32, 1>)
attributes { nvvm.kernel = true } {
// Operations that produce block/thread IDs and dimensions are
diff --git a/mlir/include/mlir/Dialect/MemRef/Transforms/Passes.td b/mlir/include/mlir/Dialect/MemRef/Transforms/Passes.td
index ff82c862cbcb8..ee769c11590b9 100644
--- a/mlir/include/mlir/Dialect/MemRef/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/MemRef/Transforms/Passes.td
@@ -60,7 +60,7 @@ def NormalizeMemRefs : Pass<"normalize-memrefs", "ModuleOp"> {
```mlir
#tile = affine_map<(i) -> (i floordiv 4, i mod 4)>
- func @matmul(%A: memref<16xf64, #tile>,
+ func.func @matmul(%A: memref<16xf64, #tile>,
%B: index, %C: memref<16xf64>) -> (memref<16xf64, #tile>) {
affine.for %arg3 = 0 to 16 {
%a = affine.load %A[%arg3] : memref<16xf64, #tile>
@@ -76,7 +76,7 @@ def NormalizeMemRefs : Pass<"normalize-memrefs", "ModuleOp"> {
Output
```mlir
- func @matmul(%arg0: memref<4x4xf64>, %arg1: index, %arg2: memref<16xf64>)
+ func.func @matmul(%arg0: memref<4x4xf64>, %arg1: index, %arg2: memref<16xf64>)
-> memref<4x4xf64> {
affine.for %arg3 = 0 to 16 {
%3 = affine.load %arg0[%arg3 floordiv 4, %arg3 mod 4]: memref<4x4xf64>
@@ -94,7 +94,7 @@ def NormalizeMemRefs : Pass<"normalize-memrefs", "ModuleOp"> {
```
#linear8 = affine_map<(i, j) -> (i * 8 + j)>
- func @linearize(%arg0: memref<8x8xi32, #linear8>,
+ func.func @linearize(%arg0: memref<8x8xi32, #linear8>,
%arg1: memref<8x8xi32, #linear8>,
%arg2: memref<8x8xi32, #linear8>) {
%c8 = arith.constant 8 : index
@@ -119,7 +119,7 @@ def NormalizeMemRefs : Pass<"normalize-memrefs", "ModuleOp"> {
Output
```mlir
- func @linearize(%arg0: memref<64xi32>,
+ func.func @linearize(%arg0: memref<64xi32>,
%arg1: memref<64xi32>,
%arg2: memref<64xi32>) {
%c8 = arith.constant 8 : index
diff --git a/mlir/include/mlir/Dialect/SCF/SCFOps.td b/mlir/include/mlir/Dialect/SCF/SCFOps.td
index 420675272ddd7..887b8323f2e6b 100644
--- a/mlir/include/mlir/Dialect/SCF/SCFOps.td
+++ b/mlir/include/mlir/Dialect/SCF/SCFOps.td
@@ -159,8 +159,8 @@ def ForOp : SCF_Op<"for",
For example, to sum-reduce a memref:
```mlir
- func @reduce(%buffer: memref<1024xf32>, %lb: index,
- %ub: index, %step: index) -> (f32) {
+ func.func @reduce(%buffer: memref<1024xf32>, %lb: index,
+ %ub: index, %step: index) -> (f32) {
// Initial sum set to 0.
%sum_0 = arith.constant 0.0 : f32
// iter_args binds initial values to the loop's region arguments.
@@ -184,8 +184,8 @@ def ForOp : SCF_Op<"for",
perform conditional reduction:
```mlir
- func @conditional_reduce(%buffer: memref<1024xf32>, %lb: index,
- %ub: index, %step: index) -> (f32) {
+ func.func @conditional_reduce(%buffer: memref<1024xf32>, %lb: index,
+ %ub: index, %step: index) -> (f32) {
%sum_0 = arith.constant 0.0 : f32
%c0 = arith.constant 0.0 : f32
%sum = scf.for %iv = %lb to %ub step %step
diff --git a/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td b/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td
index 5f89a253fec28..44df58301bc13 100644
--- a/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td
+++ b/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td
@@ -552,7 +552,7 @@ def Shape_ReduceOp : Shape_Op<"reduce",
number of elements can be computed as follows:
```mlir
- func @reduce(%shape : !shape.shape, %init : !shape.size) -> !shape.size {
+ func.func @reduce(%shape : !shape.shape, %init : !shape.size) -> !shape.size {
%num_elements = shape.reduce(%shape, %init) -> !shape.size {
^bb0(%index: index, %dim: !shape.size, %acc: !shape.size):
%updated_acc = "shape.mul"(%acc, %dim) :
@@ -662,7 +662,7 @@ def Shape_WithOp : Shape_Op<"with_shape", [NoSideEffect]> {
and/or call one shape function from another. E.g.,
```mlir
- func @shape_foobah(%a: !shape.value_shape,
+ func.func @shape_foobah(%a: !shape.value_shape,
%b: !shape.value_shape,
%c: !shape.value_shape) -> !shape.shape {
%0 = call @shape_foo(%a, %b) :
@@ -1005,7 +1005,7 @@ def Shape_FunctionLibraryOp : Shape_Op<"function_library",
```mlir
shape.function_library {
- func @same_result_shape(%arg: !shape.value_shape) -> !shape.shape {
+ func.func @same_result_shape(%arg: !shape.value_shape) -> !shape.shape {
%0 = shape.shape_of %arg : !shape.value_shape -> !shape.shape
return %0 : !shape.shape
}
diff --git a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td
index db5f881e684d7..6e36259de9490 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td
@@ -36,7 +36,7 @@ def Sparsification : Pass<"sparsification", "ModuleOp"> {
}
// Multiply a sparse matrix A with a dense vector b into a dense vector x.
- func @kernel_matvec(%arga: tensor<?x?xf64, #SparseMatrix>,
+ func.func @kernel_matvec(%arga: tensor<?x?xf64, #SparseMatrix>,
%argb: tensor<?xf64>,
%argx: tensor<?xf64>) -> tensor<?xf64> {
%0 = linalg.generic #matvec
diff --git a/mlir/include/mlir/IR/BuiltinAttributes.td b/mlir/include/mlir/IR/BuiltinAttributes.td
index 0efbe0e885c20..1fbc7eb18e5d4 100644
--- a/mlir/include/mlir/IR/BuiltinAttributes.td
+++ b/mlir/include/mlir/IR/BuiltinAttributes.td
@@ -1100,11 +1100,11 @@ def Builtin_UnitAttr : Builtin_Attr<"Unit"> {
```mlir
// A unit attribute defined with the `unit` value specifier.
- func @verbose_form() attributes {dialectName.unitAttr = unit}
+ func.func @verbose_form() attributes {dialectName.unitAttr = unit}
// A unit attribute in an attribute dictionary can also be defined without
// the value specifier.
- func @simple_form() attributes {dialectName.unitAttr}
+ func.func @simple_form() attributes {dialectName.unitAttr}
```
}];
let extraClassDeclaration = [{
diff --git a/mlir/include/mlir/IR/BuiltinOps.td b/mlir/include/mlir/IR/BuiltinOps.td
index 6ba2611c501eb..621a71e32c7f8 100644
--- a/mlir/include/mlir/IR/BuiltinOps.td
+++ b/mlir/include/mlir/IR/BuiltinOps.td
@@ -48,7 +48,7 @@ def ModuleOp : Builtin_Op<"module", [
```mlir
module {
- func @foo()
+ func.func @foo()
}
```
}];
diff --git a/mlir/include/mlir/Transforms/Passes.td b/mlir/include/mlir/Transforms/Passes.td
index 4569ac27856f4..ee802269b3034 100644
--- a/mlir/include/mlir/Transforms/Passes.td
+++ b/mlir/include/mlir/Transforms/Passes.td
@@ -184,11 +184,11 @@ def SymbolDCE : Pass<"symbol-dce"> {
For example, consider the following input:
```mlir
- func private @dead_private_function()
- func private @live_private_function()
+ func.func private @dead_private_function()
+ func.func private @live_private_function()
// Note: The `public` isn't necessary here, as this is the default.
- func public @public_function() {
+ func.func public @public_function() {
"foo.return"() {uses = [@live_private_function]} : () -> ()
}
```
@@ -200,9 +200,9 @@ def SymbolDCE : Pass<"symbol-dce"> {
are no links to known-live operations. After running, we get the expected:
```mlir
- func private @live_private_function()
+ func.func private @live_private_function()
- func public @public_function() {
+ func.func public @public_function() {
"foo.return"() {uses = [@live_private_function]} : () -> ()
}
```