[Mlir-commits] [mlir] [mlir][memref] Refine doc examples for operations (PR #165889)
llvmlistbot at llvm.org
llvmlistbot at llvm.org
Fri Oct 31 10:24:58 PDT 2025
https://github.com/FruitClover created https://github.com/llvm/llvm-project/pull/165889
Some of the examples contain typos; some of them use an outdated assembly format,
and some annotations are missing. This is a best-effort attempt to keep them
"parsable" (assuming that most of the types are already defined).
From b5126c0f0f8490ace52ef5015d5c79976e9641c4 Mon Sep 17 00:00:00 2001
From: "Mike K." <fruitclover at gmail.com>
Date: Fri, 31 Oct 2025 23:03:52 +0300
Subject: [PATCH] [mlir][memref] Refine doc examples for operations
Some of the examples contain typos; some of them use an outdated assembly format,
and some annotations are missing. This is a best-effort attempt to keep them
"parsable" (assuming that most of the types are already defined).
---
.../mlir/Dialect/MemRef/IR/MemRefOps.td | 72 ++++++++++---------
1 file changed, 37 insertions(+), 35 deletions(-)
diff --git a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
index 671cc05e963b4..1923b854688be 100644
--- a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
+++ b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
@@ -284,8 +284,8 @@ def MemRef_ReallocOp : MemRef_Op<"realloc"> {
```mlir
%new = memref.realloc %old : memref<64xf32> to memref<124xf32>
- %4 = memref.load %new[%index] // ok
- %5 = memref.load %old[%index] // undefined behavior
+ %4 = memref.load %new[%index] : memref<124xf32> // ok
+ %5 = memref.load %old[%index] : memref<64xf32> // undefined behavior
```
}];
@@ -406,9 +406,10 @@ def MemRef_AllocaScopeOp : MemRef_Op<"alloca_scope",
operation:
```mlir
- %result = memref.alloca_scope {
+ %result = memref.alloca_scope -> f32 {
+ %value = arith.constant 1.0 : f32
...
- memref.alloca_scope.return %value
+ memref.alloca_scope.return %value : f32
}
```
@@ -439,7 +440,7 @@ def MemRef_AllocaScopeReturnOp : MemRef_Op<"alloca_scope.return",
to indicate which values are going to be returned. For example:
```mlir
- memref.alloca_scope.return %value
+ memref.alloca_scope.return %value : f32
```
}];
@@ -503,11 +504,11 @@ def MemRef_CastOp : MemRef_Op<"cast", [
Example:
```mlir
- Cast to concrete shape.
- %4 = memref.cast %1 : memref<*xf32> to memref<4x?xf32>
+ // Cast to concrete shape.
+ %4 = memref.cast %1 : memref<*xf32> to memref<4x?xf32>
- Erase rank information.
- %5 = memref.cast %1 : memref<4x?xf32> to memref<*xf32>
+ // Erase rank information.
+ %5 = memref.cast %1 : memref<4x?xf32> to memref<*xf32>
```
}];
@@ -573,8 +574,8 @@ def MemRef_DeallocOp : MemRef_Op<"dealloc", [MemRefsNormalizable]> {
Example:
```mlir
- %0 = memref.alloc() : memref<8x64xf32, affine_map<(d0, d1) -> (d0, d1), 1>>
- memref.dealloc %0 : memref<8x64xf32, affine_map<(d0, d1) -> (d0, d1), 1>>
+ %0 = memref.alloc() : memref<8x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
+ memref.dealloc %0 : memref<8x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
```
}];
@@ -688,13 +689,13 @@ def MemRef_DmaStartOp : MemRef_Op<"dma_start"> {
space 1 at indices [%k, %l], would be specified as follows:
```mlir
- %num_elements = arith.constant 256
+ %num_elements = arith.constant 256 : index
%idx = arith.constant 0 : index
- %tag = memref.alloc() : memref<1 x i32, affine_map<(d0) -> (d0)>, 4>
- dma_start %src[%i, %j], %dst[%k, %l], %num_elements, %tag[%idx] :
- memref<40 x 128 x f32>, affine_map<(d0) -> (d0)>, 0>,
- memref<2 x 1024 x f32>, affine_map<(d0) -> (d0)>, 1>,
- memref<1 x i32>, affine_map<(d0) -> (d0)>, 2>
+ %tag = memref.alloc() : memref<1 x i32, affine_map<(d0) -> (d0)>, 2>
+ memref.dma_start %src[%i, %j], %dst[%k, %l], %num_elements, %tag[%idx] :
+ memref<40 x 128 x f32, affine_map<(d0, d1) -> (d0, d1)>, 0>,
+ memref<2 x 1024 x f32, affine_map<(d0, d1) -> (d0, d1)>, 1>,
+ memref<1 x i32, affine_map<(d0) -> (d0)>, 2>
```
If %stride and %num_elt_per_stride are specified, the DMA is expected to
@@ -702,8 +703,8 @@ def MemRef_DmaStartOp : MemRef_Op<"dma_start"> {
memory space 0 until %num_elements are transferred.
```mlir
- dma_start %src[%i, %j], %dst[%k, %l], %num_elements, %tag[%idx], %stride,
- %num_elt_per_stride :
+ memref.dma_start %src[%i, %j], %dst[%k, %l], %num_elements, %tag[%idx], %stride,
+ %num_elt_per_stride :
```
* TODO: add additional operands to allow source and destination striding, and
@@ -851,10 +852,10 @@ def MemRef_DmaWaitOp : MemRef_Op<"dma_wait"> {
Example:
```mlir
- dma_start %src[%i, %j], %dst[%k, %l], %num_elements, %tag[%index] :
- memref<2048 x f32>, affine_map<(d0) -> (d0)>, 0>,
- memref<256 x f32>, affine_map<(d0) -> (d0)>, 1>
- memref<1 x i32>, affine_map<(d0) -> (d0)>, 2>
+ memref.dma_start %src[%i, %j], %dst[%k, %l], %num_elements, %tag[%index] :
+ memref<2048 x f32, affine_map<(d0) -> (d0)>, 0>,
+ memref<256 x f32, affine_map<(d0) -> (d0)>, 1>,
+ memref<1 x i32, affine_map<(d0) -> (d0)>, 2>
...
...
dma_wait %tag[%index], %num_elements : memref<1 x i32, affine_map<(d0) -> (d0)>, 2>
@@ -964,8 +965,8 @@ def MemRef_ExtractStridedMetadataOp : MemRef_Op<"extract_strided_metadata", [
```mlir
%base, %offset, %sizes:2, %strides:2 =
- memref.extract_strided_metadata %memref :
- memref<10x?xf32>, index, index, index, index, index
+ memref.extract_strided_metadata %memref : memref<10x?xf32>
+ -> memref<f32>, index, index, index, index, index
// After folding, the type of %m2 can be memref<10x?xf32> and further
// folded to %memref.
@@ -973,7 +974,7 @@ def MemRef_ExtractStridedMetadataOp : MemRef_Op<"extract_strided_metadata", [
offset: [%offset],
sizes: [%sizes#0, %sizes#1],
strides: [%strides#0, %strides#1]
- : memref<f32> to memref<?x?xf32, offset: ?, strides: [?, ?]>
+ : memref<f32> to memref<?x?xf32, strided<[?, ?], offset:?>>
```
}];
@@ -1142,10 +1143,10 @@ def MemRef_GlobalOp : MemRef_Op<"global", [Symbol]> {
```mlir
// Private variable with an initial value.
- memref.global "private" @x : memref<2xf32> = dense<0.0,2.0>
+ memref.global "private" @x : memref<2xf32> = dense<[0.0, 2.0]>
// Private variable with an initial value and an alignment (power of 2).
- memref.global "private" @x : memref<2xf32> = dense<0.0,2.0> {alignment = 64}
+ memref.global "private" @x : memref<2xf32> = dense<[0.0, 2.0]> {alignment = 64}
// Declaration of an external variable.
memref.global "private" @y : memref<4xi32>
@@ -1154,7 +1155,7 @@ def MemRef_GlobalOp : MemRef_Op<"global", [Symbol]> {
memref.global @z : memref<3xf16> = uninitialized
// Externally visible constant variable.
- memref.global constant @c : memref<2xi32> = dense<1, 4>
+ memref.global constant @c : memref<2xi32> = dense<[1, 4]>
```
}];
@@ -1480,7 +1481,8 @@ def MemRef_ReinterpretCastOp
%dst = memref.reinterpret_cast %src to
offset: [%offset],
sizes: [%sizes],
- strides: [%strides]
+ strides: [%strides] :
+ memref<*xf32> to memref<?x?xf32, strided<[?, ?], offset: ?>>
```
means that `%dst`'s descriptor will be:
```mlir
@@ -1619,12 +1621,12 @@ def MemRef_ReshapeOp: MemRef_Op<"reshape", [
```mlir
// Reshape statically-shaped memref.
%dst = memref.reshape %src(%shape)
- : (memref<4x1xf32>, memref<1xi32>) to memref<4xf32>
+ : (memref<4x1xf32>, memref<1xi32>) -> memref<4xf32>
%dst0 = memref.reshape %src(%shape0)
- : (memref<4x1xf32>, memref<2xi32>) to memref<2x2xf32>
+ : (memref<4x1xf32>, memref<2xi32>) -> memref<2x2xf32>
// Flatten unranked memref.
%dst = memref.reshape %src(%shape)
- : (memref<*xf32>, memref<1xi32>) to memref<?xf32>
+ : (memref<*xf32>, memref<1xi32>) -> memref<?xf32>
```
b. Source type is ranked or unranked. Shape argument has dynamic size.
@@ -1633,10 +1635,10 @@ def MemRef_ReshapeOp: MemRef_Op<"reshape", [
```mlir
// Reshape dynamically-shaped 1D memref.
%dst = memref.reshape %src(%shape)
- : (memref<?xf32>, memref<?xi32>) to memref<*xf32>
+ : (memref<?xf32>, memref<?xi32>) -> memref<*xf32>
// Reshape unranked memref.
%dst = memref.reshape %src(%shape)
- : (memref<*xf32>, memref<?xi32>) to memref<*xf32>
+ : (memref<*xf32>, memref<?xi32>) -> memref<*xf32>
```
}];
More information about the Mlir-commits
mailing list