[Mlir-commits] [mlir] 946be75 - [MLIR][Linalg] Retire C++ DotOp in favor of a linalg-ods-gen'd op
Alex Zinenko
llvmlistbot at llvm.org
Tue Jul 28 03:34:26 PDT 2020
Author: lorenzo chelini
Date: 2020-07-28T12:34:19+02:00
New Revision: 946be75b9ec131519837e85487fc3e8bf475d001
URL: https://github.com/llvm/llvm-project/commit/946be75b9ec131519837e85487fc3e8bf475d001
DIFF: https://github.com/llvm/llvm-project/commit/946be75b9ec131519837e85487fc3e8bf475d001.diff
LOG: [MLIR][Linalg] Retire C++ DotOp in favor of a linalg-ods-gen'd op
- Replace the C++-defined DotOp with a linalg-ods-gen'd op, now that the DRR rules have been dropped.
- Catch argument-count mismatches in the parser: the number of parsed arguments must
equal the number of arguments the op expects (see the snippet below).
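For reference, the generated named op uses the common named-op assembly form (operands before the colon, operand types parenthesized), and an operand-count mismatch is now rejected at parse time. A minimal sketch, distilled from the updated tests in this patch:

  // New named-op syntax (operand types in parentheses):
  linalg.dot %A, %B, %C : (memref<?xf32>, memref<?xf32>, memref<f32>)

  // An operand-count mismatch is now a parse error:
  // 'linalg.dot' expects 3 operands, but found 2
  linalg.dot %A, %A : (memref<?xf32>, memref<?xf32>)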
Reviewed By: ftynse, nicolasvasilache
Differential Revision: https://reviews.llvm.org/D82952
Added:
Modified:
mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOpsSpec.tc
mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h
mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp
mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
mlir/test/Dialect/Linalg/invalid.mlir
mlir/test/Dialect/Linalg/loops.mlir
mlir/test/Dialect/Linalg/roundtrip.mlir
mlir/test/Dialect/Linalg/standard.mlir
mlir/test/Dialect/Linalg/tile.mlir
mlir/test/Dialect/Linalg/transform-patterns-matmul-to-vector.mlir
mlir/test/Dialect/Linalg/transform-patterns.mlir
mlir/test/mlir-cpu-runner/linalg_integration_test.mlir
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOpsSpec.tc b/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOpsSpec.tc
index bbd398585e5f..056f0723e92d 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOpsSpec.tc
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOpsSpec.tc
@@ -8,6 +8,11 @@ def matvec(A: f32(M, N), y: f32(N)) -> (x: f32(M)) {
x(m) = std_addf<n>(std_mulf(A(m, n), y(n)));
}
+ods_def<DotOp>:
+def dot(A: f32(M), B: f32(M)) -> (C: f32()) {
+ C() = std_addf<m>(std_mulf(A(m), B(m)));
+}
+
ods_def<BatchMatmulOp>:
def batch_matmul(A: f32(Batch, M, K), B: f32(Batch, K, N)) -> (C: f32(Batch, M, N)) {
C(b, m, n) = std_addf<k>(std_mulf(A(b, m, k), B(b, k, n)));
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h
index f89965a96857..21bff4185abf 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h
@@ -51,9 +51,9 @@ using ReassociationExprs = SmallVector<AffineExpr, 2>;
/// 1. linalg.fill(%A, %f) : memref<f32>, f32
/// name mangles into `linalg_fill_viewf32_f32_impl`
///
-/// 2. linalg.dot(%A, %B, %C) :
-/// memref<?xf32, stride_specification>,
-/// memref<?xf32, stride_specification>, memref<f32>
+/// 2. linalg.dot %A, %B, %C :
+/// (memref<?xf32, stride_specification>,
+/// memref<?xf32, stride_specification>, memref<f32>)
/// name mangles into `linalg_dot_viewxf32_viewxf32_viewf32_impl`
///
/// 3. linalg.matmul(...) :
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
index 81f911a37cea..1e3321af981e 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
@@ -180,31 +180,6 @@ def FillOp : LinalgStructured_Op<"fill", [NInputs<0>, NOutputs<1>]> {
let hasFolder = 1;
}
-def DotOp : LinalgStructured_Op<"dot", [NInputs<2>, NOutputs<1>]> {
-
- let arguments = (ins AnyStridedMemRefOfRank<1>,
- AnyStridedMemRefOfRank<1>,
- AnyStridedMemRefOfRank<0>);
-
- let extraClassDeclaration = libraryCallName # [{
- llvm::Optional<SmallVector<StringRef, 8>> referenceIterators() {
- return SmallVector<StringRef, 8>{getReductionIteratorTypeName()};
- }
-
- // A(r_i) * B(r_i) -> C()
- llvm::Optional<SmallVector<AffineMap, 8>> referenceIndexingMaps() {
- MLIRContext *context = getContext();
- auto r_i = getAffineDimExpr(0, context);
- return SmallVector<AffineMap, 8>{
- AffineMap::get(1, 0, {r_i}, context),
- AffineMap::get(1, 0, {r_i}, context),
- AffineMap::get(1, 0, {}, context)};
- }
- }];
-
- let hasFolder = 1;
-}
-
/// A base class for pooling operation such as conv. The arguments must contain
/// optional arguments `strides`, `dilations` and `padding` with following type:
/// OptionalAttr<I64ArrayAttr>:$strides
diff --git a/mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp b/mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp
index 43f9d8825327..8a54c93d7685 100644
--- a/mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp
+++ b/mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp
@@ -235,13 +235,13 @@ void mlir::populateLinalgToStandardConversionPatterns(
LinalgOpConversion<PoolingMaxOp>,
LinalgOpConversion<PoolingMinOp>,
LinalgOpConversion<PoolingSumOp>,
- LinalgOpConversion<CopyOp>,
- LinalgOpConversion<DotOp>,
+ LinalgOpConversion<CopyOp>,
LinalgOpConversion<FillOp>,
LinalgOpConversion<GenericOp>,
LinalgOpConversion<IndexedGenericOp>>(ctx);
// TODO: collect all auto-generated named ops with a tblgen directive.
patterns.insert<
+ LinalgOpConversion<DotOp>,
LinalgOpConversion<BatchMatmulOp>,
LinalgOpConversion<MatvecOp>,
LinalgOpConversion<MatmulOp>>(ctx);
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index 4c68f0265677..192179b3ff50 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -1173,10 +1173,6 @@ LogicalResult CopyOp::fold(ArrayRef<Attribute>,
SmallVectorImpl<OpFoldResult> &) {
return foldMemRefCast(*this);
}
-LogicalResult DotOp::fold(ArrayRef<Attribute>,
- SmallVectorImpl<OpFoldResult> &) {
- return foldMemRefCast(*this);
-}
LogicalResult FillOp::fold(ArrayRef<Attribute>,
SmallVectorImpl<OpFoldResult> &) {
return foldMemRefCast(*this);
@@ -1280,6 +1276,17 @@ static ParseResult parseNamedStructuredOp(OpAsmParser &parser,
if (!tensorResultTypes.empty())
result.addTypes(tensorResultTypes);
+ // The number of parsed arguments must equal
+ // the number of expected arguments for the current operation.
+ auto parsedArgs = operandsInfo.size();
+ auto expectedArgs = NamedStructuredOpType::getNumInputs() +
+ NamedStructuredOpType::getNumOutputs();
+ if (parsedArgs != expectedArgs)
+ return parser.emitError(parser.getNameLoc(),
+ "expects " + std::to_string(expectedArgs) +
+ " operands, but found " +
+ std::to_string(parsedArgs));
+
buildNamedStructuredOpRegionAndAttributes<NamedStructuredOpType>(
parser.getBuilder(), result, operandTypes, tensorResultTypes);
@@ -1299,6 +1306,10 @@ LogicalResult BatchMatmulOp::fold(ArrayRef<Attribute>,
SmallVectorImpl<OpFoldResult> &) {
return foldMemRefCast(*this);
}
+LogicalResult DotOp::fold(ArrayRef<Attribute>,
+ SmallVectorImpl<OpFoldResult> &) {
+ return foldMemRefCast(*this);
+}
LogicalResult MatmulOp::fold(ArrayRef<Attribute>,
SmallVectorImpl<OpFoldResult> &) {
return foldMemRefCast(*this);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
index 7fb1018fc588..32e50cb597d7 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
@@ -295,18 +295,6 @@ void emitScalarImplementation(ArrayRef<Value> allIvs, FillOp fillOp) {
nPar > 0 ? O(ivs) = fillOp.value() : O() = fillOp.value();
}
-template <typename IndexedValueType>
-void emitScalarImplementation(ArrayRef<Value> allIvs, DotOp dotOp) {
- assert(dotOp.hasBufferSemantics() &&
- "expected linalg op with buffer semantics");
- assert(allIvs.size() == 1);
- Value r_i(allIvs[0]);
- IndexedValueType A(dotOp.getInput(0)), B(dotOp.getInput(1)),
- C(dotOp.getOutputBuffer(0));
- // Emit scalar form.
- C() = C() + A(r_i) * B(r_i);
-}
-
template <typename IndexedValueType>
Value getConvOpInput(ConvOp convOp, StdIndexedValue im,
MutableArrayRef<Value> imIdx) {
@@ -673,8 +661,6 @@ static Optional<LinalgLoops> linalgOpToLoopsImplSwitch(Operation *op,
return linalgOpToLoopsImpl<LoopTy, CopyOp>(op, builder);
if (isa<FillOp>(op))
return linalgOpToLoopsImpl<LoopTy, FillOp>(op, builder);
- if (isa<DotOp>(op))
- return linalgOpToLoopsImpl<LoopTy, DotOp>(op, builder);
if (isa<ConvOp>(op))
return linalgOpToLoopsImpl<LoopTy, ConvOp>(op, builder);
if (isa<PoolingMaxOp>(op))
@@ -693,6 +679,8 @@ static Optional<LinalgLoops> linalgOpToLoopsImplSwitch(Operation *op,
return linalgOpToLoopsImpl<LoopTy, MatmulOp>(op, builder);
if (isa<MatvecOp>(op))
return linalgOpToLoopsImpl<LoopTy, MatvecOp>(op, builder);
+ if (isa<DotOp>(op))
+ return linalgOpToLoopsImpl<LoopTy, DotOp>(op, builder);
if (isa<BatchMatmulOp>(op))
return linalgOpToLoopsImpl<LoopTy, BatchMatmulOp>(op, builder);
llvm_unreachable("Unexpected op in linalgOpToLoopsImpl");
diff --git a/mlir/test/Dialect/Linalg/invalid.mlir b/mlir/test/Dialect/Linalg/invalid.mlir
index 79d61d8d7892..ca59ecd387ec 100644
--- a/mlir/test/Dialect/Linalg/invalid.mlir
+++ b/mlir/test/Dialect/Linalg/invalid.mlir
@@ -422,8 +422,8 @@ func @generic(%arg0: memref<?x?xi4>) {
// -----
func @generic_result_0_element_type(%arg0: memref<?xf32>) {
- // expected-error @+1 {{'linalg.dot' op expected 3 operands, but found 2}}
- linalg.dot(%arg0, %arg0): memref<?xf32>, memref<?xf32>
+ // expected-error @+1 {{'linalg.dot' expects 3 operands, but found 2}}
+ linalg.dot %arg0, %arg0 : (memref<?xf32>, memref<?xf32>)
}
// -----
diff --git a/mlir/test/Dialect/Linalg/loops.mlir b/mlir/test/Dialect/Linalg/loops.mlir
index 30bb90bdd43a..b01beb7e8f17 100644
--- a/mlir/test/Dialect/Linalg/loops.mlir
+++ b/mlir/test/Dialect/Linalg/loops.mlir
@@ -123,7 +123,7 @@ func @dot(%arg0: memref<?xi8>, %M: index) {
%1 = view %arg0[%c0][%M] : memref<?xi8> to memref<?xf32>
%2 = view %arg0[%c0][%M] : memref<?xi8> to memref<?xf32>
%3 = view %arg0[%c0][] : memref<?xi8> to memref<f32>
- linalg.dot(%1, %2, %3) : memref<?xf32>, memref<?xf32>, memref<f32>
+ linalg.dot %1, %2, %3 : (memref<?xf32>, memref<?xf32>, memref<f32>)
return
}
// CHECKLOOP-LABEL: func @dot(%{{.*}}: memref<?xi8>,
@@ -154,7 +154,9 @@ func @dot(%arg0: memref<?xi8>, %M: index) {
func @dot_view(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: memref<?xf32, offset: ?, strides: [1]>, %arg2: memref<f32>) {
- linalg.dot(%arg0, %arg1, %arg2) : memref<?xf32, offset: ?, strides: [1]>, memref<?xf32, offset: ?, strides: [1]>, memref<f32>
+ linalg.dot %arg0, %arg1, %arg2 : (memref<?xf32, offset: ?, strides: [1]>,
+ memref<?xf32, offset: ?, strides: [1]>,
+ memref<f32>)
return
}
// CHECKLOOP-LABEL: func @dot_view(
diff --git a/mlir/test/Dialect/Linalg/roundtrip.mlir b/mlir/test/Dialect/Linalg/roundtrip.mlir
index 9e6c27547930..269664324697 100644
--- a/mlir/test/Dialect/Linalg/roundtrip.mlir
+++ b/mlir/test/Dialect/Linalg/roundtrip.mlir
@@ -88,10 +88,10 @@ func @ops(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
memref<?x?xf32, offset: ?, strides: [?, 1]>)
linalg.matvec %arg0, %arg1, %arg2 : (memref<?x?xf32, offset: ?, strides: [?, 1]>,
memref<?xf32, offset: ?, strides: [1]>,
- memref<?xf32, offset: ?, strides: [1]>)
- linalg.dot(%arg1, %arg2, %arg3) : memref<?xf32, offset: ?, strides: [1]>,
- memref<?xf32, offset: ?, strides: [1]>,
- memref<f32>
+ memref<?xf32, offset: ?, strides: [1]>)
+ linalg.dot %arg1, %arg2, %arg3 : (memref<?xf32, offset: ?, strides: [1]>,
+ memref<?xf32, offset: ?, strides: [1]>,
+ memref<f32>)
return
}
// CHECK-LABEL: func @ops(%
@@ -103,10 +103,10 @@ func @ops(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
// CHECK-SAME: (memref<?x?xf32, #[[$strided2D]]>,
// CHECK-SAME: memref<?xf32, #[[$strided1D]]>,
// CHECK-SAME: memref<?xf32, #[[$strided1D]]>)
-// CHECK-NEXT: linalg.dot(%{{.*}}, %{{.*}}, %{{.*}}) :
-// CHECK-SAME: memref<?xf32, #[[$strided1D]]>,
-// CHECK-SAME: memref<?xf32, #[[$strided1D]]>,
-// CHECK-SAME: memref<f32>
+// CHECK-NEXT: linalg.dot %{{.*}}, %{{.*}}, %{{.*}} :
+// CHECK-SAME: (memref<?xf32, #[[$strided1D]]>,
+// CHECK-SAME: memref<?xf32, #[[$strided1D]]>,
+// CHECK-SAME: memref<f32>)
// -----
diff --git a/mlir/test/Dialect/Linalg/standard.mlir b/mlir/test/Dialect/Linalg/standard.mlir
index 0ba3465443fa..60b348110c4f 100644
--- a/mlir/test/Dialect/Linalg/standard.mlir
+++ b/mlir/test/Dialect/Linalg/standard.mlir
@@ -13,9 +13,9 @@
func @dot(%arg0: memref<?xf32, offset: ?, strides: [1]>,
%arg1: memref<?xf32, offset: ?, strides: [1]>,
%arg2: memref<f32>) {
- linalg.dot(%arg0, %arg1, %arg2) : memref<?xf32, offset: ?, strides: [1]>,
- memref<?xf32, offset: ?, strides: [1]>,
- memref<f32>
+ linalg.dot %arg0, %arg1, %arg2 : (memref<?xf32, offset: ?, strides: [1]>,
+ memref<?xf32, offset: ?, strides: [1]>,
+ memref<f32>)
return
}
// CHECK-LABEL: func @dot(
diff --git a/mlir/test/Dialect/Linalg/tile.mlir b/mlir/test/Dialect/Linalg/tile.mlir
index 049fb571bd51..9a1bbfc1dc18 100644
--- a/mlir/test/Dialect/Linalg/tile.mlir
+++ b/mlir/test/Dialect/Linalg/tile.mlir
@@ -271,7 +271,9 @@ func @matvec(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?
// TILE-234: linalg.matvec %[[sAij]], %[[sBj]], %[[sCi]] : (memref<?x?xf32, #[[$strided2D]]>, memref<?xf32, #[[$strided1D]]>, memref<?xf32, #[[$strided1D]]>)
func @dot(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: memref<?xf32, offset: ?, strides: [1]>, %arg2: memref<f32>) {
- linalg.dot(%arg0, %arg1, %arg2) : memref<?xf32, offset: ?, strides: [1]>, memref<?xf32, offset: ?, strides: [1]>, memref<f32>
+ linalg.dot %arg0, %arg1, %arg2 : (memref<?xf32, offset: ?, strides: [1]>,
+ memref<?xf32, offset: ?, strides: [1]>,
+ memref<f32>)
return
}
// TILE-2-LABEL: func @dot(
@@ -285,7 +287,7 @@ func @dot(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: memref<?xf32, of
// TILE-2: %[[localM:.*]] = dim %{{.*}}, %c0
// TILE-2: %[[szM:.*]] = affine.min #[[$bound_map]](%[[I]])[%[[localM]]]
// TILE-2: %[[sBi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref<?xf32, #[[$strided1D]]> to memref<?xf32, #[[$strided1D]]>
-// TILE-2: linalg.dot(%[[sAi]], %[[sBi]], {{.*}}) : memref<?xf32, #[[$strided1D]]>, memref<?xf32, #[[$strided1D]]>, memref<f32>
+// TILE-2: linalg.dot %[[sAi]], %[[sBi]], {{.*}} : (memref<?xf32, #[[$strided1D]]>, memref<?xf32, #[[$strided1D]]>, memref<f32>)
// TILE-02-LABEL: func @dot(
// TILE-02-NOT: scf.for
@@ -304,7 +306,7 @@ func @dot(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: memref<?xf32, of
// TILE-234: %[[localM:.*]] = dim %{{.*}}, %c0
// TILE-234: %[[szM:.*]] = affine.min #[[$bound_map_2]](%[[I]])[%[[localM]]]
// TILE-234: %[[sBi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref<?xf32, #[[$strided1D]]> to memref<?xf32, #[[$strided1D]]>
-// TILE-234: linalg.dot(%[[sAi]], %[[sBi]], %{{.*}}) : memref<?xf32, #[[$strided1D]]>, memref<?xf32, #[[$strided1D]]>, memref<f32>
+// TILE-234: linalg.dot %[[sAi]], %[[sBi]], %{{.*}} : (memref<?xf32, #[[$strided1D]]>, memref<?xf32, #[[$strided1D]]>, memref<f32>)
func @fill_static(%arg0: memref<127x99xf32>, %arg1: f32) {
linalg.fill(%arg0, %arg1) : memref<127x99xf32>, f32
diff --git a/mlir/test/Dialect/Linalg/transform-patterns-matmul-to-vector.mlir b/mlir/test/Dialect/Linalg/transform-patterns-matmul-to-vector.mlir
index b0702f9fdcfd..83e9461d66cc 100644
--- a/mlir/test/Dialect/Linalg/transform-patterns-matmul-to-vector.mlir
+++ b/mlir/test/Dialect/Linalg/transform-patterns-matmul-to-vector.mlir
@@ -36,7 +36,7 @@ func @matmul(%A: memref<1584x1584xf32, offset: 0, strides: [1584, 1]>,
func @contraction_dot(%A: memref<1584xf32>, %B: memref<1584xf32>, %C: memref<f32>) {
// VECTOR-CONTRACTION: vector.contract
// VECTOR-CONTRACTION-SAME: vector<1584xf32>, vector<1584xf32> into f32
- linalg.dot(%A, %B, %C) : memref<1584xf32>, memref<1584xf32>, memref<f32>
+ linalg.dot %A, %B, %C : (memref<1584xf32>, memref<1584xf32>, memref<f32>)
return
}
diff --git a/mlir/test/Dialect/Linalg/transform-patterns.mlir b/mlir/test/Dialect/Linalg/transform-patterns.mlir
index 3f7d16497253..1a4100403b00 100644
--- a/mlir/test/Dialect/Linalg/transform-patterns.mlir
+++ b/mlir/test/Dialect/Linalg/transform-patterns.mlir
@@ -14,10 +14,10 @@
func @dot(%x: memref<?xf32, offset: ?, strides: [1]>,
%y: memref<?xf32, offset: ?, strides: [1]>,
%v: memref<f32>) {
- linalg.dot(%x, %y, %v) { __internal_linalg_transform__ = "MEM" } :
- memref<?xf32, offset: ?, strides: [1]>,
- memref<?xf32, offset: ?, strides: [1]>,
- memref<f32>
+ linalg.dot %x, %y, %v { __internal_linalg_transform__ = "MEM" } :
+ (memref<?xf32, offset: ?, strides: [1]>,
+ memref<?xf32, offset: ?, strides: [1]>,
+ memref<f32>)
return
}
// CHECK-LABEL: func @dot
@@ -28,8 +28,8 @@ func @dot(%x: memref<?xf32, offset: ?, strides: [1]>,
// CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c1]] {
// CHECK: load
// CHECK: load
-// CHECK: mulf
// CHECK: load
+// CHECK: mulf
// CHECK: addf
// CHECK: store
diff --git a/mlir/test/mlir-cpu-runner/linalg_integration_test.mlir b/mlir/test/mlir-cpu-runner/linalg_integration_test.mlir
index dd6feb96240e..ba2ea59cb22d 100644
--- a/mlir/test/mlir-cpu-runner/linalg_integration_test.mlir
+++ b/mlir/test/mlir-cpu-runner/linalg_integration_test.mlir
@@ -51,7 +51,7 @@ func @dot() -> f32 {
%B = view %bB[%c0][%c16] : memref<?xi8> to memref<?xf32>
%C = view %bC[%c0][] : memref<?xi8> to memref<f32>
- linalg.dot(%A, %B, %C) : memref<?xf32>, memref<?xf32>, memref<f32>
+ linalg.dot %A, %B, %C : (memref<?xf32>, memref<?xf32>, memref<f32>)
%res = load %C[] : memref<f32>
dealloc %bC : memref<?xi8>