[Mlir-commits] [mlir] ed1681e - [mlir][Linalg] Add comprehensive bufferization support for ConstantOp (13/n)

Nicolas Vasilache llvmlistbot at llvm.org
Thu Jul 1 04:51:14 PDT 2021


Author: Nicolas Vasilache
Date: 2021-07-01T11:42:27Z
New Revision: ed1681ed3aff0f1fb97f1a738938671df098b939

URL: https://github.com/llvm/llvm-project/commit/ed1681ed3aff0f1fb97f1a738938671df098b939
DIFF: https://github.com/llvm/llvm-project/commit/ed1681ed3aff0f1fb97f1a738938671df098b939.diff

LOG: [mlir][Linalg] Add comprehensive bufferization support for ConstantOp (13/n)

ConstantOp is only supported in the ModulePass because it requires a GlobalCreator object that must be constructed from a ModuleOp.
If the standalone FunctionPass encounters a ConstantOp, bufferization fails.

Differential revision: https://reviews.llvm.org/D105156
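
Roughly, the rewrite turns a tensor constant into a module-level memref.global plus a
memref.get_global. The sketch below mirrors the test added in this commit; the global
name @__constant_4xi32 is simply whatever GlobalCreator picks:

  // Before bufferization:
  func @main() {
    %A = constant dense<[1, 2, 3, 4]> : tensor<4xi32>
    call @some_external_func(%A) : (tensor<4xi32>) -> ()
    return
  }

  // After bufferization: the constant is hoisted to a module-level global,
  // which is why a ModuleOp (and hence a GlobalCreator) is needed.
  memref.global "private" constant @__constant_4xi32 : memref<4xi32> = dense<[1, 2, 3, 4]>
  func @main() {
    %0 = memref.get_global @__constant_4xi32 : memref<4xi32>
    %1 = memref.cast %0 : memref<4xi32> to memref<4xi32, affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>>
    call @some_external_func(%1) : (memref<4xi32, affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>>) -> ()
    return
  }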

Added: 
    

Modified: 
    mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferize.cpp
    mlir/test/Dialect/Linalg/comprehensive-module-bufferize.mlir

Removed: 
    


################################################################################
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferize.cpp b/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferize.cpp
index dec08dfd4da2..178676c5e4b7 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferize.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferize.cpp
@@ -358,6 +358,7 @@ static bool hasKnownBufferizationAliasingBehavior(Operation *op) {
       // clang-format off
       isa<CallOpInterface,
           tensor::CastOp,
+          ConstantOp,
           scf::ForOp,
           InitTensorOp,
           LinalgOp,
@@ -467,6 +468,7 @@ static Optional<OpOperand *> getAliasingOpOperand(OpResult result) {
     return None;
   return TypeSwitch<Operation *, OpOperand *>(result.getDefiningOp())
       .Case([&](tensor::CastOp op) { return &op->getOpOperand(0); })
+      .Case([&](ConstantOp op) { return &op->getOpOperand(0); })
       .Case([&](LinalgOp op) {
         return op.getOutputTensorOperands()[result.getResultNumber()];
       })
@@ -499,6 +501,8 @@ static Optional<OpResult> getAliasingOpResult(OpOperand &opOperand) {
       // These terminators legitimately have no result.
       .Case<ReturnOp, linalg::YieldOp, scf::YieldOp>(
           [&](auto op) { return OpResult(); })
+      // ConstantOp is never inplaceable.
+      .Case([&](ConstantOp op) { return op->getResult(0); })
      // ExtractSliceOp is different: its result is not inplaceable on op.source
       // but when bufferized inplace, the result is an aliasing subregion of
       // op.source.
@@ -1600,6 +1604,26 @@ static LogicalResult bufferize(OpBuilder &b, tensor::CastOp castOp,
   return success();
 }
 
+static LogicalResult bufferize(OpBuilder &b, ConstantOp constantOp,
+                               BlockAndValueMapping &bvm,
+                               BufferizationAliasInfo &aliasInfo,
+                               GlobalCreator &globalCreator) {
+  if (!constantOp.getType().dyn_cast<RankedTensorType>())
+    return failure();
+
+  // Take a guard before anything else.
+  OpBuilder::InsertionGuard g(b);
+  b.setInsertionPoint(constantOp);
+
+  auto globalMemref = globalCreator.getGlobalFor(constantOp);
+  Value memref = b.create<memref::GetGlobalOp>(
+      constantOp.getLoc(), globalMemref.type(), globalMemref.getName());
+  aliasInfo.insertNewBufferEquivalence(memref, constantOp.getResult());
+  map(bvm, constantOp, memref);
+
+  return success();
+}
+
 /// DimOp tensor operand is modified inplace. This allows leaving dead
 /// tensors behind that will get DCE'd.
 static LogicalResult bufferize(OpBuilder &b, tensor::DimOp dimOp,
@@ -2115,7 +2139,8 @@ inPlaceAnalysisFuncOpBody(FuncOp funcOp, BufferizationAliasInfo &aliasInfo,
 
 static LogicalResult bufferizeFuncOpInternals(
     FuncOp funcOp, BlockAndValueMapping &bvm, BufferizationAliasInfo &aliasInfo,
-    DenseMap<FuncOp, FunctionType> &bufferizedFunctionTypes) {
+    DenseMap<FuncOp, FunctionType> &bufferizedFunctionTypes,
+    GlobalCreator &globalCreator) {
   LLVM_DEBUG(llvm::dbgs() << "\n\n");
   LDBG("Begin BufferizeFuncOpInternals:\n" << funcOp << '\n');
   OpBuilder b(funcOp->getContext());
@@ -2151,6 +2176,12 @@ static LogicalResult bufferizeFuncOpInternals(
         LDBG("Begin bufferize:\n" << op << '\n');
         return bufferize(b, op, bvm, aliasInfo, bufferizedFunctionTypes);
       })
+      .Case([&](ConstantOp op) {
+        if (!isaTensor(op.getResult().getType()))
+          return success();
+        LDBG("Begin bufferize:\n" << op << '\n');
+        return bufferize(b, op, bvm, aliasInfo, globalCreator);
+      })
       .Default([&](Operation *op) {
         auto isaTensor = [](Type t) { return t.isa<TensorType>(); };
         if (any_of(op->getOperandTypes(), isaTensor) ||
@@ -2429,6 +2460,7 @@ void LinalgComprehensiveModuleBufferize::runOnOperation() {
   if (failed(getFuncOpsOrderedByCalls(moduleOp, orderedFuncOps, callerMap)))
     return signalPassFailure();
 
+  GlobalCreator globalCreator(moduleOp);
   DominanceInfo domInfo(moduleOp);
   BufferizationAliasInfo aliasInfo(moduleOp);
   // Interestingly, all function args that are not visible outside of a module
@@ -2461,7 +2493,8 @@ void LinalgComprehensiveModuleBufferize::runOnOperation() {
     if (!testAnalysisOnly) {
       BlockAndValueMapping tensorToBufferMap;
       if (failed(bufferizeFuncOpInternals(funcOp, tensorToBufferMap, aliasInfo,
-                                          bufferizedFunctionTypes))) {
+                                          bufferizedFunctionTypes,
+                                          globalCreator))) {
         signalPassFailure();
         return;
       }

diff --git a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize.mlir b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize.mlir
index bc6488bca8e5..f7f221b2b77f 100644
--- a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize.mlir
+++ b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize.mlir
@@ -352,6 +352,26 @@ func @scf_for_with_tensor.insert_slice(
 
 //      CHECK: #[[$DYN_1D_MAP:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
 
+//      CHECK: memref.global "private" constant @__constant_4xi32 : memref<4xi32> = dense<[1, 2, 3, 4]>
+//      CHECK: func private @some_external_func(memref<4xi32, #[[$DYN_1D_MAP]]>)
+func private @some_external_func(tensor<4xi32>)
+
+//      CHECK: func @main()
+func @main() {
+//      CHECK:   %[[A:.*]] = memref.get_global @__constant_4xi32 : memref<4xi32>
+  %A = constant dense<[1, 2, 3, 4]> : tensor<4xi32>
+
+//      CHECK:   %[[B:.*]] = memref.cast %[[A]] : memref<4xi32> to memref<4xi32, #[[$DYN_1D_MAP]]>
+//      CHECK:   call @some_external_func(%[[B]]) : (memref<4xi32, #[[$DYN_1D_MAP]]>) -> ()
+  call @some_external_func(%A) : (tensor<4xi32>) -> ()
+
+  return
+}
+
+// -----
+
+//      CHECK: #[[$DYN_1D_MAP:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
+
 //      CHECK:  func private @some_external_func(memref<?xf32, #[[$DYN_1D_MAP]]>)
 func private @some_external_func(tensor<?xf32>)
 


        

