[Mlir-commits] [mlir] 489fec2 - [mlir][Linalg] NFC - Drop Optional in favor of FailureOr

Nicolas Vasilache llvmlistbot at llvm.org
Fri Oct 22 12:33:35 PDT 2021


Author: Nicolas Vasilache
Date: 2021-10-22T19:28:18Z
New Revision: 489fec2777881adbcc5fd013227ab84e3e4e69ff

URL: https://github.com/llvm/llvm-project/commit/489fec2777881adbcc5fd013227ab84e3e4e69ff
DIFF: https://github.com/llvm/llvm-project/commit/489fec2777881adbcc5fd013227ab84e3e4e69ff.diff

LOG: [mlir][Linalg] NFC - Drop Optional in favor of FailureOr

Differential revision: https://reviews.llvm.org/D112332
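
The change is mechanical at the API boundary: entry points that used to return
Optional<T> (with llvm::None signaling failure) now return FailureOr<T>, so
callers test the result with failed()/succeeded() and still dereference it with
* or -> on success. A minimal call-site sketch, assuming the declarations from
Transforms.h shown below; tileExample is a hypothetical helper, not part of
this commit:

  #include "mlir/Dialect/Linalg/Transforms/Transforms.h"

  using namespace mlir;
  using namespace mlir::linalg;

  // Hypothetical caller showing the FailureOr-based call-site pattern.
  static LogicalResult tileExample(OpBuilder &b, LinalgOp op,
                                   const LinalgTilingOptions &options) {
    // Before this commit: Optional<TiledLinalgOp> tiled = ...; if (!tiled) ...
    FailureOr<TiledLinalgOp> tiled = tileLinalgOp(b, op, options);
    if (failed(tiled))
      return failure();
    // Dereferencing the successful result is unchanged.
    LinalgOp tiledOp = tiled->op;
    (void)tiledOp;
    return success();
  }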

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
    mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
    mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
    mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
    mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
    mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
    mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
index f9b82b371904..dcba0cbcbb5c 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -158,8 +158,8 @@ struct TiledLinalgOp {
   SmallVector<Operation *, 8> loops;
   SmallVector<Value, 4> tensorResults;
 };
-Optional<TiledLinalgOp> tileLinalgOp(OpBuilder &b, LinalgOp op,
-                                     const LinalgTilingOptions &options);
+FailureOr<TiledLinalgOp> tileLinalgOp(OpBuilder &b, LinalgOp op,
+                                      const LinalgTilingOptions &options);
 
 /// Fuse a sequence of linalg operations (`ops`) using tile-and-fuse. This
 /// proceeds as follows:
@@ -221,7 +221,7 @@ struct TiledAndFusedLinalgOps {
   /// The fused loop generated.
   SmallVector<Operation *, 4> fusedLoops;
 };
-Optional<TiledAndFusedLinalgOps>
+FailureOr<TiledAndFusedLinalgOps>
 tileAndFuseLinalgOps(OpBuilder &builder, ArrayRef<LinalgOp> ops,
                      const LinalgDependenceGraph &dependenceGraph,
                      const LinalgTilingOptions &tilingOptions);
@@ -344,7 +344,7 @@ struct PromotionInfo {
   Value fullLocalView;
   Value partialLocalView;
 };
-Optional<PromotionInfo>
+FailureOr<PromotionInfo>
 promoteSubviewAsNewBuffer(OpBuilder &b, Location loc, memref::SubViewOp subView,
                           AllocBufferCallbackFn allocationFn,
                           DataLayout &layout);
@@ -359,24 +359,24 @@ promoteSubviewAsNewBuffer(OpBuilder &b, Location loc, memref::SubViewOp subView,
 ///
 /// Returns the modified linalg op (the modification happens in place) as well
 /// as all the copy ops created.
-Optional<LinalgOp> promoteSubViews(OpBuilder &b, LinalgOp op,
-                                   LinalgPromotionOptions options);
+FailureOr<LinalgOp> promoteSubViews(OpBuilder &b, LinalgOp op,
+                                    LinalgPromotionOptions options);
 
 /// Emit a suitable vector form for a Linalg op with fully static shape.
 LogicalResult vectorizeLinalgOp(OpBuilder &builder, Operation *op,
                                 SmallVectorImpl<Value> &newResults);
 
 /// Emits a loop nest of `scf.for` with the proper body for `linalgOp`.
-Optional<LinalgLoops> linalgOpToLoops(PatternRewriter &rewriter,
-                                      LinalgOp linalgOp);
+FailureOr<LinalgLoops> linalgOpToLoops(PatternRewriter &rewriter,
+                                       LinalgOp linalgOp);
 
 /// Emits a loop nest of `scf.parallel` with the proper body for `linalgOp`.
-Optional<LinalgLoops> linalgOpToParallelLoops(PatternRewriter &rewriter,
-                                              LinalgOp linalgOp);
+FailureOr<LinalgLoops> linalgOpToParallelLoops(PatternRewriter &rewriter,
+                                               LinalgOp linalgOp);
 
 /// Emits a loop nest of `affine.for` with the proper body for `linalgOp`.
-Optional<LinalgLoops> linalgOpToAffineLoops(PatternRewriter &rewriter,
-                                            LinalgOp linalgOp);
+FailureOr<LinalgLoops> linalgOpToAffineLoops(PatternRewriter &rewriter,
+                                             LinalgOp linalgOp);
 
 //===----------------------------------------------------------------------===//
 // Preconditions that ensure the corresponding transformation succeeds and can
@@ -961,15 +961,15 @@ struct LinalgLoweringPattern : public RewritePattern {
       // TODO: Move lowering to library calls here.
       return failure();
     case LinalgLoweringType::Loops:
-      if (!linalgOpToLoops(rewriter, op))
+      if (failed(linalgOpToLoops(rewriter, op)))
         return failure();
       break;
     case LinalgLoweringType::AffineLoops:
-      if (!linalgOpToAffineLoops(rewriter, op))
+      if (failed(linalgOpToAffineLoops(rewriter, op)))
         return failure();
       break;
     case LinalgLoweringType::ParallelLoops:
-      if (!linalgOpToParallelLoops(rewriter, op))
+      if (failed(linalgOpToParallelLoops(rewriter, op)))
         return failure();
       break;
     }

diff --git a/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h b/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
index 03e1a63a880f..b32d8e1c12b0 100644
--- a/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
+++ b/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
@@ -164,25 +164,25 @@ struct FusionInfo {
 /// Implements the fusion part of the "tileAndFuse on buffers" transformation
 /// and thus requires the `consumerOpOperand` to be a `subview` op (generally
 /// obtained by applying the tiling transformation).
-Optional<FusionInfo> fuseProducerOfBuffer(OpBuilder &b,
-                                          OpOperand &consumerOpOperand,
-                                          const LinalgDependenceGraph &graph);
+FailureOr<FusionInfo> fuseProducerOfBuffer(OpBuilder &b,
+                                           OpOperand &consumerOpOperand,
+                                           const LinalgDependenceGraph &graph);
 /// Tensor counterpart of `fuseProducerOfBuffer`.
 /// This implements the fusion part of the "tileAndFuse on tensors"
 /// transformation and thus requires the `consumerOpOperand` to be a
 /// `extract_slice` op (generally obtained by applying the tiling
 /// transformation).
-Optional<FusionInfo> fuseProducerOfTensor(OpBuilder &b,
-                                          OpOperand &consumerOpOperand);
+FailureOr<FusionInfo> fuseProducerOfTensor(OpBuilder &b,
+                                           OpOperand &consumerOpOperand);
 /// Tensor counterpart of `fuseProducerOfBuffer`.
 /// This implements the fusion part of the "tileAndFuse on tensors"
 /// transformation and thus requires the `consumerOpOperand` to be a
 /// `extract_slice` op (generally obtained by applying the tiling
 /// transformation). Assumes `producerOfTensor` is a Linalg op that produces
 /// `consumerOpOperand`.
-Optional<FusionInfo> fuseProducerOfTensor(OpBuilder &b,
-                                          OpResult producerOpResult,
-                                          OpOperand &consumerOpOperand);
+FailureOr<FusionInfo> fuseProducerOfTensor(OpBuilder &b,
+                                           OpResult producerOpResult,
+                                           OpOperand &consumerOpOperand);
 
 //===----------------------------------------------------------------------===//
 // Fusion on tensor utilities

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
index 067d5499253c..707158217f79 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
@@ -331,7 +331,7 @@ bool mlir::linalg::isFusableInto(const LinalgDependenceGraph &graph,
 /// For `consumer` with buffer semantics, find the Linalg operation on buffers
 /// that is the last writer of `consumerOpOperand`. For now the fusable
 /// dependence is returned as an instance of the `dependenceGraph`.
-static Optional<LinalgDependenceGraph::LinalgDependenceGraphElem>
+static FailureOr<LinalgDependenceGraph::LinalgDependenceGraphElem>
 findFusableProducer(OpOperand &consumerOpOperand,
                     const LinalgDependenceGraph &dependenceGraph) {
   LLVM_DEBUG(llvm::dbgs() << "findFusableProducer for: "
@@ -340,7 +340,7 @@ findFusableProducer(OpOperand &consumerOpOperand,
                           << *consumerOpOperand.getOwner() << "\n");
   LinalgOp consumerOp = dyn_cast<LinalgOp>(consumerOpOperand.getOwner());
   if (!consumerOp)
-    return {};
+    return failure();
 
   // Only consider RAW and WAW atm.
   for (auto depType : {
@@ -386,37 +386,37 @@ findFusableProducer(OpOperand &consumerOpOperand,
       }
     }
   }
-  return {};
+  return failure();
 }
 
-Optional<FusionInfo>
+FailureOr<FusionInfo>
 mlir::linalg::fuseProducerOfBuffer(OpBuilder &b, OpOperand &consumerOpOperand,
                                    const LinalgDependenceGraph &graph) {
   Optional<LinalgDependenceGraph::LinalgDependenceGraphElem> fusableDependence =
       findFusableProducer(consumerOpOperand, graph);
   if (!fusableDependence)
-    return llvm::None;
+    return failure();
 
   LinalgOp producerOp = dyn_cast<LinalgOp>(fusableDependence->getDependentOp());
   if (!producerOp)
-    return llvm::None;
+    return failure();
 
   // If producer is already in the same block as consumer, we are done.
   if (consumerOpOperand.get().getParentBlock() ==
       fusableDependence->getDependentValue().getParentBlock())
-    return llvm::None;
+    return failure();
 
   Optional<AffineMap> producerMap =
       fusableDependence->getDependentOpViewIndexingMap();
   if (!producerMap)
-    return llvm::None;
+    return failure();
 
   // Must be a subview or an extract_slice to guarantee there are loops we can
   // fuse into.
   auto subView = consumerOpOperand.get().getDefiningOp<memref::SubViewOp>();
   if (!subView) {
     LLVM_DEBUG(llvm::dbgs() << "\nNot fusable (not a subview)");
-    return llvm::None;
+    return failure();
   }
 
   // Fuse `producer` just before `consumer`.
@@ -459,28 +459,28 @@ static void getProducerOfTensor(Value tensor, OpResult &opResult) {
   }
 }
 
-Optional<FusionInfo>
+FailureOr<FusionInfo>
 mlir::linalg::fuseProducerOfTensor(OpBuilder &b, OpOperand &consumerOpOperand) {
   Value inputTensor = consumerOpOperand.get();
   OpResult producerOpResult;
   getProducerOfTensor(inputTensor, producerOpResult);
   if (!producerOpResult) {
     LLVM_DEBUG(llvm::dbgs() << "\nUnable to find producer");
-    return {};
+    return failure();
   }
   return fuseProducerOfTensor(b, producerOpResult, consumerOpOperand);
 }
 
-Optional<FusionInfo>
+FailureOr<FusionInfo>
 mlir::linalg::fuseProducerOfTensor(OpBuilder &b, OpResult producerOpResult,
                                    OpOperand &consumerOpOperand) {
   auto producerOp = dyn_cast<LinalgOp>(producerOpResult.getOwner());
   if (!producerOp)
-    return llvm::None;
+    return failure();
 
   LinalgOp consumerOp = dyn_cast<LinalgOp>(consumerOpOperand.getOwner());
   if (!consumerOp)
-    return llvm::None;
+    return failure();
 
   Value inputTensor = consumerOpOperand.get();
 
@@ -489,13 +489,13 @@ mlir::linalg::fuseProducerOfTensor(OpBuilder &b, OpResult producerOpResult,
   if (!sliceOp) {
     LLVM_DEBUG(llvm::dbgs()
                << "\nNot fusable, not an extract_slice op: " << inputTensor);
-    return {};
+    return failure();
   }
 
   // If producer is already in the same block as consumer, we are done.
   if (consumerOpOperand.get().getParentBlock() ==
       producerOpResult.getParentBlock())
-    return {};
+    return failure();
 
   // Insert fused `producer` just before `consumer`.
   OpBuilder::InsertionGuard g(b);
@@ -537,27 +537,27 @@ static AffineMap pruneReductionDimsFromMap(ArrayRef<Attribute> iteratorTypes,
 /// - indexing map of the fused view in the producer : producerIndexMap
 ///     consumerLoopToProducerLoop =
 ///       inverse(producerIndexMap).compose(consumerIndexMap)
-static Optional<AffineMap> getConsumerLoopToProducerLoopMap(
+static FailureOr<AffineMap> getConsumerLoopToProducerLoopMap(
     LinalgDependenceGraph::LinalgDependenceGraphElem dependence) {
   auto producer = dyn_cast<LinalgOp>(dependence.getDependentOp());
   if (!producer)
-    return None;
+    return failure();
 
   Optional<AffineMap> producerIndexingMap =
       dependence.getDependentOpViewIndexingMap();
   Optional<AffineMap> consumerIndexingMap =
       dependence.getIndexingOpViewIndexingMap();
   if (!producerIndexingMap || !consumerIndexingMap)
-    return None;
+    return failure();
 
   AffineMap prunedProducerIndexingMap = pruneReductionDimsFromMap(
       producer.iterator_types().getValue(), *producerIndexingMap);
   if (!prunedProducerIndexingMap.isPermutation())
-    return None;
+    return failure();
 
   if (consumerIndexingMap->getNumResults() !=
       prunedProducerIndexingMap.getNumResults())
-    return None;
+    return failure();
 
   LLVM_DEBUG({
     llvm::dbgs() << "\t producerMap : ";
@@ -572,7 +572,7 @@ static Optional<AffineMap> getConsumerLoopToProducerLoopMap(
 
   AffineMap invProducerIndexMap = inversePermutation(prunedProducerIndexingMap);
   if (!invProducerIndexMap)
-    return None;
+    return failure();
 
   return invProducerIndexMap.compose(*consumerIndexingMap);
 }
@@ -776,7 +776,7 @@ FusableOpDependencesTy mlir::linalg::findAllFusableDependences(
 
 /// Tile the fused loops in the root operation, by setting the tile sizes for
 /// all other loops to zero (those will be tiled later).
-static Optional<TiledLinalgOp>
+static FailureOr<TiledLinalgOp>
 tileRootOperation(OpBuilder &b, LinalgOp op, ArrayRef<Value> tileSizeVector,
                   const LinalgTilingOptions &options,
                   const std::set<unsigned> &fusedLoops) {
@@ -871,12 +871,12 @@ fuseOperations(OpBuilder &b, LinalgOp rootOp, TiledLinalgOp tiledLinalgOp,
   return fusedOps;
 }
 
-static Optional<TiledAndFusedLinalgOps>
+static FailureOr<TiledAndFusedLinalgOps>
 tileAndFuseLinalgOpsImpl(OpBuilder &b, ArrayRef<LinalgOp> ops,
                          const LinalgDependenceGraph &dependenceGraph,
                          const LinalgTilingOptions &tilingOptions) {
   if (ops.size() < 2)
-    return llvm::None;
+    return failure();
   LinalgOp rootOp = ops.back();
   if (!llvm::all_of(
           ops,
@@ -887,13 +887,13 @@ tileAndFuseLinalgOpsImpl(OpBuilder &b, ArrayRef<LinalgOp> ops,
     rootOp.emitError(
         "unable to fuse operations that have tensor semantics with operations "
         "that have buffer semantics and viceversa.");
-    return llvm::None;
+    return failure();
   }
   // TODO: Support interchange with tile + fuse. This might actually help do
   // better fusion.
   if (!tilingOptions.interchangeVector.empty()) {
     rootOp.emitRemark("unable to handle tile and fuse with interchange");
-    return llvm::None;
+    return failure();
   }
 
   OpBuilder::InsertionGuard guard(b);
@@ -905,7 +905,7 @@ tileAndFuseLinalgOpsImpl(OpBuilder &b, ArrayRef<LinalgOp> ops,
       findAllFusableDependences(ops, dependenceGraph);
   if (fusableDependences.empty()) {
     LLVM_DEBUG(llvm::dbgs() << "no fusable dependencies found\n");
-    return llvm::None;
+    return failure();
   }
 
   TiledAndFusedLinalgOps ret;
@@ -917,17 +917,17 @@ tileAndFuseLinalgOpsImpl(OpBuilder &b, ArrayRef<LinalgOp> ops,
   // just return.
   if (ret.fusedLoopDims.empty()) {
     LLVM_DEBUG(llvm::dbgs() << "no fusable loops found\n");
-    return llvm::None;
+    return failure();
   }
 
   // Tile the fused loops in the last operation in the list.
   SmallVector<Value, 4> tileSizeVector =
       tilingOptions.tileSizeComputationFunction(b, rootOp);
-  Optional<TiledLinalgOp> tiledRootOp = tileRootOperation(
+  FailureOr<TiledLinalgOp> tiledRootOp = tileRootOperation(
       b, rootOp, tileSizeVector, tilingOptions, ret.fusedLoopDims);
-  if (!tiledRootOp) {
+  if (failed(tiledRootOp)) {
     rootOp.emitRemark("failed to tile the fused loops");
-    return llvm::None;
+    return failure();
   }
   ret.op = tiledRootOp->op;
   ret.fusedLoops.assign(tiledRootOp->loops.begin(), tiledRootOp->loops.end());
@@ -939,7 +939,7 @@ tileAndFuseLinalgOpsImpl(OpBuilder &b, ArrayRef<LinalgOp> ops,
   return ret;
 }
 
-Optional<TiledAndFusedLinalgOps>
+FailureOr<TiledAndFusedLinalgOps>
 mlir::linalg::tileAndFuseLinalgOps(OpBuilder &b, ArrayRef<LinalgOp> ops,
                                    const LinalgDependenceGraph &dependenceGraph,
                                    const LinalgTilingOptions &tilingOptions) {
@@ -950,5 +950,5 @@ mlir::linalg::tileAndFuseLinalgOps(OpBuilder &b, ArrayRef<LinalgOp> ops,
     return tileAndFuseLinalgOpsImpl(b, ops, dependenceGraph, tilingOptions);
   default:;
   }
-  return llvm::None;
+  return failure();
 }

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
index ff19f6adc1fe..7a706141e440 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
@@ -194,8 +194,8 @@ static void replaceIndexOpsByInductionVariables(LinalgOp linalgOp,
 }
 
 template <typename LoopTy>
-static Optional<LinalgLoops> linalgOpToLoopsImpl(PatternRewriter &rewriter,
-                                                 LinalgOp linalgOp) {
+static FailureOr<LinalgLoops> linalgOpToLoopsImpl(PatternRewriter &rewriter,
+                                                  LinalgOp linalgOp) {
   using LoadOpTy =
       typename std::conditional<std::is_same<LoopTy, AffineForOp>::value,
                                 AffineLoadOp, memref::LoadOp>::type;
@@ -227,12 +227,12 @@ static Optional<LinalgLoops> linalgOpToLoopsImpl(PatternRewriter &rewriter,
   SetVector<Operation *> loopSet;
   for (Value iv : allIvs) {
     if (!iv)
-      return {};
+      return failure();
     // The induction variable is a block argument of the entry block of the
     // loop operation.
     BlockArgument ivVal = iv.dyn_cast<BlockArgument>();
     if (!ivVal)
-      return {};
+      return failure();
     loopSet.insert(ivVal.getOwner()->getParentOp());
   }
   LinalgLoops loops(loopSet.begin(), loopSet.end());
@@ -253,7 +253,7 @@ class LinalgRewritePattern : public RewritePattern {
     auto linalgOp = dyn_cast<LinalgOp>(op);
     if (!isa<LinalgOp>(op))
       return failure();
-    if (!linalgOpToLoopsImpl<LoopType>(rewriter, linalgOp))
+    if (failed(linalgOpToLoopsImpl<LoopType>(rewriter, linalgOp)))
       return failure();
     rewriter.eraseOp(op);
     return success();
@@ -547,20 +547,20 @@ mlir::createConvertLinalgToAffineLoopsPass() {
 }
 
 /// Emits a loop nest of `affine.for` with the proper body for `linalgOp`.
-Optional<LinalgLoops>
+FailureOr<LinalgLoops>
 mlir::linalg::linalgOpToAffineLoops(PatternRewriter &rewriter,
                                     LinalgOp linalgOp) {
   return linalgOpToLoopsImpl<AffineForOp>(rewriter, linalgOp);
 }
 
 /// Emits a loop nest of `scf.for` with the proper body for `linalgOp`.
-Optional<LinalgLoops> mlir::linalg::linalgOpToLoops(PatternRewriter &rewriter,
-                                                    LinalgOp linalgOp) {
+FailureOr<LinalgLoops> mlir::linalg::linalgOpToLoops(PatternRewriter &rewriter,
+                                                     LinalgOp linalgOp) {
   return linalgOpToLoopsImpl<scf::ForOp>(rewriter, linalgOp);
 }
 
 /// Emits a loop nest of `scf.parallel` with the proper body for `linalgOp`.
-Optional<LinalgLoops>
+FailureOr<LinalgLoops>
 mlir::linalg::linalgOpToParallelLoops(PatternRewriter &rewriter,
                                       LinalgOp linalgOp) {
   return linalgOpToLoopsImpl<scf::ParallelOp>(rewriter, linalgOp);

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
index 55c49c11f553..ed86cfc080a9 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
@@ -210,7 +210,7 @@ LinalgOpInstancePromotionOptions::LinalgOpInstancePromotionOptions(
 // To account for general boundary effects, padding must be performed on the
 // boundary tiles. For now this is done with an unconditional `fill` op followed
 // by a partial `copy` op.
-Optional<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
+FailureOr<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
     OpBuilder &b, Location loc, memref::SubViewOp subView,
     AllocBufferCallbackFn allocationFn, DataLayout &layout) {
   auto viewType = subView.getType();
@@ -236,7 +236,7 @@ Optional<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
   // allocating the promoted buffer.
   Optional<Value> fullLocalView = allocationFn(b, subView, fullSizes, layout);
   if (!fullLocalView)
-    return {};
+    return failure();
   SmallVector<OpFoldResult, 4> zeros(fullSizes.size(), b.getIndexAttr(0));
   SmallVector<OpFoldResult, 4> ones(fullSizes.size(), b.getIndexAttr(1));
   auto partialLocalView = b.createOrFold<memref::SubViewOp>(
@@ -244,21 +244,21 @@ Optional<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
   return PromotionInfo{*fullLocalView, partialLocalView};
 }
 
-static Optional<MapVector<int64_t, PromotionInfo>>
+static FailureOr<MapVector<int64_t, PromotionInfo>>
 promoteSubViews(ImplicitLocOpBuilder &b,
                 LinalgOpInstancePromotionOptions options, DataLayout &layout) {
   if (options.subViews.empty())
-    return {};
+    return failure();
 
   MapVector<int64_t, PromotionInfo> promotionInfoMap;
 
   for (auto v : options.subViews) {
     memref::SubViewOp subView =
         cast<memref::SubViewOp>(v.second.getDefiningOp());
-    Optional<PromotionInfo> promotionInfo = promoteSubviewAsNewBuffer(
+    auto promotionInfo = promoteSubviewAsNewBuffer(
         b, b.getLoc(), subView, options.allocationFn, layout);
-    if (!promotionInfo)
-      return {};
+    if (failed(promotionInfo))
+      return failure();
     promotionInfoMap[v.first] = *promotionInfo;
 
     // Only fill the buffer if the full local view is used
@@ -283,7 +283,7 @@ promoteSubViews(ImplicitLocOpBuilder &b,
             })
             .Default([](auto) { return Value(); });
     if (!fillVal)
-      return {};
+      return failure();
     b.create<linalg::FillOp>(fillVal, promotionInfo->fullLocalView);
   }
 
@@ -295,21 +295,21 @@ promoteSubViews(ImplicitLocOpBuilder &b,
     if (failed(options.copyInFn(
             b, cast<memref::SubViewOp>(v.second.getDefiningOp()),
             info->second.partialLocalView)))
-      return {};
+      return failure();
   }
   return promotionInfoMap;
 }
 
-static Optional<LinalgOp>
+static FailureOr<LinalgOp>
 promoteSubViews(ImplicitLocOpBuilder &b, LinalgOp op,
                 LinalgOpInstancePromotionOptions options, DataLayout &layout) {
   assert(op.hasBufferSemantics() && "expected linalg op with buffer semantics");
 
   // 1. Promote the specified views and use them in the new op.
   auto promotedBuffersAndViews = promoteSubViews(b, options, layout);
-  if (!promotedBuffersAndViews ||
+  if (failed(promotedBuffersAndViews) ||
       promotedBuffersAndViews->size() != options.subViews.size())
-    return {};
+    return failure();
 
   // 2. Append all other operands as they appear, this enforces that such
   // operands are not views. This is to support cases such as FillOp taking
@@ -343,7 +343,7 @@ promoteSubViews(ImplicitLocOpBuilder &b, LinalgOp op,
   for (auto viewAndPartialLocalView : writebackViews) {
     if (failed(options.copyOutFn(b, viewAndPartialLocalView.second,
                                  viewAndPartialLocalView.first)))
-      return {};
+      return failure();
   }
 
   // 4. Dealloc all local buffers.
@@ -374,13 +374,16 @@ mlir::linalg::promoteSubviewsPrecondition(Operation *op,
   return failure();
 }
 
-Optional<LinalgOp>
+FailureOr<LinalgOp>
 mlir::linalg::promoteSubViews(OpBuilder &builder, LinalgOp linalgOp,
                               LinalgPromotionOptions options) {
   LinalgOpInstancePromotionOptions linalgOptions(linalgOp, options);
   auto layout = DataLayout::closest(linalgOp);
   ImplicitLocOpBuilder b(linalgOp.getLoc(), builder);
-  return ::promoteSubViews(b, linalgOp, linalgOptions, layout);
+  auto res = ::promoteSubViews(b, linalgOp, linalgOptions, layout);
+  if (failed(res))
+    return failure();
+  return res;
 }
 
 namespace {
@@ -400,7 +403,8 @@ struct LinalgPromotionPass : public LinalgPromotionBase<LinalgPromotionPass> {
         return;
       LLVM_DEBUG(llvm::dbgs() << "Promote: " << *(op.getOperation()) << "\n");
       ImplicitLocOpBuilder b(op.getLoc(), op);
-      promoteSubViews(b, op, options);
+      // TODO: signalPassFailure() ?
+      (void)promoteSubViews(b, op, options);
     });
   }
 };
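
The TODO added in LinalgPromotionPass ("signalPassFailure() ?") could be
resolved by propagating the new FailureOr result out of the walk. A hedged
sketch of one possibility, not what this commit does; it assumes the pass's
existing getFunction()-based walk and an already-constructed `options`:

  // Hypothetical follow-up: collect promotion failures and signal them.
  bool promotionFailed = false;
  getFunction().walk([&](LinalgOp op) {
    if (failed(promoteSubviewsPrecondition(op, options)))
      return;
    ImplicitLocOpBuilder b(op.getLoc(), op);
    if (failed(promoteSubViews(b, op, options)))
      promotionFailed = true;
  });
  if (promotionFailed)
    signalPassFailure();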

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index cd4406d416f6..d7a26eebae0f 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -152,7 +152,7 @@ static Value insertSliceIntoTensor(OpBuilder &b, Location loc,
 }
 
 template <typename LoopTy>
-static Optional<TiledLinalgOp>
+static FailureOr<TiledLinalgOp>
 tileLinalgOpImpl(OpBuilder &b, LinalgOp op, ValueRange tileSizes,
                  const LinalgTilingOptions &options) {
   auto nLoops = op.getNumLoops();
@@ -160,13 +160,13 @@ tileLinalgOpImpl(OpBuilder &b, LinalgOp op, ValueRange tileSizes,
   tileSizes = tileSizes.take_front(nLoops);
 
   if (llvm::all_of(tileSizes, isZero))
-    return llvm::None;
+    return failure();
 
   // 1. Build the tiled loop ranges.
   auto allShapeSizes = op.createFlatListOfOperandDims(b, op.getLoc());
   AffineMap shapeSizesToLoopsMap = op.getShapesToLoopsMap();
   if (!shapeSizesToLoopsMap)
-    return llvm::None;
+    return failure();
 
   SmallVector<Range, 4> loopRanges;
   LoopIndexToRangeIndexMap loopIndexToRangeIndex;
@@ -291,13 +291,13 @@ tileLinalgOpImpl(OpBuilder &b, LinalgOp op, ValueRange tileSizes,
 }
 
 template <typename LoopTy>
-Optional<TiledLinalgOp> static tileLinalgOpImpl(
+FailureOr<TiledLinalgOp> static tileLinalgOpImpl(
     OpBuilder &b, LinalgOp op, const LinalgTilingOptions &options) {
   OpBuilder::InsertionGuard g(b);
   b.setInsertionPoint(op);
 
   if (!options.tileSizeComputationFunction)
-    return llvm::None;
+    return failure();
 
   // Enforce the convention that "tiling by zero" skips tiling a particular
   // dimension. This convention is significantly simpler to handle instead of
@@ -313,7 +313,7 @@ Optional<TiledLinalgOp> static tileLinalgOpImpl(
   return tileLinalgOpImpl<LoopTy>(b, op, tileSizeVector, options);
 }
 
-Optional<TiledLinalgOp>
+FailureOr<TiledLinalgOp>
 mlir::linalg::tileLinalgOp(OpBuilder &b, LinalgOp op,
                            const LinalgTilingOptions &options) {
   switch (options.loopType) {
@@ -325,7 +325,7 @@ mlir::linalg::tileLinalgOp(OpBuilder &b, LinalgOp op,
     return tileLinalgOpImpl<linalg::TiledLoopOp>(b, op, options);
   default:;
   }
-  return llvm::None;
+  return failure();
 }
 
 /// Generate a loop nest around a given PadTensorOp (for tiling). `newPadOp`

diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp
index 069a17384f1f..7d87e9ae6d7c 100644
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp
@@ -187,26 +187,28 @@ static LogicalResult fuseLinalgOpsGreedily(FuncOp f) {
         // removed.
         linalg::Aliases aliases;
         linalg::LinalgDependenceGraph graph(aliases, linalgOps);
-        if (auto info = fuseProducerOfBuffer(b, *opOperand, graph)) {
-          auto *originalOp = info->originalProducer.getOperation();
-          eraseSet.insert(originalOp);
-          auto *originalOpInLinalgOpsVector =
-              std::find(linalgOps.begin(), linalgOps.end(), originalOp);
-          *originalOpInLinalgOpsVector = info->fusedProducer.getOperation();
-          changed = true;
-        }
+        auto info = fuseProducerOfBuffer(b, *opOperand, graph);
+        if (failed(info))
+          continue;
+        auto *originalOp = info->originalProducer.getOperation();
+        eraseSet.insert(originalOp);
+        auto *originalOpInLinalgOpsVector =
+            std::find(linalgOps.begin(), linalgOps.end(), originalOp);
+        *originalOpInLinalgOpsVector = info->fusedProducer.getOperation();
+        changed = true;
       } else if (opOperand->get().getType().isa<RankedTensorType>()) {
         // Tile and Fuse tensor input.
         if (opOperand->getOperandNumber() >= linalgOp.getNumInputs())
           continue;
-        if (auto info = fuseProducerOfTensor(b, *opOperand)) {
-          auto *originalOp = info->originalProducer.getOperation();
-          auto *originalOpInLinalgOpsVector =
-              std::find(linalgOps.begin(), linalgOps.end(), originalOp);
-          *originalOpInLinalgOpsVector = info->fusedProducer.getOperation();
-          // Don't mark for erasure in the tensor case, let DCE handle this.
-          changed = true;
-        }
+        auto info = fuseProducerOfTensor(b, *opOperand);
+        if (failed(info))
+          continue;
+        auto *originalOp = info->originalProducer.getOperation();
+        auto *originalOpInLinalgOpsVector =
+            std::find(linalgOps.begin(), linalgOps.end(), originalOp);
+        *originalOpInLinalgOpsVector = info->fusedProducer.getOperation();
+        // Don't mark for erasure in the tensor case, let DCE handle this.
+        changed = true;
       }
     }
   }
