[Mlir-commits] [mlir] c2e5226 - [mlir][linalg] Cleanup LinalgOp usage in tiling (NFC).

Tobias Gysi llvmlistbot at llvm.org
Tue Jun 1 01:18:25 PDT 2021


Author: Tobias Gysi
Date: 2021-06-01T08:17:38Z
New Revision: c2e5226a851413464356163fc028f23653dad4cd

URL: https://github.com/llvm/llvm-project/commit/c2e5226a851413464356163fc028f23653dad4cd
DIFF: https://github.com/llvm/llvm-project/commit/c2e5226a851413464356163fc028f23653dad4cd.diff

LOG: [mlir][linalg] Cleanup LinalgOp usage in tiling (NFC).

Replace the uses of deprecated Structured Op Interface methods in Tiling.cpp and Utils.cpp. This patch is based on https://reviews.llvm.org/D103394.

Differential Revision: https://reviews.llvm.org/D103438
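
As a reading aid, the change consistently replaces the deprecated value-based LinalgOp accessors with their OpOperand-based counterparts. The following minimal sketch (not part of the patch; the helper function and header path are illustrative) shows the mapping using only methods that appear in the diff below:

    // Old: op.getInputs(), op.getOutputTensors(), op.getOutputBuffers(),
    //      op.getIndexingMap(operandIndex).
    // New: OpOperand-based accessors whose results convert implicitly to
    //      value vectors, which is why the patch assigns them to
    //      SmallVector<Value>.
    #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
    using namespace mlir;

    static void collectOperands(linalg::LinalgOp op) {
      SmallVector<Value> inputs = op.getInputOperands();
      SmallVector<Value> outputTensors = op.getOutputTensorOperands();
      SmallVector<Value> outputBuffers = op.getOutputBufferOperands();
      (void)inputs; (void)outputTensors; (void)outputBuffers;
    }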

Added: 
    

Modified: 
    mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
    mlir/lib/Dialect/Linalg/Utils/Utils.cpp

Removed: 
    


################################################################################
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index 2ea16f75bedda..b46ac20ec6b82 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -232,11 +232,11 @@ tileLinalgOpImpl(OpBuilder &b, LinalgOp op, ValueRange tileSizes,
     else
       interchangedIvs.assign(ivs.begin(), ivs.end());
 
-    assert(op.getNumOutputTensors() == iterArgs.size() &&
+    assert(op.getOutputTensorOperands().size() == iterArgs.size() &&
            "num output tensors must match number of loop iter arguments");
 
-    auto operands = llvm::to_vector<4>(op.getInputs());
-    SmallVector<Value, 4> outputBuffers = op.getOutputBuffers();
+    SmallVector<Value> operands = op.getInputOperands();
+    SmallVector<Value> outputBuffers = op.getOutputBufferOperands();
     // TODO: thanks to simplifying assumption we do not need to worry about
     // order of output buffers and tensors: there is only ever one kind.
     assert(outputBuffers.empty() || iterArgs.empty());
@@ -252,7 +252,7 @@ tileLinalgOpImpl(OpBuilder &b, LinalgOp op, ValueRange tileSizes,
     // TODO: use an interface/adaptor to avoid leaking position in
     // `tiledOperands`.
     SmallVector<Type, 4> resultTensorTypes;
-    for (OpOperand *opOperand : op.getOutputTensorsOpOperands())
+    for (OpOperand *opOperand : op.getOutputTensorOperands())
       resultTensorTypes.push_back(
           tiledOperands[opOperand->getOperandNumber()].getType());
 
@@ -260,7 +260,7 @@ tileLinalgOpImpl(OpBuilder &b, LinalgOp op, ValueRange tileSizes,
 
     // Insert a subtensor_insert for each output tensor.
     unsigned resultIdx = 0;
-    for (OpOperand *opOperand : op.getOutputTensorsOpOperands()) {
+    for (OpOperand *opOperand : op.getOutputTensorOperands()) {
       // TODO: use an interface/adaptor to avoid leaking position in
       // `tiledOperands`.
       Value outputTensor = tiledOperands[opOperand->getOperandNumber()];

diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
index cb92bf6e23bbe..cb419e4fd8e9d 100644
--- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
@@ -200,7 +200,7 @@ void GenerateLoopNest<scf::ForOp>::doit(
         bodyBuilderFn,
     Optional<LinalgLoopDistributionOptions> distributionOptions,
     ArrayRef<StringRef> distributionTypes) {
-  auto iterArgInitValues = linalgOp.getOutputTensors();
+  SmallVector<Value> iterArgInitValues = linalgOp.getOutputTensorOperands();
   // Create procInfo so it dominates loops, if appropriate.
   SmallVector<ProcInfo, 4> procInfo;
   SmallVector<DistributionMethod, 0> distributionMethod;
@@ -248,7 +248,7 @@ void GenerateLoopNest<AffineForOp>::doit(
                                   ValueRange)>
         bodyBuilderFn,
     Optional<LinalgLoopDistributionOptions>, ArrayRef<StringRef>) {
-  auto iterArgInitValues = linalgOp.getOutputTensors();
+  SmallVector<Value> iterArgInitValues = linalgOp.getOutputTensorOperands();
   assert(iterArgInitValues.empty() && "unexpected AffineForOp init values");
   SmallVector<Value, 4> lbs, ubs, steps;
   unpackRanges(loopRanges, lbs, ubs, steps);
@@ -285,14 +285,17 @@ void GenerateLoopNest<TiledLoopOp>::doit(
   auto wrappedBuilderFn = [&](OpBuilder &nestedBuilder, Location nestedLoc,
                               ValueRange ivs, ValueRange inputs,
                               ValueRange outputs) {
-    scf::ValueVector results = bodyBuilderFn(nestedBuilder, nestedLoc, ivs,
-                                             linalgOp.getOutputTensors());
+    SmallVector<Value> outputTensors = linalgOp.getOutputTensorOperands();
+    scf::ValueVector results =
+        bodyBuilderFn(nestedBuilder, nestedLoc, ivs, outputTensors);
     nestedBuilder.create<linalg::YieldOp>(nestedLoc, results);
   };
 
-  auto tiledLoop = b.create<TiledLoopOp>(
-      loc, lbs, ubs, steps, linalgOp.getInputs(), linalgOp.getOutputs(),
-      b.getArrayAttr(iteratorTypes), wrappedBuilderFn);
+  SmallVector<Value> inputOperands = linalgOp.getInputOperands();
+  SmallVector<Value> outputOperands = linalgOp.getOutputOperands();
+  auto tiledLoop =
+      b.create<TiledLoopOp>(loc, lbs, ubs, steps, inputOperands, outputOperands,
+                            b.getArrayAttr(iteratorTypes), wrappedBuilderFn);
   if (!distributionTypes.empty())
     tiledLoop.setDistributionTypes(b, distributionTypes);
 
@@ -300,11 +303,9 @@ void GenerateLoopNest<TiledLoopOp>::doit(
   auto isInsideTiledLoop = [&](OpOperand &operand) {
     return operand.getOwner()->getBlock() == tiledLoop.getBody();
   };
-  for (auto it :
-       llvm::zip(linalgOp.getInputs(), tiledLoop.getRegionInputArgs()))
+  for (auto it : llvm::zip(inputOperands, tiledLoop.getRegionInputArgs()))
     std::get<0>(it).replaceUsesWithIf(std::get<1>(it), isInsideTiledLoop);
-  for (auto it :
-       llvm::zip(linalgOp.getOutputs(), tiledLoop.getRegionOutputArgs()))
+  for (auto it : llvm::zip(outputOperands, tiledLoop.getRegionOutputArgs()))
     std::get<0>(it).replaceUsesWithIf(std::get<1>(it), isInsideTiledLoop);
 }
 
@@ -452,7 +453,7 @@ void GenerateLoopNest<scf::ParallelOp>::doit(
         bodyBuilderFn,
     Optional<LinalgLoopDistributionOptions> distributionOptions,
     ArrayRef<StringRef> distributionTypes) {
-  auto iterArgInitValues = linalgOp.getOutputTensors();
+  SmallVector<Value> iterArgInitValues = linalgOp.getOutputTensorOperands();
   assert(iterArgInitValues.empty() && "unexpected ParallelOp init values");
   // This function may be passed more iterator types than ranges.
   assert(iteratorTypes.size() >= loopRanges.size() &&
@@ -509,7 +510,7 @@ void GenerateLoopNest<scf::ParallelOp>::doit(
 
 SmallVector<Value, 4> makeTiledShapes(OpBuilder &b, Location loc,
                                       LinalgOp linalgOp,
-                                      ArrayRef<Value> tiledOperands,
+                                      ArrayRef<Value> valuesToTile,
                                       ValueRange ivs, ValueRange tileSizes,
                                       ArrayRef<Value> sizeBounds) {
   assert(ivs.size() == static_cast<size_t>(llvm::count_if(
@@ -533,20 +534,22 @@ SmallVector<Value, 4> makeTiledShapes(OpBuilder &b, Location loc,
     LLVM_DEBUG(llvm::dbgs() << "size: " << subShapeSizes.back() << "\n");
   }
 
+  assert(valuesToTile.size() == linalgOp.getNumInputsAndOutputs() &&
+         "expected one value to tile for every operand");
   MLIRContext *context = b.getContext();
   SmallVector<Value, 4> tiledShapes;
-  tiledShapes.reserve(tiledOperands.size());
-  for (auto en : llvm::enumerate(tiledOperands)) {
-    Value shapedOp = en.value();
+  tiledShapes.reserve(valuesToTile.size());
+  for (OpOperand *opOperand : linalgOp.getInputAndOutputOperands()) {
+    Value shapedOp = valuesToTile[opOperand->getOperandNumber()];
     LLVM_DEBUG(llvm::dbgs() << "makeTiledShapes: for operand " << shapedOp);
-    ShapedType shapedType = shapedOp.getType().cast<ShapedType>();
-    unsigned rank = shapedType.getRank();
-    AffineMap map = linalgOp.getIndexingMap(en.index());
+    int64_t rank = linalgOp.getRank(opOperand);
+    ArrayRef<int64_t> shape = linalgOp.getShape(opOperand);
+    AffineMap map = linalgOp.getTiedIndexingMap(opOperand);
     // If the shape is not tiled, we can use it as is.
     if (!isTiled(map, tileSizes)) {
       tiledShapes.push_back(shapedOp);
-      LLVM_DEBUG(llvm::dbgs()
-                 << ": not tiled: use shape: " << shapedType << "\n");
+      LLVM_DEBUG(llvm::dbgs() << ": not tiled: use shape: "
+                              << opOperand->get().getType() << "\n");
       continue;
     }
     LLVM_DEBUG(llvm::dbgs() << ": tiled: figure out subshape...\n");
@@ -583,7 +586,7 @@ SmallVector<Value, 4> makeTiledShapes(OpBuilder &b, Location loc,
       // The size of the subview / subtensor should be trimmed to avoid
       // out-of-bounds accesses, unless we statically know the subshape size
       // divides the shape size evenly.
-      int64_t shapeSize = shapedType.getDimSize(r);
+      int64_t shapeSize = shape[r];
       auto sizeCst = size.getDefiningOp<ConstantIndexOp>();
       if (ShapedType::isDynamic(shapeSize) || !sizeCst ||
           (shapeSize % sizeCst.getValue()) != 0) {
@@ -610,7 +613,7 @@ SmallVector<Value, 4> makeTiledShapes(OpBuilder &b, Location loc,
       strides.push_back(b.getIndexAttr(1));
     }
 
-    if (shapedType.isa<MemRefType>())
+    if (opOperand->get().getType().isa<MemRefType>())
       tiledShapes.push_back(
           b.create<memref::SubViewOp>(loc, shapedOp, offsets, sizes, strides));
     else

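Taken together, the makeTiledShapes hunks switch from enumerating the tiled values and casting their types to walking the op's OpOperands and querying rank, shape, and indexing map through the interface. A condensed, hypothetical illustration of that pattern, assuming the same includes as the sketch above (the subview/subtensor construction shown in the hunks is elided):

    // Hypothetical condensation of the new iteration scheme; the names
    // mirror the diff, but this helper is not part of the patch.
    static void visitOperandsToTile(linalg::LinalgOp linalgOp,
                                    ArrayRef<Value> valuesToTile) {
      assert(valuesToTile.size() == linalgOp.getNumInputsAndOutputs() &&
             "expected one value to tile for every operand");
      for (OpOperand *opOperand : linalgOp.getInputAndOutputOperands()) {
        Value valueToTile = valuesToTile[opOperand->getOperandNumber()];
        AffineMap map = linalgOp.getTiedIndexingMap(opOperand);
        int64_t rank = linalgOp.getRank(opOperand);
        ArrayRef<int64_t> shape = linalgOp.getShape(opOperand);
        // Buffer vs. tensor is now decided from the operand's type.
        bool isMemRef = opOperand->get().getType().isa<MemRefType>();
        (void)valueToTile; (void)map; (void)rank; (void)shape; (void)isMemRef;
      }
    }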