[Mlir-commits] [mlir] 23bd2e9 - [mlir][Affine] Delete duplicate code: `applyMapToValues`

Matthias Springer llvmlistbot at llvm.org
Fri Jun 30 05:02:57 PDT 2023


Author: Matthias Springer
Date: 2023-06-30T14:01:13+02:00
New Revision: 23bd2e96fe3c945972eec8d8ad963651dd13ea6a

URL: https://github.com/llvm/llvm-project/commit/23bd2e96fe3c945972eec8d8ad963651dd13ea6a
DIFF: https://github.com/llvm/llvm-project/commit/23bd2e96fe3c945972eec8d8ad963651dd13ea6a.diff

LOG: [mlir][Affine] Delete duplicate code: `applyMapToValues`

The same functionality is provided by `makeComposedFoldedAffineApply`.

Differential Revision: https://reviews.llvm.org/D154199
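For reference, a minimal migration sketch (not part of the commit; the wrapper name `applySingleResultMap` is illustrative). A single-result `applyMapToValues` call becomes a `makeComposedFoldedAffineApply` call, which takes `OpFoldResult` operands instead of a `ValueRange` and returns an `OpFoldResult` that can be rematerialized as a Value where an SSA result is required:

// Migration sketch, assuming MLIR at this revision. The helper name
// `applySingleResultMap` is hypothetical, not part of the API.
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/Utils/Utils.h"

using namespace mlir;

// Before (removed by this commit), for a single-result map:
//   Value v = affine::applyMapToValues(b, loc, map, values)[0];
//
// After: compose, canonicalize, and constant-fold in one call. The
// result is an OpFoldResult: an Attribute if it folded to a constant,
// a Value otherwise.
static Value applySingleResultMap(OpBuilder &b, Location loc, AffineMap map,
                                  ArrayRef<OpFoldResult> operands) {
  OpFoldResult ofr =
      affine::makeComposedFoldedAffineApply(b, loc, map, operands);
  // Materialize an SSA value when one is needed, as the ReifyPadOp
  // change below does with getValueOrCreateConstantIndexOp.
  return getValueOrCreateConstantIndexOp(b, loc, ofr);
}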

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/Affine/IR/AffineOps.h
    mlir/lib/Dialect/Affine/IR/AffineOps.cpp
    mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
    mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
    mlir/test/Dialect/Linalg/pad_fusion.mlir
    mlir/test/Dialect/Linalg/resolve-shaped-type-result-dims.mlir
    mlir/test/Dialect/Linalg/vectorization-masked.mlir
    mlir/test/Dialect/Tensor/bufferize.mlir

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h
index 778c3b3593236f..153878a41a043c 100644
--- a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h
+++ b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h
@@ -425,10 +425,6 @@ OpFoldResult makeComposedFoldedAffineMax(OpBuilder &b, Location loc,
                                          AffineMap map,
                                          ArrayRef<OpFoldResult> operands);
 
-/// Returns the values obtained by applying `map` to the list of values.
-SmallVector<Value, 4> applyMapToValues(OpBuilder &b, Location loc,
-                                       AffineMap map, ValueRange values);
-
 /// Given an affine map `map` and its input `operands`, this method composes
 /// into `map`, maps of AffineApplyOps whose results are the values in
 /// `operands`, iteratively until no more of `operands` are the result of an

diff --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
index f110a446758f47..ca676d952fe649 100644
--- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
@@ -1392,33 +1392,6 @@ mlir::affine::makeComposedFoldedAffineMax(OpBuilder &b, Location loc,
   return makeComposedFoldedMinMax<AffineMaxOp>(b, loc, map, operands);
 }
 
-/// Fully compose map with operands and canonicalize the result.
-/// Return the `createOrFold`'ed AffineApply op.
-static Value createFoldedComposedAffineApply(OpBuilder &b, Location loc,
-                                             AffineMap map,
-                                             ValueRange operandsRef) {
-  SmallVector<Value, 4> operands(operandsRef.begin(), operandsRef.end());
-  fullyComposeAffineMapAndOperands(&map, &operands);
-  canonicalizeMapAndOperands(&map, &operands);
-  return b.createOrFold<AffineApplyOp>(loc, map, operands);
-}
-
-SmallVector<Value, 4> mlir::affine::applyMapToValues(OpBuilder &b, Location loc,
-                                                     AffineMap map,
-                                                     ValueRange values) {
-  SmallVector<Value, 4> res;
-  res.reserve(map.getNumResults());
-  unsigned numDims = map.getNumDims(), numSym = map.getNumSymbols();
-  // For each `expr` in `map`, applies the `expr` to the values extracted from
-  // ranges. If the resulting application can be folded into a Value, the
-  // folding occurs eagerly.
-  for (auto expr : map.getResults()) {
-    AffineMap map = AffineMap::get(numDims, numSym, expr);
-    res.push_back(createFoldedComposedAffineApply(b, loc, map, values));
-  }
-  return res;
-}
-
 // A symbol may appear as a dim in affine.apply operations. This function
 // canonicalizes dims that are valid symbols into actual symbols.
 template <class MapOrSet>

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index f4e9c24a086185..ebfdc6ed6b0a03 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -529,7 +529,7 @@ tileLinalgOpImpl(RewriterBase &b, LinalgOp op, ArrayRef<OpFoldResult> tileSizes,
     procInfo.resize(
         iteratorTypes.size(),
         linalg::ProcInfo{nullptr, nullptr, linalg::DistributionMethod::None});
-    // Collect loop ranges of tiled loopss, loops that are parallel.
+    // Collect loop ranges of tiled loops, loops that are parallel.
     SmallVector<Range> parallelLoopRanges;
     for (const auto &iteratorType : llvm::enumerate(iteratorTypes)) {
       if (!isParallelIterator(iteratorType.value()))
@@ -559,10 +559,13 @@ tileLinalgOpImpl(RewriterBase &b, LinalgOp op, ArrayRef<OpFoldResult> tileSizes,
     // loop ranges and the iterator types. Apply its inverse to the
     // resulting loop `ivs` to match the op definition.
     SmallVector<Value, 4> interchangedIvs;
-    if (!options.interchangeVector.empty())
-      interchangedIvs = applyMapToValues(b, loc, invPermutationMap, ivs);
-    else
+    if (!options.interchangeVector.empty()) {
+      for (AffineExpr result : invPermutationMap.getResults())
+        interchangedIvs.push_back(
+            ivs[result.cast<AffineDimExpr>().getPosition()]);
+    } else {
       interchangedIvs.assign(ivs.begin(), ivs.end());
+    }
 
     // Tile the `operandValuesToUse` that either match the `op` operands
     // themselves or the tile loop arguments forwarding them.

diff --git a/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
index a964d9116f11c5..c9e71a820dae8b 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
@@ -49,15 +49,15 @@ static OpFoldResult getCollapsedOutputDimFromInputShape(
       map.getResults().front().cast<AffineDimExpr>().getPosition();
   unsigned endPos = map.getResults().back().cast<AffineDimExpr>().getPosition();
   AffineExpr expr;
-  SmallVector<Value, 2> dynamicDims;
+  SmallVector<OpFoldResult> dynamicDims;
   for (auto dim : llvm::seq_inclusive(startPos, endPos)) {
     dynamicDims.push_back(builder.createOrFold<tensor::DimOp>(loc, src, dim));
     AffineExpr currExpr = builder.getAffineSymbolExpr(dim - startPos);
     expr = (expr ? expr * currExpr : currExpr);
   }
-  return affine::applyMapToValues(
+  return affine::makeComposedFoldedAffineApply(
       builder, loc, AffineMap::get(0, endPos - startPos + 1, expr),
-      dynamicDims)[0];
+      dynamicDims);
 }
 
 /// Given the `src` of a collapsing reshape op and its reassociation maps,
@@ -102,12 +102,13 @@ static OpFoldResult getExpandedOutputDimFromInputShape(
            "dimensions");
     linearizedStaticDim *= d.value();
   }
-  Value sourceDim = builder.create<tensor::DimOp>(loc, src, sourceDimPos);
-  return affine::applyMapToValues(
+  OpFoldResult sourceDim =
+      builder.create<tensor::DimOp>(loc, src, sourceDimPos).getResult();
+  return affine::makeComposedFoldedAffineApply(
       builder, loc,
       AffineMap::get(
           0, 1, builder.getAffineSymbolExpr(0).floorDiv(linearizedStaticDim)),
-      sourceDim)[0];
+      sourceDim);
 }
 
 /// Given the `src` of an expanding reshape op, the reassociation maps and the
@@ -174,25 +175,17 @@ struct ReifyPadOp
       }
 
       // Shape along each dimension is source dim + low pad + high pad.
-      SmallVector<Value> mapOperands;
+      SmallVector<OpFoldResult> mapOperands;
       mapOperands.push_back(
           b.createOrFold<tensor::DimOp>(loc, padOp.getSource(), dim));
-      AffineExpr expr = b.getAffineDimExpr(0);
-      unsigned numSymbols = 0;
-      auto addOpFoldResult = [&](OpFoldResult valueOrAttr) {
-        if (Value v = llvm::dyn_cast_if_present<Value>(valueOrAttr)) {
-          expr = expr + b.getAffineSymbolExpr(numSymbols++);
-          mapOperands.push_back(v);
-          return;
-        }
-        int64_t staticValue =
-            llvm::cast<IntegerAttr>(valueOrAttr.get<Attribute>()).getInt();
-        expr = expr + staticValue;
-      };
-      addOpFoldResult(lowPad[dim]);
-      addOpFoldResult(highPad[dim]);
-      shapes.push_back(affine::applyMapToValues(
-          b, loc, AffineMap::get(1, numSymbols, expr), mapOperands)[0]);
+      mapOperands.push_back(lowPad[dim]);
+      mapOperands.push_back(highPad[dim]);
+      AffineExpr expr = b.getAffineDimExpr(0) + b.getAffineSymbolExpr(0) +
+                        b.getAffineSymbolExpr(1);
+      shapes.push_back(getValueOrCreateConstantIndexOp(
+          b, loc,
+          affine::makeComposedFoldedAffineApply(
+              b, loc, AffineMap::get(1, 2, expr), mapOperands)));
     }
     reifiedReturnShapes.emplace_back(std::move(shapes));
     return success();

diff --git a/mlir/test/Dialect/Linalg/pad_fusion.mlir b/mlir/test/Dialect/Linalg/pad_fusion.mlir
index 36eca8eb916d52..a0d9a6ded34c45 100644
--- a/mlir/test/Dialect/Linalg/pad_fusion.mlir
+++ b/mlir/test/Dialect/Linalg/pad_fusion.mlir
@@ -22,7 +22,7 @@ func.func @dynamic_pad_fusion(%arg0 : tensor<?x?xf32>, %arg1 : index, %arg2 : in
   return %1 : tensor<?x?xf32>
 }
 
-//  CHECK-DAG: #[[MAP:.+]] = affine_map<()[s0, s1, s2] -> (s2 + s0 + s1)>
+//  CHECK-DAG: #[[MAP:.+]] = affine_map<()[s0, s1, s2] -> (s0 + s1 + s2)>
 //      CHECK: func @dynamic_pad_fusion
 // CHECK-SAME:     %[[ARG0:.+]]: tensor<?x?xf32>
 // CHECK-SAME:     %[[ARG1:[a-zA-Z0-9]+]]: index
@@ -70,7 +70,7 @@ func.func @mixed_pad_fusion(%arg0 : tensor<?x42xf32>, %arg1 : index, %arg2 : ind
     } : tensor<42x?xf32> to tensor<49x?xf32>
   return %1 : tensor<49x?xf32>
 }
-//  CHECK-DAG: #[[MAP:.+]] = affine_map<()[s0, s1, s2] -> (s2 + s0 + s1)>
+//  CHECK-DAG: #[[MAP:.+]] = affine_map<()[s0, s1, s2] -> (s0 + s1 + s2)>
 //      CHECK: func @mixed_pad_fusion
 // CHECK-SAME:     %[[ARG0:.+]]: tensor<?x42xf32>
 // CHECK-SAME:     %[[ARG1:[a-zA-Z0-9]+]]: index

diff --git a/mlir/test/Dialect/Linalg/resolve-shaped-type-result-dims.mlir b/mlir/test/Dialect/Linalg/resolve-shaped-type-result-dims.mlir
index f931fe87187bb2..aeb357d4ee86d7 100644
--- a/mlir/test/Dialect/Linalg/resolve-shaped-type-result-dims.mlir
+++ b/mlir/test/Dialect/Linalg/resolve-shaped-type-result-dims.mlir
@@ -262,8 +262,8 @@ func.func @dim_of_pad_op(%arg0 : tensor<2x?x?xf32>, %arg1 : index, %arg2 : index
    %3 = tensor.dim %0, %c2 : tensor<?x?x?xf32>
    return %1, %2, %3 : index, index, index
 }
-//  CHECK-DAG: #[[MAP0:.+]] = affine_map<()[s0, s1] -> (s1 + s0 + 5)>
-//  CHECK-DAG: #[[MAP1:.+]] = affine_map<()[s0, s1] -> (s1 + s0 + 4)>
+//  CHECK-DAG: #[[MAP0:.+]] = affine_map<()[s0, s1] -> (s0 + s1 + 5)>
+//  CHECK-DAG: #[[MAP1:.+]] = affine_map<()[s0, s1] -> (s0 + s1 + 4)>
 //      CHECK: func @dim_of_pad_op
 // CHECK-SAME:   %[[ARG0:[A-Za-z0-9_]+]]: tensor<2x?x?xf32>
 // CHECK-SAME:   %[[ARG1:[A-Za-z0-9_]+]]: index

diff --git a/mlir/test/Dialect/Linalg/vectorization-masked.mlir b/mlir/test/Dialect/Linalg/vectorization-masked.mlir
index 985dd054c25eba..fc7749aaa36482 100644
--- a/mlir/test/Dialect/Linalg/vectorization-masked.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization-masked.mlir
@@ -405,7 +405,7 @@ transform.sequence failures(propagate) {
 
 // -----
 
-//       CHECK: #[[MAP:.+]] = affine_map<()[s0, s1] -> (s1 + s0)>
+//       CHECK: #[[MAP:.+]] = affine_map<()[s0, s1] -> (s0 + s1)>
 //       CHECK: func @test_masked_vectorize_dynamic_pad
 func.func @test_masked_vectorize_dynamic_pad(
   %0 : tensor<?x?xf32>, %h0 : index, %h1 : index)

diff --git a/mlir/test/Dialect/Tensor/bufferize.mlir b/mlir/test/Dialect/Tensor/bufferize.mlir
index b9382b9844df1a..c7b16315bfed1b 100644
--- a/mlir/test/Dialect/Tensor/bufferize.mlir
+++ b/mlir/test/Dialect/Tensor/bufferize.mlir
@@ -547,7 +547,7 @@ func.func @tensor.reshape(%t1: tensor<?x10xf32>) -> tensor<2x2x5xf32> {
 
 // -----
 
-// CHECK:       #[[$sum_map_1:.+]] = affine_map<()[s0, s1] -> (s1 + s0 + 5)>
+// CHECK:       #[[$sum_map_1:.+]] = affine_map<()[s0, s1] -> (s0 + s1 + 5)>
 // CHECK:       #[[$sum_map_2:.+]] = affine_map<()[s0, s1] -> (s0 + s1 + 10)>
 // CHECK-LABEL: func @tensor.pad(
 //  CHECK-SAME:   %[[t1:.*]]: tensor<?x10xindex>, %[[l2:.*]]: index, %[[h1:.*]]: index, %[[h2:.*]]: index

