[Mlir-commits] [mlir] [mlir][sparse] use "current" and "curr" consistently (PR #74656)

llvmlistbot at llvm.org
Wed Dec 6 13:56:49 PST 2023


llvmbot wrote:


@llvm/pr-subscribers-mlir-sparse

Author: Aart Bik (aartbik)

Changes:

Removes `at` in favor of `curr`; also makes the method delegates consistent.
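As a one-glance illustration of the convention this patch establishes (a hand-written sketch distilled from the diff below, not a verbatim excerpt):

```cpp
// Before: the current loop index was called "at", with a matching out-flag,
// and the CodegenEnv delegate used a different name than the method it wraps.
//   static bool isInvariantAffine(AffineExpr a, LoopId at, bool &isAtLoop);
//   unsigned getLoopDepth() const { return loopEmitter.getCurrentDepth(); }
//
// After: "current"/"curr" is used consistently, and the delegate carries the
// same name as the LoopEmitter method it forwards to.
//   static bool isInvariantAffine(AffineExpr a, LoopId curr, bool &isCurrentLoop);
//   unsigned getCurrentDepth() const { return loopEmitter.getCurrentDepth(); }
```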

---

Patch is 26.05 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/74656.diff


2 Files Affected:

- (modified) mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h (+1-1) 
- (modified) mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp (+137-138) 


``````````diff
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h
index 27dc407b493dc..a1947f48393ef 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h
@@ -108,7 +108,7 @@ class CodegenEnv {
     return loopEmitter.unpackTensorLevelRange(std::forward<ContainerTy>(c));
   }
 
-  unsigned getLoopDepth() const { return loopEmitter.getCurrentDepth(); }
+  unsigned getCurrentDepth() const { return loopEmitter.getCurrentDepth(); }
 
   //
   // Code generation environment verify functions.
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
index 6637a26d0e5af..6c9adf9fa21a0 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -45,22 +45,22 @@ using namespace mlir::sparse_tensor;
 //===----------------------------------------------------------------------===//
 
 /// Returns true iff affine expression is invariant. Sets the
-/// parameter `isAtLoop` when expression just became invariant.
-static bool isInvariantAffine(AffineExpr a, LoopId at, bool &isAtLoop) {
+/// parameter `isCurrentLoop` when expression just became invariant.
+static bool isInvariantAffine(AffineExpr a, LoopId curr, bool &isCurrentLoop) {
   switch (a.getKind()) {
   case AffineExprKind::DimId: {
     const LoopId i = cast<AffineDimExpr>(a).getPosition();
-    if (i + 1 == at) {
-      isAtLoop = true;
+    if (i + 1 == curr) {
+      isCurrentLoop = true;
       return true; // becomes invariant at current loop
     }
-    return i < at; // invariant when already generated
+    return i < curr; // invariant when already generated
   }
   case AffineExprKind::Add:
   case AffineExprKind::Mul: {
     auto binOp = cast<AffineBinaryOpExpr>(a);
-    return isInvariantAffine(binOp.getLHS(), at, isAtLoop) &&
-           isInvariantAffine(binOp.getRHS(), at, isAtLoop);
+    return isInvariantAffine(binOp.getLHS(), curr, isCurrentLoop) &&
+           isInvariantAffine(binOp.getRHS(), curr, isCurrentLoop);
   }
   default: {
     assert(isa<AffineConstantExpr>(a));
@@ -89,9 +89,8 @@ static bool findAffine(Merger &merger, TensorId tid, Level lvl, AffineExpr a,
     assert(isDenseLT(lt));
     if (auto binOp = dyn_cast<AffineBinaryOpExpr>(a)) {
       // We do not set dim level format for affine expression like d0 + d1 on
-      // either loop index at d0 or d1.
-      // We continue the recursion merely to check whether current affine is
-      // admissible or not.
+      // either loop index at d0 or d1. We continue the recursion merely to
+      // check whether the current affine expression is admissible.
       return findAffine(merger, tid, lvl, binOp.getLHS(), lt, false) &&
              findAffine(merger, tid, lvl, binOp.getRHS(), lt, false);
     }
@@ -257,10 +256,10 @@ static bool findSparseAnnotations(CodegenEnv &env, bool idxReducBased) {
     const Level lvlRank = map.getNumResults();
     assert(!enc || lvlRank == enc.getLvlRank());
     assert(static_cast<Level>(env.op().getRank(&t)) == lvlRank);
-    // We only need to do index reduction if there is at least one non-trivial
-    // index expression on sparse levels.
-    // If all non-trivial index expression is on dense levels, we can
-    // efficiently rely on the random access to locate the element.
+    // We only need to do index reduction if there is at least one
+    // non-trivial index expression on sparse levels. If all non-trivial
+    // index expressions are on dense levels, we can efficiently rely on
+    // random access to locate the element.
     bool needIdxReduc =
         enc && getNumNonTrivialIdxExpOnSparseLvls(map, t.get()) != 0;
     // If the current tensor being inspected requires affine index, it needs
@@ -413,9 +412,8 @@ static void genInsertionStore(CodegenEnv &env, OpBuilder &builder, OpOperand *t,
   if (!env.isExpand()) {
     const LoopId numLoops = op.getRank(t);
     // Retrieves the first `numLoops` induction variables.
-    SmallVector<Value> ivs = llvm::to_vector(
-        llvm::drop_end(env.emitter().getLoopIVsRange(),
-                       env.emitter().getCurrentDepth() - numLoops));
+    SmallVector<Value> ivs = llvm::to_vector(llvm::drop_end(
+        env.emitter().getLoopIVsRange(), env.getCurrentDepth() - numLoops));
     Value chain = env.getInsertionChain();
     if (!env.getValidLexInsert()) {
       env.updateInsertionChain(builder.create<InsertOp>(loc, rhs, chain, ivs));
@@ -655,7 +653,7 @@ static Value genExp(CodegenEnv &env, RewriterBase &rewriter, ExprId e) {
 
 /// Hoists loop invariant tensor loads for which indices have been exhausted.
 static void genInvariants(CodegenEnv &env, OpBuilder &builder, ExprId exp,
-                          LoopId at, bool atStart) {
+                          LoopId curr, bool isStart) {
   if (exp == ::mlir::sparse_tensor::detail::kInvalidId)
     return;
   if (env.exp(exp).kind == TensorExp::Kind::kTensor) {
@@ -666,19 +664,19 @@ static void genInvariants(CodegenEnv &env, OpBuilder &builder, ExprId exp,
     const auto stt = getSparseTensorType(t.get());
     const Level lvlRank = stt.getLvlRank();
     assert(static_cast<Level>(map.getNumResults()) == lvlRank);
-    bool isAtLoop = at == 0; // for scalar tensors
+    bool isCurrentLoop = curr == 0; // for scalar tensors
     for (Level l = 0; l < lvlRank; l++) {
       const AffineExpr a = map.getResult(l);
-      if (!isInvariantAffine(a, at, /*out*/ isAtLoop))
+      if (!isInvariantAffine(a, curr, /*out*/ isCurrentLoop))
         return; // still in play
     }
-    // All exhausted at this level (isAtLoop denotes exactly at this LoopId).
-    if (!isAtLoop)
+    // All exhausted at current level.
+    if (!isCurrentLoop)
       return;
     OpOperand *lhs = op.getDpsInitOperand(0);
     if (lhs == &t) {
       // Start or end a scalarized reduction.
-      if (atStart) {
+      if (isStart) {
         Value load = env.isCustomReduc() ? env.getCustomRedId()
                                          : genTensorLoad(env, builder, exp);
         env.startReduc(exp, load);
@@ -690,7 +688,7 @@ static void genInvariants(CodegenEnv &env, OpBuilder &builder, ExprId exp,
       }
     } else {
       // Start or end loop invariant hoisting of a tensor load.
-      if (atStart)
+      if (isStart)
         env.merger().setExprValue(exp, genTensorLoad(env, builder, exp));
       else
         env.merger().clearExprValue(exp);
@@ -705,20 +703,20 @@ static void genInvariants(CodegenEnv &env, OpBuilder &builder, ExprId exp,
       env.startCustomReduc(exp); // enter custom
     const ExprId e0 = env.exp(exp).children.e0;
     const ExprId e1 = env.exp(exp).children.e1;
-    genInvariants(env, builder, e0, at, atStart);
-    genInvariants(env, builder, e1, at, atStart);
+    genInvariants(env, builder, e0, curr, isStart);
+    genInvariants(env, builder, e1, curr, isStart);
     if (env.exp(exp).kind == TensorExp::Kind::kReduce)
       env.endCustomReduc(); // exit custom
   }
 }
 
 /// Generates an expanded access pattern in innermost dimension.
-static void genExpand(CodegenEnv &env, OpBuilder &builder, LoopId at,
-                      bool atStart) {
+static void genExpand(CodegenEnv &env, OpBuilder &builder, LoopId curr,
+                      bool isStart) {
   linalg::GenericOp op = env.op();
   OpOperand *lhs = op.getDpsInitOperand(0);
-  if (!env.atExpandLevel(lhs, op.getRank(lhs), at))
-    return; // not needed at this level
+  if (!env.atExpandLevel(lhs, op.getRank(lhs), curr))
+    return; // not needed at current level
   assert(!env.isReduc());
   // Generate start or end of an expanded access pattern. Note that because
   // an expansion does not rely on the ongoing contents of the sparse storage
@@ -727,7 +725,7 @@ static void genExpand(CodegenEnv &env, OpBuilder &builder, LoopId at,
   // needed, we will need to use the SSA value in the insertion chain instead.
   Value tensor = lhs->get();
   Location loc = op.getLoc();
-  if (atStart) {
+  if (isStart) {
     auto dynShape = {ShapedType::kDynamic};
     Type etp = cast<ShapedType>(tensor.getType()).getElementType();
     Type t1 = MemRefType::get(dynShape, etp);
@@ -740,7 +738,7 @@ static void genExpand(CodegenEnv &env, OpBuilder &builder, LoopId at,
                     r.getResult(3));
   } else {
     SmallVector<Value> indices;
-    for (LoopId i = 0; i < at; i++)
+    for (LoopId i = 0; i < curr; i++)
       indices.push_back(env.emitter().getLoopIV(i));
     Value values = env.getExpandValues();
     Value filled = env.getExpandFilled();
@@ -782,18 +780,18 @@ static bool isParallelFor(CodegenEnv &env, bool isOuter, bool isSparse) {
 
 /// Whether or not the current loop being generated should be parallelized (if
 /// possible) according to the configuration.
-static bool shouldTryParallize(CodegenEnv &env, LoopId at,
+static bool shouldTryParallize(CodegenEnv &env, LoopId curr,
                                ArrayRef<TensorLevel> tidLvls) {
   linalg::GenericOp op = env.op();
   auto iteratorTypes = op.getIteratorTypesArray();
-  bool isSparse = llvm::any_of(tidLvls, [at, &env](TensorLevel tidLvl) {
+  bool isSparse = llvm::any_of(tidLvls, [curr, &env](TensorLevel tidLvl) {
     // Queries the LT based on the tensor and loop id, as requested by
     // `CodegenEnv::lt(TensorId, LoopId)`. The returned LT from CodegenEnv
     // should be consistent with the LT indexed by <TensorId, Level>.
-    const auto lt = env.lt(env.unpackTensorLevel(tidLvl).first, at);
+    const auto lt = env.lt(env.unpackTensorLevel(tidLvl).first, curr);
     return isCompressedLT(lt) || isSingletonLT(lt);
   });
-  return isParallelFor(env, /*isOuter=*/at == 0, isSparse);
+  return isParallelFor(env, /*isOuter=*/curr == 0, isSparse);
 }
 
 /// Emit a loop to coiterate over the list of tensor levels. The generated loop
@@ -814,9 +812,9 @@ static Operation *genCoIteration(CodegenEnv &env, OpBuilder &builder,
 
 /// Generates a for-loop or a while-loop, depending on whether it implements
 /// singleton iteration or co-iteration over the given conjunction.
-static Operation *genLoop(CodegenEnv &env, OpBuilder &builder, LoopId at,
+static Operation *genLoop(CodegenEnv &env, OpBuilder &builder, LoopId curr,
                           bool needsUniv, ArrayRef<TensorLevel> tidLvls) {
-  bool tryParallel = shouldTryParallize(env, at, tidLvls);
+  bool tryParallel = shouldTryParallize(env, curr, tidLvls);
   return genCoIteration(env, builder, tidLvls, tryParallel, needsUniv);
 }
 
@@ -861,7 +859,7 @@ static void finalizeWhileOp(CodegenEnv &env, OpBuilder &builder,
 }
 
 /// Generates a single if-statement within a while-loop.
-static scf::IfOp genIf(CodegenEnv &env, OpBuilder &builder, LoopId at,
+static scf::IfOp genIf(CodegenEnv &env, OpBuilder &builder, LoopId curr,
                        LatPointId p) {
   Location loc = env.op().getLoc();
   SmallVector<Type> types;
@@ -879,13 +877,13 @@ static scf::IfOp genIf(CodegenEnv &env, OpBuilder &builder, LoopId at,
           auto stt = getSparseTensorType(env.op().getInputs()[tid]);
           lt = stt.getLvlType(*lvl);
         }
-        assert(at == env.merger().loop(b));
+        assert(curr == env.merger().loop(b));
         Value clause;
         if (isCompressedLT(lt) || isSingletonLT(lt) ||
             isLooseCompressedLT(lt) || is2OutOf4LT(lt)) {
           assert(lvl.has_value());
           const Value crd = env.emitter().getCoords()[tid][*lvl];
-          const Value lvar = env.getLoopVar(at);
+          const Value lvar = env.getLoopVar(curr);
           clause = builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::eq,
                                                  crd, lvar);
         } else {
@@ -942,12 +940,12 @@ static void endIf(CodegenEnv &env, OpBuilder &builder, scf::IfOp ifOp,
 /// Starts a loop sequence at given level. Returns true if
 /// the universal loop index must be maintained at this level.
 static bool startLoopSeq(CodegenEnv &env, OpBuilder &builder, ExprId exp,
-                         LoopId at, LatSetId lts) {
-  assert(!env.getLoopVar(at));
+                         LoopId curr, LatSetId lts) {
+  assert(!env.getLoopVar(curr));
   // Emit invariants at this loop sequence level.
-  genInvariants(env, builder, exp, at, /*atStart=*/true);
+  genInvariants(env, builder, exp, curr, /*isStart=*/true);
   // Emit access pattern expansion for sparse tensor output.
-  genExpand(env, builder, at, /*atStart=*/true);
+  genExpand(env, builder, curr, /*isStart=*/true);
   // Emit further initialization at this loop sequence level.
   const LatPointId l0 = env.set(lts)[0];
   bool needsUniv = false;
@@ -956,13 +954,12 @@ static bool startLoopSeq(CodegenEnv &env, OpBuilder &builder, ExprId exp,
   env.merger().foreachTensorLoopId(l0, [&](TensorLoopId b, TensorId tid,
                                            std::optional<Level> lvl,
                                            LevelType lt, bool isIdxReduc) {
-    assert(env.merger().loop(b) == at);
+    assert(env.merger().loop(b) == curr);
     if (isDenseLT(lt) || isUndefLT(lt)) {
       if (tid == env.merger().getSynTensorID()) {
         // Needs loop emitter to set up loop bounds for synthetic tensor too if
         // there is a loop condition imposed on the synthetic tensor.
-        tidLvls.push_back(
-            env.makeTensorLevel(tid, env.emitter().getCurrentDepth()));
+        tidLvls.push_back(env.makeTensorLevel(tid, env.getCurrentDepth()));
       }
       needsUniv = true;
     }
@@ -1025,87 +1022,89 @@ static void genInitConstantDenseAddress(CodegenEnv &env,
 
 /// Return true if the lattice bits can be iterated by a for loop.
 static bool translateBitsToTidLvlPairs(
-    CodegenEnv &env, LatPointId li, LoopId at,
+    CodegenEnv &env, LatPointId li, LoopId curr,
     SmallVectorImpl<TensorLevel> &tidLvls,
     SmallVectorImpl<std::pair<TensorLevel, AffineExpr>> &affineTidLvls) {
   const BitVector &simple = env.lat(li).simple;
   const TensorId outTid = env.merger().getOutTensorID();
-  const std::optional<Level> outLvl = env.merger().getLvl(outTid, at);
+  const std::optional<Level> outLvl = env.merger().getLvl(outTid, curr);
 
   unsigned numloopCond = 0;
   bool hasNonUnique = false;
-  env.merger().foreachTensorLoopId(li, [&, at](TensorLoopId b, TensorId tid,
-                                               std::optional<Level> lvl,
-                                               LevelType lt, bool isIdxReduc) {
-    if (simple[b]) {
-      if (isIdxReduc) {
-        tidLvls.push_back(env.makeTensorLevel(tid, *lvl));
-        numloopCond++;
-        return;
-      }
-      if (isUndefLT(lt)) {
-        // An undefined lt in the lattices, we probably mean to
-        // generate a dense loop according to the synthetic tensor (for
-        // invariants and sparse output tensor).
-        if (env.merger().getSynTensorID() == tid) {
-          // Coiterating with an invariant
-          // e.g., out = prod(in[i][j] op invariant);
-          // or a broadcast
-          // e.g., out[i][j] = in[i] (j is undef for input)
-          //
-          // The level of the synthetic tensor is the current loop depth;
-          // the rank of the synthetic tensor equals to number of loops.
-          lvl = env.emitter().getCurrentDepth();
-        } else if (!lvl) {
-          // Skips invalid lvl (e.g., when this is a zero ranked tensor).
-          return;
-        }
-      }
-      hasNonUnique = !isUniqueLT(lt) || hasNonUnique;
-      tidLvls.push_back(env.makeTensorLevel(tid, *lvl));
-      numloopCond++;
-    } else if (isDenseLT(lt) || isIdxReduc) {
-      tidLvls.push_back(env.makeTensorLevel(tid, *lvl));
-    } else {
-      assert(isUndefLT(lt));
-      linalg::GenericOp op = env.op();
-      if (tid >= op.getNumDpsInputs())
-        // We only handle affine expression on input tensors (for now).
-        return;
-      OpOperand *operand = &op->getOpOperand(tid);
-      const auto stt = getSparseTensorType(operand->get());
-      // Non-annotated dense tensors requires no special handling.
-      if (!stt.hasEncoding())
-        return;
-
-      ArrayRef<AffineExpr> affines =
-          op.getMatchingIndexingMap(operand).getResults();
-      const Level lvlRank = stt.getLvlRank();
-      assert(affines.size() == static_cast<size_t>(lvlRank));
-      for (Level l = 0; l < lvlRank; l++) {
-        AffineExpr exp = affines[l];
-        // Skip simple affine expression and non-dense levels (which
-        // have their own filter loop).
-        if (isa<AffineDimExpr>(exp) || !stt.isDenseLvl(l))
-          continue;
-
-        // Constant affine expression are handled in genLoop.
-        if (!isa<AffineConstantExpr>(exp)) {
-          bool isAtLoop = false;
-          assert(at == env.getLoopDepth());
-          if (isInvariantAffine(exp, at + 1, /*out*/ isAtLoop) && isAtLoop) {
-            // If the compound affine is invariant and we are right at the
-            // level. We need to generate the address according to the
-            // affine expression. This is also the best place we can do it
-            // to avoid putting it inside inner loops.
-            affineTidLvls.emplace_back(env.makeTensorLevel(tid, l), exp);
+  env.merger().foreachTensorLoopId(
+      li, [&, curr](TensorLoopId b, TensorId tid, std::optional<Level> lvl,
+                    LevelType lt, bool isIdxReduc) {
+        if (simple[b]) {
+          if (isIdxReduc) {
+            tidLvls.push_back(env.makeTensorLevel(tid, *lvl));
+            numloopCond++;
+            return;
+          }
+          if (isUndefLT(lt)) {
+          // For an undefined lt in the lattices, we probably mean to
+            // generate a dense loop according to the synthetic tensor (for
+            // invariants and sparse output tensor).
+            if (env.merger().getSynTensorID() == tid) {
+              // Coiterating with an invariant
+              // e.g., out = prod(in[i][j] op invariant);
+              // or a broadcast
+              // e.g., out[i][j] = in[i] (j is undef for input)
+              //
+              // The level of the synthetic tensor is the current loop depth;
+              // the rank of the synthetic tensor equals the number of loops.
+              assert(curr == env.getCurrentDepth());
+              lvl = curr;
+            } else if (!lvl) {
+              // Skips invalid lvl (e.g., when this is a zero ranked tensor).
+              return;
+            }
+          }
+          hasNonUnique = !isUniqueLT(lt) || hasNonUnique;
+          tidLvls.push_back(env.makeTensorLevel(tid, *lvl));
+          numloopCond++;
+        } else if (isDenseLT(lt) || isIdxReduc) {
+          tidLvls.push_back(env.makeTensorLevel(tid, *lvl));
+        } else {
+          assert(isUndefLT(lt));
+          linalg::GenericOp op = env.op();
+          if (tid >= op.getNumDpsInputs())
+            // We only handle affine expressions on input tensors (for now).
+            return;
+          OpOperand *operand = &op->getOpOperand(tid);
+          const auto stt = getSparseTensorType(operand->get());
+          // Non-annotated dense tensors require no special handling.
+          if (!stt.hasEncoding())
+            return;
+
+          ArrayRef<AffineExpr> affines =
+              op.getMatchingIndexingMap(operand).getResults();
+          const Level lvlRank = stt.getLvlRank();
+          assert(affines.size() == static_cast<size_t>(lvlRank));
+          for (Level l = 0; l < lvlRank; l++) {
+            AffineExpr exp = affines[l];
+            // Skip simple affine expressions and non-dense levels (which
+            // have their own filter loop).
+            if (isa<AffineDimExpr>(exp) || !stt.isDenseLvl(l))
+              continue;
+
+            // Constant affine expressions are handled in genLoop.
+            if (!isa<AffineConstantExpr>(exp)) {
+              bool isCurrentLoop = false;
+              assert(curr == env.getCurrentDepth());
+              if (isInvariantAffine(exp, curr + 1, /*out*/ isCurrentLoop) &&
+                  isCurrentLoop) {
+                // If the compound affine is invariant and we are right at the
+                // level, we need to generate the address according to the
+                // affine expression. This is also the best place w...
[truncated]

``````````
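For readers outside the sparse compiler, a self-contained toy model of the invariance check above may help. It replaces MLIR's `AffineExpr` with a minimal hand-rolled variant (all names below are hypothetical stand-ins, not part of the patch), but mirrors the `curr`/`isCurrentLoop` logic of `isInvariantAffine`:

```cpp
#include <cassert>
#include <iostream>
#include <memory>

using LoopId = unsigned;

// Minimal stand-in for MLIR's AffineExpr: a dimension d<i>, a constant,
// or a binary add/mul over two subexpressions.
struct Expr {
  enum Kind { Dim, Cst, Add, Mul } kind;
  LoopId dim;                     // valid when kind == Dim
  std::shared_ptr<Expr> lhs, rhs; // valid when kind == Add or Mul
};

// Mirrors isInvariantAffine from the patch: loop index i is invariant once
// i < curr (that loop has already been generated), and isCurrentLoop is set
// when the expression becomes invariant exactly at curr (i + 1 == curr).
static bool isInvariantExpr(const Expr &a, LoopId curr, bool &isCurrentLoop) {
  switch (a.kind) {
  case Expr::Dim:
    if (a.dim + 1 == curr) {
      isCurrentLoop = true;
      return true; // becomes invariant at the current loop
    }
    return a.dim < curr; // invariant when already generated
  case Expr::Add:
  case Expr::Mul:
    return isInvariantExpr(*a.lhs, curr, isCurrentLoop) &&
           isInvariantExpr(*a.rhs, curr, isCurrentLoop);
  case Expr::Cst:
    return true; // constants are trivially invariant
  }
  return false; // unreachable; silences -Wreturn-type
}

int main() {
  // Build d0 + d1.
  auto d0 = std::make_shared<Expr>(Expr{Expr::Dim, 0});
  auto d1 = std::make_shared<Expr>(Expr{Expr::Dim, 1});
  Expr sum{Expr::Add, 0, d0, d1};

  bool isCurrentLoop = false;
  // With loops 0 and 1 already generated (curr == 2), d0 + d1 is invariant,
  // and d1 became invariant exactly here, so the flag is set.
  assert(isInvariantExpr(sum, /*curr=*/2, isCurrentLoop) && isCurrentLoop);

  // At curr == 1, d1 is not yet available, so the sum is still "in play".
  isCurrentLoop = false;
  assert(!isInvariantExpr(sum, /*curr=*/1, isCurrentLoop));

  std::cout << "invariance toy model: all checks passed\n";
}
```

Note that, as in the real code, the out-parameter can be set even when the overall check fails (the `&&` evaluates the left operand first); callers therefore only consult `isCurrentLoop` when the function returns true.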



https://github.com/llvm/llvm-project/pull/74656


More information about the Mlir-commits mailing list