[Mlir-commits] [mlir] 2cb99df - [mlir][sparse] Fix typos (#67859)

llvmlistbot at llvm.org
Mon Oct 2 08:07:42 PDT 2023


Author: Yinying Li
Date: 2023-10-02T11:07:38-04:00
New Revision: 2cb99df6090d8e219d03c55b2a40f99b9be692ed

URL: https://github.com/llvm/llvm-project/commit/2cb99df6090d8e219d03c55b2a40f99b9be692ed
DIFF: https://github.com/llvm/llvm-project/commit/2cb99df6090d8e219d03c55b2a40f99b9be692ed.diff

LOG: [mlir][sparse] Fix typos (#67859)

Added: 
    

Modified: 
    mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
    mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp

Removed: 
    


################################################################################
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
index 96eea0d0658290f..2d4f40eceba3b0d 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
@@ -242,7 +242,7 @@ Value LoopEmitter::genSegmentHigh(OpBuilder &builder, Location loc,
         {
           OpBuilder::InsertionGuard guard(builder);
           // Load the next coordinates only when inbound (to avoid OOB
-          // acccesses).
+          // accesses).
           builder.setInsertionPointToStart(ifInBound.thenBlock());
           Value crd = genIndexLoad(builder, loc, coordinates, pos);
           Value isSameCrd = builder.create<arith::CmpIOp>(
@@ -651,7 +651,7 @@ std::pair<Operation *, Value> LoopEmitter::emitForLoopOverTensorAtLvl(
     // expression on init vals will be moved into scf.reduce and replaced with
     // the block arguments when exiting the loop (see exitForLoop). This is
     // needed as we can not build the actual reduction block and get the actual
-    // reduction varaible before users fill parallel loop body.
+    // reduction variable before users fill parallel loop body.
     for (int i = 0, e = reduc.size(); i < e; i++)
       reduc[i] = parOp.getInitVals()[i];
     loop = parOp;
@@ -882,7 +882,7 @@ std::pair<Operation *, Value> LoopEmitter::emitWhileLoopOverTensorsAtLvls(
 
   // The set of induction variables for the while loop.
   SmallVector<Value> ivs;
-  // Segement sizes for induction variables used for different kinds of loop
+  // Segment sizes for induction variables used for different kinds of loop
   // conditions.
   SmallVector<unsigned> opSegSize;
 
@@ -1077,7 +1077,7 @@ Operation *LoopEmitter::enterCoIterationOverTensorsAtLvls(
   needsUniv = !spConds.empty() && needsUniv;
   // The TensorLevel used for loop conditions.
   // If there is any sparse level, we need to use the sparse condition.
-  // If all levels are dense, we can pick arbitary one (dense slice-driven loop
+  // If all levels are dense, we can pick arbitrary one (dense slice-driven loop
   // can be generated using a simple ForOp as well).
   Operation *l = nullptr;
   Value iv = nullptr;
@@ -1700,7 +1700,7 @@ std::pair<Operation *, ValueRange> LoopEmitter::genSliceLvlTraverseLoop(
           // Delegates to users' callback.
           bodyBuilder(builder, loc, iv, ifRet);
         }
-        // Marks this speical ifOp to avoid sparisification finalizing it.
+        // Marks this special ifOp to avoid sparisification finalizing it.
         ifOp->setAttr(getLoopEmitterLoopAttrName(),
                       StringAttr::get(builder.getContext(), "slice"));
         // Insertion point restored to after ifOp.
@@ -1741,7 +1741,7 @@ ValueRange LoopEmitter::genUnResolvedSliceTreeTraverse(
   Value pos = c0;
   OpBuilder::InsertPoint ip;
   SmallVector<Value> innerArgs(userReduc.begin(), userReduc.end());
-  scf::ForOp outerMost = nullptr; // the outtermost loop.
+  scf::ForOp outerMost = nullptr; // the outermost loop.
 
   // Wraps body builder and inserts a extra counting instruction at the end.
   auto wrapped = [bodyBuilder](OpBuilder &builder, Location loc, Value iv,
@@ -1842,7 +1842,7 @@ ValueRange LoopEmitter::genUnResolvedSliceTreeTraverse(
                                OpBuilder &builder, Location loc, ValueRange ivs,
                                ValueRange iterArgs) -> scf::ValueVector {
                              for (auto em : llvm::enumerate(ivs)) {
-                               // Linearizes postion: pos = (pos * lvlsize) +
+                               // Linearizes position: pos = (pos * lvlsize) +
                                // iv;
                                pos = MULI(pos, lvlSzs[em.index()]);
                                pos = ADDI(pos, em.value());
@@ -2072,7 +2072,7 @@ bool LoopEmitter::genSliceBegin(OpBuilder &builder, Location loc, TensorId tid,
   assert(isOrderedDLT(lvlType));
   if (isSingletonDLT(lvlType)) {
     llvm_unreachable("TODO: dense level should be easy to support, while "
-                     "singleton level requres more efforts");
+                     "singleton level requires more efforts");
   }
 
   assert(!dependentLvlMap[tid][lvl].empty());

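As background for the "Linearizes position" comment fixed above: folding
pos = (pos * lvlsize) + iv over the levels is ordinary row-major
linearization. A minimal standalone sketch of that arithmetic, with a
hypothetical helper name that is not part of the LoopEmitter API:

#include <cassert>
#include <cstdint>
#include <vector>

// Row-major linearization over multiple levels: repeatedly apply
// pos = (pos * lvlsize) + iv, mirroring the MULI/ADDI pair in the hunk
// above. Level sizes and induction values are outer-to-inner.
int64_t linearize(const std::vector<int64_t> &lvlSizes,
                  const std::vector<int64_t> &ivs) {
  assert(lvlSizes.size() == ivs.size());
  int64_t pos = 0;
  for (size_t l = 0; l < ivs.size(); ++l)
    pos = pos * lvlSizes[l] + ivs[l];
  return pos; // e.g. sizes {4,5}, ivs {2,3} -> 2*5 + 3 = 13
}
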
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
index 0a6cc32253d26d1..e47131bb78318ab 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -250,7 +250,7 @@ static bool findAffine(Merger &merger, TensorId tid, Level lvl, AffineExpr a,
     }
 
     if (auto binOp = a.dyn_cast<AffineBinaryOpExpr>()) {
-      // We do not set dim level format for affine expresssion like d0 + d1 on
+      // We do not set dim level format for affine expression like d0 + d1 on
       // either loop index at d0 or d1.
       // We continue the recursion merely to check whether current affine is
       // admissible or not.
@@ -309,7 +309,7 @@ static bool findDepIdxSet(Merger &merger, TensorId tensor, Level lvl,
       if (merger.hasDependentLvl(ldx, tensor)) {
         // TODO: This can be supported by coiterate slices if the loop idx is
        // appeared on affine index for different tensor, or take slice on
-        // mulitple dimensions when it is on the same tensor.
+        // multiple dimensions when it is on the same tensor.
         // E.g.,
         // `d0 + d1` for indexing t0[lvl0] and `d0 + d2` for indexing t1[lvl0]
         // d0_1 = getNextSliceOffset t0 along lvl0
@@ -357,7 +357,7 @@ static bool findDepIdxSet(Merger &merger, TensorId tensor, Level lvl,
 /// indexing-expression is `d0 + d1`)
 static unsigned getNumNonTrivialIdxExpOnSparseLvls(AffineMap map,
                                                    Value tensor) {
-  // The `tensor` is not guaranted to have `RankedTensorType`, therefore
+  // The `tensor` is not guaranteed to have `RankedTensorType`, therefore
   // we can't use `getRankedTensorType`/`getSparseTensorType` here.
   // However, we don't need to handle `StorageSpecifierType`, so we
   // can use `SparseTensorType` once we guard against non-tensors.
@@ -636,7 +636,7 @@ static void addFilterLoopBasedConstraints(CodegenEnv &env, OpOperand &t,
 
   // Each tensor expression and optional dimension ordering (row-major
   // by default) puts an ordering constraint on the loop indices. For
-  // example, the tensor expresion A_ijk forces the ordering i < j < k
+  // example, the tensor expression A_ijk forces the ordering i < j < k
   // on the loop indices if no explicit dimension ordering is given.
   const Level lvlRank = map.getNumResults();
   assert(!enc || lvlRank == enc.getLvlRank());
@@ -668,7 +668,7 @@ static void addFilterLoopBasedConstraints(CodegenEnv &env, OpOperand &t,
 
       // Applying order constraints on every pair of dimExpr between two
       // compound affine expressions can sometime too strict:
-      // E.g, for [dense, dense] -> (d0 + d1, d2 + d3).
+      // E.g., for [dense, dense] -> (d0 + d1, d2 + d3).
       // It is totally fine to have loop sequence d0->d2->d1->d3 instead of
       // requiring d0 < d2, d1 < d2, d0 < d3, d1 < d3.
       // We also relax the affine constraint when use slice-based algorithm
@@ -1316,7 +1316,7 @@ static void genExpand(CodegenEnv &env, OpBuilder &builder, LoopOrd at,
     return; // not needed at this level
   assert(!env.isReduc());
   // Generate start or end of an expanded access pattern. Note that because
-  // an expension does not rely on the ongoing contents of the sparse storage
+  // an expansion does not rely on the ongoing contents of the sparse storage
   // scheme, we can use the original tensor as incoming SSA value (which
   // simplifies codegen a bit). If expansion on the actual contents is ever
   // needed, we will need to use the SSA value in the insertion chain instead.
@@ -2007,9 +2007,9 @@ struct GenericOpSparsifier : public OpRewritePattern<linalg::GenericOp> {
     bool isAdmissible = false;
     bool hasCycle = true;
 
-    // An const list of all masks that we used for interation graph
+    // A const list of all masks that we used for iteration graph
     // computation. Must be ordered from more strict to less strict.
-    // Ideally (though might not be guaranteed), the eariler a constraint mask
+    // Ideally (though might not be guaranteed), the earlier a constraint mask
     // can be satisfied, the faster the generated kernel will be.
     const auto allMasks = {
         SortMask::kIncludeAll,        SortMask::kIncludeDense,
@@ -2038,7 +2038,7 @@ struct GenericOpSparsifier : public OpRewritePattern<linalg::GenericOp> {
     env.startEmit();
     genBuffers(env, rewriter);
    // TODO: Constant affine expression should be handled differently when using
-    // slice-based codegen, it does not matter now becasue we already reject the
+    // slice-based codegen, it does not matter now because we already reject the
     // constant expression at a earlier stage.
     genInitConstantDenseAddress(env, rewriter);
     genStmt(env, rewriter, env.getExprId(), 0);

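As background for the ordering-constraint comment fixed above: for a tensor
expression such as A_ijk with a row-major level order, each consecutive pair
of levels contributes one constraint, which transitively yields i < j < k.
A minimal sketch under that assumption (how the pass actually stores its
constraints may differ); the helper name is hypothetical, not the
Sparsification.cpp API:

#include <utility>
#include <vector>

// Emit one (earlier, later) edge per adjacent level pair; for loop ids
// {i, j, k} this returns {i,j} and {j,k}, i.e. i < j < k transitively.
std::vector<std::pair<unsigned, unsigned>>
orderingConstraints(const std::vector<unsigned> &loopIdsInLvlOrder) {
  std::vector<std::pair<unsigned, unsigned>> edges;
  for (size_t l = 1; l < loopIdsInLvlOrder.size(); ++l)
    edges.emplace_back(loopIdsInLvlOrder[l - 1], loopIdsInLvlOrder[l]);
  return edges;
}
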
diff --git a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
index b6970b8b96d4659..bfa2f67eab37a76 100644
--- a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
@@ -1101,7 +1101,7 @@ LatSetId Merger::buildLattices(ExprId e, LoopId i) {
     }
   case TensorExp::Kind::kCmpF:
   case TensorExp::Kind::kCmpI:
-    // An comparison operation needs to be performed
+    // A comparison operation needs to be performed
     // for the disjunction of sparse iteration spaces.
     //
     //   x < y |  !y   |   y   |
@@ -1118,7 +1118,7 @@ LatSetId Merger::buildLattices(ExprId e, LoopId i) {
   case TensorExp::Kind::kShlI:
     // A shift operation by an invariant amount (viz. tensor expressions
     // can only occur at the left-hand-side of the operator) can be handled
-    // with the conjuction rule.
+    // with the conjunction rule.
     {
       const ExprId e0 = expr.children.e0;
       const ExprId e1 = expr.children.e1;
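
As background for the conjunction/disjunction comments fixed above: a
product-like operation can iterate only where both operands are stored,
while a comparison must also visit regions where exactly one side is
stored, since x < 0 or 0 < y can still be true. A minimal sketch over
coordinate-to-value maps, assuming unstored entries are zero; these
helpers are illustrative only, not Merger's LatSetId machinery:

#include <cstdio>
#include <map>

// Sparse vectors as coordinate->value maps; unstored entries are zero.
using SpVec = std::map<int, double>;

// Conjunction (e.g. x * y): only coordinates stored in BOTH inputs can
// yield a nonzero, so it suffices to iterate the intersection.
void mulConj(const SpVec &x, const SpVec &y) {
  for (const auto &[i, xv] : x)
    if (auto it = y.find(i); it != y.end())
      std::printf("out[%d] = %g\n", i, xv * it->second);
}

// Disjunction (e.g. x < y): the result can be true where only ONE input
// is stored, so iterate the union of stored coordinates.
void cmpDisj(const SpVec &x, const SpVec &y) {
  auto xi = x.begin(), yi = y.begin();
  while (xi != x.end() || yi != y.end()) {
    int i;
    double xv = 0, yv = 0;
    if (yi == y.end() || (xi != x.end() && xi->first <= yi->first)) {
      i = xi->first;
      xv = xi->second;
      if (yi != y.end() && yi->first == i)
        yv = (yi++)->second;
      ++xi;
    } else {
      i = yi->first;
      yv = (yi++)->second;
    }
    std::printf("out[%d] = %d\n", i, int(xv < yv));
  }
}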
