[Mlir-commits] [mlir] e52f530 - [mlir][sparse] fix two typos

Aart Bik llvmlistbot at llvm.org
Thu Jan 13 15:12:04 PST 2022


Author: Aart Bik
Date: 2022-01-13T15:11:55-08:00
New Revision: e52f530c36e4b3f78c35f1ccd59cae75bdff7db4

URL: https://github.com/llvm/llvm-project/commit/e52f530c36e4b3f78c35f1ccd59cae75bdff7db4
DIFF: https://github.com/llvm/llvm-project/commit/e52f530c36e4b3f78c35f1ccd59cae75bdff7db4.diff

LOG: [mlir][sparse] fix two typos

(1) copy-and-paste error in the encoding alias name:
    this annotation is for a 3-d tensor, not a 2-d matrix.

(2) typo in "initialization"

Reviewed By: bixia

Differential Revision: https://reviews.llvm.org/D117255

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
index 292fc072e1af9..b7fce5b3137c1 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
@@ -239,7 +239,7 @@ def SparseTensor_ExpandOp : SparseTensor_Op<"expand", []>,
     dimension (e.g. a full row for matrices). The added array and count are used
     to store new indices when a false value is encountered in the filled array.
     All arrays should be allocated before the loop (possibly even shared between
-    loops in a future optimization) so that their *dense* intitialization can be
+    loops in a future optimization) so that their *dense* initialization can be
     amortized over many iterations. Setting and resetting the dense arrays in
     the loop nest itself is kept *sparse* by only iterating over set elements
     through an indirection using the added array, so that the operations are

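The description patched above refers to the expanded access pattern: dense
workspaces whose one-time dense initialization is amortized over many
iterations, while setting and resetting inside the loop nest stays sparse
through the added-index indirection. As a rough illustration of that pattern
only (plain C, not the code the sparse compiler emits; the names
values/filled/added/count merely mirror the arrays mentioned in the
description, and the single contribution is a placeholder):

#include <stdbool.h>
#include <stddef.h>

/* Sketch: one iteration of a kernel using an expanded access pattern.
   The dense workspaces are allocated and zero/false-initialized once,
   outside the loop nest; the reset afterwards stays sparse because it
   walks only the `added` indices. */
void process_iteration(double *values,  /* dense accumulator, assumed all 0.0   */
                       bool   *filled,  /* dense occupancy flags, assumed false */
                       size_t *added) { /* indices touched in this iteration    */
  size_t count = 0;

  /* Scatter phase: accumulate contributions (j, v); a single dummy
     contribution stands in for the real computation. */
  size_t j = 2;
  double v = 1.0;
  if (!filled[j]) {
    filled[j] = true;
    added[count++] = j;   /* remember a newly set index */
  }
  values[j] += v;

  /* Gather + sparse reset: only the `count` set positions are visited,
     so the cost is proportional to the nonzeros, not the dimension size. */
  for (size_t k = 0; k < count; k++) {
    size_t idx = added[k];
    /* ... emit (idx, values[idx]) into the sparse output ... */
    values[idx] = 0.0;
    filled[idx] = false;
  }
}
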
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
index 71ba247c5bc11..ca1287387d72e 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
@@ -26,7 +26,7 @@
 
 !Filename = type !llvm.ptr<i8>
 
-#SparseMatrix = #sparse_tensor.encoding<{
+#SparseTensor = #sparse_tensor.encoding<{
   dimLevelType = [ "compressed", "compressed", "compressed" ]
 }>
 
@@ -51,14 +51,14 @@ module {
   // Computes Matricized Tensor Times Khatri-Rao Product (MTTKRP) kernel. See
   // http://tensor-compiler.org/docs/data_analytics/index.html.
   //
-  func @kernel_mttkrp(%argb: tensor<?x?x?xf64, #SparseMatrix>,
+  func @kernel_mttkrp(%argb: tensor<?x?x?xf64, #SparseTensor>,
                       %argc: tensor<?x?xf64>,
                       %argd: tensor<?x?xf64>,
                       %arga: tensor<?x?xf64> {linalg.inplaceable = true})
 		      -> tensor<?x?xf64> {
     %0 = linalg.generic #mttkrp
       ins(%argb, %argc, %argd:
-            tensor<?x?x?xf64, #SparseMatrix>, tensor<?x?xf64>, tensor<?x?xf64>)
+            tensor<?x?x?xf64, #SparseTensor>, tensor<?x?xf64>, tensor<?x?xf64>)
       outs(%arga: tensor<?x?xf64>) {
       ^bb(%b: f64, %c: f64, %d: f64, %a: f64):
         %0 = arith.mulf %b, %c : f64
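
For reference, the kernel updated in the hunk above computes the MTTKRP
expression from the linked TACO example, with B the sparse 3-d input
(now annotated #SparseTensor) and A the dense output:

    A(i,j) = \sum_{k,l} B(i,k,l) \cdot D(l,j) \cdot C(k,j)

so the renamed alias only changes the annotation name, not the computation.
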
@@ -87,7 +87,7 @@ module {
     // Read the sparse B input from a file.
     %fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
     %b = sparse_tensor.new %fileName
-          : !Filename to tensor<?x?x?xf64, #SparseMatrix>
+          : !Filename to tensor<?x?x?xf64, #SparseTensor>
 
     // Initialize dense C and D inputs and dense output A.
     %cdata = memref.alloc(%c3, %c5) : memref<?x?xf64>
@@ -124,7 +124,7 @@ module {
 
     // Call kernel.
     %0 = call @kernel_mttkrp(%b, %c, %d, %a)
-      : (tensor<?x?x?xf64, #SparseMatrix>,
+      : (tensor<?x?x?xf64, #SparseTensor>,
         tensor<?x?xf64>, tensor<?x?xf64>, tensor<?x?xf64>) -> tensor<?x?xf64>
 
     // Print the result for verification.
@@ -141,7 +141,7 @@ module {
     memref.dealloc %adata : memref<?x?xf64>
     memref.dealloc %cdata : memref<?x?xf64>
     memref.dealloc %ddata : memref<?x?xf64>
-    sparse_tensor.release %b : tensor<?x?x?xf64, #SparseMatrix>
+    sparse_tensor.release %b : tensor<?x?x?xf64, #SparseTensor>
 
     return
   }


        

