[Mlir-commits] [mlir] 333f98b - [mlir][sparse][nfc] Use tensor.generate in sparse integration tests

Rajas Vanjape llvmlistbot at llvm.org
Mon Aug 8 09:45:05 PDT 2022


Author: Rajas Vanjape
Date: 2022-08-08T16:44:45Z
New Revision: 333f98b4b68872fd7436d987f2e3dfb4f3e9efba

URL: https://github.com/llvm/llvm-project/commit/333f98b4b68872fd7436d987f2e3dfb4f3e9efba
DIFF: https://github.com/llvm/llvm-project/commit/333f98b4b68872fd7436d987f2e3dfb4f3e9efba.diff

LOG: [mlir][sparse][nfc] Use tensor.generate in sparse integration tests

Currently, dense tensors are initialized in sparse integration tests using
"bufferization.alloc_tensor and scf.for". This makes the code harder to read and maintain.
This diff uses tensor.generate instead to initialize dense tensors.

Testing: Ran integration tests after building with the -DLLVM_USE_SANITIZER=Address flag.

Reviewed By: springerm

Differential Revision: https://reviews.llvm.org/D131404

Added: 
    

Modified: 
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir

Removed: 
    


################################################################################
diff  --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
index 585aaf729190c..17be14d02a03d 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
@@ -75,18 +75,17 @@ module {
     %a = sparse_tensor.new %fileName : !Filename to tensor<?x?xi32, #SparseMatrix>
 
     // Initialize dense vectors.
-    %init_256 = bufferization.alloc_tensor(%c256) : tensor<?xi32>
-    %b = scf.for %i = %c0 to %c256 step %c1 iter_args(%t = %init_256) -> tensor<?xi32> {
+    %b = tensor.generate %c256 {
+    ^bb0(%i : index):
       %k = arith.addi %i, %c1 : index
       %j = arith.index_cast %k : index to i32
-      %t2 = tensor.insert %j into %t[%i] : tensor<?xi32>
-      scf.yield %t2 : tensor<?xi32>
-    }
-    %init_4 = bufferization.alloc_tensor(%c4) : tensor<?xi32>
-    %x = scf.for %i = %c0 to %c4 step %c1 iter_args(%t = %init_4) -> tensor<?xi32> {
-      %t2 = tensor.insert %i0 into %t[%i] : tensor<?xi32>
-      scf.yield %t2 : tensor<?xi32>
-    }
+      tensor.yield %j : i32
+    } : tensor<?xi32>
+
+    %x = tensor.generate %c4 {
+      ^bb0(%i : index):
+        tensor.yield %i0 : i32
+    } : tensor<?xi32>
 
     // Call kernel.
     %0 = call @kernel_matvec(%a, %b, %x)

diff  --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
index d2d9aa47a0a5f..74302a7d95b5e 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
@@ -82,42 +82,30 @@ module {
     %lsz = tensor.dim %b, %cst2 : tensor<?x?x?xf64, #SparseTensor>
 
     // Initialize dense input matrix C.
-    %c0 = bufferization.alloc_tensor(%ksz, %jsz) : tensor<?x?xf64>
-    %c = scf.for %k = %cst0 to %ksz step %cst1 iter_args(%c1 = %c0) -> tensor<?x?xf64> {
-      %c2 = scf.for %j = %cst0 to %jsz step %cst1 iter_args(%c3 = %c1) -> tensor<?x?xf64> {
-        %k0 = arith.muli %k, %jsz : index
-        %k1 = arith.addi %k0, %j : index
-        %k2 = arith.index_cast %k1 : index to i32
-        %kf = arith.sitofp %k2 : i32 to f64
-        %c4 = tensor.insert %kf into %c3[%k, %j] : tensor<?x?xf64>
-        scf.yield %c4 : tensor<?x?xf64>
-      }
-      scf.yield %c2 : tensor<?x?xf64>
-    }
+    %c = tensor.generate %ksz, %jsz {
+    ^bb0(%k : index, %j : index):
+      %k0 = arith.muli %k, %jsz : index
+      %k1 = arith.addi %k0, %j : index
+      %k2 = arith.index_cast %k1 : index to i32
+      %kf = arith.sitofp %k2 : i32 to f64
+      tensor.yield %kf : f64
+    } : tensor<?x?xf64>
 
     // Initialize dense input matrix D.
-    %d0 = bufferization.alloc_tensor(%lsz, %jsz) : tensor<?x?xf64>
-    %d = scf.for %l = %cst0 to %lsz step %cst1 iter_args(%d1 = %d0) -> tensor<?x?xf64> {
-      %d2 = scf.for %j = %cst0 to %jsz step %cst1 iter_args(%d3 = %d1) -> tensor<?x?xf64> {
-        %k0 = arith.muli %l, %jsz : index
-        %k1 = arith.addi %k0, %j : index
-        %k2 = arith.index_cast %k1 : index to i32
-        %kf = arith.sitofp %k2 : i32 to f64
-        %d4 = tensor.insert %kf into %d3[%l, %j] : tensor<?x?xf64>
-        scf.yield %d4 : tensor<?x?xf64>
-      }
-      scf.yield %d2 : tensor<?x?xf64>
-    }
+    %d = tensor.generate %lsz, %jsz {
+    ^bb0(%l : index, %j : index):
+      %k0 = arith.muli %l, %jsz : index
+      %k1 = arith.addi %k0, %j : index
+      %k2 = arith.index_cast %k1 : index to i32
+      %kf = arith.sitofp %k2 : i32 to f64
+      tensor.yield %kf : f64
+    } : tensor<?x?xf64>
 
     // Initialize dense output matrix A.
-    %a0 = bufferization.alloc_tensor(%isz, %jsz) : tensor<?x?xf64>
-    %a = scf.for %i = %cst0 to %isz step %cst1 iter_args(%a1 = %a0) -> tensor<?x?xf64> {
-      %a2 = scf.for %j = %cst0 to %jsz step %cst1 iter_args(%a3 = %a1) -> tensor<?x?xf64> {
-        %a4 = tensor.insert %f0 into %a3[%i, %j] : tensor<?x?xf64>
-        scf.yield %a4 : tensor<?x?xf64>
-      }
-      scf.yield %a2 : tensor<?x?xf64>
-    }
+    %a = tensor.generate %isz, %jsz {
+    ^bb0(%i : index, %j: index):
+      tensor.yield %f0 : f64
+    } : tensor<?x?xf64>
 
     // Call kernel.
     %0 = call @kernel_mttkrp(%b, %c, %d, %a)

diff  --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
index 1b85c1b8e075a..55fe32bd86d5d 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
@@ -72,27 +72,27 @@ module {
     %c5 = arith.constant 5 : index
     %c10 = arith.constant 10 : index
 
-    // Setup memory for the dense matrices and initialize.
-    %a0 = bufferization.alloc_tensor(%c5, %c10) : tensor<?x?xf32>
-    %b0 = bufferization.alloc_tensor(%c10, %c5) : tensor<?x?xf32>
-    %x0 = bufferization.alloc_tensor(%c5, %c5) : tensor<?x?xf32>
-    %a, %b, %x = scf.for %i = %c0 to %c5 step %c1 iter_args(%a1 = %a0, %b1 = %b0, %x1 = %x0)
-        -> (tensor<?x?xf32>, tensor<?x?xf32>, tensor<?x?xf32>) {
-      %x2 = scf.for %j = %c0 to %c5 step %c1 iter_args(%x3 = %x1) -> (tensor<?x?xf32>) {
-        %x4 = tensor.insert %d0 into %x3[%i, %j] : tensor<?x?xf32>
-        scf.yield %x4 : tensor<?x?xf32>
-      }
+    // Initialize dense matrices.
+    %x = tensor.generate %c5, %c5 {
+    ^bb0(%i : index, %j : index):
+      tensor.yield %d0 : f32
+    } : tensor<?x?xf32>
+
+    %a = tensor.generate %c5, %c10 {
+    ^bb0(%i: index, %j: index):
       %p = arith.addi %i, %c1 : index
       %q = arith.index_cast %p : index to i32
       %d = arith.sitofp %q : i32 to f32
-      %a2, %b2 = scf.for %j = %c0 to %c10 step %c1 iter_args(%a3 = %a1, %b3 = %b1)
-          -> (tensor<?x?xf32>, tensor<?x?xf32>) {
-        %a4 = tensor.insert %d into %a3[%i, %j] : tensor<?x?xf32>
-        %b4 = tensor.insert %d into %b3[%j, %i] : tensor<?x?xf32>
-        scf.yield %a4, %b4 : tensor<?x?xf32>, tensor<?x?xf32>
-      }
-      scf.yield %a2, %b2, %x2 : tensor<?x?xf32>, tensor<?x?xf32>, tensor<?x?xf32>
-    }
+      tensor.yield %d : f32
+    } : tensor<?x?xf32>
+
+    %b = tensor.generate %c10, %c5 {
+    ^bb0(%i: index, %j: index):
+      %p = arith.addi %j, %c1 : index
+      %q = arith.index_cast %p : index to i32
+      %d = arith.sitofp %q : i32 to f32
+      tensor.yield %d : f32
+    } : tensor<?x?xf32>
 
     // Read the sparse matrix from file, construct sparse storage.
     %fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)

diff  --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
index b538fc134a30b..3b483672d8074 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
@@ -70,27 +70,20 @@ module {
     %fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
     %a = sparse_tensor.new %fileName : !Filename to tensor<?x?xf64, #SparseMatrix>
 
-    // Initialize dense vectors.
-    %init_256_4 = bufferization.alloc_tensor(%c256, %c4) : tensor<?x?xf64>
-    %b = scf.for %i = %c0 to %c256 step %c1 iter_args(%t = %init_256_4) -> tensor<?x?xf64> {
-      %b2 = scf.for %j = %c0 to %c4 step %c1 iter_args(%t2 = %t) -> tensor<?x?xf64> {
-        %k0 = arith.muli %i, %c4 : index
-        %k1 = arith.addi %j, %k0 : index
-        %k2 = arith.index_cast %k1 : index to i32
-        %k = arith.sitofp %k2 : i32 to f64
-        %t3 = tensor.insert %k into %t2[%i, %j] : tensor<?x?xf64>
-        scf.yield %t3 : tensor<?x?xf64>
-      }
-      scf.yield %b2 : tensor<?x?xf64>
-    }
-    %init_4_4 = bufferization.alloc_tensor(%c4, %c4) : tensor<?x?xf64>
-    %x = scf.for %i = %c0 to %c4 step %c1 iter_args(%t = %init_4_4) -> tensor<?x?xf64> {
-      %x2 = scf.for %j = %c0 to %c4 step %c1 iter_args(%t2 = %t) -> tensor<?x?xf64> {
-        %t3 = tensor.insert %i0 into %t2[%i, %j] : tensor<?x?xf64>
-        scf.yield %t3 : tensor<?x?xf64>
-      }
-      scf.yield %x2 : tensor<?x?xf64>
-    }
+    // Initialize dense tensors.
+    %b = tensor.generate %c256, %c4 {
+    ^bb0(%i : index, %j : index):
+      %k0 = arith.muli %i, %c4 : index
+      %k1 = arith.addi %j, %k0 : index
+      %k2 = arith.index_cast %k1 : index to i32
+      %k = arith.sitofp %k2 : i32 to f64
+      tensor.yield %k : f64
+    } : tensor<?x?xf64>
+
+    %x = tensor.generate %c4, %c4 {
+    ^bb0(%i : index, %j : index):
+      tensor.yield %i0 : f64
+    } : tensor<?x?xf64>
   
     // Call kernel.
     %0 = call @kernel_spmm(%a, %b, %x)


        


More information about the Mlir-commits mailing list