[Mlir-commits] [mlir] [mlir][sparse] temporarily disable BSR GPU libgen tests. (PR #71870)
llvmlistbot at llvm.org
llvmlistbot at llvm.org
Thu Nov 9 13:33:29 PST 2023
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-mlir-gpu
Author: Peiming Liu (PeimingLiu)
<details>
<summary>Changes</summary>
---
Full diff: https://github.com/llvm/llvm-project/pull/71870.diff
1 File Affected:
- (modified) mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sddmm-lib.mlir (+35-33)
``````````diff
diff --git a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sddmm-lib.mlir b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sddmm-lib.mlir
index 735dc8cb4bb3611..6c3d67e2ea78dc6 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sddmm-lib.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sddmm-lib.mlir
@@ -85,30 +85,32 @@ module {
// A kernel that computes a BSR sampled dense matrix matrix multiplication
// using a "spy" function and in-place update of the sampling sparse matrix.
//
- func.func @SDDMM_block(%args: tensor<?x?xf32, #BSR>,
- %arga: tensor<?x?xf32>,
- %argb: tensor<?x?xf32>) -> tensor<?x?xf32, #BSR> {
- %result = linalg.generic #trait_SDDMM
- ins(%arga, %argb: tensor<?x?xf32>, tensor<?x?xf32>)
- outs(%args: tensor<?x?xf32, #BSR>) {
- ^bb(%a: f32, %b: f32, %s: f32):
- %f0 = arith.constant 0.0 : f32
- %u = sparse_tensor.unary %s : f32 to f32
- present={
- ^bb0(%p: f32):
- %mul = arith.mulf %a, %b : f32
- sparse_tensor.yield %mul : f32
- }
- absent={}
- %r = sparse_tensor.reduce %s, %u, %f0 : f32 {
- ^bb0(%p: f32, %q: f32):
- %add = arith.addf %p, %q : f32
- sparse_tensor.yield %add : f32
- }
- linalg.yield %r : f32
- } -> tensor<?x?xf32, #BSR>
- return %result : tensor<?x?xf32, #BSR>
- }
+ // TODO: re-enable the following test.
+ //
+ // func.func @SDDMM_block(%args: tensor<?x?xf32, #BSR>,
+ // %arga: tensor<?x?xf32>,
+ // %argb: tensor<?x?xf32>) -> tensor<?x?xf32, #BSR> {
+ // %result = linalg.generic #trait_SDDMM
+ // ins(%arga, %argb: tensor<?x?xf32>, tensor<?x?xf32>)
+ // outs(%args: tensor<?x?xf32, #BSR>) {
+ // ^bb(%a: f32, %b: f32, %s: f32):
+ // %f0 = arith.constant 0.0 : f32
+ // %u = sparse_tensor.unary %s : f32 to f32
+ // present={
+ // ^bb0(%p: f32):
+ // %mul = arith.mulf %a, %b : f32
+ // sparse_tensor.yield %mul : f32
+ // }
+ // absent={}
+ // %r = sparse_tensor.reduce %s, %u, %f0 : f32 {
+ // ^bb0(%p: f32, %q: f32):
+ // %add = arith.addf %p, %q : f32
+ // sparse_tensor.yield %add : f32
+ // }
+ // linalg.yield %r : f32
+ // } -> tensor<?x?xf32, #BSR>
+ // return %result : tensor<?x?xf32, #BSR>
+ // }
func.func private @getTensorFilename(index) -> (!Filename)
@@ -151,15 +153,15 @@ module {
//
%fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
%m_csr = sparse_tensor.new %fileName : !Filename to tensor<?x?xf32, #CSR>
- %m_bsr = sparse_tensor.new %fileName : !Filename to tensor<?x?xf32, #BSR>
+ // %m_bsr = sparse_tensor.new %fileName : !Filename to tensor<?x?xf32, #BSR>
// Call the kernel.
%0 = call @SDDMM(%m_csr, %a, %b)
: (tensor<?x?xf32, #CSR>,
tensor<?x?xf32>, tensor<?x?xf32>) -> tensor<?x?xf32, #CSR>
- %1 = call @SDDMM_block(%m_bsr, %a, %b)
- : (tensor<?x?xf32, #BSR>,
- tensor<?x?xf32>, tensor<?x?xf32>) -> tensor<?x?xf32, #BSR>
+ // %1 = call @SDDMM_block(%m_bsr, %a, %b)
+ // : (tensor<?x?xf32, #BSR>,
+ // tensor<?x?xf32>, tensor<?x?xf32>) -> tensor<?x?xf32, #BSR>
//
// Print the result for verification. Note that the "spy" determines what
@@ -168,18 +170,18 @@ module {
// in the original zero positions).
//
// CHECK: ( 5, 10, 24, 19, 53, 42, 55, 56 )
- // CHECK-NEXT: ( 5, 10, 8, 19, 24, 24, 40, 53, 42, 55, 56, 64 )
+ // C_HECK-NEXT: ( 5, 10, 8, 19, 24, 24, 40, 53, 42, 55, 56, 64 )
//
%v0 = sparse_tensor.values %0 : tensor<?x?xf32, #CSR> to memref<?xf32>
%vv0 = vector.transfer_read %v0[%c0], %d0 : memref<?xf32>, vector<8xf32>
vector.print %vv0 : vector<8xf32>
- %v1 = sparse_tensor.values %1 : tensor<?x?xf32, #BSR> to memref<?xf32>
- %vv1 = vector.transfer_read %v1[%c0], %d0 : memref<?xf32>, vector<12xf32>
- vector.print %vv1 : vector<12xf32>
+ // %v1 = sparse_tensor.values %1 : tensor<?x?xf32, #BSR> to memref<?xf32>
+ // %vv1 = vector.transfer_read %v1[%c0], %d0 : memref<?xf32>, vector<12xf32>
+ // vector.print %vv1 : vector<12xf32>
// Release the resources.
bufferization.dealloc_tensor %0 : tensor<?x?xf32, #CSR>
- bufferization.dealloc_tensor %1 : tensor<?x?xf32, #BSR>
+ // bufferization.dealloc_tensor %1 : tensor<?x?xf32, #BSR>
llvm.call @mgpuDestroySparseEnv() : () -> ()
return
``````````
</details>
https://github.com/llvm/llvm-project/pull/71870
More information about the Mlir-commits
mailing list