[Mlir-commits] [mlir] 16b8f4d - [mlir][sparse] add a "release" operation to sparse tensor dialect
Aart Bik
llvmlistbot at llvm.org
Tue Oct 5 09:36:08 PDT 2021
Author: Aart Bik
Date: 2021-10-05T09:35:59-07:00
New Revision: 16b8f4ddae1cb36ac16c6eb451613c032e4064f6
URL: https://github.com/llvm/llvm-project/commit/16b8f4ddae1cb36ac16c6eb451613c032e4064f6
DIFF: https://github.com/llvm/llvm-project/commit/16b8f4ddae1cb36ac16c6eb451613c032e4064f6.diff
LOG: [mlir][sparse] add a "release" operation to sparse tensor dialect
We have several ways to materialize sparse tensors (new and convert) but no explicit operation to release the underlying sparse storage scheme at runtime (other than making an explicit delSparseTensor() library call). To simplify memory management, a sparse_tensor.release operation has been introduced that lowers to the runtime library call while keeping tensors, opaque pointers, and memrefs transparent in the initial IR.
*Note:* There is obviously some tension between the concept of immutable tensors and memory management methods. This tension is addressed by simply stating that, after the "release" call, no further memref-related operations are allowed on the tensor value. We expect the design to evolve over time, however, and eventually arrive at a more satisfactory view of tensors and buffers.
Bug:
http://llvm.org/pr52046
Reviewed By: bixia
Differential Revision: https://reviews.llvm.org/D111099
Added:
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir
Modified:
mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
mlir/test/Dialect/SparseTensor/conversion.mlir
mlir/test/Dialect/SparseTensor/invalid.mlir
mlir/test/Dialect/SparseTensor/roundtrip.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/lit.local.cfg
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
Removed:
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse-constant_to_sparse_tensor.mlir
################################################################################
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
index c6f5c97e832a8..3950c122bca57 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
@@ -85,6 +85,25 @@ def SparseTensor_ConvertOp : SparseTensor_Op<"convert",
let hasFolder = 1;
}
+def SparseTensor_ReleaseOp : SparseTensor_Op<"release", []>,
+ Arguments<(ins AnyTensor:$tensor)> {
+ string description = [{
+ Releases the underlying sparse storage scheme for a tensor that
+ materialized earlier through a `new` operator or a non-trivial
+ `convert` operator with an annotated tensor type as destination.
+ This operation should only be called once for any materialized tensor.
+ Also, after this operation, any subsequent `memref` querying operation
+ on the tensor returns undefined results.
+
+ Example:
+
+ ```mlir
+ sparse_tensor.release %tensor : tensor<1024x1024xf64, #CSR>
+ ```
+ }];
+ let assemblyFormat = "$tensor attr-dict `:` type($tensor)";
+}
+
def SparseTensor_ToPointersOp : SparseTensor_Op<"pointers", [NoSideEffect]>,
Arguments<(ins AnyTensor:$tensor, Index:$dim)>,
Results<(outs AnyStridedMemRefOfRank<1>:$result)> {
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index 4e63f57c4ddf9..8df79ed4559bf 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -235,6 +235,12 @@ OpFoldResult ConvertOp::fold(ArrayRef<Attribute> operands) {
return {};
}
+static LogicalResult verify(ReleaseOp op) {
+ if (!getSparseTensorEncoding(op.tensor().getType()))
+ return op.emitError("expected a sparse tensor to release");
+ return success();
+}
+
static LogicalResult verify(ToPointersOp op) {
if (auto e = getSparseTensorEncoding(op.tensor().getType())) {
if (failed(isInBounds(op.dim(), op.tensor())))
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
index 11f589c8b446e..d60a13f1763da 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -99,8 +99,8 @@ static Value getTensor(ConversionPatternRewriter &rewriter, unsigned width,
/// the "_emit_c_interface" on the function declaration when requested,
/// so that LLVM lowering generates a wrapper function that takes care
/// of ABI complications with passing in and returning MemRefs to C functions.
-static FlatSymbolRefAttr getFunc(Operation *op, StringRef name, Type resultType,
- ValueRange operands,
+static FlatSymbolRefAttr getFunc(Operation *op, StringRef name,
+ TypeRange resultType, ValueRange operands,
bool emitCInterface = false) {
MLIRContext *context = op->getContext();
auto module = op->getParentOfType<ModuleOp>();
@@ -471,6 +471,23 @@ class SparseTensorConvertConverter : public OpConversionPattern<ConvertOp> {
}
};
+/// Sparse conversion rule for the release operator.
+class SparseTensorReleaseConverter : public OpConversionPattern<ReleaseOp> {
+public:
+ using OpConversionPattern::OpConversionPattern;
+ LogicalResult
+ matchAndRewrite(ReleaseOp op, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const override {
+ StringRef name = "delSparseTensor";
+ TypeRange none;
+ rewriter.create<CallOp>(op.getLoc(), none,
+ getFunc(op, name, none, adaptor.getOperands()),
+ adaptor.getOperands());
+ rewriter.eraseOp(op);
+ return success();
+ }
+};
+
/// Sparse conversion rule for pointer accesses.
class SparseTensorToPointersConverter
: public OpConversionPattern<ToPointersOp> {
@@ -483,7 +500,7 @@ class SparseTensorToPointersConverter
Type eltType = resType.cast<ShapedType>().getElementType();
StringRef name;
if (eltType.isIndex())
- name = "sparsePointers";
+ name = "sparsePointers"; // 64-bit, but its own name for unique signature
else if (eltType.isInteger(64))
name = "sparsePointers64";
else if (eltType.isInteger(32))
@@ -514,7 +531,7 @@ class SparseTensorToIndicesConverter : public OpConversionPattern<ToIndicesOp> {
Type eltType = resType.cast<ShapedType>().getElementType();
StringRef name;
if (eltType.isIndex())
- name = "sparseIndices";
+ name = "sparseIndices"; // 64-bit, but its own name for unique signature
else if (eltType.isInteger(64))
name = "sparseIndices64";
else if (eltType.isInteger(32))
@@ -609,7 +626,8 @@ void mlir::populateSparseTensorConversionPatterns(TypeConverter &typeConverter,
RewritePatternSet &patterns) {
patterns.add<SparseReturnConverter, SparseTensorToDimSizeConverter,
SparseTensorNewConverter, SparseTensorConvertConverter,
- SparseTensorToPointersConverter, SparseTensorToIndicesConverter,
- SparseTensorToValuesConverter, SparseTensorToTensorConverter>(
- typeConverter, patterns.getContext());
+ SparseTensorReleaseConverter, SparseTensorToPointersConverter,
+ SparseTensorToIndicesConverter, SparseTensorToValuesConverter,
+ SparseTensorToTensorConverter>(typeConverter,
+ patterns.getContext());
}
diff --git a/mlir/test/Dialect/SparseTensor/conversion.mlir b/mlir/test/Dialect/SparseTensor/conversion.mlir
index f2831f88b88ae..48d8a7eed6aef 100644
--- a/mlir/test/Dialect/SparseTensor/conversion.mlir
+++ b/mlir/test/Dialect/SparseTensor/conversion.mlir
@@ -112,6 +112,15 @@ func @sparse_new3d(%arg0: !llvm.ptr<i8>) -> tensor<?x?x?xf32, #SparseTensor> {
return %0 : tensor<?x?x?xf32, #SparseTensor>
}
+// CHECK-LABEL: func @sparse_release(
+// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
+// CHECK: call @delSparseTensor(%[[A]]) : (!llvm.ptr<i8>) -> ()
+// CHECK: return
+func @sparse_release(%arg0: tensor<128xf64, #SparseVector>) {
+ sparse_tensor.release %arg0 : tensor<128xf64, #SparseVector>
+ return
+}
+
// CHECK-LABEL: func @sparse_nop_convert(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
// CHECK: return %[[A]] : !llvm.ptr<i8>
diff --git a/mlir/test/Dialect/SparseTensor/invalid.mlir b/mlir/test/Dialect/SparseTensor/invalid.mlir
index d2f8eee4b89fc..18b90e8188a8b 100644
--- a/mlir/test/Dialect/SparseTensor/invalid.mlir
+++ b/mlir/test/Dialect/SparseTensor/invalid.mlir
@@ -8,6 +8,14 @@ func @invalid_new_dense(%arg0: !llvm.ptr<i8>) -> tensor<32xf32> {
// -----
+func @invalid_release_dense(%arg0: tensor<4xi32>) {
+ // expected-error @+1 {{expected a sparse tensor to release}}
+ sparse_tensor.release %arg0 : tensor<4xi32>
+ return
+}
+
+// -----
+
func @invalid_pointers_dense(%arg0: tensor<128xf64>) -> memref<?xindex> {
%c = constant 0 : index
// expected-error @+1 {{expected a sparse tensor to get pointers}}
diff --git a/mlir/test/Dialect/SparseTensor/roundtrip.mlir b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
index dcb2b7f6431da..770e9039a1f2d 100644
--- a/mlir/test/Dialect/SparseTensor/roundtrip.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
@@ -15,6 +15,19 @@ func @sparse_new(%arg0: !llvm.ptr<i8>) -> tensor<128xf64, #SparseVector> {
#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
+// CHECK-LABEL: func @sparse_release(
+// CHECK-SAME: %[[A:.*]]: tensor<128xf64, #{{.*}}>
+// CHECK: sparse_tensor.release %[[A]] : tensor<128xf64, #{{.*}}>
+// CHECK: return
+func @sparse_release(%arg0: tensor<128xf64, #SparseVector>) {
+ sparse_tensor.release %arg0 : tensor<128xf64, #SparseVector>
+ return
+}
+
+// -----
+
+#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
+
// CHECK-LABEL: func @sparse_convert_1d_to_sparse(
// CHECK-SAME: %[[A:.*]]: tensor<64xf32>)
// CHECK: %[[T:.*]] = sparse_tensor.convert %[[A]] : tensor<64xf32> to tensor<64xf32, #{{.*}}>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
index 76b98527d516d..c0e624b0900f1 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
@@ -95,6 +95,10 @@ module {
%v = vector.load %m[%c0] : memref<?xf64>, vector<25xf64>
vector.print %v : vector<25xf64>
+ // Release the resources.
+ sparse_tensor.release %a : tensor<?x?xf64, #SparseMatrix>
+ sparse_tensor.release %x : tensor<?x?xf64, #DenseMatrix>
+
return
}
}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/lit.local.cfg b/mlir/test/Integration/Dialect/SparseTensor/CPU/lit.local.cfg
index 7ac5190e3b908..83247d7e37449 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/lit.local.cfg
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/lit.local.cfg
@@ -3,6 +3,3 @@ import sys
# No JIT on win32.
if sys.platform == 'win32':
config.unsupported = True
-
-# http://llvm.org/pr52046
-config.environment['ASAN_OPTIONS'] = 'detect_leaks=0'
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir
index 5fcc27e49a487..2a75fd0dc88ca 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir
@@ -273,6 +273,22 @@ module {
%v9 = vector.transfer_read %m9[%z], %i: memref<10xi32>, vector<10xi32>
vector.print %v9 : vector<10xi32>
+ // Release the resources.
+ sparse_tensor.release %1 : tensor<10xi32, #SV>
+ sparse_tensor.release %3 : tensor<10xf32, #SV>
+ sparse_tensor.release %5 : tensor<10xf64, #SV>
+ sparse_tensor.release %7 : tensor<10xf64, #SV>
+ memref.dealloc %m0 : memref<10xf32>
+ memref.dealloc %m1 : memref<10xf32>
+ memref.dealloc %m2 : memref<10xi32>
+ memref.dealloc %m3 : memref<10xi32>
+ memref.dealloc %m4 : memref<10xf64>
+ memref.dealloc %m5 : memref<10xf32>
+ memref.dealloc %m6 : memref<10xi64>
+ memref.dealloc %m7 : memref<10xi64>
+ memref.dealloc %m8 : memref<10xi8>
+ memref.dealloc %m9 : memref<10xi32>
+
return
}
}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse-constant_to_sparse_tensor.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir
similarity index 95%
rename from mlir/test/Integration/Dialect/SparseTensor/CPU/sparse-constant_to_sparse_tensor.mlir
rename to mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir
index 9154b402635e0..e0141bcbfcbba 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse-constant_to_sparse_tensor.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir
@@ -45,6 +45,9 @@ module {
%vr = vector.transfer_read %v[%c0], %d0: memref<?xf64>, vector<8xf64>
vector.print %vr : vector<8xf64>
+ // Release the resources.
+ sparse_tensor.release %ts : tensor<10x8xf64, #Tensor1>
+
return
}
}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir
index a6a9782d81b04..4e0aaf153d100 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir
@@ -245,7 +245,17 @@ module {
call @dumpf64(%v2) : (memref<?xf64>) -> ()
call @dumpf64(%v3) : (memref<?xf64>) -> ()
+ // Release the resources.
+ sparse_tensor.release %1 : tensor<2x3x4xf64, #Tensor1>
+ sparse_tensor.release %2 : tensor<2x3x4xf64, #Tensor2>
+ sparse_tensor.release %3 : tensor<2x3x4xf64, #Tensor3>
+ sparse_tensor.release %b : tensor<2x3x4xf64, #Tensor1>
+ sparse_tensor.release %c : tensor<2x3x4xf64, #Tensor1>
+ sparse_tensor.release %d : tensor<2x3x4xf64, #Tensor2>
+ sparse_tensor.release %f : tensor<2x3x4xf64, #Tensor2>
+ sparse_tensor.release %g : tensor<2x3x4xf64, #Tensor3>
+ sparse_tensor.release %h : tensor<2x3x4xf64, #Tensor3>
+
return
}
}
-
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
index 63627db19d555..4135af224abda 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
@@ -84,6 +84,10 @@ module {
: memref<6x6xi32>, vector<6x6xi32>
vector.print %v : vector<6x6xi32>
+ // Release the resources.
+ sparse_tensor.release %sparse_filter : tensor<3x3xi32, #DCSR>
+ memref.dealloc %m : memref<6x6xi32>
+
return
}
}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
index 6ef8adb82a131..957c210c2f25d 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
@@ -41,7 +41,8 @@ module {
// A kernel that flattens a rank 8 tensor into a dense matrix.
//
func @kernel_flatten(%arga: tensor<7x3x3x3x3x3x5x3xf64, #SparseTensor>,
- %argx: tensor<7x3xf64>) -> tensor<7x3xf64> {
+ %argx: tensor<7x3xf64> {linalg.inplaceable = true})
+ -> tensor<7x3xf64> {
%0 = linalg.generic #trait_flatten
ins(%arga: tensor<7x3x3x3x3x3x5x3xf64, #SparseTensor>)
outs(%argx: tensor<7x3xf64>) {
@@ -99,6 +100,7 @@ module {
// Release the resources.
memref.dealloc %xdata : memref<7x3xf64>
+ sparse_tensor.release %a : tensor<7x3x3x3x3x3x5x3xf64, #SparseTensor>
return
}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
index 0f05d0b4cd52c..a378ba1d488a4 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
@@ -54,7 +54,8 @@ module {
//
func @kernel_matvec(%arga: tensor<?x?xi32, #SparseMatrix>,
%argb: tensor<?xi32>,
- %argx: tensor<?xi32>) -> tensor<?xi32> {
+ %argx: tensor<?xi32> {linalg.inplaceable = true})
+ -> tensor<?xi32> {
%0 = linalg.generic #matvec
ins(%arga, %argb: tensor<?x?xi32, #SparseMatrix>, tensor<?xi32>)
outs(%argx: tensor<?xi32>) {
@@ -111,6 +112,7 @@ module {
// Release the resources.
memref.dealloc %bdata : memref<?xi32>
memref.dealloc %xdata : memref<?xi32>
+ sparse_tensor.release %a : tensor<?x?xi32, #SparseMatrix>
return
}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
index e061012ec60fb..4b7721cef1cee 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
@@ -40,7 +40,8 @@ module {
func @kernel_mttkrp(%argb: tensor<?x?x?xf64, #SparseMatrix>,
%argc: tensor<?x?xf64>,
%argd: tensor<?x?xf64>,
- %arga: tensor<?x?xf64>) -> tensor<?x?xf64> {
+ %arga: tensor<?x?xf64> {linalg.inplaceable = true})
+ -> tensor<?x?xf64> {
%0 = linalg.generic #mttkrp
ins(%argb, %argc, %argd:
tensor<?x?x?xf64, #SparseMatrix>, tensor<?x?xf64>, tensor<?x?xf64>)
@@ -126,6 +127,7 @@ module {
memref.dealloc %adata : memref<?x?xf64>
memref.dealloc %cdata : memref<?x?xf64>
memref.dealloc %ddata : memref<?x?xf64>
+ sparse_tensor.release %b : tensor<?x?x?xf64, #SparseMatrix>
return
}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
index 8c36275fc079e..7d2a406a47f3e 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
@@ -72,6 +72,9 @@ module {
%v = vector.transfer_read %m[%c0], %d0: memref<?xf64>, vector<9xf64>
vector.print %v : vector<9xf64>
+ // Release the resources.
+ sparse_tensor.release %x : tensor<?x?xf64, #DCSR>
+
return
}
}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
index ffc07ae880f21..62a5e58dd636a 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
@@ -71,6 +71,10 @@ module {
: memref<5x6xi32>, vector<5x6xi32>
vector.print %v : vector<5x6xi32>
+ // Release the resources.
+ sparse_tensor.release %sparse_input2 : tensor<3x6xi8, #DCSR>
+ memref.dealloc %m : memref<5x6xi32>
+
return
}
}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
index d2d04670cc170..867e17f3fdf24 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
@@ -125,16 +125,14 @@ module {
return %0 : tensor<i32>
}
- func @dump_i32(%arg0 : tensor<i32>) {
- %m = memref.buffer_cast %arg0 : memref<i32>
- %v = memref.load %m[] : memref<i32>
+ func @dump_i32(%arg0 : memref<i32>) {
+ %v = memref.load %arg0[] : memref<i32>
vector.print %v : i32
return
}
- func @dump_f32(%arg0 : tensor<f32>) {
- %m = memref.buffer_cast %arg0 : memref<f32>
- %v = memref.load %m[] : memref<f32>
+ func @dump_f32(%arg0 : memref<f32>) {
+ %v = memref.load %arg0[] : memref<f32>
vector.print %v : f32
return
}
@@ -203,13 +201,33 @@ module {
// CHECK: 15
// CHECK: 10
//
- call @dump_i32(%0) : (tensor<i32>) -> ()
- call @dump_f32(%1) : (tensor<f32>) -> ()
- call @dump_i32(%2) : (tensor<i32>) -> ()
- call @dump_f32(%3) : (tensor<f32>) -> ()
- call @dump_i32(%4) : (tensor<i32>) -> ()
- call @dump_i32(%5) : (tensor<i32>) -> ()
- call @dump_i32(%6) : (tensor<i32>) -> ()
+ %m0 = memref.buffer_cast %0 : memref<i32>
+ call @dump_i32(%m0) : (memref<i32>) -> ()
+ %m1 = memref.buffer_cast %1 : memref<f32>
+ call @dump_f32(%m1) : (memref<f32>) -> ()
+ %m2 = memref.buffer_cast %2 : memref<i32>
+ call @dump_i32(%m2) : (memref<i32>) -> ()
+ %m3 = memref.buffer_cast %3 : memref<f32>
+ call @dump_f32(%m3) : (memref<f32>) -> ()
+ %m4 = memref.buffer_cast %4 : memref<i32>
+ call @dump_i32(%m4) : (memref<i32>) -> ()
+ %m5 = memref.buffer_cast %5 : memref<i32>
+ call @dump_i32(%m5) : (memref<i32>) -> ()
+ %m6 = memref.buffer_cast %6 : memref<i32>
+ call @dump_i32(%m6) : (memref<i32>) -> ()
+
+ // Release the resources.
+ sparse_tensor.release %sparse_input_i32 : tensor<32xi32, #SV>
+ sparse_tensor.release %sparse_input_f32 : tensor<32xf32, #SV>
+ sparse_tensor.release %dense_input_i32 : tensor<32xi32, #DV>
+ sparse_tensor.release %dense_input_f32 : tensor<32xf32, #DV>
+ memref.dealloc %m0 : memref<i32>
+ memref.dealloc %m1 : memref<f32>
+ memref.dealloc %m2 : memref<i32>
+ memref.dealloc %m3 : memref<f32>
+ memref.dealloc %m4 : memref<i32>
+ memref.dealloc %m5 : memref<i32>
+ memref.dealloc %m6 : memref<i32>
return
}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
index 2b6e57c4b6b7c..569f76207e604 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
@@ -128,6 +128,7 @@ module {
memref.dealloc %adata : memref<?x?xf32>
memref.dealloc %bdata : memref<?x?xf32>
memref.dealloc %xdata : memref<?x?xf32>
+ sparse_tensor.release %s : tensor<?x?xf32, #SparseMatrix>
return
}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
old mode 100644
new mode 100755
index 63f82159985ac..3a4b322c11297
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
@@ -28,6 +28,10 @@
// RUN: -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
+//
+// Interop between linalg/sparse leaves some issues to be resolved:
+// UNSUPPORTED: asan
+
#SM = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>
@@ -163,6 +167,11 @@ module {
vector.print %v0 : vector<8x8xf64>
vector.print %v1 : vector<8x8xf64>
+ // Release the resources.
+ sparse_tensor.release %s : tensor<8x8xf64, #SM>
+ memref.dealloc %m0 : memref<8x8xf64>
+ memref.dealloc %m1 : memref<8x8xf64>
+
return
}
}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
index 7db8176c5ece5..3b1f40aa2defe 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
@@ -74,6 +74,9 @@ module {
%v = vector.transfer_read %m[%c0], %f0: memref<?xf32>, vector<16xf32>
vector.print %v : vector<16xf32>
+ // Release the resources.
+ sparse_tensor.release %1 : tensor<8x8xf32, #CSR>
+
return
}
}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
index 4c977e1805a4a..5085bd2c9374a 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
@@ -38,7 +38,7 @@ module {
//
func @kernel_spmm(%arga: tensor<?x?xf64, #SparseMatrix>,
%argb: tensor<?x?xf64>,
- %argx: tensor<?x?xf64>) -> tensor<?x?xf64> {
+ %argx: tensor<?x?xf64> {linalg.inplaceable = true}) -> tensor<?x?xf64> {
%0 = linalg.generic #spmm
ins(%arga, %argb: tensor<?x?xf64, #SparseMatrix>, tensor<?x?xf64>)
outs(%argx: tensor<?x?xf64>) {
@@ -101,6 +101,7 @@ module {
// Release the resources.
memref.dealloc %bdata : memref<?x?xf64>
memref.dealloc %xdata : memref<?x?xf64>
+ sparse_tensor.release %a : tensor<?x?xf64, #SparseMatrix>
return
}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir
index cbac014bbc152..09465accdc1f8 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir
@@ -250,6 +250,15 @@ module {
%50 = vector.transfer_read %49[%c0], %d0: memref<?xf64>, vector<70xf64>
vector.print %50 : vector<70xf64>
+ // Release the resources.
+ sparse_tensor.release %0 : tensor<10x8xf64, #Dense>
+ sparse_tensor.release %1 : tensor<10x8xf64, #CSR>
+ sparse_tensor.release %2 : tensor<10x8xf64, #DCSR>
+ sparse_tensor.release %3 : tensor<10x8xf64, #CSC>
+ sparse_tensor.release %4 : tensor<10x8xf64, #DCSC>
+ sparse_tensor.release %x : tensor<10x8xf64, #BlockRow>
+ sparse_tensor.release %y : tensor<10x8xf64, #BlockCol>
+
return
}
}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
index 6a9ee01b5c3c7..b6fcaee73eb34 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
@@ -35,7 +35,7 @@ module {
// A kernel that sum-reduces a matrix to a single scalar.
//
func @kernel_sum_reduce(%arga: tensor<?x?xf64, #SparseMatrix>,
- %argx: tensor<f64>) -> tensor<f64> {
+ %argx: tensor<f64> {linalg.inplaceable = true}) -> tensor<f64> {
%0 = linalg.generic #trait_sum_reduce
ins(%arga: tensor<?x?xf64, #SparseMatrix>)
outs(%argx: tensor<f64>) {
@@ -79,6 +79,7 @@ module {
// Release the resources.
memref.dealloc %xdata : memref<f64>
+ sparse_tensor.release %a : tensor<?x?xf64, #SparseMatrix>
return
}