[Mlir-commits] [mlir] 79ff70f - [mlir][sparse] Better error handling when bufferizing sparse_tensor ops

Matthias Springer llvmlistbot at llvm.org
Thu Aug 24 23:39:00 PDT 2023


Author: Matthias Springer
Date: 2023-08-25T08:34:05+02:00
New Revision: 79ff70fda223f22cf409301b0f0c2bf451dbb6b3

URL: https://github.com/llvm/llvm-project/commit/79ff70fda223f22cf409301b0f0c2bf451dbb6b3
DIFF: https://github.com/llvm/llvm-project/commit/79ff70fda223f22cf409301b0f0c2bf451dbb6b3.diff

LOG: [mlir][sparse] Better error handling when bufferizing sparse_tensor ops

sparse_tensor ops cannot be bufferized with One-Shot Bufferize. (They can only be analyzed.) The sparse compiler does the actual lowering to memref. Produce a proper error message instead of crashing.

This fixes #61311.

Differential Revision: https://reviews.llvm.org/D158728

Added: 
    mlir/test/Dialect/SparseTensor/one_shot_bufferize_invalid.mlir

Modified: 
    mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp

Removed: 
    


################################################################################
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp
index e8467ee3087ae6..89c6495a3112ad 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -26,9 +26,19 @@ namespace mlir {
 namespace sparse_tensor {
 namespace {
 
+template <typename ConcreteModel, typename ConcreteOp>
+struct SparseBufferizableOpInterfaceExternalModel
+    : public BufferizableOpInterface::ExternalModel<ConcreteModel, ConcreteOp> {
+  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
+                          const BufferizationOptions &options) const {
+    return op->emitError(
+        "sparse_tensor ops must be bufferized with the sparse compiler");
+  }
+};
+
 struct ConcatenateOpInterface
-    : public BufferizableOpInterface::ExternalModel<
-          ConcatenateOpInterface, sparse_tensor::ConcatenateOp> {
+    : SparseBufferizableOpInterfaceExternalModel<ConcatenateOpInterface,
+                                                 sparse_tensor::ConcatenateOp> {
   bool bufferizesToAllocation(Operation *op, Value value) const { return true; }
 
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
@@ -52,9 +62,8 @@ struct ConcatenateOpInterface
   }
 };
 
-struct ConvertOpInterface
-    : public BufferizableOpInterface::ExternalModel<ConvertOpInterface,
-                                                    sparse_tensor::ConvertOp> {
+struct ConvertOpInterface : public SparseBufferizableOpInterfaceExternalModel<
+                                ConvertOpInterface, sparse_tensor::ConvertOp> {
   bool bufferizesToAllocation(Operation *op, Value value) const {
     // ConvertOps may allocate. (Unless they convert between two identical
     // types, then they fold away.)
@@ -83,8 +92,8 @@ struct ConvertOpInterface
 };
 
 struct LoadOpInterface
-    : public BufferizableOpInterface::ExternalModel<LoadOpInterface,
-                                                    sparse_tensor::LoadOp> {
+    : public SparseBufferizableOpInterfaceExternalModel<LoadOpInterface,
+                                                        sparse_tensor::LoadOp> {
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
     return false;
@@ -102,8 +111,8 @@ struct LoadOpInterface
 };
 
 struct NewOpInterface
-    : public BufferizableOpInterface::ExternalModel<NewOpInterface,
-                                                    sparse_tensor::NewOp> {
+    : public SparseBufferizableOpInterfaceExternalModel<NewOpInterface,
+                                                        sparse_tensor::NewOp> {
   bool resultBufferizesToMemoryWrite(Operation *op, OpResult opResult,
                                      const AnalysisState &state) const {
     // NewOps allocate but do not write.
@@ -114,8 +123,8 @@ struct NewOpInterface
 };
 
 struct PackOpInterface
-    : public BufferizableOpInterface::ExternalModel<PackOpInterface,
-                                                    sparse_tensor::PackOp> {
+    : public SparseBufferizableOpInterfaceExternalModel<PackOpInterface,
+                                                        sparse_tensor::PackOp> {
   bool bufferizesToAllocation(Operation *op, Value value) const {
     // PackOp reuses all the buffers instead of allocating new ones
     return false;
@@ -145,9 +154,8 @@ struct PackOpInterface
   }
 };
 
-struct UnpackOpInterface
-    : public BufferizableOpInterface::ExternalModel<UnpackOpInterface,
-                                                    sparse_tensor::UnpackOp> {
+struct UnpackOpInterface : public SparseBufferizableOpInterfaceExternalModel<
+                               UnpackOpInterface, sparse_tensor::UnpackOp> {
   bool bufferizesToAllocation(Operation *op, Value value) const {
     // The output buffer is pre-allocated by the user.
     return false;
@@ -178,9 +186,8 @@ struct UnpackOpInterface
   }
 };
 
-struct InsertOpInterface
-    : public BufferizableOpInterface::ExternalModel<InsertOpInterface,
-                                                    sparse_tensor::InsertOp> {
+struct InsertOpInterface : public SparseBufferizableOpInterfaceExternalModel<
+                               InsertOpInterface, sparse_tensor::InsertOp> {
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
     return true;
@@ -201,7 +208,7 @@ struct InsertOpInterface
 };
 
 struct NumberOfEntriesOpInterface
-    : public BufferizableOpInterface::ExternalModel<
+    : public SparseBufferizableOpInterfaceExternalModel<
           NumberOfEntriesOpInterface, sparse_tensor::NumberOfEntriesOp> {
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
@@ -220,7 +227,7 @@ struct NumberOfEntriesOpInterface
 };
 
 struct ToCoordinatesBufferOpInterface
-    : public BufferizableOpInterface::ExternalModel<
+    : public SparseBufferizableOpInterfaceExternalModel<
           ToCoordinatesBufferOpInterface,
           sparse_tensor::ToCoordinatesBufferOp> {
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
@@ -242,7 +249,7 @@ struct ToCoordinatesBufferOpInterface
 };
 
 struct ToCoordinatesOpInterface
-    : public BufferizableOpInterface::ExternalModel<
+    : public SparseBufferizableOpInterfaceExternalModel<
           ToCoordinatesOpInterface, sparse_tensor::ToCoordinatesOp> {
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
@@ -263,7 +270,7 @@ struct ToCoordinatesOpInterface
 };
 
 struct ToPositionsOpInterface
-    : public BufferizableOpInterface::ExternalModel<
+    : public SparseBufferizableOpInterfaceExternalModel<
           ToPositionsOpInterface, sparse_tensor::ToPositionsOp> {
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
@@ -284,8 +291,8 @@ struct ToPositionsOpInterface
 };
 
 struct ToValuesOpInterface
-    : public BufferizableOpInterface::ExternalModel<ToValuesOpInterface,
-                                                    sparse_tensor::ToValuesOp> {
+    : public SparseBufferizableOpInterfaceExternalModel<
+          ToValuesOpInterface, sparse_tensor::ToValuesOp> {
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
     return true;

diff --git a/mlir/test/Dialect/SparseTensor/one_shot_bufferize_invalid.mlir b/mlir/test/Dialect/SparseTensor/one_shot_bufferize_invalid.mlir
new file mode 100644
index 00000000000000..25ecd20c380035
--- /dev/null
+++ b/mlir/test/Dialect/SparseTensor/one_shot_bufferize_invalid.mlir
@@ -0,0 +1,12 @@
+// RUN: mlir-opt %s -one-shot-bufferize -verify-diagnostics
+
+#SparseVector = #sparse_tensor.encoding<{
+  lvlTypes = ["compressed"]
+}>
+
+func.func @sparse_tensor_op(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> {
+  // expected-error @below{{sparse_tensor ops must be bufferized with the sparse compiler}}
+  // expected-error @below{{failed to bufferize op}}
+  %0 = sparse_tensor.convert %arg0 : tensor<64xf32, #SparseVector> to tensor<64xf32, #SparseVector>
+  return %0 : tensor<64xf32, #SparseVector>
+}


        


More information about the Mlir-commits mailing list