[Mlir-commits] [mlir] fa639d3 - [mlir][sparse] Implement BufferizableOpInterface for additional ops

Matthias Springer llvmlistbot at llvm.org
Fri Jan 27 08:58:00 PST 2023


Author: Matthias Springer
Date: 2023-01-27T17:56:55+01:00
New Revision: fa639d3b43a9034133b7566d0ec2a91fcd79c361

URL: https://github.com/llvm/llvm-project/commit/fa639d3b43a9034133b7566d0ec2a91fcd79c361
DIFF: https://github.com/llvm/llvm-project/commit/fa639d3b43a9034133b7566d0ec2a91fcd79c361.diff

LOG: [mlir][sparse] Implement BufferizableOpInterface for additional ops

The handling of unknown ops will be tightened in a subsequent change. All sparse_tensor ops should implement BufferizableOpInterface; otherwise, they are treated as "unknown" and additional buffer allocs/copies may be inserted around them.

Differential Revision: https://reviews.llvm.org/D142005

Added: 
    

Modified: 
    mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp

Removed: 
    


################################################################################
diff  --git a/mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp
index 6de3957368fed..aaf1e8677f8d2 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -26,6 +26,34 @@ namespace mlir {
 namespace sparse_tensor {
 namespace {
 
+/// Bufferization model for sparse_tensor.concatenate: all tensor operands are
+/// only read, the result is materialized in a new allocation, and the result
+/// does not alias any operand.
+struct ConcatenateOpInterface
+    : public BufferizableOpInterface::ExternalModel<
+          ConcatenateOpInterface, sparse_tensor::ConcatenateOp> {
+  // The result buffer is a new allocation, not an in-place reuse of an
+  // operand buffer.
+  bool bufferizesToAllocation(Operation *op, OpResult opResult) const {
+    return true;
+  }
+
+  // Every tensor operand is read.
+  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
+                              const AnalysisState &state) const {
+    return true;
+  }
+
+  // No operand is written to in place.
+  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
+                               const AnalysisState &state) const {
+    return false;
+  }
+
+  // The result is freshly allocated, so it aliases no operand.
+  SmallVector<OpResult> getAliasingOpResult(Operation *op, OpOperand &opOperand,
+                                            const AnalysisState &state) const {
+    return {};
+  }
+
+  // Report every value of this op as writable.
+  bool isWritable(Operation *op, Value value,
+                  const AnalysisState &state) const {
+    return true;
+  }
+};
+
 struct ConvertOpInterface
     : public BufferizableOpInterface::ExternalModel<ConvertOpInterface,
                                                     sparse_tensor::ConvertOp> {
@@ -123,17 +151,128 @@ struct InsertOpInterface
   }
 };
 
+/// Bufferization model for sparse_tensor.number_of_entries: a read-only query
+/// on its tensor operand with no tensor result aliasing.
+struct NumberOfEntriesOpInterface
+    : public BufferizableOpInterface::ExternalModel<
+          NumberOfEntriesOpInterface, sparse_tensor::NumberOfEntriesOp> {
+  // The operand is read to compute the entry count.
+  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
+                              const AnalysisState &state) const {
+    return true;
+  }
+
+  // The operand is never written.
+  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
+                               const AnalysisState &state) const {
+    return false;
+  }
+
+  // The (scalar) result does not alias the operand buffer.
+  SmallVector<OpResult> getAliasingOpResult(Operation *op, OpOperand &opOperand,
+                                            const AnalysisState &state) const {
+    return {};
+  }
+};
+
+/// Bufferization model for sparse_tensor.indices_buffer: reads the operand;
+/// the extracted buffer is not modeled as aliasing the operand result-wise.
+struct ToIndicesBufferOpInterface
+    : public BufferizableOpInterface::ExternalModel<
+          ToIndicesBufferOpInterface, sparse_tensor::ToIndicesBufferOp> {
+  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
+                              const AnalysisState &state) const {
+    return true;
+  }
+
+  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
+                               const AnalysisState &state) const {
+    // Potential writes into memory through the result of
+    // sparse_tensor.indices_buffer are not considered.
+    return false;
+  }
+
+  SmallVector<OpResult> getAliasingOpResult(Operation *op, OpOperand &opOperand,
+                                            const AnalysisState &state) const {
+    return {};
+  }
+};
+
+/// Bufferization model for sparse_tensor.indices: reads the operand; the
+/// extracted buffer is not modeled as aliasing the operand result-wise.
+struct ToIndicesOpInterface
+    : public BufferizableOpInterface::ExternalModel<
+          ToIndicesOpInterface, sparse_tensor::ToIndicesOp> {
+  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
+                              const AnalysisState &state) const {
+    return true;
+  }
+
+  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
+                               const AnalysisState &state) const {
+    // Potential writes into memory through the result of sparse_tensor.indices
+    // are not considered.
+    return false;
+  }
+
+  SmallVector<OpResult> getAliasingOpResult(Operation *op, OpOperand &opOperand,
+                                            const AnalysisState &state) const {
+    return {};
+  }
+};
+
+/// Bufferization model for sparse_tensor.pointers: reads the operand; the
+/// extracted buffer is not modeled as aliasing the operand result-wise.
+struct ToPointersOpInterface
+    : public BufferizableOpInterface::ExternalModel<
+          ToPointersOpInterface, sparse_tensor::ToPointersOp> {
+  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
+                              const AnalysisState &state) const {
+    return true;
+  }
+
+  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
+                               const AnalysisState &state) const {
+    // Potential writes into memory through the result of sparse_tensor.pointers
+    // are not considered.
+    return false;
+  }
+
+  SmallVector<OpResult> getAliasingOpResult(Operation *op, OpOperand &opOperand,
+                                            const AnalysisState &state) const {
+    return {};
+  }
+};
+
+/// Bufferization model for sparse_tensor.values: reads the operand; the
+/// extracted buffer is not modeled as aliasing the operand result-wise.
+struct ToValuesOpInterface
+    : public BufferizableOpInterface::ExternalModel<ToValuesOpInterface,
+                                                    sparse_tensor::ToValuesOp> {
+  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
+                              const AnalysisState &state) const {
+    return true;
+  }
+
+  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
+                               const AnalysisState &state) const {
+    // Potential writes into memory through the result of sparse_tensor.values
+    // are not considered.
+    return false;
+  }
+
+  SmallVector<OpResult> getAliasingOpResult(Operation *op, OpOperand &opOperand,
+                                            const AnalysisState &state) const {
+    return {};
+  }
+};
+
 } // namespace
 } // namespace sparse_tensor
 } // namespace mlir
 
 void mlir::sparse_tensor::registerBufferizableOpInterfaceExternalModels(
     DialectRegistry &registry) {
-  registry.addExtension(
-      +[](MLIRContext *ctx, sparse_tensor::SparseTensorDialect *dialect) {
-        sparse_tensor::ConvertOp::attachInterface<ConvertOpInterface>(*ctx);
-        sparse_tensor::LoadOp::attachInterface<LoadOpInterface>(*ctx);
-        sparse_tensor::NewOp::attachInterface<NewOpInterface>(*ctx);
-        sparse_tensor::InsertOp::attachInterface<InsertOpInterface>(*ctx);
-      });
+  registry.addExtension(+[](MLIRContext *ctx,
+                            sparse_tensor::SparseTensorDialect *dialect) {
+    // Attach an external BufferizableOpInterface model to each sparse_tensor
+    // op; ops without a model are treated as "unknown" by bufferization and
+    // may get extra buffer allocs/copies inserted around them.
+    sparse_tensor::ConcatenateOp::attachInterface<ConcatenateOpInterface>(*ctx);
+    sparse_tensor::ConvertOp::attachInterface<ConvertOpInterface>(*ctx);
+    sparse_tensor::LoadOp::attachInterface<LoadOpInterface>(*ctx);
+    sparse_tensor::NewOp::attachInterface<NewOpInterface>(*ctx);
+    sparse_tensor::InsertOp::attachInterface<InsertOpInterface>(*ctx);
+    sparse_tensor::NumberOfEntriesOp::attachInterface<
+        NumberOfEntriesOpInterface>(*ctx);
+    sparse_tensor::ToIndicesBufferOp::attachInterface<
+        ToIndicesBufferOpInterface>(*ctx);
+    sparse_tensor::ToIndicesOp::attachInterface<ToIndicesOpInterface>(*ctx);
+    sparse_tensor::ToPointersOp::attachInterface<ToPointersOpInterface>(*ctx);
+    sparse_tensor::ToValuesOp::attachInterface<ToValuesOpInterface>(*ctx);
+  });
 }


        


More information about the Mlir-commits mailing list