[Mlir-commits] [mlir] 7b86f7c - [mlir][sparse] support sparse bufferization.alloc_tensor with copy argument.

Peiming Liu llvmlistbot at llvm.org
Fri Mar 31 15:27:29 PDT 2023


Author: Peiming Liu
Date: 2023-03-31T22:27:23Z
New Revision: 7b86f7c5d4c465cc11746cc3e49c6e0b3d29610b

URL: https://github.com/llvm/llvm-project/commit/7b86f7c5d4c465cc11746cc3e49c6e0b3d29610b
DIFF: https://github.com/llvm/llvm-project/commit/7b86f7c5d4c465cc11746cc3e49c6e0b3d29610b.diff

LOG: [mlir][sparse] support sparse bufferization.alloc_tensor with copy argument.
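
For context, the new lowering can be exercised with IR like the following
(a minimal sketch; the 2x2 CSR tensor and %arg0 are illustrative, mirroring
the test added below):

    #CSR = #sparse_tensor.encoding<{ dimLevelType = ["dense", "compressed"]}>
    %0 = bufferization.alloc_tensor() copy(%arg0) : tensor<2x2xf32, #CSR>

The pattern allocates a fresh buffer for each memref field of the source's
storage, copies the contents over with memref.copy, and reuses the storage
specifier of the source.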

Reviewed By: aartbik

Differential Revision: https://reviews.llvm.org/D147358

Added: 
    mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir

Modified: 
    mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp

Removed: 
    


################################################################################
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
index e9939b153325b..31bf59552f4e0 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -749,11 +749,29 @@ class SparseTensorAllocConverter
     const auto resType = getSparseTensorType(op);
     if (!resType.hasEncoding())
       return failure();
-    if (op.getCopy())
-      return rewriter.notifyMatchFailure(op, "tensor copy not implemented");
 
     // Construct allocation for each field.
     const Location loc = op.getLoc();
+    if (op.getCopy()) {
+      auto desc = getDescriptorFromTensorTuple(adaptor.getCopy());
+      SmallVector<Value> fields;
+      fields.reserve(desc.getNumFields());
+      // Memcpy on memref fields.
+      for (auto field : desc.getMemRefFields()) {
+        auto memrefTp = field.getType().cast<MemRefType>();
+        auto size = rewriter.create<memref::DimOp>(loc, field, 0);
+        auto copied =
+            rewriter.create<memref::AllocOp>(loc, memrefTp, ValueRange{size});
+        rewriter.create<memref::CopyOp>(loc, field, copied);
+        fields.push_back(copied);
+      }
+      // Reuse the specifier of the source tensor.
+      fields.push_back(desc.getSpecifier());
+      assert(fields.size() == desc.getNumFields());
+      rewriter.replaceOp(op, genTuple(rewriter, loc, resType, fields));
+      return success();
+    }
+
     const Value sizeHint = op.getSizeHint();
     const ValueRange dynSizes = adaptor.getDynamicSizes();
     const size_t found = dynSizes.size();

diff --git a/mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir b/mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir
new file mode 100644
index 0000000000000..8d09a8ccba910
--- /dev/null
+++ b/mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir
@@ -0,0 +1,44 @@
+// RUN: mlir-opt %s --sparse-tensor-codegen --canonicalize --cse | FileCheck %s
+
+#CSR = #sparse_tensor.encoding<{ dimLevelType = ["dense", "compressed"]}>
+#COO = #sparse_tensor.encoding<{ dimLevelType = ["compressed-nu", "singleton"]}>
+
+// CHECK-LABEL:   func.func @sparse_alloc_copy_CSR(
+// CHECK-SAME:      %[[VAL_0:.*0]]: memref<?xindex>,
+// CHECK-SAME:      %[[VAL_1:.*1]]: memref<?xindex>,
+// CHECK-SAME:      %[[VAL_2:.*2]]: memref<?xf32>,
+// CHECK-SAME:      %[[VAL_3:.*]]: !sparse_tensor.storage_specifier<#{{.*}}>) -> (memref<?xindex>, memref<?xindex>, memref<?xf32>, !sparse_tensor.storage_specifier<#{{.*}}>) {
+// CHECK:           %[[VAL_4:.*]] = arith.constant 0 : index
+// CHECK:           %[[VAL_5:.*]] = memref.dim %[[VAL_0]], %[[VAL_4]] : memref<?xindex>
+// CHECK:           %[[VAL_6:.*]] = memref.alloc(%[[VAL_5]]) : memref<?xindex>
+// CHECK:           memref.copy %[[VAL_0]], %[[VAL_6]] : memref<?xindex> to memref<?xindex>
+// CHECK:           %[[VAL_7:.*]] = memref.dim %[[VAL_1]], %[[VAL_4]] : memref<?xindex>
+// CHECK:           %[[VAL_8:.*]] = memref.alloc(%[[VAL_7]]) : memref<?xindex>
+// CHECK:           memref.copy %[[VAL_1]], %[[VAL_8]] : memref<?xindex> to memref<?xindex>
+// CHECK:           %[[VAL_9:.*]] = memref.dim %[[VAL_2]], %[[VAL_4]] : memref<?xf32>
+// CHECK:           %[[VAL_10:.*]] = memref.alloc(%[[VAL_9]]) : memref<?xf32>
+// CHECK:           memref.copy %[[VAL_2]], %[[VAL_10]] : memref<?xf32> to memref<?xf32>
+func.func @sparse_alloc_copy_CSR(%arg0: tensor<2x2xf32, #CSR>) -> tensor<2x2xf32, #CSR> {
+  %0 = bufferization.alloc_tensor() copy(%arg0) : tensor<2x2xf32, #CSR>
+  "test.sink"(%0) : (tensor<2x2xf32, #CSR>) -> ()
+}
+
+// CHECK-LABEL:   func.func @sparse_alloc_copy_COO(
+// CHECK-SAME:      %[[VAL_0:.*0]]: memref<?xindex>,
+// CHECK-SAME:      %[[VAL_1:.*1]]: memref<?xindex>,
+// CHECK-SAME:      %[[VAL_2:.*2]]: memref<?xf32>,
+// CHECK-SAME:      %[[VAL_3:.*]]: !sparse_tensor.storage_specifier<#{{.*}}>) -> (memref<?xindex>, memref<?xindex>, memref<?xf32>, !sparse_tensor.storage_specifier<#{{.*}}>) {
+// CHECK:           %[[VAL_4:.*]] = arith.constant 0 : index
+// CHECK:           %[[VAL_5:.*]] = memref.dim %[[VAL_0]], %[[VAL_4]] : memref<?xindex>
+// CHECK:           %[[VAL_6:.*]] = memref.alloc(%[[VAL_5]]) : memref<?xindex>
+// CHECK:           memref.copy %[[VAL_0]], %[[VAL_6]] : memref<?xindex> to memref<?xindex>
+// CHECK:           %[[VAL_7:.*]] = memref.dim %[[VAL_1]], %[[VAL_4]] : memref<?xindex>
+// CHECK:           %[[VAL_8:.*]] = memref.alloc(%[[VAL_7]]) : memref<?xindex>
+// CHECK:           memref.copy %[[VAL_1]], %[[VAL_8]] : memref<?xindex> to memref<?xindex>
+// CHECK:           %[[VAL_9:.*]] = memref.dim %[[VAL_2]], %[[VAL_4]] : memref<?xf32>
+// CHECK:           %[[VAL_10:.*]] = memref.alloc(%[[VAL_9]]) : memref<?xf32>
+// CHECK:           memref.copy %[[VAL_2]], %[[VAL_10]] : memref<?xf32> to memref<?xf32>
+func.func @sparse_alloc_copy_COO(%arg0: tensor<2x2xf32, #COO>) -> tensor<2x2xf32, #COO> {
+  %0 = bufferization.alloc_tensor() copy(%arg0) : tensor<2x2xf32, #COO>
+  "test.sink"(%0) : (tensor<2x2xf32, #COO>) -> ()
+}
