[Mlir-commits] [mlir] b78b547 - [mlir][sparse] Avoid values buffer reallocation for annotated all dense tensors.
llvmlistbot at llvm.org
Wed Jan 11 16:31:12 PST 2023
Author: bixia1
Date: 2023-01-11T16:31:07-08:00
New Revision: b78b547371253118b7ab04a12653c66878466d2b
URL: https://github.com/llvm/llvm-project/commit/b78b547371253118b7ab04a12653c66878466d2b
DIFF: https://github.com/llvm/llvm-project/commit/b78b547371253118b7ab04a12653c66878466d2b.diff
LOG: [mlir][sparse] Avoid values buffer reallocation for annotated all dense tensors.
Previously, we relied on the InsertOp to gradually increase the size of the
storage for all sparse tensors. We now allocate the full-size values buffer
for annotated all-dense tensors when we first allocate the tensor. This avoids
the cost of gradually growing the buffer and allows accessing the values
buffer as if it were a dense tensor.
Reviewed By: Peiming
Differential Revision: https://reviews.llvm.org/D141516
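
For context, an "annotated all dense" tensor is one whose sparse_tensor
encoding marks every dimension as dense, so its values buffer has a known
linearized size (the product of the dimension sizes). Below is a minimal
sketch of such a type and an allocation, roughly along the lines of the
updated codegen test; the #Dense3D name and the 10x20x30 shape are
illustrative assumptions, not taken verbatim from the commit:

  #Dense3D = #sparse_tensor.encoding<{
    dimLevelType = [ "dense", "dense", "dense" ]
  }>

  func.func @alloc_dense_3d() -> tensor<10x20x30xf64, #Dense3D> {
    // With this change, codegen allocates the values buffer at its full
    // linearized size (10 * 20 * 30 = 6000 elements) instead of growing it
    // from the 16-element heuristic through insertions.
    %0 = bufferization.alloc_tensor() : tensor<10x20x30xf64, #Dense3D>
    %1 = sparse_tensor.load %0 : tensor<10x20x30xf64, #Dense3D>
    return %1 : tensor<10x20x30xf64, #Dense3D>
  }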
Added:
Modified:
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
mlir/test/Dialect/SparseTensor/codegen.mlir
Removed:
################################################################################
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
index 38a7e0e0610fb..0ce37620061a6 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -205,11 +205,30 @@ static void createAllocFields(OpBuilder &builder, Location loc, Type type,
ValueRange dynSizes, bool enableInit,
SmallVectorImpl<Value> &fields) {
RankedTensorType rtp = type.cast<RankedTensorType>();
- Value heuristic = constantIndex(builder, loc, 16);
+ // Build original sizes.
+ SmallVector<Value> sizes;
+ auto shape = rtp.getShape();
+ unsigned rank = shape.size();
+ for (unsigned r = 0, o = 0; r < rank; r++) {
+ if (ShapedType::isDynamic(shape[r]))
+ sizes.push_back(dynSizes[o++]);
+ else
+ sizes.push_back(constantIndex(builder, loc, shape[r]));
+ }
+ Value heuristic = constantIndex(builder, loc, 16);
+ Value valHeuristic = heuristic;
+ SparseTensorEncodingAttr enc = getSparseTensorEncoding(rtp);
+ if (enc.isAllDense()) {
+ Value linear = sizes[0];
+ for (unsigned r = 1; r < rank; r++) {
+ linear = builder.create<arith::MulIOp>(loc, linear, sizes[r]);
+ }
+ valHeuristic = linear;
+ }
foreachFieldAndTypeInSparseTensor(
rtp,
- [&builder, &fields, rtp, loc, heuristic,
+ [&builder, &fields, rtp, loc, heuristic, valHeuristic,
enableInit](Type fType, unsigned fIdx, SparseTensorFieldKind fKind,
unsigned /*dim*/, DimLevelType /*dlt*/) -> bool {
assert(fields.size() == fIdx);
@@ -222,7 +241,10 @@ static void createAllocFields(OpBuilder &builder, Location loc, Type type,
case SparseTensorFieldKind::IdxMemRef:
case SparseTensorFieldKind::ValMemRef:
field = createAllocation(builder, loc, fType.cast<MemRefType>(),
- heuristic, enableInit);
+ fKind == SparseTensorFieldKind::ValMemRef
+ ? valHeuristic
+ : heuristic,
+ enableInit);
break;
}
assert(field);
@@ -233,16 +255,6 @@ static void createAllocFields(OpBuilder &builder, Location loc, Type type,
MutSparseTensorDescriptor desc(rtp, fields);
- // Build original sizes.
- SmallVector<Value> sizes;
- auto shape = rtp.getShape();
- unsigned rank = shape.size();
- for (unsigned r = 0, o = 0; r < rank; r++) {
- if (ShapedType::isDynamic(shape[r]))
- sizes.push_back(dynSizes[o++]);
- else
- sizes.push_back(constantIndex(builder, loc, shape[r]));
- }
// Initialize the storage scheme to an empty tensor. Initialized memSizes
// to all zeros, sets the dimSizes to known values and gives all pointer
// fields an initial zero entry, so that it is easier to maintain the
diff --git a/mlir/test/Dialect/SparseTensor/codegen.mlir b/mlir/test/Dialect/SparseTensor/codegen.mlir
index 652923ea22d07..61c4324cf1a41 100644
--- a/mlir/test/Dialect/SparseTensor/codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/codegen.mlir
@@ -345,8 +345,8 @@ func.func @sparse_alloc_csc(%arg0: index) -> tensor<10x?xf64, #CSC> {
// CHECK: %[[A2:.*]] = arith.constant 10 : i64
// CHECK: %[[A3:.*]] = arith.constant 30 : i64
// CHECK: %[[A4:.*]] = arith.constant 0.000000e+00 : f64
-// CHECK: %[[A5:.*]] = memref.alloc() : memref<16xf64>
-// CHECK: %[[A6:.*]] = memref.cast %[[A5]] : memref<16xf64> to memref<?xf64>
+// CHECK: %[[A5:.*]] = memref.alloc() : memref<6000xf64>
+// CHECK: %[[A6:.*]] = memref.cast %[[A5]] : memref<6000xf64> to memref<?xf64>
// CHECK: %[[A7:.*]] = sparse_tensor.storage_specifier.init : !sparse_tensor.storage_specifier
// CHECK: %[[A8:.*]] = sparse_tensor.storage_specifier.set %[[A7]] dim_sz at 0 with %[[A3]] : i64, !sparse_tensor.storage_specifier
// CHECK: %[[A9:.*]] = sparse_tensor.storage_specifier.set %[[A8]] dim_sz at 1 with %[[A2]] : i64, !sparse_tensor.storage_specifier
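
The updated CHECK lines reflect the new behavior: for an all-dense annotated
tensor the values buffer is now allocated directly at its full linearized size
(6000 elements here, presumably 10 * 20 * 30 as in the sketch above) rather
than at the previous 16-element heuristic.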