[Mlir-commits] [mlir] e2e6e7a - [mlir][sparse] start using size_hint provided in allocation op
Aart Bik
llvmlistbot at llvm.org
Fri Feb 3 14:02:51 PST 2023
Author: Aart Bik
Date: 2023-02-03T14:02:41-08:00
New Revision: e2e6e7a6a3a575153e2c3a4782c89c2ca1e09c02
URL: https://github.com/llvm/llvm-project/commit/e2e6e7a6a3a575153e2c3a4782c89c2ca1e09c02
DIFF: https://github.com/llvm/llvm-project/commit/e2e6e7a6a3a575153e2c3a4782c89c2ca1e09c02.diff
LOG: [mlir][sparse] start using size_hint provided in allocation op
Even though we introduced the size_hint on the allocation op, we never
actually used it. This is a first step that uses the hint along the
codegen path. Note that the heuristics can still be refined, and that
we also need to start adding the hint on all allocations generated for
reading tensors, converting tensors, etc.
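
For example (a sketch of the intended lowering for a rank-2 COO tensor;
the %hint and %m names are illustrative, and the exact IR is checked by
the new test case below):

  %0 = bufferization.alloc_tensor() size_hint=%hint : tensor<10x20xf64, #Coo>
  // With the hint, codegen now sizes the underlying buffers as:
  //   memref.alloc()      : memref<2xindex>  pointer buffer (COO region starts at dim 0)
  //   memref.alloc(%m)    : memref<?xindex>  AOS index buffer, with %m = rank * %hint
  //   memref.alloc(%hint) : memref<?xf64>    value buffer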
Reviewed By: Peiming, bixia
Differential Revision: https://reviews.llvm.org/D143292
Added:
Modified:
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
mlir/test/Dialect/SparseTensor/codegen.mlir
Removed:
################################################################################
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
index f96aeeeab4027..1fba7d8d74f73 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -198,8 +198,10 @@ static Value createAllocation(OpBuilder &builder, Location loc,
 ///
 static void createAllocFields(OpBuilder &builder, Location loc, Type type,
                               ValueRange dynSizes, bool enableInit,
-                              SmallVectorImpl<Value> &fields) {
+                              SmallVectorImpl<Value> &fields, Value sizeHint) {
   RankedTensorType rtp = type.cast<RankedTensorType>();
+  SparseTensorEncodingAttr enc = getSparseTensorEncoding(rtp);
+
   // Build original sizes.
   SmallVector<Value> sizes;
   auto shape = rtp.getShape();
@@ -211,19 +213,34 @@ static void createAllocFields(OpBuilder &builder, Location loc, Type type,
       sizes.push_back(constantIndex(builder, loc, shape[r]));
   }
 
-  Value heuristic = constantIndex(builder, loc, 16);
-  Value valHeuristic = heuristic;
-  SparseTensorEncodingAttr enc = getSparseTensorEncoding(rtp);
+  // Set up some heuristic sizes. We try to set the initial
+  // size based on available information. Otherwise we just
+  // initialize a few elements to start the reallocation chain.
+  // TODO: refine this
+  Value ptrHeuristic, idxHeuristic, valHeuristic;
   if (enc.isAllDense()) {
     Value linear = sizes[0];
     for (unsigned r = 1; r < rank; r++) {
       linear = builder.create<arith::MulIOp>(loc, linear, sizes[r]);
     }
     valHeuristic = linear;
+  } else if (sizeHint) {
+    if (getCOOStart(enc) == 0) {
+      ptrHeuristic = constantIndex(builder, loc, 2);
+      idxHeuristic = builder.create<arith::MulIOp>(
+          loc, constantIndex(builder, loc, rank), sizeHint); // AOS
+    } else {
+      ptrHeuristic = idxHeuristic = constantIndex(builder, loc, 16);
+    }
+    valHeuristic = sizeHint;
+  } else {
+    ptrHeuristic = idxHeuristic = valHeuristic =
+        constantIndex(builder, loc, 16);
   }
+
   foreachFieldAndTypeInSparseTensor(
       rtp,
-      [&builder, &fields, rtp, loc, heuristic, valHeuristic,
+      [&builder, &fields, rtp, loc, ptrHeuristic, idxHeuristic, valHeuristic,
        enableInit](Type fType, unsigned fIdx, SparseTensorFieldKind fKind,
                    unsigned /*dim*/, DimLevelType /*dlt*/) -> bool {
         assert(fields.size() == fIdx);
@@ -235,11 +252,12 @@ static void createAllocFields(OpBuilder &builder, Location loc, Type type,
         case SparseTensorFieldKind::PtrMemRef:
         case SparseTensorFieldKind::IdxMemRef:
         case SparseTensorFieldKind::ValMemRef:
-          field = createAllocation(builder, loc, fType.cast<MemRefType>(),
-                                   fKind == SparseTensorFieldKind::ValMemRef
-                                       ? valHeuristic
-                                       : heuristic,
-                                   enableInit);
+          field = createAllocation(
+              builder, loc, fType.cast<MemRefType>(),
+              (fKind == SparseTensorFieldKind::PtrMemRef)   ? ptrHeuristic
+              : (fKind == SparseTensorFieldKind::IdxMemRef) ? idxHeuristic
+                                                            : valHeuristic,
+              enableInit);
           break;
         }
         assert(field);
@@ -691,9 +709,10 @@ class SparseTensorAllocConverter
     // Construct allocation for each field.
     Location loc = op.getLoc();
+    Value sizeHint = op.getSizeHint();
     SmallVector<Value> fields;
     createAllocFields(rewriter, loc, resType, adaptor.getOperands(),
-                      enableBufferInitialization, fields);
+                      enableBufferInitialization, fields, sizeHint);
 
     // Replace operation with resulting memrefs.
     rewriter.replaceOp(op, genTuple(rewriter, loc, resType, fields));
     return success();
diff --git a/mlir/test/Dialect/SparseTensor/codegen.mlir b/mlir/test/Dialect/SparseTensor/codegen.mlir
index 1e06e65212468..081ad5b2cf1e1 100644
--- a/mlir/test/Dialect/SparseTensor/codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/codegen.mlir
@@ -363,6 +363,19 @@ func.func @sparse_alloc_3d() -> tensor<10x20x30xf64, #Dense3D> {
   return %1 : tensor<10x20x30xf64, #Dense3D>
 }
 
+// CHECK-LABEL: func.func @sparse_alloc_coo_with_size_hint(
+//  CHECK-SAME:  %[[HINT:.*]]: index)
+//       CHECK:  %[[C2:.*]] = arith.constant 2 : index
+//       CHECK:  %[[M2:.*]] = arith.muli %[[HINT]], %[[C2]] : index
+//       CHECK:  %[[A1:.*]] = memref.alloc() : memref<2xindex>
+//       CHECK:  %[[A2:.*]] = memref.alloc(%[[M2]]) : memref<?xindex>
+//       CHECK:  %[[A3:.*]] = memref.alloc(%[[HINT]]) : memref<?xf64>
+func.func @sparse_alloc_coo_with_size_hint(%arg0: index) -> tensor<10x20xf64, #Coo> {
+  %0 = bufferization.alloc_tensor() size_hint=%arg0 : tensor<10x20xf64, #Coo>
+  %1 = sparse_tensor.load %0 : tensor<10x20xf64, #Coo>
+  return %1 : tensor<10x20xf64, #Coo>
+}
+
 // CHECK-LABEL: func.func @sparse_expansion1()
 //       CHECK: %[[A:.*]] = memref.alloc() : memref<8xf64>
 //       CHECK: %[[B:.*]] = memref.alloc() : memref<8xi1>
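
For contrast, the all-dense path (unchanged by this revision) keeps
sizing the value buffer by the linearized product of the dimension
sizes rather than by a hint. Roughly, following the existing
@sparse_alloc_3d test above:

  %0 = bufferization.alloc_tensor() : tensor<10x20x30xf64, #Dense3D>
  // the value buffer is allocated at its full size up front,
  // 10 * 20 * 30 = 6000 elements:
  //   memref.alloc() : memref<6000xf64>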