[Mlir-commits] [mlir] 0e1708f - [mlir][sparse] cleanup small vector constant hints
Aart Bik
llvmlistbot at llvm.org
Tue Nov 15 15:09:15 PST 2022
Author: Aart Bik
Date: 2022-11-15T15:09:06-08:00
New Revision: 0e1708ff649f5883749877558f839b26c85725fd
URL: https://github.com/llvm/llvm-project/commit/0e1708ff649f5883749877558f839b26c85725fd
DIFF: https://github.com/llvm/llvm-project/commit/0e1708ff649f5883749877558f839b26c85725fd.diff
LOG: [mlir][sparse] cleanup small vector constant hints
Following advice from
https://llvm.org/docs/ProgrammersManual.html#llvm-adt-smallvector-h
this revision removes the size hints from SmallVector (unless we are
certain of the resulting number of elements). It also replaces
SmallVector references with SmallVectorImpl references.
Reviewed By: Peiming
Differential Revision: https://reviews.llvm.org/D138063
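For context, the SmallVector guideline being applied here boils down to:
omit the inline-capacity hint when the number of elements is not known up
front (SmallVector then picks a sensible default), keep an explicit hint
only when the count is exact, and have callees take SmallVectorImpl<T>&
so that callers can pass a SmallVector with any inline capacity without
an implicit copy. A minimal sketch of the idiom (not part of this patch;
appendZeros and demo are hypothetical names used only for illustration):

#include "llvm/ADT/SmallVector.h"

using llvm::SmallVector;
using llvm::SmallVectorImpl;

// Callees accept SmallVectorImpl<T>&, so any SmallVector<T, N> binds.
static void appendZeros(SmallVectorImpl<int> &out, unsigned n) {
  for (unsigned i = 0; i < n; ++i)
    out.push_back(0);
}

void demo(unsigned rank) {
  // Count unknown up front: no hint; a default inline capacity is used.
  SmallVector<int> sizes;
  appendZeros(sizes, rank);

  // Count known exactly (always two here): an explicit hint is fine.
  SmallVector<int, 2> pair = {0, 1};
  appendZeros(pair, 0);
}

The final Merger.cpp hunk applies the same idea to a temporary:
latSets.emplace_back() default-constructs the new inner vector in place
instead of move-constructing it from an explicitly sized
SmallVector<unsigned, 16>.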
Added:
Modified:
mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h
mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h b/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h
index ebc33f395e5a9..dc27e85d3e716 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h
@@ -298,7 +298,7 @@ class Merger {
/// may cause data movement and invalidate the underlying memory address.
TensorExp &exp(unsigned e) { return tensorExps[e]; }
LatPoint &lat(unsigned l) { return latPoints[l]; }
- SmallVector<unsigned, 16> &set(unsigned s) { return latSets[s]; }
+ SmallVector<unsigned> &set(unsigned s) { return latSets[s]; }
#ifndef NDEBUG
/// Print methods (for debugging).
@@ -341,9 +341,9 @@ class Merger {
std::vector<std::vector<DimLevelType>> dimTypes;
// Map that converts pair<tensor id, loop id> to the corresponding dimension.
std::vector<std::vector<Optional<unsigned>>> loopIdxToDim;
- llvm::SmallVector<TensorExp, 32> tensorExps;
- llvm::SmallVector<LatPoint, 16> latPoints;
- llvm::SmallVector<SmallVector<unsigned, 16>, 8> latSets;
+ llvm::SmallVector<TensorExp> tensorExps;
+ llvm::SmallVector<LatPoint> latPoints;
+ llvm::SmallVector<SmallVector<unsigned>> latSets;
};
} // namespace sparse_tensor
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index 2f7c0bc304a10..36b45de2efeb3 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -51,7 +51,7 @@ Attribute SparseTensorEncodingAttr::parse(AsmParser &parser, Type type) {
if (failed(parser.parseGreater()))
return {};
// Process the data from the parsed dictionary value into struct-like data.
- SmallVector<DimLevelType, 4> dlt;
+ SmallVector<DimLevelType> dlt;
AffineMap dimOrd = {};
AffineMap higherOrd = {};
unsigned ptr = 0;
@@ -601,7 +601,7 @@ void ForeachOp::build(
auto rtp = tensor.getType().cast<RankedTensorType>();
int64_t rank = rtp.getRank();
- SmallVector<Type, 4> blockArgTypes;
+ SmallVector<Type> blockArgTypes;
// Starts with n indices.
std::fill_n(std::back_inserter(blockArgTypes), rank, builder.getIndexType());
// Followed by one value.
@@ -609,7 +609,7 @@ void ForeachOp::build(
// Followed by reduction variable.
blockArgTypes.append(initArgs.getTypes().begin(), initArgs.getTypes().end());
- SmallVector<Location, 4> blockArgLocs;
+ SmallVector<Location> blockArgLocs;
std::fill_n(std::back_inserter(blockArgLocs), blockArgTypes.size(),
tensor.getLoc());
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
index 2db2b152958e9..5692af5004c0f 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
@@ -296,8 +296,8 @@ Operation *SparseTensorLoopEmitter::enterCoIterationOverTensorsAtDims(
ArrayRef<size_t> dims, bool needsUniv, MutableArrayRef<Value> reduc,
ArrayRef<size_t> extraTids, ArrayRef<size_t> extraDims) {
assert(tids.size() == dims.size());
- SmallVector<Type, 4> types;
- SmallVector<Value, 4> operands;
+ SmallVector<Type> types;
+ SmallVector<Value> operands;
// Construct the while-loop with a parameter for each index.
Type indexType = builder.getIndexType();
for (auto [tid, dim] : llvm::zip(tids, dims)) {
@@ -556,7 +556,7 @@ void SparseTensorLoopEmitter::exitCoIterationLoop(
// instructions during code generation. Moreover, performing the induction
// after the if-statements more closely resembles code generated by TACO.
unsigned o = 0;
- SmallVector<Value, 4> operands;
+ SmallVector<Value> operands;
Value one = constantIndex(builder, loc, 1);
for (auto [tid, dim] : llvm::zip(tids, dims)) {
if (isCompressedDLT(dimTypes[tid][dim]) ||
@@ -604,7 +604,7 @@ void SparseTensorLoopEmitter::exitCurrentLoop(RewriterBase &rewriter,
// earlier stage (instead of silently using a wrong value).
LoopLevelInfo &loopInfo = loopStack.back();
assert(loopInfo.tids.size() == loopInfo.dims.size());
- SmallVector<Value, 2> red;
+ SmallVector<Value> red;
if (llvm::isa<scf::WhileOp>(loopInfo.loop)) {
exitCoIterationLoop(rewriter, loc, reduc);
} else {
@@ -777,7 +777,7 @@ Value mlir::sparse_tensor::genIsNonzero(OpBuilder &builder, mlir::Location loc,
}
void mlir::sparse_tensor::genReshapeDstShape(
- Location loc, PatternRewriter &rewriter, SmallVector<Value, 4> &dstShape,
+ Location loc, PatternRewriter &rewriter, SmallVectorImpl<Value> &dstShape,
ArrayRef<Value> srcShape, ArrayRef<int64_t> staticDstShape,
ArrayRef<ReassociationIndices> reassociation) {
// Collapse shape.
@@ -967,7 +967,7 @@ Value mlir::sparse_tensor::genValueForDense(OpBuilder &builder, Location loc,
void mlir::sparse_tensor::genDenseTensorOrSparseConstantIterLoop(
OpBuilder &builder, Location loc, Value src, unsigned rank,
function_ref<void(OpBuilder &, Location, Value, ValueRange)> bodyBuilder) {
- SmallVector<Value, 4> indicesArray;
+ SmallVector<Value> indicesArray;
SmallVector<Value> lo;
SmallVector<Value> hi;
SmallVector<Value> st;
@@ -1007,7 +1007,7 @@ void mlir::sparse_tensor::genDenseTensorOrSparseConstantIterLoop(
}
void mlir::sparse_tensor::sizesFromSrc(OpBuilder &builder,
- SmallVector<Value, 4> &sizes,
+ SmallVectorImpl<Value> &sizes,
Location loc, Value src) {
unsigned rank = src.getType().cast<ShapedType>().getRank();
for (unsigned i = 0; i < rank; i++)
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
index 9d26e2fc38d6c..ebb82d025c6f8 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
@@ -95,7 +95,7 @@ Value genIsNonzero(OpBuilder &builder, Location loc, Value v);
/// used when operands have dynamic shape. The shape of the destination is
/// stored into dstShape.
void genReshapeDstShape(Location loc, PatternRewriter &rewriter,
- SmallVector<Value, 4> &dstShape,
+ SmallVectorImpl<Value> &dstShape,
ArrayRef<Value> srcShape,
ArrayRef<int64_t> staticDstShape,
ArrayRef<ReassociationIndices> reassociation);
@@ -177,7 +177,7 @@ void genDenseTensorOrSparseConstantIterLoop(
function_ref<void(OpBuilder &, Location, Value, ValueRange)> bodyBuilder);
/// Populates given sizes array from dense tensor or sparse tensor constant.
-void sizesFromSrc(OpBuilder &builder, SmallVector<Value, 4> &sizes,
+void sizesFromSrc(OpBuilder &builder, SmallVectorImpl<Value> &sizes,
Location loc, Value src);
/// Scans to top of generated loop.
@@ -420,9 +420,9 @@ class SparseTensorLoopEmitter {
: tids(tids), dims(dims), loop(loop), iv(iv) {}
// TODO: maybe use a vector<pair> for tid and dim?
// The set of tensors that the loop is operating on
- const llvm::SmallVector<size_t, 4> tids;
+ const llvm::SmallVector<size_t> tids;
// The corresponding dims for the tensors
- const llvm::SmallVector<size_t, 4> dims;
+ const llvm::SmallVector<size_t> dims;
const Operation *loop; // the loop operation
const Value iv; // the induction variable for the loop
};
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp
index c556b0d3afe29..75fa3e8b347c7 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp
@@ -331,7 +331,7 @@ static void createBinarySearchFunc(OpBuilder &builder, ModuleOp module,
Location loc = func.getLoc();
ValueRange args = entryBlock->getArguments();
Value p = args[hiIdx];
- SmallVector<Type, 2> types(2, p.getType());
+ SmallVector<Type, 2> types(2, p.getType()); // only two
scf::WhileOp whileOp = builder.create<scf::WhileOp>(
loc, types, SmallVector<Value, 2>{args[loIdx], args[hiIdx]});
@@ -357,7 +357,7 @@ static void createBinarySearchFunc(OpBuilder &builder, ModuleOp module,
Value midp1 = builder.create<arith::AddIOp>(loc, mid, c1);
// Compare xs[p] < xs[mid].
- SmallVector<Value, 6> compareOperands{p, mid};
+ SmallVector<Value> compareOperands{p, mid};
uint64_t numXBuffers = isCoo ? 1 : nx;
compareOperands.append(args.begin() + xStartIdx,
args.begin() + xStartIdx + numXBuffers);
@@ -400,7 +400,7 @@ createScanLoop(OpBuilder &builder, ModuleOp module, func::FuncOp func,
Block *before =
builder.createBlock(&whileOp.getBefore(), {}, {i.getType()}, {loc});
builder.setInsertionPointToEnd(before);
- SmallVector<Value, 6> compareOperands;
+ SmallVector<Value> compareOperands;
if (step > 0) {
compareOperands.push_back(before->getArgument(0));
compareOperands.push_back(p);
@@ -490,8 +490,8 @@ static void createPartitionFunc(OpBuilder &builder, ModuleOp module,
Value i = lo;
Value j = builder.create<arith::SubIOp>(loc, hi, c1);
- SmallVector<Value, 4> operands{i, j, p};
- SmallVector<Type, 4> types{i.getType(), j.getType(), p.getType()};
+ SmallVector<Value> operands{i, j, p};
+ SmallVector<Type> types{i.getType(), j.getType(), p.getType()};
scf::WhileOp whileOp = builder.create<scf::WhileOp>(loc, types, operands);
// The before-region of the WhileOp.
@@ -525,7 +525,7 @@ static void createPartitionFunc(OpBuilder &builder, ModuleOp module,
cond = builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ult, i, j);
scf::IfOp ifOp = builder.create<scf::IfOp>(loc, types, cond, /*else=*/true);
builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
- SmallVector<Value, 6> swapOperands{i, j};
+ SmallVector<Value> swapOperands{i, j};
swapOperands.append(args.begin() + xStartIdx, args.end());
createSwap(builder, loc, swapOperands, nx, ny, isCoo);
// If the pivot is moved, update p with the new pivot.
@@ -610,11 +610,11 @@ static void createSortNonstableFunc(OpBuilder &builder, ModuleOp module,
auto p = builder.create<func::CallOp>(
loc, partitionFunc, TypeRange{IndexType::get(context)}, ValueRange(args));
- SmallVector<Value, 6> lowOperands{lo, p.getResult(0)};
+ SmallVector<Value> lowOperands{lo, p.getResult(0)};
lowOperands.append(args.begin() + xStartIdx, args.end());
builder.create<func::CallOp>(loc, func, lowOperands);
- SmallVector<Value, 6> highOperands{
+ SmallVector<Value> highOperands{
builder.create<arith::AddIOp>(loc, p.getResult(0),
constantIndex(builder, loc, 1)),
hi};
@@ -660,7 +660,7 @@ static void createSortStableFunc(OpBuilder &builder, ModuleOp module,
Value i = forOpI.getInductionVar();
// Binary search to find the insertion point p.
- SmallVector<Value, 6> operands{lo, i};
+ SmallVector<Value> operands{lo, i};
operands.append(args.begin() + xStartIdx, args.end());
FlatSymbolRefAttr searchFunc = getMangledSortHelperFunc(
builder, func, {IndexType::get(context)}, kBinarySearchFuncNamePrefix, nx,
@@ -672,7 +672,7 @@ static void createSortStableFunc(OpBuilder &builder, ModuleOp module,
// Move the value at data[i] to a temporary location.
operands[0] = operands[1] = i;
- SmallVector<Value, 6> d;
+ SmallVector<Value> d;
forEachIJPairInAllBuffers(
builder, loc, operands, nx, ny, isCoo,
[&](uint64_t unused, Value i, Value unused2, Value buffer) {
@@ -715,7 +715,7 @@ LogicalResult matchAndRewriteSortOp(OpTy op, ValueRange xys, uint64_t nx,
uint64_t ny, bool isCoo,
PatternRewriter &rewriter) {
Location loc = op.getLoc();
- SmallVector<Value, 6> operands{constantIndex(rewriter, loc, 0), op.getN()};
+ SmallVector<Value> operands{constantIndex(rewriter, loc, 0), op.getN()};
// Convert `values` to have dynamic shape and append them to `operands`.
for (Value v : xys) {
@@ -869,7 +869,7 @@ struct SortRewriter : public OpRewritePattern<SortOp> {
LogicalResult matchAndRewrite(SortOp op,
PatternRewriter &rewriter) const override {
- SmallVector<Value, 6> xys(op.getXs());
+ SmallVector<Value> xys(op.getXs());
xys.append(op.getYs().begin(), op.getYs().end());
return matchAndRewriteSortOp(op, xys, op.getXs().size(), /*ny=*/0,
/*isCoo=*/false, rewriter);
@@ -883,7 +883,7 @@ struct SortCooRewriter : public OpRewritePattern<SortCooOp> {
LogicalResult matchAndRewrite(SortCooOp op,
PatternRewriter &rewriter) const override {
- SmallVector<Value, 6> xys;
+ SmallVector<Value> xys;
xys.push_back(op.getXy());
xys.append(op.getYs().begin(), op.getYs().end());
uint64_t nx = 1;
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
index a37fecf0865e4..77b3e5bf6af99 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -324,7 +324,7 @@ static void createAllocFields(OpBuilder &builder, Location loc, Type type,
unsigned rank = shape.size();
Value heuristic = constantIndex(builder, loc, 16);
// Build original sizes.
- SmallVector<Value, 8> sizes;
+ SmallVector<Value> sizes;
for (unsigned r = 0, o = 0; r < rank; r++) {
if (ShapedType::isDynamic(shape[r]))
sizes.push_back(dynSizes[o++]);
@@ -403,7 +403,7 @@ static Value genCompressed(OpBuilder &builder, Location loc,
SmallVectorImpl<Value> &indices, Value value,
Value pos, unsigned field, unsigned d) {
unsigned rank = rtp.getShape().size();
- SmallVector<Type, 4> types;
+ SmallVector<Type> types;
Type indexType = builder.getIndexType();
Type boolType = builder.getIntegerType(1);
Value one = constantIndex(builder, loc, 1);
@@ -543,7 +543,7 @@ static void genEndInsert(OpBuilder &builder, Location loc, RankedTensorType rtp,
Value hi = genLoad(builder, loc, fields[memSizesIdx], mz);
Value zero = constantIndex(builder, loc, 0);
Value one = constantIndex(builder, loc, 1);
- SmallVector<Value, 1> inits;
+ SmallVector<Value, 1> inits; // only one
inits.push_back(genLoad(builder, loc, fields[field], zero));
scf::ForOp loop = createFor(builder, loc, hi, inits, one);
Value i = loop.getInductionVar();
@@ -584,7 +584,7 @@ class SparseReturnConverter : public OpConversionPattern<func::ReturnOp> {
LogicalResult
matchAndRewrite(func::ReturnOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
- SmallVector<Value, 8> flattened;
+ SmallVector<Value> flattened;
flattenOperands(adaptor.getOperands(), flattened);
// Create a return with the flattened value extracted from sparse tensors.
rewriter.replaceOpWithNewOp<func::ReturnOp>(op, flattened);
@@ -606,23 +606,23 @@ class SparseCallConverter : public OpConversionPattern<func::CallOp> {
// ==>
// memref..., f, memref = call @foo(...) replace with
// cast(memref...)->sparse_tensor, f, cast(memref...)->sparse_tensor
- SmallVector<Type, 8> finalRetTy;
+ SmallVector<Type> finalRetTy;
if (failed(typeConverter->convertTypes(op.getResultTypes(), finalRetTy)))
return failure();
// (1) Generates a new call with the flattened return value.
- SmallVector<Value, 8> flattened;
+ SmallVector<Value> flattened;
flattenOperands(adaptor.getOperands(), flattened);
auto newCall = rewriter.create<func::CallOp>(loc, op.getCallee(),
finalRetTy, flattened);
// (2) Create cast operation for sparse tensor returns.
- SmallVector<Value, 4> castedRet;
+ SmallVector<Value> castedRet;
// Tracks the offset of the current return value (of the original call)
// relative to the new call (after sparse tensor flattening).
unsigned retOffset = 0;
// Temporary buffer to hold the flattened list of types for
// a sparse tensor.
- SmallVector<Type, 8> sparseFlat;
+ SmallVector<Type> sparseFlat;
for (auto ret : op.getResults()) {
assert(retOffset < newCall.getNumResults());
auto retType = ret.getType();
@@ -713,7 +713,7 @@ class SparseTensorAllocConverter
// Construct allocation for each field.
Location loc = op.getLoc();
- SmallVector<Value, 8> fields;
+ SmallVector<Value> fields;
createAllocFields(rewriter, loc, resType, adaptor.getOperands(),
enableBufferInitialization, fields);
// Replace operation with resulting memrefs.
@@ -760,7 +760,7 @@ class SparseTensorLoadConverter : public OpConversionPattern<LoadOp> {
op.getTensor().getType().cast<RankedTensorType>();
auto tuple = getTuple(adaptor.getTensor());
// Prepare fields.
- SmallVector<Value, 8> fields(tuple.getInputs());
+ SmallVector<Value> fields(tuple.getInputs());
// Generate optional insertion finalization code.
if (op.getHasInserts())
genEndInsert(rewriter, op.getLoc(), srcType, fields);
@@ -839,8 +839,8 @@ class SparseCompressConverter : public OpConversionPattern<CompressOp> {
Value added = adaptor.getAdded();
Value count = adaptor.getCount();
// Prepare fields and indices.
- SmallVector<Value, 8> fields(tuple.getInputs());
- SmallVector<Value, 8> indices(adaptor.getIndices());
+ SmallVector<Value> fields(tuple.getInputs());
+ SmallVector<Value> indices(adaptor.getIndices());
// If the innermost dimension is ordered, we need to sort the indices
// in the "added" array prior to applying the compression.
unsigned rank = dstType.getShape().size();
@@ -897,8 +897,8 @@ class SparseInsertConverter : public OpConversionPattern<InsertOp> {
op.getTensor().getType().cast<RankedTensorType>();
auto tuple = getTuple(adaptor.getTensor());
// Prepare fields and indices.
- SmallVector<Value, 8> fields(tuple.getInputs());
- SmallVector<Value, 8> indices(adaptor.getIndices());
+ SmallVector<Value> fields(tuple.getInputs());
+ SmallVector<Value> indices(adaptor.getIndices());
// Generate insertion.
Value value = adaptor.getValue();
genInsert(rewriter, op->getLoc(), dstType, fields, indices, value);
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
index c73fc1f1d43a8..8c8bf737352b1 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -63,7 +63,7 @@ static Value genLvlSizeCall(OpBuilder &builder, Location loc,
uint64_t lvl) {
// Generate the call.
StringRef name = "sparseLvlSize";
- SmallVector<Value, 2> params{
+ SmallVector<Value, 2> params{ // just two
src, constantIndex(builder, loc, toStoredDim(enc, lvl))};
Type iTp = builder.getIndexType();
return createFuncCall(builder, loc, name, iTp, params, EmitCInterface::Off)
@@ -92,7 +92,7 @@ static Value sizeFromPtrAtDim(OpBuilder &builder, Location loc,
/// Populates given sizes array from type (for static sizes) and from
/// an already-converted opaque pointer source (for dynamic sizes).
-static void sizesFromPtr(OpBuilder &builder, SmallVector<Value, 4> &sizes,
+static void sizesFromPtr(OpBuilder &builder, SmallVectorImpl<Value> &sizes,
Location loc, SparseTensorEncodingAttr &enc,
ShapedType stp, Value src) {
for (unsigned i = 0, rank = stp.getRank(); i < rank; i++)
@@ -100,7 +100,7 @@ static void sizesFromPtr(OpBuilder &builder, SmallVector<Value, 4> &sizes,
}
/// Populates given sizes array from type.
-static void sizesFromType(OpBuilder &builder, SmallVector<Value, 4> &sizes,
+static void sizesFromType(OpBuilder &builder, SmallVectorImpl<Value> &sizes,
Location loc, ShapedType stp) {
auto shape = stp.getShape();
for (unsigned i = 0, rank = stp.getRank(); i < rank; i++) {
@@ -113,7 +113,7 @@ static void sizesFromType(OpBuilder &builder, SmallVector<Value, 4> &sizes,
/// sizes) and from an already-converted opaque pointer source (for dynamic
/// sizes).
static void concatSizesFromInputs(OpBuilder &builder,
- SmallVector<Value, 4> &sizes, Location loc,
+ SmallVectorImpl<Value> &sizes, Location loc,
ShapedType dstTp, ValueRange srcs,
unsigned dim) {
auto dstShape = dstTp.getShape();
@@ -262,7 +262,7 @@ NewCallParams &NewCallParams::genBuffers(SparseTensorEncodingAttr enc,
const unsigned lvlRank = enc.getDimLevelType().size();
const unsigned dimRank = stp.getRank();
// Sparsity annotations.
- SmallVector<Value, 4> lvlTypes;
+ SmallVector<Value> lvlTypes;
for (auto dlt : enc.getDimLevelType())
lvlTypes.push_back(constantDimLevelTypeEncoding(builder, loc, dlt));
assert(lvlTypes.size() == lvlRank && "Level-rank mismatch");
@@ -276,12 +276,12 @@ NewCallParams &NewCallParams::genBuffers(SparseTensorEncodingAttr enc,
// For now however, since we're still assuming permutations, we will
// initialize this parameter alongside the `dim2lvl` and `lvl2dim`
// parameters below. We preinitialize `lvlSizes` for code symmetry.
- SmallVector<Value, 4> lvlSizes(lvlRank);
+ SmallVector<Value> lvlSizes(lvlRank);
// The dimension-to-level mapping and its inverse. We must preinitialize
// `dim2lvl` so that the true branch below can perform random-access
// `operator[]` assignment. We preinitialize `lvl2dim` for code symmetry.
- SmallVector<Value, 4> dim2lvl(dimRank);
- SmallVector<Value, 4> lvl2dim(lvlRank);
+ SmallVector<Value> dim2lvl(dimRank);
+ SmallVector<Value> lvl2dim(lvlRank);
auto dimOrder = enc.getDimOrdering();
if (dimOrder) {
assert(dimOrder.isPermutation());
@@ -365,11 +365,11 @@ static Value genGetNextCall(OpBuilder &builder, Location loc, Value iter,
/// Converts a pointer to COO (from calls to iter->next()) into a vector of
/// indices, applying the (optional) `offset` on `offsetDim`.
-static SmallVector<Value, 4> loadIndices(OpBuilder &builder, Location loc,
- unsigned rank, Value ind,
- unsigned offsetDim = 0,
- Value offset = Value()) {
- SmallVector<Value, 4> ivs;
+static SmallVector<Value> loadIndices(OpBuilder &builder, Location loc,
+ unsigned rank, Value ind,
+ unsigned offsetDim = 0,
+ Value offset = Value()) {
+ SmallVector<Value> ivs;
ivs.reserve(rank);
for (unsigned i = 0; i < rank; i++) {
Value idx = constantIndex(builder, loc, i);
@@ -437,14 +437,14 @@ static void translateIndices(Location loc, ConversionPatternRewriter &rewriter,
unsigned dstRank = dstTp.getRank();
unsigned srcRank = srcTp.getRank();
- SmallVector<Value, 4> srcIndices;
+ SmallVector<Value> srcIndices;
for (unsigned i = 0; i < srcRank; i++) {
Value idx = rewriter.create<memref::LoadOp>(
loc, srcIdx, constantIndex(rewriter, loc, i));
srcIndices.push_back(idx);
}
- SmallVector<Value, 4> dstIndices;
+ SmallVector<Value> dstIndices;
translateIndicesArray(rewriter, loc, reassociation, srcIndices, srcShape,
dstShape, dstIndices);
@@ -488,13 +488,13 @@ genSparse2SparseReshape(ReshapeOp op, typename ReshapeOp::Adaptor adaptor,
auto noPerm = SparseTensorEncodingAttr::get(
op->getContext(), encSrc.getDimLevelType(), AffineMap(), AffineMap(),
encSrc.getPointerBitWidth(), encSrc.getIndexBitWidth());
- SmallVector<Value, 4> srcSizes;
+ SmallVector<Value> srcSizes;
sizesFromPtr(rewriter, srcSizes, loc, encSrc, srcTp, adaptor.getSrc());
NewCallParams params(rewriter, loc);
Value iter = params.genBuffers(noPerm, srcSizes, srcTp)
.genNewCall(Action::kToIterator, adaptor.getSrc());
// Start a new COO for the destination tensor.
- SmallVector<Value, 4> dstSizes;
+ SmallVector<Value> dstSizes;
if (dstTp.hasStaticShape()) {
sizesFromType(rewriter, dstSizes, loc, dstTp);
} else {
@@ -555,7 +555,7 @@ static void genSparseCOOIterationLoop(
auto noPerm = SparseTensorEncodingAttr::get(
rewriter.getContext(), enc.getDimLevelType(), AffineMap(), AffineMap(),
enc.getPointerBitWidth(), enc.getIndexBitWidth());
- SmallVector<Value, 4> sizes;
+ SmallVector<Value> sizes;
sizesFromPtr(rewriter, sizes, loc, noPerm, tensorTp, t);
Value iter = NewCallParams(rewriter, loc)
.genBuffers(noPerm, sizes, tensorTp)
@@ -721,7 +721,7 @@ class SparseTensorNewConverter : public OpConversionPattern<NewOp> {
return failure();
// Generate the call to construct tensor from ptr. The sizes are
// inferred from the result type of the new operator.
- SmallVector<Value, 4> sizes;
+ SmallVector<Value> sizes;
ShapedType stp = resType.cast<ShapedType>();
sizesFromType(rewriter, sizes, loc, stp);
Value ptr = adaptor.getOperands()[0];
@@ -800,7 +800,7 @@ class SparseTensorConvertConverter : public OpConversionPattern<ConvertOp> {
rewriter.replaceOp(op, adaptor.getOperands()); // hidden nop cast
return success();
}
- SmallVector<Value, 4> sizes;
+ SmallVector<Value> sizes;
NewCallParams params(rewriter, loc);
ShapedType stp = srcType.cast<ShapedType>();
sizesFromPtr(rewriter, sizes, loc, encSrc, stp, src);
@@ -860,7 +860,7 @@ class SparseTensorConvertConverter : public OpConversionPattern<ConvertOp> {
op->getContext(),
SmallVector<DimLevelType>(rank, DimLevelType::Dense), AffineMap(),
AffineMap(), encSrc.getPointerBitWidth(), encSrc.getIndexBitWidth());
- SmallVector<Value, 4> sizes;
+ SmallVector<Value> sizes;
sizesFromPtr(rewriter, sizes, loc, encSrc, srcTensorTp, src);
Value iter = NewCallParams(rewriter, loc)
.genBuffers(encDst, sizes, dstTensorTp)
@@ -880,7 +880,7 @@ class SparseTensorConvertConverter : public OpConversionPattern<ConvertOp> {
rewriter.create<scf::ConditionOp>(loc, cond, before->getArguments());
Block *after = rewriter.createBlock(&whileOp.getAfter(), {}, noTypes);
rewriter.setInsertionPointToStart(after);
- SmallVector<Value, 4> ivs = loadIndices(rewriter, loc, rank, ind);
+ SmallVector<Value> ivs = loadIndices(rewriter, loc, rank, ind);
insertScalarIntoDenseTensor(rewriter, loc, elemPtr, dst, ivs);
rewriter.create<scf::YieldOp>(loc);
rewriter.setInsertionPointAfter(whileOp);
@@ -925,7 +925,7 @@ class SparseTensorConvertConverter : public OpConversionPattern<ConvertOp> {
// loop is generated by genAddElt().
ShapedType stp = resType.cast<ShapedType>();
unsigned rank = stp.getRank();
- SmallVector<Value, 4> sizes;
+ SmallVector<Value> sizes;
sizesFromSrc(rewriter, sizes, loc, src);
NewCallParams params(rewriter, loc);
Value coo =
@@ -1223,7 +1223,7 @@ class SparseTensorConcatConverter : public OpConversionPattern<ConcatenateOp> {
// The offset applied to the dimension to be concatenated (starting from 0).
Value offset = constantIndex(rewriter, loc, 0);
- SmallVector<Value, 4> sizes;
+ SmallVector<Value> sizes;
NewCallParams params(rewriter, loc);
concatSizesFromInputs(rewriter, sizes, loc, dstTp, op.getInputs(),
concatDim);
@@ -1277,7 +1277,7 @@ class SparseTensorConcatConverter : public OpConversionPattern<ConcatenateOp> {
} else {
// Case: dense => dense
Value val = genValueForDense(builder, loc, adaptedOp, idx);
- SmallVector<Value, 4> indVec(idx);
+ SmallVector<Value> indVec(idx);
// Apply offset.
indVec[concatDim] = builder.create<arith::AddIOp>(
loc, indVec[concatDim], offset);
@@ -1320,7 +1320,7 @@ class SparseTensorOutConverter : public OpConversionPattern<OutOp> {
// Convert to default permuted COO.
Value src = adaptor.getOperands()[0];
auto encSrc = getSparseTensorEncoding(srcType);
- SmallVector<Value, 4> sizes;
+ SmallVector<Value> sizes;
sizesFromPtr(rewriter, sizes, loc, encSrc, srcType, src);
auto enc = SparseTensorEncodingAttr::get(
op->getContext(), encSrc.getDimLevelType(), AffineMap(), AffineMap(),
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
index 5147aa8b25194..6fab36186d15b 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
@@ -113,7 +113,7 @@ static bool isZeroYield(GenericOp op) {
/// Populates given sizes array from type (for static sizes) and from
/// the tensor (for dynamic sizes).
-static void sizesForTensor(OpBuilder &builder, SmallVector<Value, 4> &sizes,
+static void sizesForTensor(OpBuilder &builder, SmallVectorImpl<Value> &sizes,
Location loc, ShapedType stp, Value tensor) {
for (const auto &d : enumerate(stp.getShape())) {
Value dim;
@@ -131,7 +131,7 @@ static void sizesForTensor(OpBuilder &builder, SmallVector<Value, 4> &sizes,
static RankedTensorType getUnorderedCOOFromType(RankedTensorType src) {
auto *ctx = src.getContext();
auto rank = src.getRank();
- SmallVector<DimLevelType, 4> dims;
+ SmallVector<DimLevelType> dims;
// An unordered and non-unique compressed dim at beginning.
dims.push_back(DimLevelType::CompressedNuNo);
@@ -334,10 +334,10 @@ struct Sparse2SparseReshapeRewriter : public OpRewritePattern<ReshapeOp> {
// Generate code to represent the static dimension constants or compute
// the dynamic dimension values.
- SmallVector<Value, 4> srcSizes;
+ SmallVector<Value> srcSizes;
sizesForTensor(rewriter, srcSizes, loc, srcTp, srcTensor);
- SmallVector<Value, 4> dstSizes;
- SmallVector<Value, 4> dstDynSizes;
+ SmallVector<Value> dstSizes;
+ SmallVector<Value> dstDynSizes;
if (dstTp.hasStaticShape()) {
for (auto d : dstTp.getShape())
dstSizes.push_back(constantIndex(rewriter, loc, d));
@@ -363,8 +363,8 @@ struct Sparse2SparseReshapeRewriter : public OpRewritePattern<ReshapeOp> {
loc, srcTensor, cooBuffer,
[&](OpBuilder &builder, Location loc, ValueRange args, Value v,
ValueRange reduc) {
- SmallVector<Value, 4> srcIndices;
- SmallVector<Value, 4> dstIndices;
+ SmallVector<Value> srcIndices;
+ SmallVector<Value> dstIndices;
for (int64_t i = 0, e = srcTp.getRank(); i < e; i++) {
uint64_t dim = toStoredDim(encSrc, i);
srcIndices.push_back(args[dim]);
@@ -469,7 +469,7 @@ struct ConcatenateRewriter : public OpRewritePattern<ConcatenateOp> {
loc, input, cooBuffer,
[&](OpBuilder &builder, Location loc, ValueRange args, Value v,
ValueRange reduc) {
- SmallVector<Value, 4> indices;
+ SmallVector<Value> indices;
for (int64_t i = 0; i < rank; i++) {
Value idx = args[i];
if (i == static_cast<int64_t>(conDim))
@@ -551,9 +551,9 @@ struct ConvertRewriter : public OpRewritePattern<ConvertOp> {
Location loc = op.getLoc();
Value src = op.getSource();
RankedTensorType dstTp = op.getType().cast<RankedTensorType>();
- SmallVector<Value, 4> sizes;
+ SmallVector<Value> sizes;
sizesFromSrc(rewriter, sizes, loc, src);
- SmallVector<Value, 4> dynSizes;
+ SmallVector<Value> dynSizes;
getDynamicSizes(dstTp, sizes, dynSizes);
bool fromSparseConst = false;
@@ -606,7 +606,7 @@ struct ConvertRewriter : public OpRewritePattern<ConvertOp> {
Value src = op.getSource();
RankedTensorType srcTp = src.getType().cast<RankedTensorType>();
- SmallVector<Value, 4> sizes;
+ SmallVector<Value> sizes;
sizesForTensor(rewriter, sizes, loc, srcTp, src);
Value dst = allocDenseTensor(rewriter, loc, dstTp, sizes);
@@ -645,7 +645,7 @@ struct ConvertRewriter : public OpRewritePattern<ConvertOp> {
RankedTensorType dstTp = op.getType().cast<RankedTensorType>();
SparseTensorEncodingAttr encDst = getSparseTensorEncoding(dstTp);
- SmallVector<Value, 4> srcSizes;
+ SmallVector<Value> srcSizes;
sizesForTensor(rewriter, srcSizes, loc, srcTp, src);
Value tmpCoo = Value();
if (!isUniqueCOOType(srcTp)) {
@@ -653,7 +653,7 @@ struct ConvertRewriter : public OpRewritePattern<ConvertOp> {
// TODO: there may be cases that can be handled more efficiently without
// going through an intermediate COO, such as cases that only change
// the overhead types.
- SmallVector<Value, 4> dynSrcSizes;
+ SmallVector<Value> dynSrcSizes;
getDynamicSizes(srcTp, srcSizes, dynSrcSizes);
srcTp = getUnorderedCOOFromType(srcTp);
tmpCoo =
@@ -678,7 +678,7 @@ struct ConvertRewriter : public OpRewritePattern<ConvertOp> {
MemRefType::get(dynShape, getIndexOverheadType(rewriter, encSrc));
uint64_t rank = dstTp.getRank();
// Gather the indices-arrays in the dst tensor storage order.
- SmallVector<Value, 4> xs(rank, Value());
+ SmallVector<Value> xs(rank, Value());
for (uint64_t i = 0; i < rank; i++) {
uint64_t orgDim = toOrigDim(encSrc, i);
xs[toStoredDim(encDst, orgDim)] = rewriter.create<ToIndicesOp>(
@@ -698,11 +698,11 @@ struct ConvertRewriter : public OpRewritePattern<ConvertOp> {
rewriter.create<SortOp>(loc, nnz, xs, ValueRange{y});
// For each element in the COO tensor, insert the element into the dst tensor.
- SmallVector<Value, 4> dynDstSizes;
+ SmallVector<Value> dynDstSizes;
getDynamicSizes(dstTp, srcSizes, dynDstSizes);
Value dst =
rewriter.create<AllocTensorOp>(loc, dstTp, dynDstSizes).getResult();
- SmallVector<Value, 4> indices(srcTp.getRank(), Value());
+ SmallVector<Value> indices(srcTp.getRank(), Value());
auto foreachOp = rewriter.create<ForeachOp>(
loc, src, dst,
[&](OpBuilder &builder, Location loc, ValueRange args, Value v,
@@ -797,7 +797,7 @@ struct ForeachRewriter : public OpRewritePattern<ForeachOp> {
loopEmitter.enterLoopOverTensorAtDim(rewriter, loc, 0, i, reduc);
}
- SmallVector<Value, 4> coords;
+ SmallVector<Value> coords;
coords.reserve(rank);
loopEmitter.getCoordinateArray(coords);
@@ -811,8 +811,8 @@ struct ForeachRewriter : public OpRewritePattern<ForeachOp> {
// 2. Inline the block in the foreach operator.
Block *srcBlock = op.getBody();
- SmallVector<Value, 4> args;
// Remap coordinates.
+ SmallVector<Value> args;
for (int64_t i = 0; i < rank; i++) {
Value actual = coords[toStoredDim(enc, i)];
args.push_back(actual);
@@ -876,7 +876,7 @@ struct NewRewriter : public OpRewritePattern<NewOp> {
// If the result tensor has dynamic dimensions, get the dynamic sizes from
// the sparse tensor reader.
- SmallVector<Value, 4> dynSizesArray;
+ SmallVector<Value> dynSizesArray;
if (!dstTp.hasStaticShape()) {
createFuncCall(rewriter, loc, "getSparseTensorReaderDimSizes", {},
{reader, dimSizes}, EmitCInterface::On)
@@ -918,7 +918,7 @@ struct NewRewriter : public OpRewritePattern<NewOp> {
createFuncCall(rewriter, loc, getNextFuncName, {eltTp},
{reader, indices, value}, EmitCInterface::On)
.getResult(0);
- SmallVector<Value, 4> indicesArray;
+ SmallVector<Value> indicesArray;
for (uint64_t i = 0; i < rank; i++) {
indicesArray.push_back(rewriter.create<memref::LoadOp>(
loc, indices, constantIndex(rewriter, loc, i)));
@@ -962,7 +962,7 @@ struct OutRewriter : public OpRewritePattern<OutOp> {
// Generate code to calculate dimension size values and store the values to
// the buffer.
- SmallVector<Value, 4> dims;
+ SmallVector<Value> dims;
sizesForTensor(rewriter, dims, loc, srcTp, src);
for (uint64_t i = 0; i < rank; i++) {
rewriter.create<memref::StoreOp>(loc, dims[i], dimSizes,
@@ -995,7 +995,7 @@ struct OutRewriter : public OpRewritePattern<OutOp> {
constantIndex(builder, loc, i));
}
rewriter.create<memref::StoreOp>(loc, v, value);
- SmallVector<Value, 4> operands{writer, rankValue, indices, value};
+ SmallVector<Value> operands{writer, rankValue, indices, value};
FlatSymbolRefAttr fn = getFunc(module, outNextFuncName, {}, operands,
EmitCInterface::On);
builder.create<func::CallOp>(loc, TypeRange(), fn, operands);
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
index 3f2ee1b378ef9..e9bf5aa85610d 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -108,11 +108,11 @@ static AffineMap permute(MLIRContext *context, AffineMap m,
assert(m.getNumResults() == sz && "TopoSort/AffineMap size mismatch");
// Construct the inverse of `m` to avoid the asymptotic complexity
// of calling `m.getPermutedPosition` repeatedly.
- SmallVector<unsigned, 4> inv(sz);
+ SmallVector<unsigned> inv(sz);
for (unsigned i = 0; i < sz; i++)
inv[i] = m.getDimPosition(i);
// Construct the permutation.
- SmallVector<unsigned, 4> perm(sz);
+ SmallVector<unsigned> perm(sz);
for (unsigned i = 0; i < sz; i++)
perm[i] = inv[topSort[i]];
return AffineMap::getPermutationMap(perm, context);
@@ -417,7 +417,7 @@ static Optional<Operation *> genLoopBoundary(
CodeGen &codegen, Merger &merger,
function_ref<Optional<Operation *>(MutableArrayRef<Value> reduc)>
callback) {
- SmallVector<Value, 4> reduc;
+ SmallVector<Value> reduc;
if (codegen.redVal)
reduc.push_back(codegen.redVal);
if (codegen.expValues)
@@ -526,7 +526,7 @@ static Value genIndex(CodeGen &codegen, linalg::GenericOp op, OpOperand *t) {
/// Generates subscript for load/store on a dense or sparse tensor.
static Value genSubscript(CodeGen &codegen, OpBuilder &builder,
linalg::GenericOp op, OpOperand *t,
- SmallVector<Value, 4> &args) {
+ SmallVectorImpl<Value> &args) {
unsigned tensor = t->getOperandNumber();
auto map = op.getMatchingIndexingMap(t);
auto enc = getSparseTensorEncoding(t->get().getType());
@@ -588,7 +588,7 @@ static void genInsertionStore(CodeGen &codegen, OpBuilder &builder,
// Direct insertion in lexicographic index order.
if (!codegen.expValues) {
unsigned rank = op.getRank(t);
- SmallVector<Value, 4> indices;
+ SmallVector<Value> indices;
for (unsigned i = 0; i < rank; i++) {
assert(codegen.loopEmitter.getLoopIV(i));
indices.push_back(codegen.loopEmitter.getLoopIV(i));
@@ -645,7 +645,7 @@ static Value genTensorLoad(Merger &merger, CodeGen &codegen, OpBuilder &builder,
return genInsertionLoad(codegen, builder, op, &t);
}
// Actual load.
- SmallVector<Value, 4> args;
+ SmallVector<Value> args;
Value ptr = genSubscript(codegen, builder, op, &t, args);
return builder.create<memref::LoadOp>(op.getLoc(), ptr, args);
}
@@ -670,10 +670,8 @@ static void genTensorStore(Merger &merger, CodeGen &codegen, OpBuilder &builder,
// Select operation insertion.
Value insChain = codegen.insChain;
assert(insChain);
- SmallVector<Type, 1> types;
- types.push_back(codegen.insChain.getType());
- scf::IfOp ifOp =
- builder.create<scf::IfOp>(loc, types, rhs, /*else=*/true);
+ scf::IfOp ifOp = builder.create<scf::IfOp>(
+ loc, insChain.getType(), rhs, /*else=*/true);
builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
// Existing value was preserved to be used here.
assert(merger.exp(exp).val);
@@ -694,7 +692,7 @@ static void genTensorStore(Merger &merger, CodeGen &codegen, OpBuilder &builder,
return;
}
// Actual store.
- SmallVector<Value, 4> args;
+ SmallVector<Value> args;
Value ptr = genSubscript(codegen, builder, op, t, args);
builder.create<memref::StoreOp>(loc, rhs, ptr, args);
}
@@ -882,7 +880,7 @@ static void genExpansion(Merger &merger, CodeGen &codegen, OpBuilder &builder,
codegen.expCount = res.getResult(3);
} else {
assert(codegen.expValues);
- SmallVector<Value, 4> indices;
+ SmallVector<Value> indices;
for (unsigned i = 0; i < at; i++) {
assert(codegen.loopEmitter.getLoopIV(i));
indices.push_back(codegen.loopEmitter.getLoopIV(i));
@@ -991,7 +989,7 @@ static void finalizeWhileOp(Merger &merger, CodeGen &codegen,
while (auto ifOp = dyn_cast_or_null<scf::IfOp>(
builder.getInsertionBlock()->getParentOp())) {
unsigned y = 0;
- SmallVector<Value, 4> yields;
+ SmallVector<Value> yields;
if (codegen.redVal) {
yields.push_back(codegen.redVal);
updateReduc(merger, codegen, ifOp.getResult(y++));
@@ -1017,7 +1015,7 @@ static scf::IfOp genIf(Merger &merger, CodeGen &codegen, OpBuilder &builder,
linalg::GenericOp op, unsigned idx,
BitVector &conditions) {
Location loc = op.getLoc();
- SmallVector<Type, 4> types;
+ SmallVector<Type> types;
Value cond;
for (unsigned b = 0, be = conditions.size(); b < be; b++) {
if (!conditions[b])
@@ -1054,7 +1052,7 @@ static scf::IfOp genIf(Merger &merger, CodeGen &codegen, OpBuilder &builder,
static void endIf(Merger &merger, CodeGen &codegen, OpBuilder &builder,
linalg::GenericOp op, scf::IfOp ifOp, Operation *loop,
Value redInput, Value cntInput, Value insInput) {
- SmallVector<Value, 4> operands;
+ SmallVector<Value> operands;
if (codegen.redVal) {
operands.push_back(codegen.redVal);
updateReduc(merger, codegen, redInput);
@@ -1172,10 +1170,10 @@ static Operation *startLoop(Merger &merger, CodeGen &codegen,
OpBuilder &builder, linalg::GenericOp op,
unsigned at, unsigned li, bool needsUniv) {
// The set of tensors + dims to generate loops on
- SmallVector<size_t, 4> condTids, condDims;
+ SmallVector<size_t> condTids, condDims;
// The set of (dense) tensors that are optimized away from the condition, yet
// still need extra locals to iterate on them.
- SmallVector<size_t, 4> extraTids, extraDims;
+ SmallVector<size_t> extraTids, extraDims;
translateBitsToTidDimPairs(merger, codegen, li, codegen.topSort[at], condTids,
condDims, extraTids, extraDims);
@@ -1369,7 +1367,7 @@ struct GenericOpSparsifier : public OpRewritePattern<linalg::GenericOp> {
merger.setHasSparseOut(sparseOut != nullptr);
- SmallVector<Value, 4> tensors;
+ SmallVector<Value> tensors;
for (OpOperand &t : op->getOpOperands())
tensors.push_back(t.get());
diff --git a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
index 6d6a4d115ef3e..8b86840520ae1 100644
--- a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
@@ -150,7 +150,7 @@ unsigned Merger::addLat(unsigned t, unsigned i, unsigned e) {
unsigned Merger::addSet() {
unsigned s = latSets.size();
- latSets.emplace_back(SmallVector<unsigned, 16>());
+ latSets.emplace_back();
return s;
}