[Mlir-commits] [mlir] 3fdd85d - [mlir][sparse] Add AOS optimization.
llvmlistbot at llvm.org
Wed Jan 4 18:16:10 PST 2023
Author: bixia1
Date: 2023-01-04T18:16:04-08:00
New Revision: 3fdd85da067edb3d8f96095f344e7b07bcce5241
URL: https://github.com/llvm/llvm-project/commit/3fdd85da067edb3d8f96095f344e7b07bcce5241
DIFF: https://github.com/llvm/llvm-project/commit/3fdd85da067edb3d8f96095f344e7b07bcce5241.diff
LOG: [mlir][sparse] Add AOS optimization.
Use an array of structures to represent the indices for the trailing COO region
of a sparse tensor.
Reviewed By: aartbik
Differential Revision: https://reviews.llvm.org/D140870
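
For readers skimming the diff, a minimal C++ sketch of the layout change may
help. This is an illustration only, not the MLIR implementation; the names
idx0, idx1, and aos are made up. The trailing COO dimensions used to keep one
index buffer per dimension (structure of arrays); they now share one
interleaved array-of-structures buffer, from which per-dimension views are
recovered with a stride.

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Illustration only: a trailing COO region of rank 2 holding three stored
    // entries with coordinates (0,1), (0,2), (3,1).
    int main() {
      // Old layout (structure of arrays): one index buffer per COO dimension.
      std::vector<std::size_t> idx0 = {0, 0, 3};
      std::vector<std::size_t> idx1 = {1, 2, 1};

      // New layout (array of structures): one linear buffer in which the
      // coordinates of each entry are stored consecutively.
      std::vector<std::size_t> aos = {0, 1, 0, 2, 3, 1};

      // The per-dimension view of the AOS buffer is a strided access pattern:
      // offset d, stride rank, size memSize / stride -- mirroring the
      // memref.subview ops that the new SparseTensorDescriptor constructor
      // creates over the shared index buffer.
      const std::size_t rank = 2;
      const std::size_t numEntries = aos.size() / rank;
      for (std::size_t d = 0; d < rank; ++d)
        for (std::size_t i = 0; i < numEntries; ++i)
          assert(aos[d + i * rank] == (d == 0 ? idx0[i] : idx1[i]));
      return 0;
    }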
Added:
Modified:
mlir/lib/Dialect/SparseTensor/Transforms/SparseStorageSpecifierToLLVM.cpp
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.cpp
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.h
mlir/test/Dialect/SparseTensor/codegen.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
Removed:
################################################################################
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseStorageSpecifierToLLVM.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseStorageSpecifierToLLVM.cpp
index eaa4b420bbcd3..a34e66a2918bb 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseStorageSpecifierToLLVM.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseStorageSpecifierToLLVM.cpp
@@ -138,7 +138,7 @@ class SpecifierGetterSetterOpConverter : public OpConversionPattern<SourceOp> {
op.getDim().value().getZExtValue());
} else {
auto enc = op.getSpecifier().getType().getEncoding();
- StorageLayout layout(enc);
+ StorageLayout<true> layout(enc);
Optional<unsigned> dim = std::nullopt;
if (op.getDim())
dim = op.getDim().value().getZExtValue();
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
index bb5128bf14d54..8db2fb6ba1751 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -122,7 +122,7 @@ static std::optional<Value> sizeFromTensorAtDim(OpBuilder &builder,
// Gets the dimension size at the given stored dimension 'd', either as a
// constant for a static size, or otherwise dynamically through memSizes.
Value sizeAtStoredDim(OpBuilder &builder, Location loc,
- SparseTensorDescriptor desc, unsigned d) {
+ MutSparseTensorDescriptor desc, unsigned d) {
RankedTensorType rtp = desc.getTensorType();
unsigned dim = toOrigDim(rtp, d);
auto shape = rtp.getShape();
@@ -293,15 +293,20 @@ static Value genCompressed(OpBuilder &builder, Location loc,
SmallVector<Type> types;
Type indexType = builder.getIndexType();
Type boolType = builder.getIntegerType(1);
- unsigned idxIndex = desc.getIdxMemRefIndex(d);
+ unsigned idxIndex;
+ unsigned idxStride;
+ std::tie(idxIndex, idxStride) = desc.getIdxMemRefIndexAndStride(d);
unsigned ptrIndex = desc.getPtrMemRefIndex(d);
Value one = constantIndex(builder, loc, 1);
Value pp1 = builder.create<arith::AddIOp>(loc, pos, one);
Value plo = genLoad(builder, loc, desc.getMemRefField(ptrIndex), pos);
Value phi = genLoad(builder, loc, desc.getMemRefField(ptrIndex), pp1);
Value msz = desc.getIdxMemSize(builder, loc, d);
- // Value msz = desc.getMemSize(builder, loc, getFieldMemSizesIndex(idxIndex));
-
+ Value idxStrideC;
+ if (idxStride > 1) {
+ idxStrideC = constantIndex(builder, loc, idxStride);
+ msz = builder.create<arith::DivUIOp>(loc, msz, idxStrideC);
+ }
Value phim1 = builder.create<arith::SubIOp>(
loc, toType(builder, loc, phi, indexType), one);
// Conditional expression.
@@ -311,7 +316,10 @@ static Value genCompressed(OpBuilder &builder, Location loc,
scf::IfOp ifOp1 = builder.create<scf::IfOp>(loc, types, lt, /*else*/ true);
types.pop_back();
builder.setInsertionPointToStart(&ifOp1.getThenRegion().front());
- Value crd = genLoad(builder, loc, desc.getMemRefField(idxIndex), phim1);
+ Value crd = genLoad(
+ builder, loc, desc.getMemRefField(idxIndex),
+ idxStride > 1 ? builder.create<arith::MulIOp>(loc, phim1, idxStrideC)
+ : phim1);
Value eq = builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::eq,
toType(builder, loc, crd, indexType),
indices[d]);
@@ -631,8 +639,10 @@ class SparseDimOpConverter : public OpConversionPattern<tensor::DimOp> {
if (!index || !getSparseTensorEncoding(adaptor.getSource().getType()))
return failure();
- auto desc = getDescriptorFromTensorTuple(adaptor.getSource());
- auto sz = sizeFromTensorAtDim(rewriter, op.getLoc(), desc, *index);
+ Location loc = op.getLoc();
+ auto desc =
+ getDescriptorFromTensorTuple(rewriter, loc, adaptor.getSource());
+ auto sz = sizeFromTensorAtDim(rewriter, loc, desc, *index);
if (!sz)
return failure();
@@ -707,7 +717,8 @@ class SparseTensorDeallocConverter
// Replace the sparse tensor deallocation with field deallocations.
Location loc = op.getLoc();
- auto desc = getDescriptorFromTensorTuple(adaptor.getTensor());
+ SmallVector<Value> fields;
+ auto desc = getMutDescriptorFromTensorTuple(adaptor.getTensor(), fields);
for (auto input : desc.getMemRefFields())
// Deallocate every buffer used to store the sparse tensor handler.
rewriter.create<memref::DeallocOp>(loc, input);
@@ -746,7 +757,8 @@ class SparseExpandConverter : public OpConversionPattern<ExpandOp> {
if (!getSparseTensorEncoding(op.getTensor().getType()))
return failure();
Location loc = op->getLoc();
- auto desc = getDescriptorFromTensorTuple(adaptor.getTensor());
+ auto desc =
+ getDescriptorFromTensorTuple(rewriter, loc, adaptor.getTensor());
RankedTensorType srcType =
op.getTensor().getType().cast<RankedTensorType>();
Type eltType = srcType.getElementType();
@@ -889,7 +901,8 @@ class SparseToPointersConverter : public OpConversionPattern<ToPointersOp> {
// Replace the requested pointer access with corresponding field.
// The cast_op is inserted by type converter to intermix 1:N type
// conversion.
- auto desc = getDescriptorFromTensorTuple(adaptor.getTensor());
+ auto desc = getDescriptorFromTensorTuple(rewriter, op.getLoc(),
+ adaptor.getTensor());
uint64_t dim = op.getDimension().getZExtValue();
rewriter.replaceOp(op, desc.getPtrMemRef(dim));
return success();
@@ -907,7 +920,8 @@ class SparseToIndicesConverter : public OpConversionPattern<ToIndicesOp> {
// Replace the requested pointer access with corresponding field.
// The cast_op is inserted by type converter to intermix 1:N type
// conversion.
- auto desc = getDescriptorFromTensorTuple(adaptor.getTensor());
+ auto desc = getDescriptorFromTensorTuple(rewriter, op.getLoc(),
+ adaptor.getTensor());
uint64_t dim = op.getDimension().getZExtValue();
Value field = desc.getIdxMemRef(dim);
@@ -934,7 +948,8 @@ class SparseToValuesConverter : public OpConversionPattern<ToValuesOp> {
// Replace the requested pointer access with corresponding field.
// The cast_op is inserted by type converter to intermix 1:N type
// conversion.
- auto desc = getDescriptorFromTensorTuple(adaptor.getTensor());
+ auto desc = getDescriptorFromTensorTuple(rewriter, op.getLoc(),
+ adaptor.getTensor());
rewriter.replaceOp(op, desc.getValMemRef());
return success();
}
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.cpp
index 9ab5adc76c4e2..b0eb72e6fd668 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.cpp
@@ -10,6 +10,7 @@
#include "CodegenUtils.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
+#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Transforms/DialectConversion.h"
@@ -75,34 +76,6 @@ SparseTensorTypeToBufferConverter::SparseTensorTypeToBufferConverter() {
});
}
-//===----------------------------------------------------------------------===//
-// StorageLayout methods.
-//===----------------------------------------------------------------------===//
-
-unsigned StorageLayout::getMemRefFieldIndex(SparseTensorFieldKind kind,
- std::optional<unsigned> dim) const {
- unsigned fieldIdx = -1u;
- foreachFieldInSparseTensor(
- enc,
- [dim, kind, &fieldIdx](unsigned fIdx, SparseTensorFieldKind fKind,
- unsigned fDim, DimLevelType dlt) -> bool {
- if ((dim && fDim == dim.value() && kind == fKind) ||
- (kind == fKind && fKind == SparseTensorFieldKind::ValMemRef)) {
- fieldIdx = fIdx;
- // Returns false to break the iteration.
- return false;
- }
- return true;
- });
- assert(fieldIdx != -1u);
- return fieldIdx;
-}
-
-unsigned StorageLayout::getMemRefFieldIndex(StorageSpecifierKind kind,
- std::optional<unsigned> dim) const {
- return getMemRefFieldIndex(toFieldKind(kind), dim);
-}
-
//===----------------------------------------------------------------------===//
// StorageTensorSpecifier methods.
//===----------------------------------------------------------------------===//
@@ -132,6 +105,47 @@ void SparseTensorSpecifier::setSpecifierField(OpBuilder &builder, Location loc,
createIndexCast(builder, loc, v, getFieldType(kind, dim)));
}
+//===----------------------------------------------------------------------===//
+// SparseTensorDescriptor methods.
+//===----------------------------------------------------------------------===//
+
+sparse_tensor::SparseTensorDescriptor::SparseTensorDescriptor(
+ OpBuilder &builder, Location loc, Type tp, ValueArrayRef buffers)
+ : SparseTensorDescriptorImpl<false>(tp), expandedFields() {
+ SparseTensorEncodingAttr enc = getSparseTensorEncoding(tp);
+ unsigned rank = enc.getDimLevelType().size();
+ unsigned cooStart = getCOOStart(enc);
+ if (cooStart < rank) {
+ ValueRange beforeFields = buffers.drop_back(3);
+ expandedFields.append(beforeFields.begin(), beforeFields.end());
+ Value buffer = buffers[buffers.size() - 3];
+
+ Value stride = constantIndex(builder, loc, rank - cooStart);
+ SmallVector<Value> buffersArray(buffers.begin(), buffers.end());
+ MutSparseTensorDescriptor mutDesc(tp, buffersArray);
+ // Calculate subbuffer size as memSizes[idx] / (stride).
+ Value subBufferSize = mutDesc.getIdxMemSize(builder, loc, cooStart);
+ subBufferSize = builder.create<arith::DivUIOp>(loc, subBufferSize, stride);
+
+ // Create views of the linear idx buffer for the COO indices.
+ for (unsigned i = cooStart; i < rank; i++) {
+ Value subBuffer = builder.create<memref::SubViewOp>(
+ loc, buffer,
+ /*offset=*/ValueRange{constantIndex(builder, loc, i - cooStart)},
+ /*size=*/ValueRange{subBufferSize},
+ /*step=*/ValueRange{stride});
+ expandedFields.push_back(subBuffer);
+ }
+ expandedFields.push_back(buffers[buffers.size() - 2]); // The Values memref.
+ expandedFields.push_back(buffers.back()); // The specifier.
+ fields = expandedFields;
+ } else {
+ fields = buffers;
+ }
+
+ sanityCheck();
+}
+
//===----------------------------------------------------------------------===//
// Public methods.
//===----------------------------------------------------------------------===//
@@ -142,17 +156,20 @@ void sparse_tensor::foreachFieldInSparseTensor(
const SparseTensorEncodingAttr enc,
llvm::function_ref<bool(unsigned, SparseTensorFieldKind, unsigned,
DimLevelType)>
- callback) {
+ callback,
+ bool isBuffer) {
assert(enc);
#define RETURN_ON_FALSE(idx, kind, dim, dlt) \
if (!(callback(idx, kind, dim, dlt))) \
return;
+ unsigned rank = enc.getDimLevelType().size();
+ unsigned cooStart = isBuffer ? getCOOStart(enc) : rank;
static_assert(kDataFieldStartingIdx == 0);
unsigned fieldIdx = kDataFieldStartingIdx;
// Per-dimension storage.
- for (unsigned r = 0, rank = enc.getDimLevelType().size(); r < rank; r++) {
+ for (unsigned r = 0; r < rank; r++) {
// Dimension level types apply in order to the reordered dimension.
// As a result, the compound type can be constructed directly in the given
// order.
@@ -161,7 +178,8 @@ void sparse_tensor::foreachFieldInSparseTensor(
RETURN_ON_FALSE(fieldIdx++, SparseTensorFieldKind::PtrMemRef, r, dlt);
RETURN_ON_FALSE(fieldIdx++, SparseTensorFieldKind::IdxMemRef, r, dlt);
} else if (isSingletonDLT(dlt)) {
- RETURN_ON_FALSE(fieldIdx++, SparseTensorFieldKind::IdxMemRef, r, dlt);
+ if (r < cooStart)
+ RETURN_ON_FALSE(fieldIdx++, SparseTensorFieldKind::IdxMemRef, r, dlt);
} else {
assert(isDenseDLT(dlt)); // no fields
}
@@ -213,32 +231,38 @@ void sparse_tensor::foreachFieldAndTypeInSparseTensor(
return callback(valMemType, fieldIdx, fieldKind, dim, dlt);
};
llvm_unreachable("unrecognized field kind");
- });
+ },
+ /*isBuffer=*/true);
}
-unsigned sparse_tensor::getNumFieldsFromEncoding(SparseTensorEncodingAttr enc) {
+unsigned sparse_tensor::getNumFieldsFromEncoding(SparseTensorEncodingAttr enc,
+ bool isBuffer) {
unsigned numFields = 0;
- foreachFieldInSparseTensor(enc,
- [&numFields](unsigned, SparseTensorFieldKind,
- unsigned, DimLevelType) -> bool {
- numFields++;
- return true;
- });
+ foreachFieldInSparseTensor(
+ enc,
+ [&numFields](unsigned, SparseTensorFieldKind, unsigned,
+ DimLevelType) -> bool {
+ numFields++;
+ return true;
+ },
+ isBuffer);
return numFields;
}
unsigned
sparse_tensor::getNumDataFieldsFromEncoding(SparseTensorEncodingAttr enc) {
unsigned numFields = 0; // one value memref
- foreachFieldInSparseTensor(enc,
- [&numFields](unsigned fidx, SparseTensorFieldKind,
- unsigned, DimLevelType) -> bool {
- if (fidx >= kDataFieldStartingIdx)
- numFields++;
- return true;
- });
+ foreachFieldInSparseTensor(
+ enc,
+ [&numFields](unsigned fidx, SparseTensorFieldKind, unsigned,
+ DimLevelType) -> bool {
+ if (fidx >= kDataFieldStartingIdx)
+ numFields++;
+ return true;
+ },
+ /*isBuffer=*/true);
numFields -= 1; // the last field is MetaData field
- assert(numFields ==
- getNumFieldsFromEncoding(enc) - kDataFieldStartingIdx - 1);
+ assert(numFields == getNumFieldsFromEncoding(enc, /*isBuffer=*/true) -
+ kDataFieldStartingIdx - 1);
return numFields;
}
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.h b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.h
index 61244452bd5b7..e42708d6dee18 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.h
@@ -77,7 +77,8 @@ void foreachFieldInSparseTensor(
llvm::function_ref<bool(unsigned /*fieldIdx*/,
SparseTensorFieldKind /*fieldKind*/,
unsigned /*dim (if applicable)*/,
- DimLevelType /*DLT (if applicable)*/)>);
+ DimLevelType /*DLT (if applicable)*/)>,
+ bool isBuffer = false);
/// Same as above, except that it also builds the Type for the corresponding
/// field.
@@ -89,7 +90,7 @@ void foreachFieldAndTypeInSparseTensor(
DimLevelType /*DLT (if applicable)*/)>);
/// Gets the total number of fields for the given sparse tensor encoding.
-unsigned getNumFieldsFromEncoding(SparseTensorEncodingAttr enc);
+unsigned getNumFieldsFromEncoding(SparseTensorEncodingAttr enc, bool isBuffer);
/// Gets the total number of data fields (index arrays, pointer arrays, and a
/// value array) for the given sparse tensor encoding.
@@ -105,6 +106,13 @@ inline SparseTensorFieldKind toFieldKind(StorageSpecifierKind kind) {
return static_cast<SparseTensorFieldKind>(kind);
}
+/// Provides methods to access fields of a sparse tensor with the given
+/// encoding. When isBuffer is true, the fields are the actual buffers of the
+/// sparse tensor storage. In particular, when a linear buffer is used to
+/// store the COO data as an array-of-structures, the fields include the
+/// linear buffer (isBuffer=true) or the subviews of the buffer for the
+/// indices (isBuffer=false).
+template <bool isBuffer>
class StorageLayout {
public:
explicit StorageLayout(SparseTensorEncodingAttr enc) : enc(enc) {}
@@ -112,14 +120,60 @@ class StorageLayout {
///
/// Getters: get the field index for required field.
///
+
unsigned getMemRefFieldIndex(SparseTensorFieldKind kind,
- Optional<unsigned> dim) const;
+ std::optional<unsigned> dim) const {
+ return getFieldIndexAndStride(kind, dim).first;
+ }
unsigned getMemRefFieldIndex(StorageSpecifierKind kind,
- Optional<unsigned> dim) const;
+ std::optional<unsigned> dim) const {
+ return getMemRefFieldIndex(toFieldKind(kind), dim);
+ }
+
+ static unsigned getNumFieldsFromEncoding(SparseTensorEncodingAttr enc) {
+ return sparse_tensor::getNumFieldsFromEncoding(enc, isBuffer);
+ }
+
+ static void foreachFieldInSparseTensor(
+ const SparseTensorEncodingAttr enc,
+ llvm::function_ref<bool(unsigned, SparseTensorFieldKind, unsigned,
+ DimLevelType)>
+ callback) {
+ return sparse_tensor::foreachFieldInSparseTensor(enc, callback, isBuffer);
+ }
+
+ std::pair<unsigned, unsigned>
+ getFieldIndexAndStride(SparseTensorFieldKind kind,
+ std::optional<unsigned> dim) const {
+ unsigned fieldIdx = -1u;
+ unsigned stride = 1;
+ if (isBuffer && kind == SparseTensorFieldKind::IdxMemRef) {
+ assert(dim.has_value());
+ unsigned cooStart = getCOOStart(enc);
+ unsigned rank = enc.getDimLevelType().size();
+ if (dim.value() >= cooStart && dim.value() < rank) {
+ dim = cooStart;
+ stride = rank - cooStart;
+ }
+ }
+ foreachFieldInSparseTensor(
+ enc,
+ [dim, kind, &fieldIdx](unsigned fIdx, SparseTensorFieldKind fKind,
+ unsigned fDim, DimLevelType dlt) -> bool {
+ if ((dim && fDim == dim.value() && kind == fKind) ||
+ (kind == fKind && fKind == SparseTensorFieldKind::ValMemRef)) {
+ fieldIdx = fIdx;
+ // Returns false to break the iteration.
+ return false;
+ }
+ return true;
+ });
+ assert(fieldIdx != -1u);
+ return std::pair<unsigned, unsigned>(fieldIdx, stride);
+ }
private:
- unsigned getFieldIndex(unsigned dim, SparseTensorFieldKind kind) const;
SparseTensorEncodingAttr enc;
};
@@ -166,28 +220,29 @@ class SparseTensorDescriptorImpl {
using ValueArrayRef = typename std::conditional<mut, SmallVectorImpl<Value> &,
ValueRange>::type;
-public:
+ SparseTensorDescriptorImpl(Type tp)
+ : rType(tp.cast<RankedTensorType>()), fields() {}
+
SparseTensorDescriptorImpl(Type tp, ValueArrayRef fields)
: rType(tp.cast<RankedTensorType>()), fields(fields) {
- assert(getSparseTensorEncoding(tp) &&
- getNumFieldsFromEncoding(getSparseTensorEncoding(tp)) ==
- fields.size());
+ sanityCheck();
+ }
+
+ void sanityCheck() {
+ assert(getSparseTensorEncoding(rType) &&
+ StorageLayout<mut>::getNumFieldsFromEncoding(
+ getSparseTensorEncoding(rType)) == fields.size());
// We should make sure the class is trivially copyable (and should be small
// enough) such that we can pass it by value.
static_assert(
std::is_trivially_copyable_v<SparseTensorDescriptorImpl<mut>>);
}
- // Implicit (and cheap) type conversion from MutSparseTensorDescriptor to
- // SparseTensorDescriptor.
- template <typename T = SparseTensorDescriptorImpl<true>>
- /*implicit*/ SparseTensorDescriptorImpl(std::enable_if_t<!mut, T> &mDesc)
- : rType(mDesc.getTensorType()), fields(mDesc.getFields()) {}
-
+public:
unsigned getMemRefFieldIndex(SparseTensorFieldKind kind,
Optional<unsigned> dim) const {
// Delegates to storage layout.
- StorageLayout layout(getSparseTensorEncoding(rType));
+ StorageLayout<mut> layout(getSparseTensorEncoding(rType));
return layout.getMemRefFieldIndex(kind, dim);
}
@@ -328,9 +383,25 @@ class MutSparseTensorDescriptor : public SparseTensorDescriptorImpl<true> {
void setDimSize(OpBuilder &builder, Location loc, unsigned dim, T v) {
setSpecifierField(builder, loc, StorageSpecifierKind::DimSize, dim, v);
}
+
+ std::pair<unsigned, unsigned>
+ getIdxMemRefIndexAndStride(unsigned idxDim) const {
+ StorageLayout<true> layout(getSparseTensorEncoding(rType));
+ return layout.getFieldIndexAndStride(SparseTensorFieldKind::IdxMemRef,
+ idxDim);
+ }
};
-using SparseTensorDescriptor = SparseTensorDescriptorImpl<false>;
+class SparseTensorDescriptor : public SparseTensorDescriptorImpl<false> {
+public:
+ SparseTensorDescriptor(OpBuilder &builder, Location loc, Type tp,
+ ValueArrayRef buffers);
+
+private:
+ // Store the fields passed to SparseTensorDescriptorImpl when the tensor has
+ // a COO region.
+ SmallVector<Value> expandedFields;
+};
/// Returns the "tuple" value of the adapted tensor.
inline UnrealizedConversionCastOp getTuple(Value tensor) {
@@ -345,13 +416,15 @@ inline Value genTuple(OpBuilder &builder, Location loc, Type tp,
}
inline Value genTuple(OpBuilder &builder, Location loc,
- SparseTensorDescriptor desc) {
+ MutSparseTensorDescriptor desc) {
return genTuple(builder, loc, desc.getTensorType(), desc.getFields());
}
-inline SparseTensorDescriptor getDescriptorFromTensorTuple(Value tensor) {
+inline SparseTensorDescriptor
+getDescriptorFromTensorTuple(OpBuilder &builder, Location loc, Value tensor) {
auto tuple = getTuple(tensor);
- return SparseTensorDescriptor(tuple.getResultTypes()[0], tuple.getInputs());
+ return SparseTensorDescriptor(builder, loc, tuple.getResultTypes()[0],
+ tuple.getInputs());
}
inline MutSparseTensorDescriptor
diff --git a/mlir/test/Dialect/SparseTensor/codegen.mlir b/mlir/test/Dialect/SparseTensor/codegen.mlir
index db7e5065b7484..9f1a9fe08fe2d 100644
--- a/mlir/test/Dialect/SparseTensor/codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/codegen.mlir
@@ -50,6 +50,10 @@
dimLevelType = [ "compressed-nu", "singleton" ]
}>
+#ccoo = #sparse_tensor.encoding<{
+ dimLevelType = [ "compressed", "compressed-nu", "singleton" ]
+}>
+
// CHECK-LABEL: func @sparse_nop(
// CHECK-SAME: %[[A0:.*]]: memref<?xi32>,
// CHECK-SAME: %[[A1:.*]]: memref<?xi64>,
@@ -233,6 +237,39 @@ func.func @sparse_values_dcsr(%arg0: tensor<?x?xf64, #DCSR>) -> memref<?xf64> {
return %0 : memref<?xf64>
}
+// CHECK-LABEL: func.func @sparse_values_coo(
+// CHECK-SAME: %[[A0:.*0]]: memref<?xindex>,
+// CHECK-SAME: %[[A1:.*1]]: memref<?xindex>,
+// CHECK-SAME: %[[A2:.*2]]: memref<?xindex>,
+// CHECK-SAME: %[[A3:.*3]]: memref<?xindex>,
+// CHECK-SAME: %[[A4:.*4]]: memref<?xf64>,
+// CHECK-SAME: %[[A5:.*5]]: !sparse_tensor.storage_specifier
+// CHECK: return %[[A4]] : memref<?xf64>
+func.func @sparse_values_coo(%arg0: tensor<?x?x?xf64, #ccoo>) -> memref<?xf64> {
+ %0 = sparse_tensor.values %arg0 : tensor<?x?x?xf64, #ccoo> to memref<?xf64>
+ return %0 : memref<?xf64>
+}
+
+
+// CHECK-LABEL: func.func @sparse_indices_coo(
+// CHECK-SAME: %[[A0:.*0]]: memref<?xindex>,
+// CHECK-SAME: %[[A1:.*1]]: memref<?xindex>,
+// CHECK-SAME: %[[A2:.*2]]: memref<?xindex>,
+// CHECK-SAME: %[[A3:.*3]]: memref<?xindex>,
+// CHECK-SAME: %[[A4:.*4]]: memref<?xf64>,
+// CHECK-SAME: %[[A5:.*5]]: !sparse_tensor.storage_specifier
+// CHECK: %[[C2:.*]] = arith.constant 2 : index
+// CHECK: %[[S0:.*]] = sparse_tensor.storage_specifier.get %[[A5]] idx_mem_sz at 1
+// CHECK: %[[S1:.*]] = arith.index_cast %[[S0]]
+// CHECK: %[[S2:.*]] = arith.divui %[[S1]], %[[C2]] : index
+// CHECK: %[[R1:.*]] = memref.subview %[[A3]][0] {{\[}}%[[S2]]] [2] : memref<?xindex> to memref<?xindex, strided<[2]>>
+// CHECK: %[[R2:.*]] = memref.cast %[[R1]] : memref<?xindex, strided<[2]>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK: return %[[R2]] : memref<?xindex, strided<[?], offset: ?>>
+func.func @sparse_indices_coo(%arg0: tensor<?x?x?xf64, #ccoo>) -> memref<?xindex, strided<[?], offset: ?>> {
+ %0 = sparse_tensor.indices %arg0 { dimension = 1 : index } : tensor<?x?x?xf64, #ccoo> to memref<?xindex, strided<[?], offset: ?>>
+ return %0 : memref<?xindex, strided<[?], offset: ?>>
+}
+
// CHECK-LABEL: func @sparse_noe(
// CHECK-SAME: %[[A0:.*]]: memref<?xi32>,
// CHECK-SAME: %[[A1:.*]]: memref<?xi64>,
@@ -571,25 +608,23 @@ func.func @sparse_insert_typed(%arg0: tensor<128xf64, #SparseVector>, %arg1: ind
}
// CHECK-LABEL: func.func private @"_insert_compressed-nu_singleton_5_6_f64_0_0"(
-// CHECK-SAME: %[[A0:.*0]]: memref<?xindex>,
-// CHECK-SAME: %[[A1:.*1]]: memref<?xindex>,
-// CHECK-SAME: %[[A2:.*2]]: memref<?xindex>,
-// CHECK-SAME: %[[A3:.*3]]: memref<?xf64>,
-// CHECK-SAME: %[[A4:.*4]]: !sparse_tensor.storage_specifier
+// CHECK-SAME: %[[A1:.*0]]: memref<?xindex>,
+// CHECK-SAME: %[[A2:.*1]]: memref<?xindex>,
+// CHECK-SAME: %[[A3:.*2]]: memref<?xf64>,
+// CHECK-SAME: %[[A4:.*3]]: !sparse_tensor.storage_specifier
+// CHECK-SAME: %[[A5:.*4]]: index,
// CHECK-SAME: %[[A5:.*5]]: index,
-// CHECK-SAME: %[[A5:.*6]]: index,
-// CHECK-SAME: %[[A7:.*7]]: f64)
+// CHECK-SAME: %[[A7:.*6]]: f64)
//
// CHECK-LABEL: func.func @sparse_insert_coo(
// CHECK-SAME: %[[A0:.*0]]: memref<?xindex>,
// CHECK-SAME: %[[A1:.*1]]: memref<?xindex>,
-// CHECK-SAME: %[[A2:.*2]]: memref<?xindex>,
-// CHECK-SAME: %[[A3:.*3]]: memref<?xf64>,
-// CHECK-SAME: %[[A4:.*4]]: !sparse_tensor.storage_specifier
-// CHECK-SAME: %[[A5:.*5]]: index,
-// CHECK-SAME: %[[A6:.*6]]: f64)
-// CHECK: %[[R:.*]]:5 = call @"_insert_compressed-nu_singleton_5_6_f64_0_0"(%[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A5]], %[[A5]], %[[A6]])
-// CHECK: return %[[R]]#0, %[[R]]#1, %[[R]]#2, %[[R]]#3, %[[R]]#4
+// CHECK-SAME: %[[A2:.*2]]: memref<?xf64>,
+// CHECK-SAME: %[[A3:.*3]]: !sparse_tensor.storage_specifier
+// CHECK-SAME: %[[A4:.*4]]: index,
+// CHECK-SAME: %[[A5:.*5]]: f64)
+// CHECK: %[[R:.*]]:4 = call @"_insert_compressed-nu_singleton_5_6_f64_0_0"(%[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A4]], %[[A5]])
+// CHECK: return %[[R]]#0, %[[R]]#1, %[[R]]#2, %[[R]]#3
func.func @sparse_insert_coo(%arg0: tensor<5x6xf64, #Coo>, %arg1: index, %arg2: f64) -> tensor<5x6xf64, #Coo> {
%0 = sparse_tensor.insert %arg2 into %arg0[%arg1, %arg1] : tensor<5x6xf64, #Coo>
%1 = sparse_tensor.load %0 hasInserts : tensor<5x6xf64, #Coo>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir
index ee4632d0df12a..2bd65c1d44ef7 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir
@@ -47,14 +47,14 @@ module {
%cu = arith.constant -1 : index
%fu = arith.constant 99.0 : f64
%p0 = sparse_tensor.pointers %arg0 { dimension = 0 : index } : tensor<4x3xf64, #SortedCOO> to memref<?xindex>
- %i0 = sparse_tensor.indices %arg0 { dimension = 0 : index } : tensor<4x3xf64, #SortedCOO> to memref<?xindex>
- %i1 = sparse_tensor.indices %arg0 { dimension = 1 : index } : tensor<4x3xf64, #SortedCOO> to memref<?xindex>
+ %i0 = sparse_tensor.indices %arg0 { dimension = 0 : index } : tensor<4x3xf64, #SortedCOO> to memref<?xindex, strided<[?], offset: ?>>
+ %i1 = sparse_tensor.indices %arg0 { dimension = 1 : index } : tensor<4x3xf64, #SortedCOO> to memref<?xindex, strided<[?], offset: ?>>
%v = sparse_tensor.values %arg0 : tensor<4x3xf64, #SortedCOO> to memref<?xf64>
%vp0 = vector.transfer_read %p0[%c0], %cu: memref<?xindex>, vector<2xindex>
vector.print %vp0 : vector<2xindex>
- %vi0 = vector.transfer_read %i0[%c0], %cu: memref<?xindex>, vector<4xindex>
+ %vi0 = vector.transfer_read %i0[%c0], %cu: memref<?xindex, strided<[?], offset: ?>>, vector<4xindex>
vector.print %vi0 : vector<4xindex>
- %vi1 = vector.transfer_read %i1[%c0], %cu: memref<?xindex>, vector<4xindex>
+ %vi1 = vector.transfer_read %i1[%c0], %cu: memref<?xindex, strided<[?], offset: ?>>, vector<4xindex>
vector.print %vi1 : vector<4xindex>
%vv = vector.transfer_read %v[%c0], %fu: memref<?xf64>, vector<4xf64>
vector.print %vv : vector<4xf64>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
index 2d353467d4434..3aedc4c953b37 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
@@ -19,6 +19,15 @@
dimLevelType = [ "compressed", "compressed", "dense" ]
}>
+#CCoo = #sparse_tensor.encoding<{
+ dimLevelType = [ "compressed", "compressed-nu", "singleton" ]
+}>
+
+#DCoo = #sparse_tensor.encoding<{
+ dimLevelType = [ "dense", "compressed-nu", "singleton" ]
+}>
+
+
module {
func.func @dump(%arg0: tensor<5x4x3xf64, #TensorCSR>) {
@@ -63,6 +72,48 @@ module {
return
}
+func.func @dump_ccoo(%arg0: tensor<5x4x3xf64, #CCoo>) {
+ %c0 = arith.constant 0 : index
+ %fu = arith.constant 99.0 : f64
+ %p0 = sparse_tensor.pointers %arg0 { dimension = 0 : index } : tensor<5x4x3xf64, #CCoo> to memref<?xindex>
+ %i0 = sparse_tensor.indices %arg0 { dimension = 0 : index } : tensor<5x4x3xf64, #CCoo> to memref<?xindex>
+ %p1 = sparse_tensor.pointers %arg0 { dimension = 1 : index } : tensor<5x4x3xf64, #CCoo> to memref<?xindex>
+ %i1 = sparse_tensor.indices %arg0 { dimension = 1 : index } : tensor<5x4x3xf64, #CCoo> to memref<?xindex>
+ %i2 = sparse_tensor.indices %arg0 { dimension = 2 : index } : tensor<5x4x3xf64, #CCoo> to memref<?xindex>
+ %v = sparse_tensor.values %arg0 : tensor<5x4x3xf64, #CCoo> to memref<?xf64>
+ %vp0 = vector.transfer_read %p0[%c0], %c0: memref<?xindex>, vector<2xindex>
+ vector.print %vp0 : vector<2xindex>
+ %vi0 = vector.transfer_read %i0[%c0], %c0: memref<?xindex>, vector<2xindex>
+ vector.print %vi0 : vector<2xindex>
+ %vp1 = vector.transfer_read %p1[%c0], %c0: memref<?xindex>, vector<3xindex>
+ vector.print %vp1 : vector<3xindex>
+ %vi1 = vector.transfer_read %i1[%c0], %c0: memref<?xindex>, vector<5xindex>
+ vector.print %vi1 : vector<5xindex>
+ %vi2 = vector.transfer_read %i2[%c0], %c0: memref<?xindex>, vector<5xindex>
+ vector.print %vi2 : vector<5xindex>
+ %vv = vector.transfer_read %v[%c0], %fu: memref<?xf64>, vector<5xf64>
+ vector.print %vv : vector<5xf64>
+ return
+ }
+
+func.func @dump_dcoo(%arg0: tensor<5x4x3xf64, #DCoo>) {
+ %c0 = arith.constant 0 : index
+ %fu = arith.constant 99.0 : f64
+ %p1 = sparse_tensor.pointers %arg0 { dimension = 1 : index } : tensor<5x4x3xf64, #DCoo> to memref<?xindex>
+ %i1 = sparse_tensor.indices %arg0 { dimension = 1 : index } : tensor<5x4x3xf64, #DCoo> to memref<?xindex>
+ %i2 = sparse_tensor.indices %arg0 { dimension = 2 : index } : tensor<5x4x3xf64, #DCoo> to memref<?xindex>
+ %v = sparse_tensor.values %arg0 : tensor<5x4x3xf64, #DCoo> to memref<?xf64>
+ %vp1 = vector.transfer_read %p1[%c0], %c0: memref<?xindex>, vector<6xindex>
+ vector.print %vp1 : vector<6xindex>
+ %vi1 = vector.transfer_read %i1[%c0], %c0: memref<?xindex>, vector<5xindex>
+ vector.print %vi1 : vector<5xindex>
+ %vi2 = vector.transfer_read %i2[%c0], %c0: memref<?xindex>, vector<5xindex>
+ vector.print %vi2 : vector<5xindex>
+ %vv = vector.transfer_read %v[%c0], %fu: memref<?xf64>, vector<5xf64>
+ vector.print %vv : vector<5xf64>
+ return
+}
+
//
// Main driver.
//
@@ -110,19 +161,57 @@ module {
%rowm = sparse_tensor.load %row5 hasInserts : tensor<5x4x3xf64, #TensorRow>
call @dump_row(%rowm) : (tensor<5x4x3xf64, #TensorRow>) -> ()
+ //
+ // CHECK: ( 0, 2 )
+ // CHECK-NEXT: ( 3, 4 )
+ // CHECK-NEXT: ( 0, 3, 5 )
+ // CHECK-NEXT: ( 0, 0, 3, 2, 3 )
+ // CHECK-NEXT: ( 1, 2, 1, 2, 2 )
+ // CHECK-NEXT: ( 1.1, 2.2, 3.3, 4.4, 5.5 )
+ %ccoo = bufferization.alloc_tensor() : tensor<5x4x3xf64, #CCoo>
+ %ccoo1 = sparse_tensor.insert %f1 into %ccoo[%c3, %c0, %c1] : tensor<5x4x3xf64, #CCoo>
+ %ccoo2 = sparse_tensor.insert %f2 into %ccoo1[%c3, %c0, %c2] : tensor<5x4x3xf64, #CCoo>
+ %ccoo3 = sparse_tensor.insert %f3 into %ccoo2[%c3, %c3, %c1] : tensor<5x4x3xf64, #CCoo>
+ %ccoo4 = sparse_tensor.insert %f4 into %ccoo3[%c4, %c2, %c2] : tensor<5x4x3xf64, #CCoo>
+ %ccoo5 = sparse_tensor.insert %f5 into %ccoo4[%c4, %c3, %c2] : tensor<5x4x3xf64, #CCoo>
+ %ccoom = sparse_tensor.load %ccoo5 hasInserts : tensor<5x4x3xf64, #CCoo>
+ call @dump_ccoo(%ccoom) : (tensor<5x4x3xf64, #CCoo>) -> ()
+
+ //
+ // CHECK-NEXT: ( 0, 0, 0, 0, 3, 5 )
+ // CHECK-NEXT: ( 0, 0, 3, 2, 3 )
+ // CHECK-NEXT: ( 1, 2, 1, 2, 2 )
+ // CHECK-NEXT: ( 1.1, 2.2, 3.3, 4.4, 5.5 )
+ %dcoo = bufferization.alloc_tensor() : tensor<5x4x3xf64, #DCoo>
+ %dcoo1 = sparse_tensor.insert %f1 into %dcoo[%c3, %c0, %c1] : tensor<5x4x3xf64, #DCoo>
+ %dcoo2 = sparse_tensor.insert %f2 into %dcoo1[%c3, %c0, %c2] : tensor<5x4x3xf64, #DCoo>
+ %dcoo3 = sparse_tensor.insert %f3 into %dcoo2[%c3, %c3, %c1] : tensor<5x4x3xf64, #DCoo>
+ %dcoo4 = sparse_tensor.insert %f4 into %dcoo3[%c4, %c2, %c2] : tensor<5x4x3xf64, #DCoo>
+ %dcoo5 = sparse_tensor.insert %f5 into %dcoo4[%c4, %c3, %c2] : tensor<5x4x3xf64, #DCoo>
+ %dcoom = sparse_tensor.load %dcoo5 hasInserts : tensor<5x4x3xf64, #DCoo>
+ call @dump_dcoo(%dcoom) : (tensor<5x4x3xf64, #DCoo>) -> ()
+
// NOE sanity check.
//
// CHECK-NEXT: 5
// CHECK-NEXT: 12
+ // CHECK-NEXT: 5
+ // CHECK-NEXT: 5
//
%noe1 = sparse_tensor.number_of_entries %tensorm : tensor<5x4x3xf64, #TensorCSR>
vector.print %noe1 : index
%noe2 = sparse_tensor.number_of_entries %rowm : tensor<5x4x3xf64, #TensorRow>
vector.print %noe2 : index
+ %noe3 = sparse_tensor.number_of_entries %ccoom : tensor<5x4x3xf64, #CCoo>
+ vector.print %noe3 : index
+ %noe4 = sparse_tensor.number_of_entries %dcoom : tensor<5x4x3xf64, #DCoo>
+ vector.print %noe4 : index
// Release resources.
bufferization.dealloc_tensor %tensorm : tensor<5x4x3xf64, #TensorCSR>
bufferization.dealloc_tensor %rowm : tensor<5x4x3xf64, #TensorRow>
+ bufferization.dealloc_tensor %ccoom : tensor<5x4x3xf64, #CCoo>
+ bufferization.dealloc_tensor %dcoom : tensor<5x4x3xf64, #DCoo>
return
}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
index e94396c5ad9e5..7a77dd6f47234 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
@@ -71,6 +71,13 @@ module {
return
}
+ func.func @dumpsi(%arg0: memref<?xindex, strided<[?], offset: ?>>) {
+ %c0 = arith.constant 0 : index
+ %v = vector.transfer_read %arg0[%c0], %c0: memref<?xindex, strided<[?], offset: ?>>, vector<20xindex>
+ vector.print %v : vector<20xindex>
+ return
+ }
+
func.func @dumpf(%arg0: memref<?xf64>) {
%c0 = arith.constant 0 : index
%nan = arith.constant 0x0 : f64
@@ -108,14 +115,14 @@ module {
%p0 = sparse_tensor.pointers %0 { dimension = 0 : index }
: tensor<?x?xf64, #SortedCOO> to memref<?xindex>
%i00 = sparse_tensor.indices %0 { dimension = 0 : index }
- : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
+ : tensor<?x?xf64, #SortedCOO> to memref<?xindex, strided<[?], offset: ?>>
%i01 = sparse_tensor.indices %0 { dimension = 1 : index }
- : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
+ : tensor<?x?xf64, #SortedCOO> to memref<?xindex, strided<[?], offset: ?>>
%v0 = sparse_tensor.values %0
: tensor<?x?xf64, #SortedCOO> to memref<?xf64>
call @dumpi(%p0) : (memref<?xindex>) -> ()
- call @dumpi(%i00) : (memref<?xindex>) -> ()
- call @dumpi(%i01) : (memref<?xindex>) -> ()
+ call @dumpsi(%i00) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
+ call @dumpsi(%i01) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
call @dumpf(%v0) : (memref<?xf64>) -> ()
//
@@ -127,14 +134,14 @@ module {
%p1 = sparse_tensor.pointers %1 { dimension = 0 : index }
: tensor<?x?xf64, #SortedCOOPermuted> to memref<?xindex>
%i10 = sparse_tensor.indices %1 { dimension = 0 : index }
- : tensor<?x?xf64, #SortedCOOPermuted> to memref<?xindex>
+ : tensor<?x?xf64, #SortedCOOPermuted> to memref<?xindex, strided<[?], offset: ?>>
%i11 = sparse_tensor.indices %1 { dimension = 1 : index }
- : tensor<?x?xf64, #SortedCOOPermuted> to memref<?xindex>
+ : tensor<?x?xf64, #SortedCOOPermuted> to memref<?xindex, strided<[?], offset: ?>>
%v1 = sparse_tensor.values %1
: tensor<?x?xf64, #SortedCOOPermuted> to memref<?xf64>
call @dumpi(%p1) : (memref<?xindex>) -> ()
- call @dumpi(%i10) : (memref<?xindex>) -> ()
- call @dumpi(%i11) : (memref<?xindex>) -> ()
+ call @dumpsi(%i10) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
+ call @dumpsi(%i11) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
call @dumpf(%v1) : (memref<?xf64>) -> ()
//
@@ -147,17 +154,17 @@ module {
%p2 = sparse_tensor.pointers %2 { dimension = 0 : index }
: tensor<?x?x?xf64, #SortedCOO3D> to memref<?xindex>
%i20 = sparse_tensor.indices %2 { dimension = 0 : index }
- : tensor<?x?x?xf64, #SortedCOO3D> to memref<?xindex>
+ : tensor<?x?x?xf64, #SortedCOO3D> to memref<?xindex, strided<[?], offset: ?>>
%i21 = sparse_tensor.indices %2 { dimension = 1 : index }
- : tensor<?x?x?xf64, #SortedCOO3D> to memref<?xindex>
+ : tensor<?x?x?xf64, #SortedCOO3D> to memref<?xindex, strided<[?], offset: ?>>
%i22 = sparse_tensor.indices %2 { dimension = 2 : index }
- : tensor<?x?x?xf64, #SortedCOO3D> to memref<?xindex>
+ : tensor<?x?x?xf64, #SortedCOO3D> to memref<?xindex, strided<[?], offset: ?>>
%v2 = sparse_tensor.values %2
: tensor<?x?x?xf64, #SortedCOO3D> to memref<?xf64>
call @dumpi(%p2) : (memref<?xindex>) -> ()
- call @dumpi(%i20) : (memref<?xindex>) -> ()
- call @dumpi(%i21) : (memref<?xindex>) -> ()
- call @dumpi(%i21) : (memref<?xindex>) -> ()
+ call @dumpsi(%i20) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
+ call @dumpsi(%i21) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
+ call @dumpsi(%i21) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
call @dumpf(%v2) : (memref<?xf64>) -> ()
//
@@ -170,17 +177,17 @@ module {
%p3 = sparse_tensor.pointers %3 { dimension = 0 : index }
: tensor<?x?x?xf64, #SortedCOO3DPermuted> to memref<?xindex>
%i30 = sparse_tensor.indices %3 { dimension = 0 : index }
- : tensor<?x?x?xf64, #SortedCOO3DPermuted> to memref<?xindex>
+ : tensor<?x?x?xf64, #SortedCOO3DPermuted> to memref<?xindex, strided<[?], offset: ?>>
%i31 = sparse_tensor.indices %3 { dimension = 1 : index }
- : tensor<?x?x?xf64, #SortedCOO3DPermuted> to memref<?xindex>
+ : tensor<?x?x?xf64, #SortedCOO3DPermuted> to memref<?xindex, strided<[?], offset: ?>>
%i32 = sparse_tensor.indices %3 { dimension = 2 : index }
- : tensor<?x?x?xf64, #SortedCOO3DPermuted> to memref<?xindex>
+ : tensor<?x?x?xf64, #SortedCOO3DPermuted> to memref<?xindex, strided<[?], offset: ?>>
%v3 = sparse_tensor.values %3
: tensor<?x?x?xf64, #SortedCOO3DPermuted> to memref<?xf64>
call @dumpi(%p3) : (memref<?xindex>) -> ()
- call @dumpi(%i30) : (memref<?xindex>) -> ()
- call @dumpi(%i31) : (memref<?xindex>) -> ()
- call @dumpi(%i31) : (memref<?xindex>) -> ()
+ call @dumpsi(%i30) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
+ call @dumpsi(%i31) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
+ call @dumpsi(%i31) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
call @dumpf(%v3) : (memref<?xf64>) -> ()
//
@@ -192,14 +199,14 @@ module {
%p4 = sparse_tensor.pointers %4 { dimension = 0 : index }
: tensor<?x?xf64, #SortedCOO> to memref<?xindex>
%i40 = sparse_tensor.indices %4 { dimension = 0 : index }
- : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
+ : tensor<?x?xf64, #SortedCOO> to memref<?xindex, strided<[?], offset: ?>>
%i41 = sparse_tensor.indices %4 { dimension = 1 : index }
- : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
+ : tensor<?x?xf64, #SortedCOO> to memref<?xindex, strided<[?], offset: ?>>
%v4 = sparse_tensor.values %4
: tensor<?x?xf64, #SortedCOO> to memref<?xf64>
call @dumpi(%p4) : (memref<?xindex>) -> ()
- call @dumpi(%i40) : (memref<?xindex>) -> ()
- call @dumpi(%i41) : (memref<?xindex>) -> ()
+ call @dumpsi(%i40) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
+ call @dumpsi(%i41) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
call @dumpf(%v4) : (memref<?xf64>) -> ()
// And last but not least, an actual operation applied to COO.
@@ -215,14 +222,14 @@ module {
%p5 = sparse_tensor.pointers %5 { dimension = 0 : index }
: tensor<?x?xf64, #SortedCOO> to memref<?xindex>
%i50 = sparse_tensor.indices %5 { dimension = 0 : index }
- : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
+ : tensor<?x?xf64, #SortedCOO> to memref<?xindex, strided<[?], offset: ?>>
%i51 = sparse_tensor.indices %5 { dimension = 1 : index }
- : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
+ : tensor<?x?xf64, #SortedCOO> to memref<?xindex, strided<[?], offset: ?>>
%v5 = sparse_tensor.values %5
: tensor<?x?xf64, #SortedCOO> to memref<?xf64>
call @dumpi(%p5) : (memref<?xindex>) -> ()
- call @dumpi(%i50) : (memref<?xindex>) -> ()
- call @dumpi(%i51) : (memref<?xindex>) -> ()
+ call @dumpsi(%i50) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
+ call @dumpsi(%i51) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
call @dumpf(%v5) : (memref<?xf64>) -> ()
// Release the resources.
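
As a closing illustration of the stride handling introduced above (again a
sketch, not the compiler code; idxDimAndStride is a hypothetical stand-in),
the dimension remapping performed by getFieldIndexAndStride for index memrefs
and the rescaling done in genCompressed reduce to the following arithmetic:

    #include <cassert>
    #include <utility>

    // Hypothetical stand-in: for an index memref of a dimension inside the
    // COO region, all such dimensions share one AOS buffer anchored at
    // cooStart, with stride rank - cooStart.
    static std::pair<unsigned, unsigned>
    idxDimAndStride(unsigned dim, unsigned cooStart, unsigned rank) {
      unsigned stride = 1;
      if (dim >= cooStart && dim < rank) {
        dim = cooStart;           // all COO dims map to the shared buffer
        stride = rank - cooStart; // one struct entry per stored element
      }
      return {dim, stride};
    }

    int main() {
      // Rank-3 tensor whose COO region starts at dimension 1, e.g.
      // [ "compressed", "compressed-nu", "singleton" ].
      const unsigned rank = 3, cooStart = 1;
      assert(idxDimAndStride(0, cooStart, rank) == std::make_pair(0u, 1u));
      assert(idxDimAndStride(1, cooStart, rank) == std::make_pair(1u, 2u));
      assert(idxDimAndStride(2, cooStart, rank) == std::make_pair(1u, 2u));

      // In genCompressed, the number of stored entries is memSize / stride,
      // and the coordinate of entry p (for the leading dimension of the COO
      // region) is loaded at position p * stride in the shared buffer.
      const unsigned memSize = 10, stride = 2, p = 4;
      assert(memSize / stride == 5); // five stored entries
      assert(p * stride == 8);       // load offset of the last entry
      return 0;
    }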