[Mlir-commits] [mlir] bf812ea - [mlir][linalg] remove the -now- obsolete sparse support in linalg
Aart Bik
llvmlistbot at llvm.org
Mon May 10 16:49:51 PDT 2021
Author: Aart Bik
Date: 2021-05-10T16:49:33-07:00
New Revision: bf812ea484b71ec41d6811646d89876499956235
URL: https://github.com/llvm/llvm-project/commit/bf812ea484b71ec41d6811646d89876499956235
DIFF: https://github.com/llvm/llvm-project/commit/bf812ea484b71ec41d6811646d89876499956235.diff
LOG: [mlir][linalg] remove the -now- obsolete sparse support in linalg
All glue and clutter in the linalg ops have been replaced by a proper
sparse tensor type encoding. This code is no longer needed. Thanks
to ntv@ for giving us a temporary home in linalg.
So long, and thanks for all the fish.
Reviewed By: bixia
Differential Revision: https://reviews.llvm.org/D102098
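For readers without context: the annotations this patch deletes marked each
tensor dimension as dense ("D") or sparse ("S") directly on the linalg op,
whereas sparsity now lives in the tensor type itself. The before/after sketch
below is illustrative only; the trait name, indexing maps, and the #CSR
encoding are our own examples, not taken from the patch:

  // Before this commit: sparsity as an op attribute on linalg.generic.
  #trait = {
    indexing_maps = [ affine_map<(i, j) -> (i, j)>,
                      affine_map<(i, j) -> (i, j)> ],
    iterator_types = [ "parallel", "parallel" ],
    sparse = [ [ "D", "S" ],    // input : dense outer, sparse inner
               [ "D", "D" ] ]   // output: all dense
  }

  // After: sparsity as part of the tensor type, via the sparse_tensor
  // dialect encoding; the op itself carries no sparse attribute.
  #CSR = #sparse_tensor.encoding<{
    dimLevelType = [ "dense", "compressed" ]
  }>
  // e.g. operands typed tensor<?x?xf64, #CSR> instead of tensor<?x?xf64>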
Added:
Modified:
mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
mlir/include/mlir/Dialect/Utils/StructuredOpsUtils.h
mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp
mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp
mlir/python/mlir/dialects/linalg/opdsl/lang/emitter.py
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
index 0512b351650e9..80bd3775cfe24 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
@@ -1000,19 +1000,6 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
});
}]
>,
- InterfaceMethod<
- /*desc=*/[{
- Return whether the op has sparse tensor semantics.
- }],
- /*retTy=*/"bool",
- /*methodName=*/"hasSparseSemantics",
- /*args=*/(ins),
- /*methodBody=*/"",
- /*defaultImplementation=*/[{
- return $_op->getAttr(getSparseAttrName()).
- template dyn_cast_or_null<ArrayAttr>() != nullptr;
- }]
- >,
InterfaceMethod<
/*desc=*/[{
Return the name registered for this op when lowering to an external
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
index 79b7a7d432a57..8336668883a51 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
@@ -527,9 +527,7 @@ class GenericOpBase<string mnemonic> : LinalgStructuredBase_Op<mnemonic, [
AffineMapArrayAttr:$indexing_maps,
ArrayAttr:$iterator_types,
OptionalAttr<StrAttr>:$doc,
- OptionalAttr<StrAttr>:$library_call,
- // ArrayAttr of StrArrayAttr:
- OptionalAttr<ArrayAttr>:$sparse);
+ OptionalAttr<StrAttr>:$library_call);
let results = (outs Variadic<AnyRankedTensor>:$result_tensors);
let regions = (region AnyRegion:$region);
let extraClassDeclaration = structuredOpsBaseDecls # [{
@@ -583,8 +581,6 @@ def GenericOp : GenericOpBase<"generic"> {
Each element of the list represents an iterator of one of the following
types:
parallel, reduction, window
- - sparse: an optional list with per-dimension sparsity annotations (either
- "D" for dense or "S" for sparse) for each input and output view.
Example:
Defining a #matmul_trait attribute in MLIR can be done as follows:
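(The example referenced above continues past this hunk. For orientation, such
a trait looks roughly like the sketch below, which after this commit no longer
carries a sparse entry; the sketch is ours, not part of the diff.)

  #matmul_accesses = [
    affine_map<(m, n, k) -> (m, k)>,
    affine_map<(m, n, k) -> (k, n)>,
    affine_map<(m, n, k) -> (m, n)>
  ]
  #matmul_trait = {
    doc = "C(m, n) += A(m, k) * B(k, n)",
    indexing_maps = #matmul_accesses,
    library_call = "linalg_matmul",
    iterator_types = [ "parallel", "parallel", "reduction" ]
  }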
diff --git a/mlir/include/mlir/Dialect/Utils/StructuredOpsUtils.h b/mlir/include/mlir/Dialect/Utils/StructuredOpsUtils.h
index c7d2476032433..d6ccea1df7a4e 100644
--- a/mlir/include/mlir/Dialect/Utils/StructuredOpsUtils.h
+++ b/mlir/include/mlir/Dialect/Utils/StructuredOpsUtils.h
@@ -58,9 +58,6 @@ constexpr StringRef getDocAttrName() { return "doc"; }
/// function that implements the structured op.
constexpr StringRef getLibraryCallAttrName() { return "library_call"; }
-/// Attribute name for the ArrayAttr of StrArrayAttr that encodes sparsity.
-constexpr StringRef getSparseAttrName() { return "sparse"; }
-
/// Attribute name for the StrArrayAttr which encodes the value of strides.
constexpr StringRef getStridesAttrName() { return "strides"; }
@@ -129,18 +126,6 @@ inline StringRef toString(IteratorType t) {
llvm_unreachable("Unsupported IteratorType");
}
-/// Use to encode a dense or sparse dimension.
-constexpr StringRef getSparseDimName() { return "S"; }
-inline bool isSparseDim(Attribute attr) {
- auto strAttr = attr.dyn_cast_or_null<StringAttr>();
- return strAttr && strAttr.getValue() == getSparseDimName();
-}
-constexpr StringRef getDenseDimName() { return "D"; }
-inline bool isDenseDim(Attribute attr) {
- auto strAttr = attr.dyn_cast_or_null<StringAttr>();
- return strAttr && strAttr.getValue() == getDenseDimName();
-}
-
} // end namespace mlir
#endif // MLIR_UTILS_STRUCTUREDOPSUTILS_H
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index 01c240fd88f8c..5f635c7ebaf91 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -447,8 +447,8 @@ void GenericOp::build(
builder.getAffineMapArrayAttr(indexingMaps),
builder.getStrArrayAttr(iteratorTypes),
doc.empty() ? StringAttr() : builder.getStringAttr(doc),
- libraryCall.empty() ? StringAttr() : builder.getStringAttr(libraryCall),
- ArrayAttr());
+ libraryCall.empty() ? StringAttr()
+ : builder.getStringAttr(libraryCall));
if (!bodyBuild)
return;
@@ -502,8 +502,8 @@ void IndexedGenericOp::build(
builder.getAffineMapArrayAttr(indexingMaps),
builder.getStrArrayAttr(iteratorTypes),
doc.empty() ? StringAttr() : builder.getStringAttr(doc),
- libraryCall.empty() ? StringAttr() : builder.getStringAttr(libraryCall),
- ArrayAttr());
+ libraryCall.empty() ? StringAttr()
+ : builder.getStringAttr(libraryCall));
if (!bodyBuild)
return;
@@ -676,58 +676,8 @@ void IndexedGenericOp::getEffects(
getInputBuffers(), getOutputBuffers());
}
-namespace {
-
-template <typename GenericOpType>
-struct AnnotationsVerifier {
- static LogicalResult verify(GenericOpType op) { return success(); }
-};
-
-template <>
-LogicalResult AnnotationsVerifier<GenericOp>::verify(GenericOp op) {
- ArrayAttr sparseAttr = op.sparseAttr();
- if (!sparseAttr)
- return success();
- // Verify consistency of sparse annotations.
- if (!op.hasTensorSemantics())
- return op.emitOpError("expected sparse annotations on tensors only");
- if (op.getNumOutputs() != 1)
- return op.emitOpError("expected single output tensor");
- unsigned numTensors = op.getNumShapedOperands();
- if (sparseAttr.size() != numTensors)
- return op.emitOpError("expected one sparse annotation for each tensor");
- for (unsigned t = 0; t < numTensors; t++) {
- auto dimAttr = sparseAttr[t].dyn_cast_or_null<ArrayAttr>();
- if (!dimAttr)
- return op.emitOpError("expected sparse annotation array for tensor ")
- << t;
- unsigned rank = op.getShapedType(t).getRank();
- if (dimAttr.size() != rank)
- return op.emitOpError("expected sparse annotation with rank ")
- << rank << " for tensor " << t;
- // Per-dimension annotations for each tensor consist of only "D" or "S".
- for (unsigned d = 0; d < rank; d++) {
- if (isDenseDim(dimAttr[d])) {
- continue;
- } else if (isSparseDim(dimAttr[d])) {
- if (t == numTensors - 1)
- return op.emitOpError("sparse output tensors not supported (yet)");
- continue;
- }
- return op.emitOpError("expected sparse annotation at position ")
- << d << " for tensor " << t;
- }
- }
- return success();
-}
-
-} // namespace
-
template <typename GenericOpType>
static LogicalResult verifyGenericOp(GenericOpType op) {
- if (failed(AnnotationsVerifier<GenericOpType>::verify(op)))
- return failure();
-
return success();
}
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp
index bd2fdac9aa4f3..ea250138582a3 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp
@@ -88,7 +88,7 @@ finalizeBufferAllocationForGenericOp(ConversionPatternRewriter &rewriter,
/*inputs=*/inputs,
/*outputs=*/outputs, genericOp.indexing_maps(),
genericOp.iterator_types(), genericOp.docAttr(),
- genericOp.library_callAttr(), genericOp.sparseAttr());
+ genericOp.library_callAttr());
// Create a new block in the region of the new Generic Op.
Block *oldBlock = genericOp.getBody();
diff --git a/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp b/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp
index 4ee534d111d0d..6a0c597b7c3cd 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp
@@ -321,8 +321,7 @@ fuseElementwiseOpsImpl(LinalgOp producer, OpOperand &consumerOpOperand,
consumer.getOutputs(), rewriter.getAffineMapArrayAttr(fusedIndexMaps),
consumer.iterator_types(),
/*doc=*/nullptr,
- /*library_call=*/nullptr,
- /*sparse=*/nullptr);
+ /*library_call=*/nullptr);
} else {
fusedOp = rewriter.create<IndexedGenericOp>(
consumer.getLoc(), consumer->getResultTypes(),
@@ -331,8 +330,7 @@ fuseElementwiseOpsImpl(LinalgOp producer, OpOperand &consumerOpOperand,
consumer.getOutputs(), rewriter.getAffineMapArrayAttr(fusedIndexMaps),
consumer.iterator_types(),
/*doc=*/nullptr,
- /*library_call=*/nullptr,
- /*sparse=*/nullptr);
+ /*library_call=*/nullptr);
}
// Construct an AffineMap from consumer loops to producer loops.
@@ -1260,8 +1258,7 @@ struct FoldConsumerReshapeOpByLinearization
/*outputs=*/output, rewriter.getAffineMapArrayAttr(fusedIndexMaps),
producer.iterator_types(),
/*doc=*/nullptr,
- /*library_call=*/nullptr,
- /*sparse=*/nullptr);
+ /*library_call=*/nullptr);
auto &fusedRegion = fusedOp->getRegion(0);
rewriter.cloneRegionBefore(producer->getRegion(0), fusedRegion,
fusedRegion.begin());
@@ -1352,8 +1349,7 @@ class FoldSplatConstants : public OpRewritePattern<LinalgOpTy> {
rewriter.getAffineMapArrayAttr(fusedIndexMaps),
linalgOp.iterator_types(),
/*doc=*/nullptr,
- /*library_call=*/nullptr,
- /*sparse=*/nullptr);
+ /*library_call=*/nullptr);
// Map the block argument corresponding to the replaced argument with the
// scalar constant.
diff --git a/mlir/python/mlir/dialects/linalg/opdsl/lang/emitter.py b/mlir/python/mlir/dialects/linalg/opdsl/lang/emitter.py
index 4a037025d46a7..85c77d52fe5f8 100644
--- a/mlir/python/mlir/dialects/linalg/opdsl/lang/emitter.py
+++ b/mlir/python/mlir/dialects/linalg/opdsl/lang/emitter.py
@@ -89,12 +89,10 @@ def prepare_common_structured_op(op_config: LinalgStructuredOpConfig,
for am in AffineMap.compress_unused_symbols(op_config.indexing_maps, Context.current)])
iterator_types_attr = ArrayAttr.get(
[StringAttr.get(s) for s in op_config.iterator_types])
- # TODO: Add support for sparse operands once there is a stable interface.
- sparse_attr = None
return (all_arg_defs, in_arg_defs, out_arg_defs, outs, result_types,
type_mapping, capture_arg_mapping, indexing_maps_attr,
- iterator_types_attr, sparse_attr)
+ iterator_types_attr)
def emit_generic_structured_op(op_config: LinalgStructuredOpConfig,
@@ -102,7 +100,7 @@ def emit_generic_structured_op(op_config: LinalgStructuredOpConfig,
outs: Sequence[Value] = (),
captures: Sequence[Value] = ()):
all_arg_defs, in_arg_defs, out_arg_defs, outs, result_types, type_mapping, \
- capture_arg_mapping, indexing_maps_attr, iterator_types_attr, sparse_attr = \
+ capture_arg_mapping, indexing_maps_attr, iterator_types_attr = \
prepare_common_structured_op(op_config, *ins, outs = outs,
captures=captures)
@@ -113,8 +111,7 @@ def emit_generic_structured_op(op_config: LinalgStructuredOpConfig,
indexing_maps=indexing_maps_attr,
iterator_types=iterator_types_attr,
doc=None, # TODO: Make optional.
- library_call=None, # TODO: Make optional.
- sparse=sparse_attr) # TODO: Make optional.
+ library_call=None) # TODO: Make optional.
# Construct the body.
block_arg_names = _get_tensor_def_names(*in_arg_defs, *out_arg_defs)
@@ -141,7 +138,7 @@ def emit_named_structured_op(op_config: LinalgStructuredOpConfig,
outs: Sequence[Value] = (),
captures: Sequence[Value] = ()):
all_arg_defs, in_arg_defs, out_arg_defs, outs, result_types, type_mapping, \
- capture_arg_mapping, indexing_maps_attr, iterator_types_attr, sparse_attr = \
+ capture_arg_mapping, indexing_maps_attr, iterator_types_attr = \
prepare_common_structured_op(op_config, *ins, outs = outs,
captures = captures)
@@ -351,8 +348,8 @@ def _get_tensor_def_names(
def _add_type_mapping(name: str, type: Type, type_mapping: Dict[str, Type]):
if name in type_mapping:
if type_mapping[name] != type:
- raise ValueError(f"Cannot overwrite type mapping {name} = "
- f"{type_mapping[name]} by type {type}")
+ raise ValueError(f"Cannot overwrite type mapping {name} = "
+ f"{type_mapping[name]} by type {type}")
type_mapping[name] = type
def _is_floating_point_type(t: Type) -> bool: