[Mlir-commits] [mlir] 0a52d90 - [mlir][linalg] Update Structured Op Interface (NFC).
Tobias Gysi
llvmlistbot at llvm.org
Mon May 31 06:23:10 PDT 2021
Author: Tobias Gysi
Date: 2021-05-31T13:20:48Z
New Revision: 0a52d9006c8656af242f6cc237d126c62c61bbac
URL: https://github.com/llvm/llvm-project/commit/0a52d9006c8656af242f6cc237d126c62c61bbac
DIFF: https://github.com/llvm/llvm-project/commit/0a52d9006c8656af242f6cc237d126c62c61bbac.diff
LOG: [mlir][linalg] Update Structured Op Interface (NFC).
Add methods to access operand properties via OpOperands and mark outdated methods as deprecated.
Differential Revision: https://reviews.llvm.org/D103394
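As an illustration of the new API surface, the fragment below sketches how the OpOperand-based accessors added here could be used from a pass. It is a hedged example only: the helper name `inspectOperands` is hypothetical, and it assumes the usual Linalg headers plus `using namespace mlir;`.

  // Walk the operands of a structured op through the new OpOperand-based API.
  static void inspectOperands(linalg::LinalgOp linalgOp) {
    for (OpOperand *opOperand : linalgOp.getInputOperands()) {
      // Indexing map tied to this operand and its shape (empty for scalars).
      AffineMap map = linalgOp.getTiedIndexingMap(opOperand);
      ArrayRef<int64_t> shape = linalgOp.getShape(opOperand);
      (void)map;
      (void)shape;
    }
    // Output tensor operands whose value is read by the payload are init tensors.
    SmallVector<Value> initTensors;
    for (OpOperand *opOperand : linalgOp.getOutputTensorOperands())
      if (linalgOp.isInitTensor(opOperand))
        initTensors.push_back(opOperand->get());
  }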
Added:
Modified:
mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.h
mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.h b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.h
index 68491d8aad888..418e609131b52 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.h
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.h
@@ -25,6 +25,11 @@ namespace mlir {
namespace linalg {
class LinalgOp;
+/// OpOperand vector that implicitly converts to a Value vector.
+struct OpOperandVector : public SmallVector<OpOperand *> {
+ operator SmallVector<Value>();
+};
+
/// Returns the values obtained by applying `map` to the list of values.
SmallVector<Value, 4> applyMapToValues(OpBuilder &b, Location loc,
AffineMap map, ValueRange values);
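The OpOperandVector introduced above converts implicitly to a SmallVector<Value>, so call sites migrating away from the deprecated Value-based accessors can stay Value-typed. A minimal before/after sketch; the helper name `migrate` is hypothetical and `using namespace mlir;` is assumed:

  // Hypothetical migration helper: keep Value-typed results while moving to
  // the OpOperand-based accessor.
  static void migrate(linalg::LinalgOp linalgOp) {
    // Deprecated: Value-based accessor.
    SmallVector<Value, 4> oldBuffers = linalgOp.getInputBuffers();
    // New: OpOperand-based accessor; OpOperandVector converts implicitly.
    SmallVector<Value> newBuffers = linalgOp.getInputBufferOperands();
    (void)oldBuffers;
    (void)newBuffers;
  }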
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
index 91bccdb239562..16510c906e25e 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
@@ -229,7 +229,7 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
/*desc=*/[{
Return the number of inputs.
}],
- /*retTy=*/"unsigned",
+ /*retTy=*/"int64_t",
/*methodName=*/"getNumInputs",
/*args=*/(ins),
/*methodBody=*/"",
@@ -251,7 +251,7 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
/*desc=*/[{
Return the number of outputs.
}],
- /*retTy=*/"unsigned",
+ /*retTy=*/"int64_t",
/*methodName=*/"getNumOutputs",
/*args=*/(ins),
/*methodBody=*/"",
@@ -259,984 +259,1175 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
return $_op.outputs().size();
}]
>,
- //===------------------------------------------------------------------===//
- // Input operands handling.
- //===------------------------------------------------------------------===//
InterfaceMethod<
/*desc=*/[{
- Return the `i`-th input operand.
+ Return the number of inputs and outputs.
}],
- /*retTy=*/"Value",
- /*methodName=*/"getInput",
- /*args=*/(ins "unsigned":$i),
+ /*retTy=*/"int64_t",
+ /*methodName=*/"getNumInputsAndOutputs",
+ /*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- assert(i < $_op.getNumInputs());
- return this->getOperation()->getOperand(i);
+ return getNumInputs() + getNumOutputs();
}]
>,
+ //===------------------------------------------------------------------===//
+ // Input operands handling.
+ //===------------------------------------------------------------------===//
InterfaceMethod<
/*desc=*/[{
- Return the `i`-th input shaped type
+ Return the input operands.
}],
- /*retTy=*/"ShapedType",
- /*methodName=*/"getInputShapedType",
- /*args=*/(ins "unsigned":$i),
+ /*retTy=*/"OpOperandVector",
+ /*methodName=*/"getInputOperands",
+ /*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return getInput(i).getType().template cast<ShapedType>();
+ OpOperandVector result;
+ result.reserve(getNumInputs());
+ llvm::transform(
+ this->getOperation()->getOpOperands().take_front(getNumInputs()),
+ std::back_inserter(result),
+ [](OpOperand &opOperand) { return &opOperand; });
+ return result;
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the range of input operands.
+ Return the `i`-th input operand.
}],
- /*retTy=*/"Operation::operand_range",
- /*methodName=*/"getInputs",
- /*args=*/(ins),
+ /*retTy=*/"OpOperand*",
+ /*methodName=*/"getInputOperand",
+ /*args=*/(ins "int64_t":$i),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- auto range = this->getOperation()->getOperands();
- return {range.begin(), range.begin() + $_op.getNumInputs()};
+ assert(i < getNumInputs());
+ return getInputOperands()[i];
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the OpOperands for the input operands.
+ Return the subset of input operands that are of buffer type.
}],
- /*retTy=*/" MutableArrayRef<OpOperand>",
- /*methodName=*/"getInputOpOperands",
+ /*retTy=*/"OpOperandVector",
+ /*methodName=*/"getInputBufferOperands",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return this->getOperation()->getOpOperands().take_front(getNumInputs());
+ OpOperandVector result;
+ result.reserve(getNumInputs());
+ llvm::copy_if(getInputOperands(),
+ std::back_inserter(result),
+ [](OpOperand *opOperand) {
+ return opOperand->get().getType().template isa<MemRefType>();
+ });
+ return result;
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the subset of input operands that are of buffer type.
+ Return the subset of input operands that are of tensor type.
}],
- /*retTy=*/"SmallVector<Value, 4>",
- /*methodName=*/"getInputBuffers",
+ /*retTy=*/"OpOperandVector",
+ /*methodName=*/"getInputTensorOperands",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return llvm::to_vector<4>(llvm::make_filter_range(
- getInputs(), [](Value in){ return in.getType().template isa<MemRefType>(); }));
+ OpOperandVector result;
+ result.reserve(getNumInputs());
+ llvm::copy_if(getInputOperands(),
+ std::back_inserter(result),
+ [](OpOperand *opOperand) {
+ return opOperand->get().getType().template isa<RankedTensorType>();
+ });
+ return result;
}]
>,
+ //===------------------------------------------------------------------===//
+ // Output operands handling.
+ //===------------------------------------------------------------------===//
InterfaceMethod<
/*desc=*/[{
- Return the number of input buffer operands.
+ Return the output operands.
}],
- /*retTy=*/"unsigned",
- /*methodName=*/"getNumInputBuffers",
+ /*retTy=*/"OpOperandVector",
+ /*methodName=*/"getOutputOperands",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return $_op.getInputBuffers().size();
+ OpOperandVector result;
+ result.reserve(getNumOutputs());
+ llvm::transform(
+ this->getOperation()->getOpOperands()
+ .drop_front(getNumInputs())
+ .take_front(getNumOutputs()),
+ std::back_inserter(result),
+ [](OpOperand &opOperand) { return &opOperand; });
+ return result;
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the `index`^th input buffer.
+ Return the `i`-th output operand.
}],
- /*retTy=*/"Value",
- /*methodName=*/"getInputBuffer",
- /*args=*/(ins "unsigned":$index),
+ /*retTy=*/"OpOperand*",
+ /*methodName=*/"getOutputOperand",
+ /*args=*/(ins "int64_t":$i),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- assert(index < getNumInputBuffers());
- return getInputBuffers()[index];
+ assert(i < getNumOutputs());
+ return getOutputOperands()[i];
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the subset of input operands that are of buffer type.
+ Return the subset of output operands that are of buffer type.
}],
- /*retTy=*/"SmallVector<OpOperand*, 4>",
- /*methodName=*/"getInputBuffersOpOperands",
+ /*retTy=*/"OpOperandVector",
+ /*methodName=*/"getOutputBufferOperands",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- SmallVector<OpOperand*, 4> res;
- res.reserve(getNumInputs());
- for (OpOperand &o : getInputOpOperands())
- if (o.get().getType().isa<MemRefType>())
- res.push_back(&o);
- return res;
+ OpOperandVector result;
+ result.reserve(getNumOutputs());
+ llvm::copy_if(getOutputOperands(),
+ std::back_inserter(result),
+ [](OpOperand *opOperand) {
+ return opOperand->get().getType().template isa<MemRefType>();
+ });
+ return result;
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the subset of input operands that are of tensor type.
+ Return the subset of output operands that are of tensor type.
}],
- /*retTy=*/"SmallVector<Value, 4>",
- /*methodName=*/"getInputTensors",
+ /*retTy=*/"OpOperandVector",
+ /*methodName=*/"getOutputTensorOperands",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return llvm::to_vector<4>(llvm::make_filter_range(
- getInputs(),
- [](Value in){ return in.getType().template isa<RankedTensorType>(); }));
+ OpOperandVector result;
+ result.reserve(getNumOutputs());
+ llvm::copy_if(getOutputOperands(),
+ std::back_inserter(result),
+ [](OpOperand *opOperand) {
+ return opOperand->get().getType().template isa<RankedTensorType>();
+ });
+ return result;
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the subset of op operands that are of tensor type.
+ Return the types of the subset of output operands that are of buffer type.
}],
- /*retTy=*/"SmallVector<OpOperand*, 4>",
- /*methodName=*/"getInputTensorsOpOperands",
+ /*retTy=*/"SmallVector<MemRefType>",
+ /*methodName=*/"getOutputBufferTypes",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- SmallVector<OpOperand*, 4> res;
- res.reserve(getNumInputs());
- for (OpOperand &o : getInputOpOperands())
- if (o.get().getType().isa<RankedTensorType>())
- res.push_back(&o);
- return res;
+ SmallVector<MemRefType> result;
+ result.reserve(getNumOutputs());
+ llvm::transform(getOutputBufferOperands(),
+ std::back_inserter(result),
+ [](OpOperand *opOperands) {
+ return opOperands->get().getType().cast<MemRefType>();
+ });
+ return result;
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the types of the subset of input operands that are of buffer type.
+ Return the types of the subset of output operands that are of tensor type.
}],
- /*retTy=*/"SmallVector<MemRefType, 4>",
- /*methodName=*/"getInputBufferTypes" ,
+ /*retTy=*/"SmallVector<RankedTensorType>",
+ /*methodName=*/"getOutputTensorTypes",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return llvm::to_vector<4>(
- llvm::map_range(
- llvm::make_filter_range(
- ValueRange(getInputs()).getTypes(),
- [](Type in){ return in.isa<MemRefType>(); }),
- [](Type in){ return in.cast<MemRefType>(); }));
+ SmallVector<RankedTensorType> result;
+ result.reserve(getNumOutputs());
+ llvm::transform(getOutputTensorOperands(),
+ std::back_inserter(result),
+ [](OpOperand *opOperands) {
+ return opOperands->get().getType().cast<RankedTensorType>();
+ });
+ return result;
}]
>,
+ //===------------------------------------------------------------------===//
+ // Input and Output arguments handling.
+ //===------------------------------------------------------------------===//
InterfaceMethod<
/*desc=*/[{
- Return the types of the subset of input operands that are of ranked
- tensor type.
+ Return the range over input and output operands.
}],
- /*retTy=*/"SmallVector<RankedTensorType, 4>",
- /*methodName=*/"getInputTensorTypes" ,
+ /*retTy=*/"OpOperandVector",
+ /*methodName=*/"getInputAndOutputOperands",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return llvm::to_vector<4>(
- llvm::map_range(
- llvm::make_filter_range(
- ValueRange(getInputs()).getTypes(),
- [](Type in){ return in.isa<RankedTensorType>(); }),
- [](Type in){ return in.cast<RankedTensorType>(); }));
+ OpOperandVector result;
+ result.reserve(getNumInputsAndOutputs());
+ llvm::transform(
+ this->getOperation()->getOpOperands()
+ .take_front(getNumInputsAndOutputs()),
+ std::back_inserter(result),
+ [](OpOperand &opOperand) { return &opOperand; });
+ return result;
}]
>,
-
- //===------------------------------------------------------------------===//
- // Output operands handling.
- //===------------------------------------------------------------------===//
InterfaceMethod<
/*desc=*/[{
- Return the `i`-th output operand.
+ Return true if the payload uses the value loaded from `opOperand`. This
+ is useful to avoid loading from "write-only" memory that may be
+ uninitialized, as well as properly cloning "read-write" operands.
}],
- /*retTy=*/"Value",
- /*methodName=*/"getOutput",
- /*args=*/(ins "unsigned":$i),
+ /*retTy=*/"bool",
+ /*methodName=*/"payloadUsesValueFromOperand",
+ /*args=*/(ins "OpOperand *":$opOperand),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- assert(i < $_op.getNumOutputs());
- return this->getOperation()->getOperand(i + $_op.getNumInputs());
+ unsigned bbArgNumber =
+ $_op.getNumPayloadInductionVariables() + opOperand->getOperandNumber();
+ // Safeguard against the named linalg ops that are manually defined and
+ // that only support buffer semantics: we should not be there.
+ // Such ops have an empty regionBuilder and are not constructed with a
+ // region for now. In the future they are slated to disappear.
+ assert(this->getOperation()->getNumRegions() == 1 && "unexpected "
+ "missing region (calling `payloadUsesValueFromOperand` on "
+ "manually defined named Linalg op?)");
+ Block &block = this->getOperation()->getRegion(0).front();
+ // Init tensors have uses.
+ return !block.getArgument(bbArgNumber).use_empty();
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the `i`-th output shaped type
+ Return true if `opOperand` is an input tensor.
}],
- /*retTy=*/"ShapedType",
- /*methodName=*/"getOutputShapedType",
- /*args=*/(ins "unsigned":$i),
+ /*retTy=*/"bool",
+ /*methodName=*/"isInputTensor",
+ /*args=*/(ins "OpOperand *":$opOperand),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return getOutput(i).getType().template cast<ShapedType>();
+ if (!opOperand->get().getType().template isa<RankedTensorType>())
+ return false;
+ if (opOperand->getOperandNumber() < $_op.getNumInputs())
+ return true;
+ return false;
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the range of output operands.
+ Return true if `opOperand` is an output tensor.
}],
- /*retTy=*/"Operation::operand_range",
- /*methodName=*/"getOutputs",
- /*args=*/(ins),
+ /*retTy=*/"bool",
+ /*methodName=*/"isOutputTensor",
+ /*args=*/(ins "OpOperand *":$opOperand),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- auto start =
- this->getOperation()->getOperands().begin() + $_op.getNumInputs();
- return {start, start + $_op.getNumOutputs()};
+ if (!opOperand->get().getType().template isa<RankedTensorType>())
+ return false;
+ if (opOperand->getOperandNumber() >= $_op.getNumInputs())
+ return true;
+ return false;
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the OpOperands for the output operands.
+ Return true if `opOperand` is an init tensor. This is true when it is
+ an output tensor operand whose value is used in the payload region.
}],
- /*retTy=*/" MutableArrayRef<OpOperand>",
- /*methodName=*/"getOutputOpOperands",
- /*args=*/(ins),
+ /*retTy=*/"bool",
+ /*methodName=*/"isInitTensor",
+ /*args=*/(ins "OpOperand *":$opOperand),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return this->getOperation()->getOpOperands().slice(
- getNumInputs(), getNumOutputs());
+ if (!$_op.isOutputTensor(opOperand))
+ return false;
+ return payloadUsesValueFromOperand(opOperand);
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the subset of output operands that are of buffer type.
+ Return the `opOperand` rank or zero for scalars.
}],
- /*retTy=*/"SmallVector<Value, 4>",
- /*methodName=*/"getOutputBuffers",
- /*args=*/(ins),
+ /*retTy=*/"int64_t",
+ /*methodName=*/"getRank",
+ /*args=*/(ins "OpOperand*":$opOperand),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return llvm::to_vector<4>(llvm::make_filter_range(
- getOutputs(), [](Value in){ return in.getType().template isa<MemRefType>(); }));
+ assert(opOperand->getOwner() == this->getOperation());
+ if (auto shapedType =
+ opOperand->get().getType().template dyn_cast<ShapedType>())
+ return shapedType.getRank();
+ return 0;
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the `index`^th output buffer.
+ Return the `opOperand` shape or an empty vector for scalars.
}],
- /*retTy=*/"Value",
- /*methodName=*/"getOutputBuffer",
- /*args=*/(ins "unsigned":$index),
+ /*retTy=*/"ArrayRef<int64_t>",
+ /*methodName=*/"getShape",
+ /*args=*/(ins "OpOperand*":$opOperand),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- assert(index < getNumOutputBuffers());
- return getOutputBuffers()[index];
+ assert(opOperand->getOwner() == this->getOperation());
+ if (auto shapedType =
+ opOperand->get().getType().template dyn_cast<ShapedType>())
+ return shapedType.getShape();
+ return {};
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the subset of output operands that are of buffer type.
+ Return the input or output indexing map for `opOperand`.
}],
- /*retTy=*/"SmallVector<OpOperand*, 4>",
- /*methodName=*/"getOutputBuffersOpOperands",
- /*args=*/(ins),
+ /*retTy=*/"AffineMap",
+ /*methodName=*/"getTiedIndexingMap",
+ /*args=*/(ins "OpOperand*":$opOperand),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- SmallVector<OpOperand*, 4> res;
- res.reserve(getNumOutputs());
- for (OpOperand &o : getOutputOpOperands())
- if (o.get().getType().isa<MemRefType>())
- res.push_back(&o);
- return res;
+ assert(opOperand->getOwner() == this->getOperation());
+ return getIndexingMaps()[opOperand->getOperandNumber()];
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the number of output buffer operands.
+ Return the result tied to `opOperand`.
}],
- /*retTy=*/"unsigned",
- /*methodName=*/"getNumOutputBuffers",
- /*args=*/(ins),
+ /*retTy=*/"OpResult",
+ /*methodName=*/"getTiedOpResult",
+ /*args=*/(ins "OpOperand*":$opOperand),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return $_op.getOutputBuffers().size();
+ assert(opOperand->getOwner() == this->getOperation());
+ int64_t resultIndex = opOperand->getOperandNumber() - getNumInputs();
+ assert(resultIndex >= 0 &&
+ resultIndex < this->getOperation()->getNumResults() );
+ return this->getOperation()->getResult(resultIndex);
}]
>,
+ //===------------------------------------------------------------------===//
+ // Other interface methods.
+ //===------------------------------------------------------------------===//
InterfaceMethod<
/*desc=*/[{
- Return the subset of output operands that are of tensor type.
+ Return the iterator types attribute within the current operation.
}],
- /*retTy=*/"SmallVector<Value, 4>",
- /*methodName=*/"getOutputTensors",
+ /*retTy=*/"ArrayAttr",
+ /*methodName=*/"iterator_types",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return llvm::to_vector<4>(llvm::make_filter_range(
- getOutputs(),
- [](Value in){ return in.getType().template isa<RankedTensorType>(); }));
+ return $_op.iterator_types();
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the subset of output operands that are of tensor type.
+ Return true if the indexing map is depending on the current op instance.
+ This means that the indexing map is dynamically synthesized by using the
+ op instance's concrete attributes, instead of being static for all
+ instances of the same op kind.
}],
- /*retTy=*/"SmallVector<OpOperand*, 4>",
- /*methodName=*/"getOutputTensorsOpOperands",
+ /*retTy=*/"bool",
+ /*methodName=*/"hasDynamicIndexingMaps",
/*args=*/(ins),
/*methodBody=*/"",
- /*defaultImplementation=*/[{
- SmallVector<OpOperand*, 4> res;
- res.reserve(getNumOutputs());
- for (OpOperand &o : getOutputOpOperands())
- if (o.get().getType().isa<RankedTensorType>())
- res.push_back(&o);
- return res;
- }]
+ /*defaultImplementation=*/[{ return false; }]
>,
InterfaceMethod<
/*desc=*/[{
- Return the number of output tensor operands.
+ Verify all attributes used by indexing maps are valid.
}],
- /*retTy=*/"unsigned",
- /*methodName=*/"getNumOutputTensors",
+ /*retTy=*/"LogicalResult",
+ /*methodName=*/"verifyIndexingMapRequiredAttributes",
/*args=*/(ins),
/*methodBody=*/"",
- /*defaultImplementation=*/[{
- return $_op.getOutputTensors().size();
- }]
+ /*defaultImplementation=*/[{ return success(); }]
>,
InterfaceMethod<
/*desc=*/[{
- Return the types of the subset of output operands that are of buffer type.
+ Return the indexing maps attribute within the current operation.
+ }],
+ /*retTy=*/"ArrayAttr",
+ /*methodName=*/"indexing_maps"
+ >,
+ InterfaceMethod<
+ /*desc=*/[{
+ Return the indexing maps within the current operation.
}],
- /*retTy=*/"SmallVector<MemRefType, 4>",
- /*methodName=*/"getOutputBufferTypes" ,
+ /*retTy=*/"SmallVector<AffineMap>",
+ /*methodName=*/"getIndexingMaps",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return llvm::to_vector<4>(
- llvm::map_range(
- llvm::make_filter_range(
- ValueRange(getOutputs()).getTypes(),
- [](Type in){ return in.isa<MemRefType>(); }),
- [](Type in){ return in.cast<MemRefType>(); }));
+ auto range = $_op.indexing_maps()
+ .template getAsValueRange<AffineMapAttr>();
+ return {range.begin(), range.end()};
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the types of the subset of output operands that are of ranked
- tensor type.
+ Return true if any of the operands has a dynamic shape.
}],
- /*retTy=*/"SmallVector<RankedTensorType, 4>",
- /*methodName=*/"getOutputTensorTypes" ,
+ /*retTy=*/"bool",
+ /*methodName=*/"hasDynamicShape",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return llvm::to_vector<4>(
- llvm::map_range(
- llvm::make_filter_range(
- ValueRange(getOutputs()).getTypes(),
- [](Type in){ return in.isa<RankedTensorType>(); }),
- [](Type in){ return in.cast<RankedTensorType>(); }));
+ return llvm::any_of(getStaticShape(), ShapedType::isDynamic);
}]
>,
-
- //===------------------------------------------------------------------===//
- // Input and Output arguments handling.
- //===------------------------------------------------------------------===//
InterfaceMethod<
/*desc=*/[{
- Return true if the payload uses the value loaded from `opOperand`. This
- is useful to avoid loading from "write-only" memory that may be
- uninitialized, as well as properly cloning "read-write" operands.
+ Return whether the op has only MemRef input and outputs.
}],
/*retTy=*/"bool",
- /*methodName=*/"payloadUsesValueFromOpOperand",
- /*args=*/(ins "OpOperand *":$opOperand),
+ /*methodName=*/"hasBufferSemantics",
+ /*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- unsigned bbArgNumber =
- $_op.getNumPayloadInductionVariables() + opOperand->getOperandNumber();
- // Safeguard against the named linalg ops that are manually defined and
- // that only support buffer semantics: we should not be there.
- // Such ops have an empty regionBuilder and are not constructed with a
- // region for now. In the future they are slated to disappear.
- assert(this->getOperation()->getNumRegions() == 1 && "unexpected "
- "missing region (calling `payloadUsesValueFromOpOperand` on "
- "manually defined named Linalg op?)");
- Block &block = this->getOperation()->getRegion(0).front();
- // Init tensors have uses.
- return !block.getArgument(bbArgNumber).use_empty();
+ return this->getOperation()->getNumResults() == 0 &&
+ llvm::all_of(getInputAndOutputOperands(),
+ [](OpOperand *opOperand) {
+ return opOperand->get().getType().template isa<MemRefType>();
+ });
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return true if the payload uses the value loaded from input operand
- `index`.
+ Return whether the op has only RankedTensor input and outputs.
}],
/*retTy=*/"bool",
- /*methodName=*/"payloadUsesValueFromInputOperandIndex",
- /*args=*/(ins "unsigned":$index),
+ /*methodName=*/"hasTensorSemantics",
+ /*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return payloadUsesValueFromOpOperand(&getInputOpOperands()[index]);
+ return llvm::all_of(getInputAndOutputOperands(),
+ [](OpOperand *opOperand) {
+ return opOperand->get().getType().template isa<RankedTensorType>();
+ });
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return true if the payload uses the value loaded from output operand
- `index`.
+ Return the name registered for this op when lowering to an external
+ library call.
}],
- /*retTy=*/"bool",
- /*methodName=*/"payloadUsesValueFromOutputOperandIndex",
- /*args=*/(ins "unsigned":$index),
+ /*retTy=*/"std::string",
+ /*methodName=*/"getLibraryCallName",
+ /*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return payloadUsesValueFromOpOperand(&getOutputOpOperands()[index]);
+ return $_op.getLibraryCallName();
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return true if `opOperand` is an input tensor.
+ Return whether the op accesses the iteration indices.
}],
/*retTy=*/"bool",
- /*methodName=*/"isInputTensor",
- /*args=*/(ins "OpOperand *":$opOperand),
+ /*methodName=*/"hasIndexSemantics",
+ /*args=*/(ins),
+ /*methodBody=*/"",
+ /*defaultImplementation=*/""
+ >,
+ //===------------------------------------------------------------------===//
+ // Linalg generalization hooks.
+ //===------------------------------------------------------------------===//
+ InterfaceMethod<
+ /*desc=*/[{
+ Hook to provide a custom AffineMap used to compute all the operand
+ subshapes given loop bounds. This is used to answer the question: "given
+ an iteration space over the codomain, what are the subshapes of the
+ operands involved in the computation".
+ The default behavior is to just concatenate all the indexing maps.
+ A custom AffineMap allows providing a map that can be used to
+ compute subshapes even in cases where the concatenation of indexing maps
+ (i.e. the data traversal order) is not a simple permutation of the loop
+ traversal order. It is then possible to define ops with skewed data
+ traversal order for which we can still easily compute hyperrectangular
+ loop bounds and subviews.
+ }],
+ /*retTy=*/"AffineMap",
+ /*methodName=*/"getLoopsToShapesMap",
+ /*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- if (!opOperand->get().getType().template isa<RankedTensorType>())
- return false;
- if (opOperand->getOperandNumber() < $_op.getNumInputs())
- return true;
- return false;
+ auto r = $_op.indexing_maps().template getAsRange<AffineMapAttr>();
+ auto maps = llvm::to_vector<8>(
+ llvm::map_range(r, [](AffineMapAttr a) { return a.getValue(); }));
+ return concatAffineMaps(maps);
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return true if `opOperand` is an output tensor.
+ Hook to provide a custom AffineMap used to construct the
+ hyperrectangular loop iteration space given all the operand subshapes.
+ This is used to answer the question:
+ "Given a list of operand ranges, what is the subportion of the iteration
+ space involved in the computation".
+ This is the inverse problem of `getLoopsToShapesMap`.
+ Return the empty AffineMap when such an AffineMap cannot be constructed.
+ The default behavior is based on a very simple inference procedure that
+ only works with permutation affine maps.
+ A more advanced Tensor-Comprehension like inference is possible but has
+ proven to be ambiguous in unfavorable cases.
+ A safer and more robust alternative is to allow each op to define
+ its own AffineMap.
}],
- /*retTy=*/"bool",
- /*methodName=*/"isOutputTensor",
- /*args=*/(ins "OpOperand *":$opOperand),
+ /*retTy=*/"AffineMap",
+ /*methodName=*/"getShapesToLoopsMap",
+ /*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- if (!opOperand->get().getType().template isa<RankedTensorType>())
- return false;
- if (opOperand->getOperandNumber() >= $_op.getNumInputs())
- return true;
- return false;
+ return inversePermutation(getLoopsToShapesMap());
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return true if `opOperand` is an init tensor. This is true when it is
- an output tensor operand whose value is used in the payload region.
+ Return the position in the results of the affine map computed
+ by getLoopsToShapesMap() that represents the shape of an
+ operand (input or output) at a dimension.
}],
- /*retTy=*/"bool",
- /*methodName=*/"isInitTensor",
- /*args=*/(ins "OpOperand *":$opOperand),
+ /*retTy=*/"Optional<unsigned>",
+ /*methodName=*/"getOperandDimPositionInLoopsToShapeMap",
+ /*args=*/(ins "unsigned":$operandIdx, "unsigned":$dim),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- if (!$_op.isOutputTensor(opOperand))
- return false;
- return payloadUsesValueFromOpOperand(opOperand);
+ unsigned pos = 0;
+ for (OpOperand *opOperand : getInputAndOutputOperands()) {
+ if (opOperand->getOperandNumber() == operandIdx) return pos + dim;
+ pos += getRank(opOperand);
+ }
+ return {};
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return true if the operand at output index `index` is an init tensor.
+ Return the position in the results of the affine map computed
+ by getLoopsToShapesMap() that represents the shape of an
+ input operand at a dimension.
}],
- /*retTy=*/"bool",
- /*methodName=*/"isIndexOfInitTensor",
- /*args=*/(ins "unsigned":$index),
+ /*retTy=*/"Optional<unsigned>",
+ /*methodName=*/"getInputValueDimPositionInLoopsToShapeMap",
+ /*args=*/(ins "unsigned":$inputIdx, "unsigned":$dim),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- assert(index < getNumOutputs());
- return isInitTensor(
- &this->getOperation()->getOpOperands()[$_op.getNumInputs() + index]);
+ if (inputIdx >= getNumInputs()) return {};
+ return getOperandDimPositionInLoopsToShapeMap(inputIdx, dim);
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the output operands that are init tensors.
+ Return the range of position in the result of the affine map
+ computed by getLoopsToShapesMap() which correspond to the
+ AffineExprs used to access the outputs of the operation.
}],
- /*retTy=*/"SmallVector<Value, 4>",
- /*methodName=*/"getInitTensors",
+ /*retTy=*/"std::pair<unsigned, unsigned>",
+ /*methodName=*/"getResultsPositionInLoopsToShapeMap",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- auto start =
- this->getOperation()->getOpOperands().begin() + $_op.getNumInputs();
- return llvm::to_vector<4>(
- llvm::map_range(
- llvm::make_filter_range(
- llvm::make_range(start, start + $_op.getNumOutputs()),
- [&](OpOperand &opOperand) {
- return $_op.isInitTensor(&opOperand);
- }),
- [&](OpOperand &opOperand) {
- return opOperand.get();
- }));
+ OpOperand *opOperand = getOutputOperand(getNumOutputs()-1);
+ return
+ {*getOperandDimPositionInLoopsToShapeMap(getNumInputs(), 0),
+ (*getOperandDimPositionInLoopsToShapeMap
+ (getNumInputs() + getNumOutputs() - 1,
+ getRank(opOperand) - 1)) + 1};
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the number of init tensor operands.
+ Like `getShape`, but only returns statically-known information, without
+ generating any new IR. For each shape dimension, returns >=0 if that
+ dimension is statically known, or ShapeType::kDynamicSize otherwise.
}],
- /*retTy=*/"unsigned",
- /*methodName=*/"getNumInitTensors",
+ /*retTy=*/"SmallVector<int64_t>",
+ /*methodName=*/"getStaticShape",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return getInitTensors().size();
+ SmallVector<int64_t> res;
+ for (OpOperand *opOperand : getInputAndOutputOperands())
+ llvm::append_range(res, getShape(opOperand));
+ return res;
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the number of input and output operands.
+ Returns the statically-known loop ranges. Composes
+ `getShapesToLoopsMap()` with the result of `getStaticShape`.
+ Returns None if `getShapesToLoopsMap()` fails. Returns
+ ShapeType::kDynamicSize for non-statically-known loop ranges.
}],
- /*retTy=*/"unsigned",
- /*methodName=*/"getNumShapedOperands",
+ /*retTy=*/"Optional<SmallVector<int64_t, 4>>",
+ /*methodName=*/"getStaticLoopRanges",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return $_op.getNumInputs() + $_op.getNumOutputs();
+ SmallVector<int64_t> viewSizes = getStaticShape();
+ AffineMap invertedMap = getShapesToLoopsMap();
+ if (!invertedMap)
+ return {};
+ return invertedMap.compose(viewSizes);
}]
>,
+ //===------------------------------------------------------------------===//
+ // Other static interface methods.
+ //===------------------------------------------------------------------===//
InterfaceMethod<
/*desc=*/[{
- Return the `i`-th shaped operand value.
+ Clone the current operation with the given location and operands. This
+ is used to abstract away the optional underlying region creation. This
+ does not change the balance between input, output_buffer and
+ init_tensors operands.
+ }],
+ /*retTy=*/"Operation *",
+ /*methodName=*/"clone",
+ (ins "OpBuilder &":$b, "Location":$loc, "TypeRange":$resultTypes,
+ "ValueRange":$operands),
+ [{
+ BlockAndValueMapping bvm;
+ OperationState state(
+ loc, ConcreteOp::getOperationName(), operands, resultTypes,
+ $_op->getAttrs());
+ for (Region &r : $_op->getRegions())
+ r.cloneInto(state.addRegion(), bvm);
+ return b.createOperation(state);
+ }]
+ >,
+ InterfaceMethod<
+ /*desc=*/[{
+ Clone the current operation with the given location, operands
+ and BlockAndValueMapping. This is used to abstract away the
+ optional underlying region creation. This does not change the
+ balance between input, output_buffer and init_tensors
+ operands.
+ }],
+ /*retTy=*/"Operation *",
+ /*methodName=*/"cloneWithMapper",
+ (ins "OpBuilder &":$b, "Location":$loc, "TypeRange":$resultTypes,
+ "ValueRange":$operands, "BlockAndValueMapping &":$bvm),
+ [{
+ OperationState state(
+ loc, ConcreteOp::getOperationName(), operands, resultTypes,
+ $_op->getAttrs());
+ for (Region &r : $_op->getRegions())
+ r.cloneInto(state.addRegion(), bvm);
+ return b.createOperation(state);
+ }]
+ >,
+ StaticInterfaceMethod<
+ /*desc=*/[{
+ Returns the region builder for constructing the body for linalg.generic.
+ Returns a null function if this named op does not define a region
+ builder.
+ }],
+ /*retTy=*/"std::function<void(ImplicitLocOpBuilder &, Block &, ValueRange)>",
+ /*methodName=*/"getRegionBuilder",
+ (ins),
+ [{ return ConcreteOp::getRegionBuilder(); }]
+ >,
+ //===------------------------------------------------------------------===//
+ // DEPRECATED METHODS
+ //===------------------------------------------------------------------===//
+ InterfaceMethod<
+ /*desc=*/[{
+ Return true if the payload uses the value loaded from `opOperand`. This
+ is useful to avoid loading from "write-only" memory that may be
+ uninitialized, as well as properly cloning "read-write" operands.
+ }],
+ /*retTy=*/"bool",
+ /*methodName=*/"payloadUsesValueFromOpOperand",
+ /*args=*/(ins "OpOperand *":$opOperand),
+ /*methodBody=*/"",
+ /*defaultImplementation=*/[{
+ unsigned bbArgNumber =
+ $_op.getNumPayloadInductionVariables() + opOperand->getOperandNumber();
+ // Safeguard against the named linalg ops that are manually defined and
+ // that only support buffer semantics: we should not be there.
+ // Such ops have an empty regionBuilder and are not constructed with a
+ // region for now. In the future they are slated to disappear.
+ assert(this->getOperation()->getNumRegions() == 1 && "unexpected "
+ "missing region (calling `payloadUsesValueFromOpOperand` on "
+ "manually defined named Linalg op?)");
+ Block &block = this->getOperation()->getRegion(0).front();
+ // Init tensors have uses.
+ return !block.getArgument(bbArgNumber).use_empty();
+ }]
+ >,
+ InterfaceMethod<
+ /*desc=*/[{
+ Return true if the payload uses the value loaded from input operand
+ `index`.
+ }],
+ /*retTy=*/"bool",
+ /*methodName=*/"payloadUsesValueFromInputOperandIndex",
+ /*args=*/(ins "unsigned":$index),
+ /*methodBody=*/"",
+ /*defaultImplementation=*/[{
+ return payloadUsesValueFromOpOperand(getInputOperand(index));
+ }]
+ >,
+ InterfaceMethod<
+ /*desc=*/[{
+ Return true if the payload uses the value loaded from output operand
+ `index`.
+ }],
+ /*retTy=*/"bool",
+ /*methodName=*/"payloadUsesValueFromOutputOperandIndex",
+ /*args=*/(ins "unsigned":$index),
+ /*methodBody=*/"",
+ /*defaultImplementation=*/[{
+ return payloadUsesValueFromOpOperand(getOutputOperand(index));
+ }]
+ >,
+ InterfaceMethod<
+ /*desc=*/[{
+ Return the `i`-th input operand.
}],
/*retTy=*/"Value",
- /*methodName=*/"getShapedOperand",
+ /*methodName=*/"getInput",
/*args=*/(ins "unsigned":$i),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- assert(i < $_op.getNumShapedOperands());
+ assert(i < $_op.getNumInputs());
return this->getOperation()->getOperand(i);
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the range over input and output operands.
+ Return the `i`-th input shaped type
+ }],
+ /*retTy=*/"ShapedType",
+ /*methodName=*/"getInputShapedType",
+ /*args=*/(ins "unsigned":$i),
+ /*methodBody=*/"",
+ /*defaultImplementation=*/[{
+ return getInput(i).getType().template cast<ShapedType>();
+ }]
+ >,
+ InterfaceMethod<
+ /*desc=*/[{
+ Return the range of input operands.
}],
/*retTy=*/"Operation::operand_range",
- /*methodName=*/"getShapedOperands",
+ /*methodName=*/"getInputs",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
auto range = this->getOperation()->getOperands();
- return {range.begin(), range.begin() + getNumShapedOperands()};
+ return {range.begin(), range.begin() + $_op.getNumInputs()};
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the OpOperands for all the shaped operands.
+ Return the OpOperands for the input operands.
}],
/*retTy=*/" MutableArrayRef<OpOperand>",
- /*methodName=*/"getShapedOpOperands",
+ /*methodName=*/"getInputOpOperands",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return this->getOperation()->getOpOperands().take_front(
- getNumShapedOperands());
+ return this->getOperation()->getOpOperands().take_front(getNumInputs());
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the OpOperands for all the shaped operands.
+ Return the subset of input operands that are of buffer type.
}],
- /*retTy=*/" OpOperand&",
- /*methodName=*/"getShapedOpOperand",
- /*args=*/(ins "unsigned":$i),
+ /*retTy=*/"SmallVector<Value, 4>",
+ /*methodName=*/"getInputBuffers",
+ /*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return *(this->getShapedOpOperands().begin() + i);
+ return llvm::to_vector<4>(llvm::make_filter_range(
+ getInputs(), [](Value in){ return in.getType().template isa<MemRefType>(); }));
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the range over input and output operands.
+ Return the number of input buffer operands.
}],
- /*retTy=*/"SmallVector<ShapedType, 4>",
- /*methodName=*/"getShapedOperandTypes",
+ /*retTy=*/"unsigned",
+ /*methodName=*/"getNumInputBuffers",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return llvm::to_vector<4>(
- llvm::map_range(
- getShapedOperands(),
- [](Value v) { return v.getType().cast<ShapedType>(); }));
+ return $_op.getInputBuffers().size();
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the `i`-th shaped type
+ Return the `index`^th input buffer.
}],
- /*retTy=*/"ShapedType",
- /*methodName=*/"getShapedType",
- /*args=*/(ins "unsigned":$i),
+ /*retTy=*/"Value",
+ /*methodName=*/"getInputBuffer",
+ /*args=*/(ins "unsigned":$index),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return $_op.getShapedOperand(i).getType().template cast<ShapedType>();
- }]>,
-
- //===------------------------------------------------------------------===//
- // Other interface methods.
- //===------------------------------------------------------------------===//
+ assert(index < getNumInputBuffers());
+ return getInputBuffers()[index];
+ }]
+ >,
InterfaceMethod<
/*desc=*/[{
- Return the iterator types attribute within the current operation.
+ Return the subset of input operands that are of buffer type.
}],
- /*retTy=*/"ArrayAttr",
- /*methodName=*/"iterator_types",
+ /*retTy=*/"SmallVector<OpOperand*, 4>",
+ /*methodName=*/"getInputBuffersOpOperands",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return $_op.iterator_types();
+ SmallVector<OpOperand*, 4> res;
+ res.reserve(getNumInputs());
+ for (OpOperand &o : getInputOpOperands())
+ if (o.get().getType().isa<MemRefType>())
+ res.push_back(&o);
+ return res;
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return true if the indexing map is depending on the current op instance.
- This means that the indexing map is dynamically synthesized by using the
- op instance's concrete attributes, instead of being static for all
- instances of the same op kind.
+ Return the subset of input operands that are of tensor type.
}],
- /*retTy=*/"bool",
- /*methodName=*/"hasDynamicIndexingMaps",
+ /*retTy=*/"SmallVector<Value, 4>",
+ /*methodName=*/"getInputTensors",
/*args=*/(ins),
/*methodBody=*/"",
- /*defaultImplementation=*/[{ return false; }]
+ /*defaultImplementation=*/[{
+ return llvm::to_vector<4>(llvm::make_filter_range(
+ getInputs(),
+ [](Value in){ return in.getType().template isa<RankedTensorType>(); }));
+ }]
>,
InterfaceMethod<
/*desc=*/[{
- Verify all attributes used by indexing maps are valid.
+ Return the subset of op operands that are of tensor type.
}],
- /*retTy=*/"LogicalResult",
- /*methodName=*/"verifyIndexingMapRequiredAttributes",
+ /*retTy=*/"SmallVector<OpOperand*, 4>",
+ /*methodName=*/"getInputTensorsOpOperands",
/*args=*/(ins),
/*methodBody=*/"",
- /*defaultImplementation=*/[{ return success(); }]
+ /*defaultImplementation=*/[{
+ SmallVector<OpOperand*, 4> res;
+ res.reserve(getNumInputs());
+ for (OpOperand &o : getInputOpOperands())
+ if (o.get().getType().isa<RankedTensorType>())
+ res.push_back(&o);
+ return res;
+ }]
>,
InterfaceMethod<
/*desc=*/[{
- Return the indexing maps attribute within the current operation.
+ Return the `i`-th output operand.
}],
- /*retTy=*/"ArrayAttr",
- /*methodName=*/"indexing_maps"
+ /*retTy=*/"Value",
+ /*methodName=*/"getOutput",
+ /*args=*/(ins "unsigned":$i),
+ /*methodBody=*/"",
+ /*defaultImplementation=*/[{
+ assert(i < $_op.getNumOutputs());
+ return this->getOperation()->getOperand(i + $_op.getNumInputs());
+ }]
>,
InterfaceMethod<
/*desc=*/[{
- Return the indexing maps within the current operation.
+ Return the `i`-th output shaped type
}],
- /*retTy=*/"SmallVector<AffineMap>",
- /*methodName=*/"getIndexingMaps",
+ /*retTy=*/"ShapedType",
+ /*methodName=*/"getOutputShapedType",
+ /*args=*/(ins "unsigned":$i),
+ /*methodBody=*/"",
+ /*defaultImplementation=*/[{
+ return getOutput(i).getType().template cast<ShapedType>();
+ }]
+ >,
+ InterfaceMethod<
+ /*desc=*/[{
+ Return the range of output operands.
+ }],
+ /*retTy=*/"Operation::operand_range",
+ /*methodName=*/"getOutputs",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return llvm::to_vector<4>(
- $_op.indexing_maps().template getAsValueRange<AffineMapAttr>());
+ auto start =
+ this->getOperation()->getOperands().begin() + $_op.getNumInputs();
+ return {start, start + $_op.getNumOutputs()};
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the input or output indexing map at index `i`.
+ Return the OpOperands for the output operands.
}],
- /*retTy=*/"AffineMap",
- /*methodName=*/"getIndexingMap",
- /*args=*/(ins "unsigned":$i),
+ /*retTy=*/" MutableArrayRef<OpOperand>",
+ /*methodName=*/"getOutputOpOperands",
+ /*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- assert(i < $_op.getNumShapedOperands());
- return getIndexingMaps()[i];
+ return this->getOperation()->getOpOperands().slice(
+ getNumInputs(), getNumOutputs());
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the input indexing map at index `i`.
+ Return the subset of output operands that are of buffer type.
}],
- /*retTy=*/"AffineMap",
- /*methodName=*/"getInputIndexingMap",
- /*args=*/(ins "unsigned":$i),
+ /*retTy=*/"SmallVector<Value, 4>",
+ /*methodName=*/"getOutputBuffers",
+ /*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- assert(i < $_op.getNumInputs());
- return getIndexingMaps()[i];
+ return llvm::to_vector<4>(llvm::make_filter_range(
+ getOutputs(), [](Value in){ return in.getType().template isa<MemRefType>(); }));
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the input indexing maps.
+ Return the `index`^th output buffer.
}],
- /*retTy=*/"SmallVector<AffineMap>",
- /*methodName=*/"getInputIndexingMaps",
+ /*retTy=*/"Value",
+ /*methodName=*/"getOutputBuffer",
+ /*args=*/(ins "unsigned":$index),
+ /*methodBody=*/"",
+ /*defaultImplementation=*/[{
+ assert(index < getNumOutputBuffers());
+ return getOutputBuffers()[index];
+ }]
+ >,
+ InterfaceMethod<
+ /*desc=*/[{
+ Return the subset of output operands that are of buffer type.
+ }],
+ /*retTy=*/"SmallVector<OpOperand*, 4>",
+ /*methodName=*/"getOutputBuffersOpOperands",
+ /*args=*/(ins),
+ /*methodBody=*/"",
+ /*defaultImplementation=*/[{
+ SmallVector<OpOperand*, 4> res;
+ res.reserve(getNumOutputs());
+ for (OpOperand &o : getOutputOpOperands())
+ if (o.get().getType().isa<MemRefType>())
+ res.push_back(&o);
+ return res;
+ }]
+ >,
+ InterfaceMethod<
+ /*desc=*/[{
+ Return the number of output buffer operands.
+ }],
+ /*retTy=*/"unsigned",
+ /*methodName=*/"getNumOutputBuffers",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- auto maps = $_op.getIndexingMaps();
- return SmallVector<AffineMap>{maps.begin(),
- maps.begin() + $_op.getNumInputs()};
+ return $_op.getOutputBuffers().size();
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the output indexing map at index `i`.
+ Return the subset of output operands that are of tensor type.
}],
- /*retTy=*/"AffineMap",
- /*methodName=*/"getOutputIndexingMap",
- /*args=*/(ins "unsigned":$i),
+ /*retTy=*/"SmallVector<Value, 4>",
+ /*methodName=*/"getOutputTensors",
+ /*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- assert(i < $_op.getNumOutputs());
- return getIndexingMaps()[i + $_op.getNumInputs()];
+ return llvm::to_vector<4>(llvm::make_filter_range(
+ getOutputs(),
+ [](Value in){ return in.getType().template isa<RankedTensorType>(); }));
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the output indexing maps.
+ Return the subset of output operands that are of tensor type.
}],
- /*retTy=*/"SmallVector<AffineMap>",
- /*methodName=*/"getOutputIndexingMaps",
+ /*retTy=*/"SmallVector<OpOperand*, 4>",
+ /*methodName=*/"getOutputTensorsOpOperands",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- auto maps = $_op.getIndexingMaps();
- return SmallVector<AffineMap>{maps.begin() + $_op.getNumInputs(),
- maps.begin() + $_op.getNumShapedOperands()};
+ SmallVector<OpOperand*, 4> res;
+ res.reserve(getNumOutputs());
+ for (OpOperand &o : getOutputOpOperands())
+ if (o.get().getType().isa<RankedTensorType>())
+ res.push_back(&o);
+ return res;
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return whether the op has only MemRef input and outputs.
+ Return the number of output tensor operands.
}],
- /*retTy=*/"bool",
- /*methodName=*/"hasBufferSemantics",
+ /*retTy=*/"unsigned",
+ /*methodName=*/"getNumOutputTensors",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return this->getOperation()->getNumResults() == 0 &&
- llvm::all_of(getShapedOperands(), [](Value v) {
- return v.getType().template isa<MemRefType>(); });
+ return $_op.getOutputTensors().size();
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return whether the op has only RankedTensor input and outputs.
+ Return the number of input and output operands.
}],
- /*retTy=*/"bool",
- /*methodName=*/"hasTensorSemantics",
+ /*retTy=*/"unsigned",
+ /*methodName=*/"getNumShapedOperands",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return llvm::all_of(getShapedOperands(), [](Value v) {
- return v.getType().template isa<RankedTensorType>();
- });
+ return $_op.getNumInputs() + $_op.getNumOutputs();
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the name registered for this op when lowering to an external
- library call.
+ Return the `i`-th shaped operand value.
}],
- /*retTy=*/"std::string",
- /*methodName=*/"getLibraryCallName",
- /*args=*/(ins),
+ /*retTy=*/"Value",
+ /*methodName=*/"getShapedOperand",
+ /*args=*/(ins "unsigned":$i),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return $_op.getLibraryCallName();
+ assert(i < $_op.getNumShapedOperands());
+ return this->getOperation()->getOperand(i);
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return whether the op accesses the iteration indices.
+ Return the range over input and output operands.
}],
- /*retTy=*/"bool",
- /*methodName=*/"hasIndexSemantics",
+ /*retTy=*/"Operation::operand_range",
+ /*methodName=*/"getShapedOperands",
/*args=*/(ins),
/*methodBody=*/"",
- /*defaultImplementation=*/""
+ /*defaultImplementation=*/[{
+ auto range = this->getOperation()->getOperands();
+ return {range.begin(), range.begin() + getNumShapedOperands()};
+ }]
>,
- //===------------------------------------------------------------------===//
- // Linalg generalization hooks.
- //===------------------------------------------------------------------===//
InterfaceMethod<
/*desc=*/[{
- Hook to provide a custom AffineMap used to compute all the operand
- subshapes given loop bounds. This is used to answer the question: "given
- an iteration space over the codomain, what are the subshapes of the
- operands involved in the computation".
- The default behavior is to just concatenate all the indexing maps.
- A custom AffineMap allows providing a map that can be used to
- compute subshapes even in cases where the concatenation of indexing maps
- (i.e. the data traversal order) is not a simple permutation of the loop
- traversal order. It is then possible to define ops with skewed data
- traversal order for which we can still easily compute hyperrectangular
- loop bounds and subviews.
+ Return the OpOperands for all the shaped operands.
}],
- /*retTy=*/"AffineMap",
- /*methodName=*/"getLoopsToShapesMap",
+ /*retTy=*/" MutableArrayRef<OpOperand>",
+ /*methodName=*/"getShapedOpOperands",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- auto r = $_op.indexing_maps().template getAsRange<AffineMapAttr>();
- auto maps = llvm::to_vector<8>(
- llvm::map_range(r, [](AffineMapAttr a) { return a.getValue(); }));
- return concatAffineMaps(maps);
+ return this->getOperation()->getOpOperands().take_front(
+ getNumShapedOperands());
}]
>,
InterfaceMethod<
/*desc=*/[{
- Hook to provide a custom AffineMap used to construct the
- hyperrectangular loop iteration space given all the operand subshapes.
- This is used to answer the question:
- "Given a list of operand ranges, what is the subportion of the iteration
- space involved in the computation".
- This is the inverse problem of `getLoopsToShapesMap`.
- Return the empty AffineMap when such an AffineMap cannot be constructed.
- The default behavior is based on a very simple inference procedure that
- only works with permutation affine maps.
- A more advanced Tensor-Comprehension like inference is possible but has
- proven to be ambiguous in unfavorable case.
- A safer and more robust alternative is to allow each each op to define
- its own AffineMap.
+ Return the OpOperands for all the shaped operands.
}],
- /*retTy=*/"AffineMap",
- /*methodName=*/"getShapesToLoopsMap",
- /*args=*/(ins),
+ /*retTy=*/" OpOperand&",
+ /*methodName=*/"getShapedOpOperand",
+ /*args=*/(ins "unsigned":$i),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return inversePermutation(getLoopsToShapesMap());
+ return *(this->getShapedOpOperands().begin() + i);
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the position in the results of the affine map computed
- by getLoopsToShapesMap() that represents the shape of an
- operand (input or output) at a dimension.
+ Return the range over input and output operands.
}],
- /*retTy=*/"Optional<unsigned>",
- /*methodName=*/"getOperandDimPositionInLoopsToShapeMap",
- /*args=*/(ins "unsigned":$operandIdx, "unsigned":$dim),
+ /*retTy=*/"SmallVector<ShapedType, 4>",
+ /*methodName=*/"getShapedOperandTypes",
+ /*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- unsigned pos = 0;
- for (auto type : llvm::enumerate(getShapedOperandTypes())) {
- if (type.index() == operandIdx) return pos + dim;
- pos += type.value().getRank();
- }
- return {};
+ return llvm::to_vector<4>(
+ llvm::map_range(
+ getShapedOperands(),
+ [](Value v) { return v.getType().cast<ShapedType>(); }));
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the position in the results of the affine map computed
- by getLoopsToShapesMap() that represents the shape of an
- input operand at a dimension.
+ Return the `i`-th shaped type
}],
- /*retTy=*/"Optional<unsigned>",
- /*methodName=*/"getInputValueDimPositionInLoopsToShapeMap",
- /*args=*/(ins "unsigned":$inputIdx, "unsigned":$dim),
+ /*retTy=*/"ShapedType",
+ /*methodName=*/"getShapedType",
+ /*args=*/(ins "unsigned":$i),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- if (inputIdx >= getNumInputs()) return {};
- return getOperandDimPositionInLoopsToShapeMap(inputIdx, dim);
+ return $_op.getShapedOperand(i).getType().template cast<ShapedType>();
}]
>,
InterfaceMethod<
/*desc=*/[{
- Return the range of position in the result of the affine map
- computed by getLoopsToShapesMap() which correspond to the
- AffineExprs used to access the outputs of the operation.
+ Return the input or output indexing map at index `i`.
}],
- /*retTy=*/"std::pair<unsigned, unsigned>",
- /*methodName=*/"getResultsPositionInLoopsToShapeMap",
- /*args=*/(ins),
+ /*retTy=*/"AffineMap",
+ /*methodName=*/"getIndexingMap",
+ /*args=*/(ins "unsigned":$i),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return
- {*getOperandDimPositionInLoopsToShapeMap(getNumInputs(), 0),
- (*getOperandDimPositionInLoopsToShapeMap
- (getNumInputs() + getNumOutputs() - 1,
- getOutputShapedType(getNumOutputs()-1).getRank() - 1)) + 1};
+ assert(i < $_op.getNumShapedOperands());
+ return getIndexingMaps()[i];
}]
>,
InterfaceMethod<
/*desc=*/[{
- Like `getShape`, but only returns statically-known information, without
- generating any new IR. For each shape dimension, returns >=0 if that
- dimension is statically known, or ShapeType::kDynamicSize otherwise.
+ Return the input indexing map at index `i`.
}],
- /*retTy=*/"SmallVector<int64_t, 8>",
- /*methodName=*/"getStaticShape",
- /*args=*/(ins),
+ /*retTy=*/"AffineMap",
+ /*methodName=*/"getInputIndexingMap",
+ /*args=*/(ins "unsigned":$i),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- SmallVector<int64_t, 8> res;
- for (Value v : getShapedOperands()) {
- auto shape = v.getType().cast<ShapedType>().getShape();
- res.append(shape.begin(), shape.end());
- }
- return res;
+ assert(i < $_op.getNumInputs());
+ return getIndexingMaps()[i];
}]
>,
InterfaceMethod<
/*desc=*/[{
- Returns the statically-known loop ranges. Composes
- `getShapesToLoopsMap()` with the result of `getStaticShape`.
- Returns None if `getShapesToLoopsMap()` fails. Returns
- ShapeType::kDynamicSize for non-statically-known loop ranges.
+ Return the input indexing maps.
}],
- /*retTy=*/"Optional<SmallVector<int64_t, 4>>",
- /*methodName=*/"getStaticLoopRanges",
+ /*retTy=*/"SmallVector<AffineMap>",
+ /*methodName=*/"getInputIndexingMaps",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- SmallVector<int64_t, 8> viewSizes = getStaticShape();
- AffineMap invertedMap = getShapesToLoopsMap();
- if (!invertedMap)
- return {};
- return invertedMap.compose(viewSizes);
+ auto maps = $_op.getIndexingMaps();
+ return SmallVector<AffineMap>{maps.begin(),
+ maps.begin() + $_op.getNumInputs()};
}]
>,
-
- //===------------------------------------------------------------------===//
- // Other static interface methods.
- //===------------------------------------------------------------------===//
InterfaceMethod<
/*desc=*/[{
- Clone the current operation with the given location and operands. This
- is used to abstract away the optional underlying region creation. This
- does not change the balance between input, output_buffer and
- init_tensors operands.
+ Return the output indexing map at index `i`.
}],
- /*retTy=*/"Operation *",
- /*methodName=*/"clone",
- (ins "OpBuilder &":$b, "Location":$loc, "TypeRange":$resultTypes,
- "ValueRange":$operands),
- [{
- BlockAndValueMapping bvm;
- OperationState state(
- loc, ConcreteOp::getOperationName(), operands, resultTypes,
- $_op->getAttrs());
- for (Region &r : $_op->getRegions())
- r.cloneInto(state.addRegion(), bvm);
- return b.createOperation(state);
+ /*retTy=*/"AffineMap",
+ /*methodName=*/"getOutputIndexingMap",
+ /*args=*/(ins "unsigned":$i),
+ /*methodBody=*/"",
+ /*defaultImplementation=*/[{
+ assert(i < $_op.getNumOutputs());
+ return getIndexingMaps()[i + $_op.getNumInputs()];
}]
>,
InterfaceMethod<
/*desc=*/[{
- Clone the current operation with the given location, operands
- and BlockAndValueMapping. This is used to abstract away the
- optional underlying region creation. This does not change the
- balance between input, output_buffer and init_tensors
- operands.
+ Return the output indexing maps.
}],
- /*retTy=*/"Operation *",
- /*methodName=*/"cloneWithMapper",
- (ins "OpBuilder &":$b, "Location":$loc, "TypeRange":$resultTypes,
- "ValueRange":$operands, "BlockAndValueMapping &":$bvm),
- [{
- OperationState state(
- loc, ConcreteOp::getOperationName(), operands, resultTypes,
- $_op->getAttrs());
- for (Region &r : $_op->getRegions())
- r.cloneInto(state.addRegion(), bvm);
- return b.createOperation(state);
+ /*retTy=*/"SmallVector<AffineMap>",
+ /*methodName=*/"getOutputIndexingMaps",
+ /*args=*/(ins),
+ /*methodBody=*/"",
+ /*defaultImplementation=*/[{
+ auto maps = $_op.getIndexingMaps();
+ return SmallVector<AffineMap>{maps.begin() + $_op.getNumInputs(),
+ maps.begin() + $_op.getNumShapedOperands()};
}]
- >,
- StaticInterfaceMethod<
- /*desc=*/[{
- Returns the region builder for constructing the body for linalg.generic.
- Returns a null function if this named op does not define a region
- builder.
- }],
- /*retTy=*/"std::function<void(ImplicitLocOpBuilder &, Block &, ValueRange)>",
- /*methodName=*/"getRegionBuilder",
- (ins),
- [{ return ConcreteOp::getRegionBuilder(); }]
>
];
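Taken together, the operand-centric methods above let passes branch on operand semantics and relate output operands to results without manual index arithmetic. A hedged sketch only; the helper `relateResults` is hypothetical and `using namespace mlir;` is assumed:

  static void relateResults(linalg::LinalgOp linalgOp) {
    if (!linalgOp.hasTensorSemantics())
      return;
    for (OpOperand *opOperand : linalgOp.getOutputOperands()) {
      // Map the output operand to the op result it is tied to.
      OpResult result = linalgOp.getTiedOpResult(opOperand);
      (void)result;
    }
  }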
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
index b5e3ca1fa00ef..c7372cdc97609 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
@@ -155,6 +155,14 @@ LogicalResult mlir::linalg::detail::verifyContractionInterface(Operation *op) {
// StructuredOpInterface implementation
//===----------------------------------------------------------------------===//
+OpOperandVector::operator SmallVector<Value>() {
+ SmallVector<Value> result;
+ result.reserve(this->size());
+ llvm::transform(*this, std::back_inserter(result),
+ [](OpOperand *opOperand) { return opOperand->get(); });
+ return result;
+}
+
/// Fully compose map with operands and canonicalize the result.
/// Return the `createOrFold`'ed AffineApply op.
static Value createFoldedComposedAffineApply(OpBuilder &b, Location loc,
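Finally, a hedged sketch of payloadUsesValueFromOperand, which the interface documents as a way to avoid loading from "write-only" memory. The helper name `collectReadOutputs` is hypothetical, `using namespace mlir;` is assumed, and the op is assumed to carry a payload region (see the assertion in the method):

  // Collect the output buffers whose current value the payload actually reads.
  static SmallVector<Value> collectReadOutputs(linalg::LinalgOp linalgOp) {
    SmallVector<Value> readOutputs;
    for (OpOperand *opOperand : linalgOp.getOutputBufferOperands())
      if (linalgOp.payloadUsesValueFromOperand(opOperand))
        readOutputs.push_back(opOperand->get());
    return readOutputs;
  }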