[Mlir-commits] [mlir] 2d27376 - [mlir][linalg][NFC] Drop emitAccessorPrefix from Linalg dialect
Matthias Springer
llvmlistbot at llvm.org
Sun Oct 2 19:35:58 PDT 2022
Author: Matthias Springer
Date: 2022-10-03T11:35:41+09:00
New Revision: 2d2737667e26b72546a4ba0dc7023f8f2a41fb01
URL: https://github.com/llvm/llvm-project/commit/2d2737667e26b72546a4ba0dc7023f8f2a41fb01
DIFF: https://github.com/llvm/llvm-project/commit/2d2737667e26b72546a4ba0dc7023f8f2a41fb01.diff
LOG: [mlir][linalg][NFC] Drop emitAccessorPrefix from Linalg dialect
Differential Revision: https://reviews.llvm.org/D135048
Added:
Modified:
mlir/include/mlir/Dialect/Linalg/IR/LinalgBase.td
mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp
mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
mlir/test/mlir-linalg-ods-gen/test-linalg-ods-yaml-gen.yaml
mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgBase.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgBase.td
index e60b5972c0c1d..36cf41da8826e 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgBase.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgBase.td
@@ -58,8 +58,6 @@ def Linalg_Dialect : Dialect {
private:
llvm::StringMap<RegionBuilderFunType> namedStructuredOpRegionBuilders;
}];
-
- let emitAccessorPrefix = kEmitAccessorPrefix_Both;
}
// Define the function attribute enums matching the OpDSL functions.
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
index c989da01238a4..d8c1e0ba1b344 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
@@ -771,8 +771,9 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
// TODO: reevaluate the need for a cast when a better mechanism exists.
//========================================================================//
- ValueRange inputs() {
- return cast<DestinationStyleOpInterface>(*this->getOperation()).inputs();
+ ValueRange getInputs() {
+ return cast<DestinationStyleOpInterface>(*this->getOperation())
+ .getInputs();
}
int64_t getNumInputs() {
@@ -780,7 +781,7 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
.getNumInputs();
}
- ValueRange outputs() {
+ ValueRange getOutputs() {
return cast<DestinationStyleOpInterface>(*this->getOperation())
.getOutputs();
}
@@ -922,7 +923,7 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
// The 'DestinationStyleOpInterface' provides access to the methods relevant
// for destination-style ops. A destination-style operation has 'n' input
// arguments and 'm' output arguments. Each op that wants to implement
-// DestinationStyleOpInterface needs to define inputs() and getOutputs()
+// DestinationStyleOpInterface needs to define getInputs() and getOutputs()
// methods.
def DestinationStyleOpInterface : OpInterface<"DestinationStyleOpInterface"> {
let cppNamespace = "::mlir::linalg";
@@ -930,18 +931,18 @@ def DestinationStyleOpInterface : OpInterface<"DestinationStyleOpInterface"> {
//===------------------------------------------------------------------===//
// Num input/output arguments handling.
//===------------------------------------------------------------------===//
- // `inputs` must be defined by each op that wants to implement the
+ // `getInputs` must be defined by each op that wants to implement the
// DestinationStyleOpInterface.
InterfaceMethod<
/*desc=*/[{
Return the input shape operands.
}],
/*retTy=*/"ValueRange",
- /*methodName=*/"inputs",
+ /*methodName=*/"getInputs",
/*args=*/(ins)
>,
- // These special methods rely on `inputs` and `outputs` being defined by
- // each op that wants to implement the DestinationStyleOpInterface.
+ // These special methods rely on `getInputs` and `getOutputs` being defined
+ // by each op that wants to implement the DestinationStyleOpInterface.
InterfaceMethod<
/*desc=*/[{
Return the number of inputs.
diff --git a/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp b/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp
index 57153499a2da2..80bdc06ef1a60 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp
@@ -360,7 +360,7 @@ FailureOr<SplitReductionResult> mlir::linalg::splitReductionByScaling(
SmallVector<Operation *> initOrAllocTensorOps;
SmallVector<linalg::FillOp> fillOps;
fillOps.reserve(op.getNumOutputs());
- for (auto it : llvm::zip(op.outputs(), neutralElements)) {
+ for (auto it : llvm::zip(op.getOutputs(), neutralElements)) {
Value rankedTensor = std::get<0>(it);
auto t = rankedTensor.getType().cast<RankedTensorType>();
RankedTensorType newT = RankedTensorType::Builder(t).insertDim(
@@ -403,7 +403,7 @@ FailureOr<SplitReductionResult> mlir::linalg::splitReductionByScaling(
// Step 3. Handle operands.
// Compute the new input tensors.
- auto newInputs = llvm::to_vector<4>(op.inputs());
+ auto newInputs = llvm::to_vector<4>(op.getInputs());
// Add a single shape-only tensor to carry the dimensions without resorting to
// more complex inversions.
newInputs.push_back(b.create<linalg::InitTensorOp>(
@@ -433,7 +433,7 @@ FailureOr<SplitReductionResult> mlir::linalg::splitReductionByScaling(
// multi-reduction support is available.
SmallVector<LinalgOp> results;
for (auto it :
- llvm::zip(genericOp->getResults(), op.outputs(), combinerOps)) {
+ llvm::zip(genericOp->getResults(), op.getOutputs(), combinerOps)) {
Value reindexedOutput = std::get<0>(it);
Value originalOutput = std::get<1>(it);
auto originalOutputType = originalOutput.getType().cast<RankedTensorType>();
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index 0a117df6f9ab1..a8397f384f5bd 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -1334,9 +1334,9 @@ struct Conv1DGenerator : public StructuredGenerator<LinalgOp> {
// Determine whether `linalgOp` can be generated with this generator
if (linalgOp.getNumInputs() != 2 || linalgOp.getNumOutputs() != 1)
return;
- lhsShaped = linalgOp.inputs()[0];
- rhsShaped = linalgOp.inputs()[1];
- resShaped = linalgOp.outputs()[0];
+ lhsShaped = linalgOp.getInputs()[0];
+ rhsShaped = linalgOp.getInputs()[1];
+ resShaped = linalgOp.getOutputs()[0];
lhsShapedType = lhsShaped.getType().dyn_cast<ShapedType>();
rhsShapedType = rhsShaped.getType().dyn_cast<ShapedType>();
resShapedType = resShaped.getType().dyn_cast<ShapedType>();
diff --git a/mlir/test/mlir-linalg-ods-gen/test-linalg-ods-yaml-gen.yaml b/mlir/test/mlir-linalg-ods-gen/test-linalg-ods-yaml-gen.yaml
index 81f40df090066..205cde22c9970 100644
--- a/mlir/test/mlir-linalg-ods-gen/test-linalg-ods-yaml-gen.yaml
+++ b/mlir/test/mlir-linalg-ods-gen/test-linalg-ods-yaml-gen.yaml
@@ -167,9 +167,9 @@ structured_op: !LinalgStructuredOpConfig
# ODS-NEXT: LogicalResult verifyIndexingMapRequiredAttributes();
# IMPL: getSymbolBindings(Test2Op self)
-# IMPL: cst2 = self.strides().getValues<int64_t>()[0];
+# IMPL: cst2 = self.getStrides().getValues<int64_t>()[0];
# IMPL-NEXT: getAffineConstantExpr(cst2, context)
-# IMPL: cst3 = self.strides().getValues<int64_t>()[1];
+# IMPL: cst3 = self.getStrides().getValues<int64_t>()[1];
# IMPL-NEXT: getAffineConstantExpr(cst3, context)
# IMPL: Test2Op::getIndexingMaps()
diff --git a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
index 56bd8968f660f..1bea98fe71bfb 100644
--- a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
+++ b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
@@ -670,7 +670,7 @@ ParseResult {0}::parse(OpAsmParser &parser, OperationState &result) {{
{0}::getNumRegionArgs(), {0}::getRegionBuilder());
}
void {0}::print(OpAsmPrinter &p) {{
- ::printNamedStructuredOp(p, getOperation(), inputs(), outputs());
+ ::printNamedStructuredOp(p, getOperation(), getInputs(), getOutputs());
}
)FMT";
@@ -857,7 +857,7 @@ static SmallVector<AffineExpr> getSymbolBindings({0} self) {
// {1}: Symbol position
// {2}: Attribute index
static const char structuredOpAccessAttrFormat[] = R"FMT(
-int64_t cst{1} = self.{0}().getValues<int64_t>()[{2}];
+int64_t cst{1} = self.get{0}().getValues<int64_t>()[{2}];
exprs.push_back(getAffineConstantExpr(cst{1}, context));
)FMT";
// Update all symbol bindings mapped to an attribute.
@@ -868,8 +868,10 @@ exprs.push_back(getAffineConstantExpr(cst{1}, context));
for (auto &en :
llvm::enumerate(arg.indexAttrMap->affineMap().getResults())) {
if (auto symbol = en.value().dyn_cast<AffineSymbolExpr>()) {
+ std::string argName = arg.name;
+ argName[0] = toupper(argName[0]);
symbolBindings[symbol.getPosition()] =
- llvm::formatv(structuredOpAccessAttrFormat, arg.name,
+ llvm::formatv(structuredOpAccessAttrFormat, argName,
symbol.getPosition(), en.index());
}
}
More information about the Mlir-commits
mailing list