[Mlir-commits] [mlir] 3303980 - Revert "NFC: Fix some post-review nits for the Tosa dialect."
Stella Laurenzo
llvmlistbot at llvm.org
Sat Nov 7 09:36:31 PST 2020
Author: Stella Laurenzo
Date: 2020-11-07T09:35:49-08:00
New Revision: 330398052d049f90e6cfac80ab8b765b158a3e61
URL: https://github.com/llvm/llvm-project/commit/330398052d049f90e6cfac80ab8b765b158a3e61
DIFF: https://github.com/llvm/llvm-project/commit/330398052d049f90e6cfac80ab8b765b158a3e61.diff
LOG: Revert "NFC: Fix some post-review nits for the Tosa dialect."
* Introduced issue in debug builds.
This reverts commit b5fcd06105dec2a7b0e4114d6ad4524fc54498c5.
Added:
Modified:
mlir/include/mlir/Dialect/Tosa/IR/TosaInterfaces.td
mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td
mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
mlir/include/mlir/Dialect/Tosa/Transforms/Passes.h
mlir/include/mlir/Dialect/Tosa/Utils/QuantUtils.h
mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
mlir/lib/Dialect/Tosa/Transforms/TosaMakeBroadcastable.cpp
mlir/lib/Dialect/Tosa/Utils/QuantUtils.cpp
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaInterfaces.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaInterfaces.td
index 5dfa1e681161..df4aa70427ac 100644
--- a/mlir/include/mlir/Dialect/Tosa/IR/TosaInterfaces.td
+++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaInterfaces.td
@@ -17,7 +17,8 @@ include "mlir/IR/OpBase.td"
def TosaOpInterface : OpInterface<"TosaOp"> {
let description = [{
- Implemented by ops that correspond to the Tosa specification.
+ Implements interfaces implemented by ops that correspond to the Tosa
+ specification.
}];
}
diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td
index 5701e6ec97b3..74c60b7f6a12 100644
--- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td
+++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td
@@ -114,9 +114,9 @@ def Tosa_ConvOpQuantInfoBuilder : OpBuilderDAG<
(ins "Type":$outputType, "Value":$input, "Value":$weight, "Value":$bias,
"ArrayAttr":$pad, "ArrayAttr":$stride, "ArrayAttr":$dilation),
[{
- buildConvOpWithQuantInfo($_builder, $_state, outputType,
- input, weight, bias,
- pad, stride, dilation);
+ ::buildConvOpWithQuantInfo($_builder, $_state, outputType,
+ input, weight, bias,
+ pad, stride, dilation);
}]>;
// Handles tosa.transpose_conv2d which has an outpad and output shape attribute.
@@ -125,10 +125,10 @@ def Tosa_TransConvOpQuantInfoBuilder : OpBuilderDAG<
"ArrayAttr":$outpad, "ArrayAttr":$stride, "ArrayAttr":$dilation,
"ArrayAttr":$outputShape),
[{
- buildTransConvOpWithQuantInfo($_builder, $_state, outputType,
- input, weight, bias,
- outpad, stride, dilation,
- outputShape);
+ ::buildTransConvOpWithQuantInfo($_builder, $_state, outputType,
+ input, weight, bias,
+ outpad, stride, dilation,
+ outputShape);
}]>;
// The tosa.fully_connected op has its own builder as it does not have
@@ -136,8 +136,8 @@ def Tosa_TransConvOpQuantInfoBuilder : OpBuilderDAG<
def Tosa_FCOpQuantInfoBuilder : OpBuilderDAG<
(ins "Type":$outputType, "Value":$input, "Value":$weight, "Value":$bias),
[{
- buildFCOpWithQuantInfo($_builder, $_state, outputType,
- input, weight, bias);
+ ::buildFCOpWithQuantInfo($_builder, $_state, outputType,
+ input, weight, bias);
}]>;
// The tosa.matmul op is also intended to be generated where a fully_connected
@@ -147,8 +147,8 @@ def Tosa_FCOpQuantInfoBuilder : OpBuilderDAG<
def Tosa_MatMulOpQuantInfoBuilder : OpBuilderDAG<
(ins "Type":$outputType, "Value":$a, "Value":$b),
[{
- buildMatMulOpWithQuantInfo($_builder, $_state, outputType,
- a, b);
+ ::buildMatMulOpWithQuantInfo($_builder, $_state, outputType,
+ a, b);
}]>;
// Both the tosa.avg_pool2d and unary ops use the same
@@ -158,8 +158,8 @@ def Tosa_AvgPool2dOpQuantInfoBuilder : OpBuilderDAG<
(ins "Type":$outputType, "Value":$input, "ArrayAttr":$kernel,
"ArrayAttr":$stride, "ArrayAttr":$pad),
[{
- buildAvgPool2dOpWithQuantInfo($_builder, $_state, outputType,
- input, kernel, stride, pad);
+ ::buildAvgPool2dOpWithQuantInfo($_builder, $_state, outputType,
+ input, kernel, stride, pad);
}]>;
// This builder is called on single-parameter unary operators that have a scale
@@ -168,7 +168,7 @@ def Tosa_AvgPool2dOpQuantInfoBuilder : OpBuilderDAG<
def Tosa_UnaryOpQuantInfoBuilder : OpBuilderDAG<
(ins "Type":$outputType, "Value":$input),
[{
- buildUnaryOpWithQuantInfo($_builder, $_state, outputType, input);
+ ::buildUnaryOpWithQuantInfo($_builder, $_state, outputType, input);
}]>;
// This builder is called on the TOSA pad operator that needs to create its own
@@ -177,8 +177,8 @@ def Tosa_UnaryOpQuantInfoBuilder : OpBuilderDAG<
def Tosa_PadOpQuantInfoBuilder : OpBuilderDAG<
(ins "Type":$outputType, "Value":$input, "Value":$paddings),
[{
- buildPadOpWithQuantInfo($_builder, $_state, outputType,
- input, paddings);
+ ::buildPadOpWithQuantInfo($_builder, $_state, outputType,
+ input, paddings);
}]>;
//===----------------------------------------------------------------------===//
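The builder bodies above are spliced by TableGen into generated C++ that sits inside namespace mlir { namespace tosa { ... } }, and this revert restores the leading :: so the calls resolve to helper functions declared at global scope (see the QuantUtils.h and TosaOps.cpp hunks further down). A minimal sketch of that lookup difference, using hypothetical names rather than the generated code:

#include <iostream>

// Hypothetical stand-ins; the real helpers are the build*WithQuantInfo
// functions restored to global scope later in this commit.
void buildHelper() { std::cout << "global-scope helper\n"; }

namespace mlir {
namespace tosa {

void buildHelper() { std::cout << "mlir::tosa helper\n"; }

// Stands in for a TableGen-generated builder body emitted inside mlir::tosa.
void generatedBuilderBody() {
  buildHelper();   // unqualified: finds mlir::tosa::buildHelper first
  ::buildHelper(); // leading '::': forces the global-scope helper
}

} // namespace tosa
} // namespace mlir

int main() { mlir::tosa::generatedBuilderBody(); }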
diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
index 43e8bfacee27..e9dc5eb6180b 100644
--- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
+++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
@@ -104,7 +104,7 @@ def Tosa_Conv2DOp : Tosa_Op<"conv2d", [NoSideEffect]> {
let builders = [Tosa_ConvOpQuantInfoBuilder];
- let verifier = [{ return verifyConvOp(*this); }];
+ let verifier = [{ return ::verifyConvOp(*this); }];
}
//===----------------------------------------------------------------------===//
@@ -134,7 +134,7 @@ def Tosa_Conv3DOp : Tosa_Op<"conv3d", [NoSideEffect]> {
let builders = [Tosa_ConvOpQuantInfoBuilder];
- let verifier = [{ return verifyConvOp(*this); }];
+ let verifier = [{ return ::verifyConvOp(*this); }];
}
//===----------------------------------------------------------------------===//
@@ -165,7 +165,7 @@ def Tosa_DepthwiseConv2DOp : Tosa_Op<"depthwise_conv2d", [NoSideEffect]> {
let builders = [Tosa_ConvOpQuantInfoBuilder];
- let verifier = [{ return verifyConvOp(*this); }];
+ let verifier = [{ return ::verifyConvOp(*this); }];
}
//===----------------------------------------------------------------------===//
@@ -191,7 +191,7 @@ def Tosa_FullyConnectedOp : Tosa_Op<"fully_connected", [NoSideEffect]> {
let builders = [Tosa_FCOpQuantInfoBuilder];
- let verifier = [{ return verifyConvOp(*this); }];
+ let verifier = [{ return ::verifyConvOp(*this); }];
}
//===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Dialect/Tosa/Transforms/Passes.h b/mlir/include/mlir/Dialect/Tosa/Transforms/Passes.h
index b9032dfd351e..7742281568c1 100644
--- a/mlir/include/mlir/Dialect/Tosa/Transforms/Passes.h
+++ b/mlir/include/mlir/Dialect/Tosa/Transforms/Passes.h
@@ -16,6 +16,7 @@
#include "mlir/Pass/Pass.h"
namespace mlir {
+
namespace tosa {
std::unique_ptr<Pass> createTosaMakeBroadcastablePass();
diff --git a/mlir/include/mlir/Dialect/Tosa/Utils/QuantUtils.h b/mlir/include/mlir/Dialect/Tosa/Utils/QuantUtils.h
index 0ebec4edc315..d4e2016112eb 100644
--- a/mlir/include/mlir/Dialect/Tosa/Utils/QuantUtils.h
+++ b/mlir/include/mlir/Dialect/Tosa/Utils/QuantUtils.h
@@ -19,8 +19,8 @@
#include "mlir/Dialect/Quant/FakeQuantSupport.h"
#include "mlir/Dialect/Quant/UniformSupport.h"
-namespace mlir {
-namespace tosa {
+using namespace mlir;
+using namespace mlir::tosa;
//===----------------------------------------------------------------------===//
// Utililty functions to support quantization handling in Tosa.
@@ -65,7 +65,4 @@ TypeAttr buildQTypeAttrFromMinMax(OpBuilder builder, Type inputDType,
IntegerAttr quantBits, int filterQuantDim,
bool isSigned, BoolAttr narrowRange);
-} // namespace tosa
-} // namespace mlir
-
#endif // DIALECT_TOSA_UTILS_QUANT_UTILS_H
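With the namespace wrapping removed, the declarations in this header return to global scope and the dialect's own symbols become reachable through the two using-directives. A small, self-contained sketch of that header style with placeholder helpers (not the real MLIR declarations):

#include <iostream>

namespace mlir {
namespace tosa {
// Placeholder for a symbol that already lives in the dialect namespace.
void dialectHelper() { std::cout << "mlir::tosa::dialectHelper\n"; }
} // namespace tosa
} // namespace mlir

// Header style restored by this revert: pull both namespaces into scope and
// declare the quantization helpers at global scope.
using namespace mlir;
using namespace mlir::tosa;

void globalQuantHelper() { std::cout << "::globalQuantHelper\n"; }

int main() {
  dialectHelper();     // visible unqualified via the using-directives
  globalQuantHelper(); // global-scope helper, matching the '::' calls in
                       // TosaOpBase.td and the '::verifyConvOp' verifiers
  return 0;
}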
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index daf10b2013bf..9e27cbe73714 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -93,8 +93,7 @@ void TosaDialect::initialize() {
// TOSA Operator Verifiers.
//===----------------------------------------------------------------------===//
-template <typename T>
-static LogicalResult verifyConvOp(T op) {
+template <typename T> static LogicalResult verifyConvOp(T op) {
// All TOSA conv ops have an input() and weight().
auto inputType = op.input().getType().template dyn_cast<RankedTensorType>();
auto weightType = op.weight().getType().template dyn_cast<RankedTensorType>();
@@ -128,10 +127,10 @@ static LogicalResult verifyConvOp(T op) {
/// This builder is called on all convolution operators except TransposeConv,
/// which has specialized output shape semantics. The builder also defines the
/// bitwidth of the output given the bit width of the input & weight content.
-static void buildConvOpWithQuantInfo(OpBuilder &builder, OperationState &result,
- Type outputType, Value input, Value weight,
- Value bias, ArrayAttr pad,
- ArrayAttr stride, ArrayAttr dilation) {
+void buildConvOpWithQuantInfo(OpBuilder &builder, OperationState &result,
+ Type outputType, Value input, Value weight,
+ Value bias, ArrayAttr pad, ArrayAttr stride,
+ ArrayAttr dilation) {
result.addOperands({input, weight, bias});
result.addAttribute("pad", pad);
@@ -149,11 +148,11 @@ static void buildConvOpWithQuantInfo(OpBuilder &builder, OperationState &result,
}
/// Handles tosa.transpose_conv2d which has outpad and output shape attributes.
-static void
-buildTransConvOpWithQuantInfo(OpBuilder &builder, OperationState &result,
- Type outputType, Value input, Value weight,
- Value bias, ArrayAttr outpad, ArrayAttr stride,
- ArrayAttr dilation, ArrayAttr outputShape) {
+void buildTransConvOpWithQuantInfo(OpBuilder &builder, OperationState &result,
+ Type outputType, Value input, Value weight,
+ Value bias, ArrayAttr outpad,
+ ArrayAttr stride, ArrayAttr dilation,
+ ArrayAttr outputShape) {
result.addOperands({input, weight, bias});
result.addAttribute("out_pad", outpad);
result.addAttribute("stride", stride);
@@ -172,9 +171,9 @@ buildTransConvOpWithQuantInfo(OpBuilder &builder, OperationState &result,
/// The tosa.fully_connected op has its own builder as it does not have
/// strides/dilation/padding.
-static void buildFCOpWithQuantInfo(OpBuilder &builder, OperationState &result,
- Type outputType, Value input, Value weight,
- Value bias) {
+void buildFCOpWithQuantInfo(OpBuilder &builder, OperationState &result,
+ Type outputType, Value input, Value weight,
+ Value bias) {
result.addOperands({input, weight, bias});
auto quantAttr = ::buildConvOpQuantizationAttr(builder, input, weight);
@@ -191,9 +190,8 @@ static void buildFCOpWithQuantInfo(OpBuilder &builder, OperationState &result,
/// op must be constructed where the weight is not a constant. In this case,
/// the fully_connected op must be expressed using matmul.
/// TODO: Add link to the leglization document explaining this.
-static void buildMatMulOpWithQuantInfo(OpBuilder &builder,
- OperationState &result, Type outputType,
- Value a, Value b) {
+void buildMatMulOpWithQuantInfo(OpBuilder &builder, OperationState &result,
+ Type outputType, Value a, Value b) {
result.addOperands({a, b});
auto quantAttr = ::buildMatMulOpQuantizationAttr(builder, a, b);
@@ -229,11 +227,10 @@ static void buildMatMulOpWithQuantInfo(OpBuilder &builder,
/// Both the tosa.avg_pool2d and unary ops use the same UnaruOpQuantizationAttr
/// but avg_pool operator has its own builder as it has additional parameters
/// not part of the unary ops.
-static void buildAvgPool2dOpWithQuantInfo(OpBuilder &builder,
- OperationState &result,
- Type outputType, Value input,
- ArrayAttr kernel, ArrayAttr stride,
- ArrayAttr pad) {
+void buildAvgPool2dOpWithQuantInfo(OpBuilder &builder, OperationState &result,
+ Type outputType, Value input,
+ ArrayAttr kernel, ArrayAttr stride,
+ ArrayAttr pad) {
result.addOperands(input);
result.addAttribute("kernel", kernel);
result.addAttribute("stride", stride);
@@ -247,9 +244,8 @@ static void buildAvgPool2dOpWithQuantInfo(OpBuilder &builder,
/// This builder is called on single-parameter unary operators that have scale
/// relationship between their input and output, expressed by the
/// UnaryOpQuantizationAttr.
-static void buildUnaryOpWithQuantInfo(OpBuilder &builder,
- OperationState &result, Type outputType,
- Value input) {
+void buildUnaryOpWithQuantInfo(OpBuilder &builder, OperationState &result,
+ Type outputType, Value input) {
result.addOperands(input);
auto quantAttr = buildUnaryOpQuantizationAttr(builder, input, outputType);
if (quantAttr)
@@ -260,9 +256,8 @@ static void buildUnaryOpWithQuantInfo(OpBuilder &builder,
/// This builder is called on TOSA pad operator that needs to create its own
/// OptionalAttr quantization_attr parameter to scale the padding values
/// correctly.
-static void buildPadOpWithQuantInfo(OpBuilder &builder, OperationState &result,
- Type outputType, Value input,
- Value paddings) {
+void buildPadOpWithQuantInfo(OpBuilder &builder, OperationState &result,
+ Type outputType, Value input, Value paddings) {
result.addOperands({input, paddings});
auto quantAttr = buildPadOpQuantizationAttr(builder, input);
if (quantAttr)
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaMakeBroadcastable.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaMakeBroadcastable.cpp
index ff1be9fe54c9..95076eb155a3 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaMakeBroadcastable.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaMakeBroadcastable.cpp
@@ -128,6 +128,8 @@ static int reshapeLowerToHigher(PatternRewriter &rewriter, Location loc,
}
ArrayRef<int64_t> outputRankShape = outputType.getShape();
+ ArrayRef<int64_t> higherRankShape =
+ higherTensorValue.getType().cast<RankedTensorType>().getShape();
ArrayRef<int64_t> lowerRankShape =
lowerTensorValue.getType().cast<RankedTensorType>().getShape();
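The log above only says the reverted commit "introduced issue in debug builds"; a plausible reading of this hunk is that higherRankShape is consumed solely by debug-only code such as an assert later in reshapeLowerToHigher, so dropping the local compiles in release builds (where NDEBUG strips the assert) but not in debug builds. That is an inference, not something stated in the commit message; the sketch below only illustrates the failure mode with placeholder names:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Placeholder function; not the real reshapeLowerToHigher implementation.
int rankDifference(const std::vector<int64_t> &higherRankShape,
                   const std::vector<int64_t> &lowerRankShape) {
  // If this local were deleted, the assert below would be its only remaining
  // reference: a release build (-DNDEBUG) discards the assert expression and
  // still compiles, while a debug build fails with an undeclared identifier.
  const std::size_t higherRank = higherRankShape.size();
  const std::size_t lowerRank = lowerRankShape.size();
  assert(higherRank >= lowerRank && "higher-rank operand expected first");
  return static_cast<int>(higherRank - lowerRank);
}

int main() { return rankDifference({1, 2, 3}, {3}) == 2 ? 0 : 1; }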
diff --git a/mlir/lib/Dialect/Tosa/Utils/QuantUtils.cpp b/mlir/lib/Dialect/Tosa/Utils/QuantUtils.cpp
index af3d2be4ec43..16ddd9f7383a 100644
--- a/mlir/lib/Dialect/Tosa/Utils/QuantUtils.cpp
+++ b/mlir/lib/Dialect/Tosa/Utils/QuantUtils.cpp
@@ -19,9 +19,8 @@ using namespace mlir::tosa;
/// From a scale value, generates multiplier and shift values where
/// mantissa is in [-1.0,-0.5] or [0.5, 1.0] such that
/// multiplier = mantissa*2^shift for 16-bit scaling.
-static void computeMultiplierAndShiftTosaScale16(double scale,
- int32_t &multiplier,
- int32_t &shift) {
+void computeMultiplierAndShiftTosaScale16(double scale, int32_t &multiplier,
+ int32_t &shift) {
const double mantissa = std::frexp(scale, &shift);
auto shiftedM = std::round(mantissa * (int64_t(1) << 15));
@@ -48,9 +47,8 @@ static void computeMultiplierAndShiftTosaScale16(double scale,
/// From a scale value, generates multiplier and shift values where
/// mantissa is in [-1.0,-0.5] or [0.5, 1.0] such that
/// multiplier = mantissa*2^shift for 32-bit scaling.
-static void computeMultiplierAndShiftTosaScale32(double scale,
- int32_t &multiplier,
- int32_t &shift) {
+void computeMultiplierAndShiftTosaScale32(double scale, int32_t &multiplier,
+ int32_t &shift) {
const double mantissa = std::frexp(scale, &shift);
auto shiftedM = std::round(mantissa * (int64_t(1) << 31));
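The doc comments in these two hunks describe the same decomposition: std::frexp splits the scale into a mantissa whose magnitude lies in [0.5, 1.0) and an exponent, and the mantissa is then rescaled to a 16- or 32-bit fixed-point multiplier. A standalone sketch of the 16-bit case, mirroring only the context lines visible here; the helper name is illustrative, and the rounding and overflow handling in the real implementation may differ:

#include <cmath>
#include <cstdint>
#include <iostream>

// Sketch of the 16-bit decomposition: scale == mantissa * 2^exponent, and the
// mantissa is rescaled by 2^15 to produce an integer multiplier.
void computeMultiplierAndShift16(double scale, int32_t &multiplier,
                                 int32_t &shift) {
  int exponent = 0;
  const double mantissa = std::frexp(scale, &exponent);
  shift = exponent;
  multiplier = static_cast<int32_t>(std::round(mantissa * (int64_t(1) << 15)));
}

int main() {
  int32_t multiplier = 0, shift = 0;
  computeMultiplierAndShift16(0.75, multiplier, shift);
  // 0.75 == 0.75 * 2^0, so multiplier == 24576 (0.75 * 32768) and shift == 0.
  std::cout << "multiplier=" << multiplier << " shift=" << shift << "\n";
  return 0;
}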
@@ -74,8 +72,8 @@ static void computeMultiplierAndShiftTosaScale32(double scale,
}
/// Generates a quantized multiplier/shift from double.
-void mlir::tosa::computeMultiplierAndShift(double scale, int32_t &multiplier,
- int32_t &shift, int32_t scaleWidth) {
+void computeMultiplierAndShift(double scale, int32_t &multiplier,
+ int32_t &shift, int32_t scaleWidth) {
switch (scaleWidth) {
case 16:
@@ -98,9 +96,8 @@ void mlir::tosa::computeMultiplierAndShift(double scale, int32_t &multiplier,
/// ConvOpQuantInfoBuilder/TransConvOpQuantInfoBuilder:
/// input_zp: input zeropoint
/// weight_zp: weight zeropoint.
-ConvOpQuantizationAttr
-mlir::tosa::buildConvOpQuantizationAttr(OpBuilder &builder, Value input,
- Value weight) {
+ConvOpQuantizationAttr buildConvOpQuantizationAttr(OpBuilder &builder,
+ Value input, Value weight) {
auto inputType = input.getType().dyn_cast<RankedTensorType>();
auto weightType = weight.getType().dyn_cast<RankedTensorType>();
@@ -147,9 +144,8 @@ mlir::tosa::buildConvOpQuantizationAttr(OpBuilder &builder, Value input,
/// MatMulOpQuantInfoBuilder:
/// aZp: input a zeropoint
/// bZp: input b zeropoint.
-MatMulOpQuantizationAttr
-mlir::tosa::buildMatMulOpQuantizationAttr(OpBuilder &builder, Value a,
- Value b) {
+MatMulOpQuantizationAttr buildMatMulOpQuantizationAttr(OpBuilder &builder,
+ Value a, Value b) {
auto aType = a.getType().dyn_cast<RankedTensorType>();
auto bType = b.getType().dyn_cast<RankedTensorType>();
@@ -183,9 +179,9 @@ mlir::tosa::buildMatMulOpQuantizationAttr(OpBuilder &builder, Value a,
/// UnaryOpQuantInfoBuilder:
/// inputZp: input zeropoint
/// outputZp: output zeropoint.
-UnaryOpQuantizationAttr
-mlir::tosa::buildUnaryOpQuantizationAttr(OpBuilder &builder, Value input,
- Type outputRawType) {
+UnaryOpQuantizationAttr buildUnaryOpQuantizationAttr(OpBuilder &builder,
+ Value input,
+ Type outputRawType) {
auto inputType = input.getType().dyn_cast<RankedTensorType>();
auto outputType = outputRawType.dyn_cast<RankedTensorType>();
@@ -217,8 +213,8 @@ mlir::tosa::buildUnaryOpQuantizationAttr(OpBuilder &builder, Value input,
/// Builds PadOpQuantizationAttr, called from PadOpQuantInfoBuilder:
/// inputZp: input zeropoint.
-PadOpQuantizationAttr mlir::tosa::buildPadOpQuantizationAttr(OpBuilder &builder,
- Value input) {
+PadOpQuantizationAttr buildPadOpQuantizationAttr(OpBuilder &builder,
+ Value input) {
auto inputType = input.getType().dyn_cast<RankedTensorType>();
@@ -242,8 +238,8 @@ PadOpQuantizationAttr mlir::tosa::buildPadOpQuantizationAttr(OpBuilder &builder,
/// Builds output type for a quantized ConvOp with the right bitwidth.
/// This is called by the builder when dealing with quantized content.
-Type mlir::tosa::buildConvOpResultTypeInfo(OpBuilder &builder, Type outputType,
- Value input, Value weight) {
+Type buildConvOpResultTypeInfo(OpBuilder &builder, Type outputType, Value input,
+ Value weight) {
auto inputType = input.getType().dyn_cast<RankedTensorType>();
auto weightType = weight.getType().dyn_cast<RankedTensorType>();
@@ -276,10 +272,10 @@ Type mlir::tosa::buildConvOpResultTypeInfo(OpBuilder &builder, Type outputType,
}
/// Builds Tosa quantization attributes from min/max values.
-Type mlir::tosa::buildQTypeFromMinMax(OpBuilder builder, Type inputDType,
- Attribute minAttr, Attribute maxAttr,
- IntegerAttr quantBits, int filterQuantDim,
- bool isSigned, BoolAttr narrowRange) {
+Type buildQTypeFromMinMax(OpBuilder builder, Type inputDType, Attribute minAttr,
+ Attribute maxAttr, IntegerAttr quantBits,
+ int filterQuantDim, bool isSigned,
+ BoolAttr narrowRange) {
quant::QuantizedType retType;
@@ -343,11 +339,10 @@ Type mlir::tosa::buildQTypeFromMinMax(OpBuilder builder, Type inputDType,
}
/// Builds Tosa quantization attributes from min/max values.
-TypeAttr
-mlir::tosa::buildQTypeAttrFromMinMax(OpBuilder builder, Type inputDtype,
- Attribute minAttr, Attribute maxAttr,
- IntegerAttr quantBits, int filterQuantDim,
- bool isSigned, BoolAttr narrowRange) {
+TypeAttr buildQTypeAttrFromMinMax(OpBuilder builder, Type inputDtype,
+ Attribute minAttr, Attribute maxAttr,
+ IntegerAttr quantBits, int filterQuantDim,
+ bool isSigned, BoolAttr narrowRange) {
return TypeAttr::get(buildQTypeFromMinMax(builder, inputDtype, minAttr,
maxAttr, quantBits, filterQuantDim,